From 7181f2897be05de1c42fbeb651a952e84f2be82c Mon Sep 17 00:00:00 2001
From: Commenter123321 <36051603+Commenter123321@users.noreply.github.com>
Date: Fri, 29 Sep 2023 16:21:18 +0200
Subject: Fix Aivvm and add new models in models.py

---
 g4f/Provider/Aivvm.py           | 31 +++++++++++++++----------------
 g4f/models.py                   | 16 +++++++++++++---
 testing/test_chat_completion.py | 11 ++++++-----
 3 files changed, 34 insertions(+), 24 deletions(-)

diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
index 7a3d57bd..1ba6d6f1 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/Aivvm.py
@@ -4,6 +4,7 @@ import requests
 from .base_provider import BaseProvider
 from ..typing import CreateResult
 
+# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
 models = {
     'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'},
     'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'},
@@ -32,29 +33,27 @@ class Aivvm(BaseProvider):
         if not model:
             model = "gpt-3.5-turbo"
         elif model not in models:
-            raise ValueError(f"Model are not supported: {model}")
-
+            raise ValueError(f"Model is not supported: {model}")
+
         headers = {
-            "authority"          : "chat.aivvm.com",
-            "accept"             : "*/*",
-            "accept-language"    : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
-            "content-type"       : "application/json",
-            "origin"             : "https://chat.aivvm.com",
-            "referer"            : "https://chat.aivvm.com/",
-            "sec-ch-ua"          : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
-            "sec-ch-ua-mobile"   : "?0",
-            "sec-ch-ua-platform" : '"macOS"',
-            "sec-fetch-dest"     : "empty",
-            "sec-fetch-mode"     : "cors",
-            "sec-fetch-site"     : "same-origin",
-            "user-agent"         : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
+            "accept"            : "*/*",
+            "accept-language"   : "hu-HU,hu;q=0.9,en-US;q=0.8,en;q=0.7",
+            "content-type"      : "application/json",
+            "sec-ch-ua"         : "\"Kuki\";v=\"116\", \"Not)A;Brand\";v=\"24\", \"Pici Pocoro\";v=\"102\"",
+            "sec-ch-ua-mobile"  : "?0",
+            "sec-ch-ua-platform": "\"Bandóz\"",
+            "sec-fetch-dest"    : "empty",
+            "sec-fetch-mode"    : "cors",
+            "sec-fetch-site"    : "same-origin",
+            "Referer"           : "https://chat.aivvm.com/",
+            "Referrer-Policy"   : "same-origin",
         }
 
         json_data = {
             "model"       : models[model],
             "messages"    : messages,
             "key"         : "",
-            "prompt"      : "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+            "prompt"      : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
             "temperature" : kwargs.get("temperature", 0.7)
         }
 
diff --git a/g4f/models.py b/g4f/models.py
index 7c2d6822..74d938b4 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -59,7 +59,7 @@ gpt_4 = Model(
     name          = 'gpt-4',
     base_provider = 'openai',
     best_provider = RetryProvider([
-        Myshell, AItianhuSpace,
+        Myshell, AItianhuSpace, Aivvm
     ])
 )
 
@@ -149,7 +149,7 @@ code_davinci_002 = Model(
 gpt_35_turbo_16k = Model(
     name          = 'gpt-3.5-turbo-16k',
     base_provider = 'openai',
-    best_provider = Vercel)
+    best_provider = Aivvm)
 
 gpt_35_turbo_16k_0613 = Model(
     name          = 'gpt-3.5-turbo-16k-0613',
@@ -166,7 +166,17 @@ gpt_35_turbo_0613 = Model(
 gpt_4_0613 = Model(
     name          = 'gpt-4-0613',
     base_provider = 'openai',
-    best_provider = Vercel)
+    best_provider = Aivvm)
+
+gpt_4_32k = Model(
+    name          = 'gpt-4-32k',
+    base_provider = 'openai',
+    best_provider = Aivvm)
+
+gpt_4_32k_0613 = Model(
+    name          = 'gpt-4-32k-0613',
+    base_provider = 'openai',
+    best_provider = Aivvm)
 
 text_ada_001 = Model(
     name          = 'text-ada-001',
diff --git a/testing/test_chat_completion.py b/testing/test_chat_completion.py
index d901e697..77774a2f 100644
--- a/testing/test_chat_completion.py
+++ b/testing/test_chat_completion.py
@@ -7,8 +7,8 @@ import g4f, asyncio
 
 print("create:", end=" ", flush=True)
 for response in g4f.ChatCompletion.create(
-    model=g4f.models.gpt_35_turbo,
-    provider=g4f.Provider.GptGo,
+    model=g4f.models.gpt_4,
+    provider=g4f.Provider.Vercel,
     messages=[{"role": "user", "content": "hello!"}],
 ):
     print(response, end="", flush=True)
@@ -16,10 +16,11 @@ print()
 
 async def run_async():
     response = await g4f.ChatCompletion.create_async(
-        model=g4f.models.gpt_35_turbo,
-        provider=g4f.Provider.GptGo,
+        model=g4f.models.gpt_4_32k_0613,
+        provider=g4f.Provider.Aivvm,
         messages=[{"role": "user", "content": "hello!"}],
+        temperature=0.0
     )
     print("create_async:", response)
 
-asyncio.run(run_async())
+# asyncio.run(run_async())
-- 
cgit v1.2.3
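The comment added at the top of Aivvm.py points at a live model-listing endpoint. A minimal sketch of a refresh script for the hard-coded models dict follows; the empty POST body and the response shape (a list of id/name objects) are assumptions inferred from the dict's layout, not confirmed by the patch:

import requests

# Hypothetical helper based on the diff comment above. Assumes the endpoint
# accepts an empty JSON body and returns a list of {"id": ..., "name": ...}
# entries mirroring the hard-coded models dict in Aivvm.py.
def fetch_aivvm_models() -> dict:
    response = requests.post("https://chat.aivvm.com/api/models", json={}, timeout=30)
    response.raise_for_status()
    return {entry["id"]: {"id": entry["id"], "name": entry["name"]}
            for entry in response.json()}

if __name__ == "__main__":
    print(fetch_aivvm_models())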
base_provider="", + best_provider=RetryProvider([ + Bing, # Not fully GPT 3 or 4 + PerplexityAi, # Adds references to sources + Wewordle, # Responds with markdown + Yqcloud, # Answers short questions in chinese + ChatBase, # Don't want to answer creatively + ChatgptDuo, # Include search results DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn, ]) ) # GPT-3.5 / GPT-4 gpt_35_turbo = Model( - name = 'gpt-3.5-turbo', - base_provider = 'openai', - best_provider = RetryProvider([ + name='gpt-3.5-turbo', + base_provider='openai', + best_provider=RetryProvider([ DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ]) ) gpt_4 = Model( - name = 'gpt-4', - base_provider = 'openai', - best_provider = RetryProvider([ + name='gpt-4', + base_provider='openai', + best_provider=RetryProvider([ Myshell, AItianhuSpace, Aivvm ]) ) # Bard palm = Model( - name = 'palm', - base_provider = 'google', - best_provider = Bard) + name='palm', + base_provider='google', + best_provider=Bard) # H2o falcon_7b = Model( - name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3', - base_provider = 'huggingface', - best_provider = H2o) + name='h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3', + base_provider='huggingface', + best_provider=H2o) falcon_40b = Model( - name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1', - base_provider = 'huggingface', - best_provider = H2o) + name='h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1', + base_provider='huggingface', + best_provider=H2o) llama_13b = Model( - name = 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b', - base_provider = 'huggingface', - best_provider = H2o) + name='h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b', + base_provider='huggingface', + best_provider=H2o) # Vercel claude_instant_v1 = Model( - name = 'claude-instant-v1', - base_provider = 'anthropic', - best_provider = Vercel) + name='claude-instant-v1', + base_provider='anthropic', + best_provider=Vercel) claude_v1 = Model( - name = 'claude-v1', - base_provider = 'anthropic', - best_provider = Vercel) + name='claude-v1', + base_provider='anthropic', + best_provider=Vercel) claude_v2 = Model( - name = 'claude-v2', - base_provider = 'anthropic', - best_provider = Vercel) + name='claude-v2', + base_provider='anthropic', + best_provider=Vercel) command_light_nightly = Model( - name = 'command-light-nightly', - base_provider = 'cohere', - best_provider = Vercel) + name='command-light-nightly', + base_provider='cohere', + best_provider=Vercel) command_nightly = Model( - name = 'command-nightly', - base_provider = 'cohere', - best_provider = Vercel) + name='command-nightly', + base_provider='cohere', + best_provider=Vercel) gpt_neox_20b = Model( - name = 'EleutherAI/gpt-neox-20b', - base_provider = 'huggingface', - best_provider = Vercel) + name='EleutherAI/gpt-neox-20b', + base_provider='huggingface', + best_provider=Vercel) oasst_sft_1_pythia_12b = Model( - name = 'OpenAssistant/oasst-sft-1-pythia-12b', - base_provider = 'huggingface', - best_provider = Vercel) + name='OpenAssistant/oasst-sft-1-pythia-12b', + base_provider='huggingface', + best_provider=Vercel) oasst_sft_4_pythia_12b_epoch_35 = Model( - name = 'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', - base_provider = 'huggingface', - best_provider = Vercel) + name='OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', + base_provider='huggingface', + best_provider=Vercel) santacoder = Model( - name = 'bigcode/santacoder', - base_provider = 'huggingface', - best_provider = Vercel) + 
name='bigcode/santacoder', + base_provider='huggingface', + best_provider=Vercel) bloom = Model( - name = 'bigscience/bloom', - base_provider = 'huggingface', - best_provider = Vercel) + name='bigscience/bloom', + base_provider='huggingface', + best_provider=Vercel) flan_t5_xxl = Model( - name = 'google/flan-t5-xxl', - base_provider = 'huggingface', - best_provider = Vercel) + name='google/flan-t5-xxl', + base_provider='huggingface', + best_provider=Vercel) code_davinci_002 = Model( - name = 'code-davinci-002', - base_provider = 'openai', - best_provider = Vercel) + name='code-davinci-002', + base_provider='openai', + best_provider=Vercel) gpt_35_turbo_16k = Model( - name = 'gpt-3.5-turbo-16k', - base_provider = 'openai', - best_provider = Aivvm) + name='gpt-3.5-turbo-16k', + base_provider='openai', + best_provider=Aivvm) gpt_35_turbo_16k_0613 = Model( - name = 'gpt-3.5-turbo-16k-0613', - base_provider = 'openai') + name='gpt-3.5-turbo-16k-0613', + base_provider='openai') gpt_35_turbo_0613 = Model( - name = 'gpt-3.5-turbo-0613', - base_provider = 'openai', - best_provider = RetryProvider([ + name='gpt-3.5-turbo-0613', + base_provider='openai', + best_provider=RetryProvider([ Aivvm, ChatgptLogin ]) ) gpt_4_0613 = Model( - name = 'gpt-4-0613', - base_provider = 'openai', - best_provider = Aivvm) + name='gpt-4-0613', + base_provider='openai', + best_provider=Aivvm) gpt_4_32k = Model( - name = 'gpt-4-32k', - base_provider = 'openai', - best_provider = Aivvm) + name='gpt-4-32k', + base_provider='openai', + best_provider=Aivvm) gpt_4_32k_0613 = Model( - name = 'gpt-4-32k-0613', - base_provider = 'openai', - best_provider = Aivvm) + name='gpt-4-32k-0613', + base_provider='openai', + best_provider=Aivvm) text_ada_001 = Model( - name = 'text-ada-001', - base_provider = 'openai', - best_provider = Vercel) + name='text-ada-001', + base_provider='openai', + best_provider=Vercel) text_babbage_001 = Model( - name = 'text-babbage-001', - base_provider = 'openai', - best_provider = Vercel) + name='text-babbage-001', + base_provider='openai', + best_provider=Vercel) text_curie_001 = Model( - name = 'text-curie-001', - base_provider = 'openai', - best_provider = Vercel) + name='text-curie-001', + base_provider='openai', + best_provider=Vercel) text_davinci_002 = Model( - name = 'text-davinci-002', - base_provider = 'openai', - best_provider = Vercel) + name='text-davinci-002', + base_provider='openai', + best_provider=Vercel) text_davinci_003 = Model( - name = 'text-davinci-003', - base_provider = 'openai', - best_provider = Vercel) + name='text-davinci-003', + base_provider='openai', + best_provider=Vercel) llama13b_v2_chat = Model( - name = 'replicate:a16z-infra/llama13b-v2-chat', - base_provider = 'replicate', - best_provider = Vercel) + name='replicate:a16z-infra/llama13b-v2-chat', + base_provider='replicate', + best_provider=Vercel) llama7b_v2_chat = Model( - name = 'replicate:a16z-infra/llama7b-v2-chat', - base_provider = 'replicate', - best_provider = Vercel) + name='replicate:a16z-infra/llama7b-v2-chat', + base_provider='replicate', + best_provider=Vercel) class ModelUtils: convert: dict[str, Model] = { # gpt-3.5 / gpt-4 - 'gpt-3.5-turbo' : gpt_35_turbo, - 'gpt-3.5-turbo-16k' : gpt_35_turbo_16k, - 'gpt-4' : gpt_4, - 'gpt-4-0613' : gpt_4_0613, - 'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613, - + 'gpt-3.5-turbo': gpt_35_turbo, + 'gpt-3.5-turbo-16k': gpt_35_turbo_16k, + 'gpt-3.5-turbo-16k-0613': gpt_35_turbo_16k_0613, + 'gpt-4': gpt_4, + 'gpt-4-0613': gpt_4_0613, + 'gpt-4-32k': gpt_4_32k, + 
'gpt-4-32k-0613': gpt_4_32k_0613, + # Bard - 'palm2' : palm, - 'palm' : palm, - 'google' : palm, - 'google-bard' : palm, - 'google-palm' : palm, - 'bard' : palm, - + 'palm2': palm, + 'palm': palm, + 'google': palm, + 'google-bard': palm, + 'google-palm': palm, + 'bard': palm, + # H2o - 'falcon-40b' : falcon_40b, - 'falcon-7b' : falcon_7b, - 'llama-13b' : llama_13b, - + 'falcon-40b': falcon_40b, + 'falcon-7b': falcon_7b, + 'llama-13b': llama_13b, + # Vercel - 'claude-instant-v1' : claude_instant_v1, - 'claude-v1' : claude_v1, - 'claude-v2' : claude_v2, - 'command-nightly' : command_nightly, - 'gpt-neox-20b' : gpt_neox_20b, - 'santacoder' : santacoder, - 'bloom' : bloom, - 'flan-t5-xxl' : flan_t5_xxl, - 'code-davinci-002' : code_davinci_002, - 'text-ada-001' : text_ada_001, - 'text-babbage-001' : text_babbage_001, - 'text-curie-001' : text_curie_001, - 'text-davinci-002' : text_davinci_002, - 'text-davinci-003' : text_davinci_003, - 'llama13b-v2-chat' : llama13b_v2_chat, - 'llama7b-v2-chat' : llama7b_v2_chat, - - 'oasst-sft-1-pythia-12b' : oasst_sft_1_pythia_12b, - 'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35, - 'command-light-nightly' : command_light_nightly, + 'claude-instant-v1': claude_instant_v1, + 'claude-v1': claude_v1, + 'claude-v2': claude_v2, + 'command-nightly': command_nightly, + 'gpt-neox-20b': gpt_neox_20b, + 'santacoder': santacoder, + 'bloom': bloom, + 'flan-t5-xxl': flan_t5_xxl, + 'code-davinci-002': code_davinci_002, + 'text-ada-001': text_ada_001, + 'text-babbage-001': text_babbage_001, + 'text-curie-001': text_curie_001, + 'text-davinci-002': text_davinci_002, + 'text-davinci-003': text_davinci_003, + 'llama13b-v2-chat': llama13b_v2_chat, + 'llama7b-v2-chat': llama7b_v2_chat, + + 'oasst-sft-1-pythia-12b': oasst_sft_1_pythia_12b, + 'oasst-sft-4-pythia-12b-epoch-3.5': oasst_sft_4_pythia_12b_epoch_35, + 'command-light-nightly': command_light_nightly, } \ No newline at end of file diff --git a/testing/test_chat_completion.py b/testing/test_chat_completion.py index 77774a2f..7600e46b 100644 --- a/testing/test_chat_completion.py +++ b/testing/test_chat_completion.py @@ -7,19 +7,20 @@ import g4f, asyncio print("create:", end=" ", flush=True) for response in g4f.ChatCompletion.create( - model=g4f.models.gpt_4, - provider=g4f.Provider.Vercel, - messages=[{"role": "user", "content": "hello!"}], + model=g4f.models.gpt_4_32k_0613, + provider=g4f.Provider.Aivvm, + messages=[{"role": "user", "content": "send a bunch of emojis. 
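The commit above and the one below both rework ModelUtils.convert, which is a plain string-to-Model lookup table. A quick sketch of what the new gpt-4-32k entries enable, assuming the table is importable as defined in the patched module (the printed fields follow from the Model definitions in the diff):

from g4f.models import ModelUtils

# Resolve a model-name string to its Model dataclass via the convert table.
model = ModelUtils.convert["gpt-4-32k-0613"]
print(model.name, "->", model.base_provider)  # gpt-4-32k-0613 -> openai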
From e465899801de653321f27a5dd31fc5ea1fd1bf32 Mon Sep 17 00:00:00 2001
From: Tekky <98614666+xtekky@users.noreply.github.com>
Date: Sun, 1 Oct 2023 04:27:21 +0200
Subject: ~

---
 g4f/models.py | 324 +++++++++++++++++++++++++++++-----------------------------
 1 file changed, 162 insertions(+), 162 deletions(-)

diff --git a/g4f/models.py b/g4f/models.py
index 5ddada3a..cca9e850 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -1,264 +1,264 @@
-from __future__ import annotations
+from __future__ import annotations
 from dataclasses import dataclass
-from .typing import Union
-from .Provider import BaseProvider, RetryProvider
-from .Provider import (
+from .typing import Union
+from .Provider import BaseProvider, RetryProvider
+from .Provider import (
+    AItianhuSpace,
     ChatgptLogin,
-    ChatgptAi,
-    ChatBase,
-    Vercel,
-    DeepAi,
-    Aivvm,
-    Bard,
-    H2o,
-    GptGo,
-    Bing,
     PerplexityAi,
+    ChatgptDuo,
+    ChatgptAi,
+    ChatBase,
+    AItianhu,
     Wewordle,
     Yqcloud,
-    AItianhu,
-    AItianhuSpace,
-    Aichat,
     Myshell,
+    Vercel,
+    DeepAi,
+    Aichat,
+    Aivvm,
+    GptGo,
+    Bard,
     Aibn,
-    ChatgptDuo,
+    Bing,
+    H2o,
 )
 
-
 @dataclass(unsafe_hash=True)
 class Model:
     name: str
     base_provider: str
     best_provider: Union[type[BaseProvider], RetryProvider] = None
 
-
 # Config for HuggingChat, OpenAssistant
 # Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
 default = Model(
-    name="",
-    base_provider="",
-    best_provider=RetryProvider([
-        Bing,          # Not fully GPT 3 or 4
-        PerplexityAi,  # Adds references to sources
-        Wewordle,      # Responds with markdown
-        Yqcloud,       # Answers short questions in chinese
-        ChatBase,      # Don't want to answer creatively
-        ChatgptDuo,    # Include search results
+    name = "",
+    base_provider = "",
+    best_provider = RetryProvider([
+        Bing,         # Not fully GPT 3 or 4
+        PerplexityAi, # Adds references to sources
+        Wewordle,     # Responds with markdown
+        Yqcloud,      # Answers short questions in chinese
+        ChatBase,     # Don't want to answer creatively
+        ChatgptDuo,   # Include search results
         DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn,
     ])
 )
 
 # GPT-3.5 / GPT-4
 gpt_35_turbo = Model(
-    name='gpt-3.5-turbo',
-    base_provider='openai',
-    best_provider=RetryProvider([
+    name          = 'gpt-3.5-turbo',
+    base_provider = 'openai',
+    best_provider = RetryProvider([
         DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn,
     ])
 )
 
 gpt_4 = Model(
-    name='gpt-4',
-    base_provider='openai',
-    best_provider=RetryProvider([
-        Myshell, AItianhuSpace, Aivvm
+    name          = 'gpt-4',
+    base_provider = 'openai',
+    best_provider = RetryProvider([
+        Myshell, AItianhuSpace,
     ])
 )
 
 # Bard
 palm = Model(
-    name='palm',
-    base_provider='google',
-    best_provider=Bard)
+    name          = 'palm',
+    base_provider = 'google',
+    best_provider = Bard)
 
 # H2o
 falcon_7b = Model(
-    name='h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
-    base_provider='huggingface',
-    best_provider=H2o)
+    name          = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
+    base_provider = 'huggingface',
+    best_provider = H2o)
 
 falcon_40b = Model(
-    name='h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
-    base_provider='huggingface',
-    best_provider=H2o)
+    name          = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
+    base_provider = 'huggingface',
+    best_provider = H2o)
 
 llama_13b = Model(
-    name='h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b',
-    base_provider='huggingface',
-    best_provider=H2o)
+    name          = 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b',
+    base_provider = 'huggingface',
+    best_provider = H2o)
 
 # Vercel
 claude_instant_v1 = Model(
-    name='claude-instant-v1',
-    base_provider='anthropic',
-    best_provider=Vercel)
+    name          = 'claude-instant-v1',
+    base_provider = 'anthropic',
+    best_provider = Vercel)
 
 claude_v1 = Model(
-    name='claude-v1',
-    base_provider='anthropic',
-    best_provider=Vercel)
+    name          = 'claude-v1',
+    base_provider = 'anthropic',
+    best_provider = Vercel)
 
 claude_v2 = Model(
-    name='claude-v2',
-    base_provider='anthropic',
-    best_provider=Vercel)
+    name          = 'claude-v2',
+    base_provider = 'anthropic',
+    best_provider = Vercel)
 
 command_light_nightly = Model(
-    name='command-light-nightly',
-    base_provider='cohere',
-    best_provider=Vercel)
+    name          = 'command-light-nightly',
+    base_provider = 'cohere',
+    best_provider = Vercel)
 
 command_nightly = Model(
-    name='command-nightly',
-    base_provider='cohere',
-    best_provider=Vercel)
+    name          = 'command-nightly',
+    base_provider = 'cohere',
+    best_provider = Vercel)
 
 gpt_neox_20b = Model(
-    name='EleutherAI/gpt-neox-20b',
-    base_provider='huggingface',
-    best_provider=Vercel)
+    name          = 'EleutherAI/gpt-neox-20b',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
 
 oasst_sft_1_pythia_12b = Model(
-    name='OpenAssistant/oasst-sft-1-pythia-12b',
-    base_provider='huggingface',
-    best_provider=Vercel)
+    name          = 'OpenAssistant/oasst-sft-1-pythia-12b',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
 
 oasst_sft_4_pythia_12b_epoch_35 = Model(
-    name='OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
-    base_provider='huggingface',
-    best_provider=Vercel)
+    name          = 'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
 
 santacoder = Model(
-    name='bigcode/santacoder',
-    base_provider='huggingface',
-    best_provider=Vercel)
+    name          = 'bigcode/santacoder',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
 
 bloom = Model(
-    name='bigscience/bloom',
-    base_provider='huggingface',
-    best_provider=Vercel)
+    name          = 'bigscience/bloom',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
 
 flan_t5_xxl = Model(
-    name='google/flan-t5-xxl',
-    base_provider='huggingface',
-    best_provider=Vercel)
+    name          = 'google/flan-t5-xxl',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
 
 code_davinci_002 = Model(
-    name='code-davinci-002',
-    base_provider='openai',
-    best_provider=Vercel)
+    name          = 'code-davinci-002',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 gpt_35_turbo_16k = Model(
-    name='gpt-3.5-turbo-16k',
-    base_provider='openai',
-    best_provider=Aivvm)
+    name          = 'gpt-3.5-turbo-16k',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 gpt_35_turbo_16k_0613 = Model(
-    name='gpt-3.5-turbo-16k-0613',
-    base_provider='openai')
+    name          = 'gpt-3.5-turbo-16k-0613',
+    base_provider = 'openai')
 
 gpt_35_turbo_0613 = Model(
-    name='gpt-3.5-turbo-0613',
-    base_provider='openai',
-    best_provider=RetryProvider([
+    name          = 'gpt-3.5-turbo-0613',
+    base_provider = 'openai',
+    best_provider = RetryProvider([
         Aivvm, ChatgptLogin
     ])
 )
 
 gpt_4_0613 = Model(
-    name='gpt-4-0613',
-    base_provider='openai',
-    best_provider=Aivvm)
+    name          = 'gpt-4-0613',
+    base_provider = 'openai',
+    best_provider = Aivvm)
 
 gpt_4_32k = Model(
-    name='gpt-4-32k',
-    base_provider='openai',
-    best_provider=Aivvm)
+    name          = 'gpt-4-32k',
+    base_provider = 'openai',
+    best_provider = Aivvm)
 
 gpt_4_32k_0613 = Model(
-    name='gpt-4-32k-0613',
-    base_provider='openai',
-    best_provider=Aivvm)
+    name          = 'gpt-4-32k-0613',
+    base_provider = 'openai',
+    best_provider = Aivvm)
 
 text_ada_001 = Model(
-    name='text-ada-001',
-    base_provider='openai',
-    best_provider=Vercel)
+    name          = 'text-ada-001',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 text_babbage_001 = Model(
-    name='text-babbage-001',
-    base_provider='openai',
-    best_provider=Vercel)
+    name          = 'text-babbage-001',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 text_curie_001 = Model(
-    name='text-curie-001',
-    base_provider='openai',
-    best_provider=Vercel)
+    name          = 'text-curie-001',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 text_davinci_002 = Model(
-    name='text-davinci-002',
-    base_provider='openai',
-    best_provider=Vercel)
+    name          = 'text-davinci-002',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 text_davinci_003 = Model(
-    name='text-davinci-003',
-    base_provider='openai',
-    best_provider=Vercel)
+    name          = 'text-davinci-003',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 llama13b_v2_chat = Model(
-    name='replicate:a16z-infra/llama13b-v2-chat',
-    base_provider='replicate',
-    best_provider=Vercel)
+    name          = 'replicate:a16z-infra/llama13b-v2-chat',
+    base_provider = 'replicate',
+    best_provider = Vercel)
 
 llama7b_v2_chat = Model(
-    name='replicate:a16z-infra/llama7b-v2-chat',
-    base_provider='replicate',
-    best_provider=Vercel)
+    name          = 'replicate:a16z-infra/llama7b-v2-chat',
+    base_provider = 'replicate',
+    best_provider = Vercel)
 
 class ModelUtils:
     convert: dict[str, Model] = {
-        # gpt-3.5 / gpt-4
-        'gpt-3.5-turbo': gpt_35_turbo,
-        'gpt-3.5-turbo-16k': gpt_35_turbo_16k,
-        'gpt-3.5-turbo-16k-0613': gpt_35_turbo_16k_0613,
-        'gpt-4': gpt_4,
-        'gpt-4-0613': gpt_4_0613,
-        'gpt-4-32k': gpt_4_32k,
-        'gpt-4-32k-0613': gpt_4_32k_0613,
-
+        # gpt-3.5
+        'gpt-3.5-turbo'          : gpt_35_turbo,
+        'gpt-3.5-turbo-16k'      : gpt_35_turbo_16k,
+        'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
+
+        # gpt-4
+        'gpt-4'          : gpt_4,
+        'gpt-4-0613'     : gpt_4_0613,
+        'gpt-4-32k'      : gpt_4_32k,
+        'gpt-4-32k-0613' : gpt_4_32k_0613,
+
         # Bard
-        'palm2': palm,
-        'palm': palm,
-        'google': palm,
-        'google-bard': palm,
-        'google-palm': palm,
-        'bard': palm,
-
+        'palm2'       : palm,
+        'palm'        : palm,
+        'google'      : palm,
+        'google-bard' : palm,
+        'google-palm' : palm,
+        'bard'        : palm,
+
         # H2o
-        'falcon-40b': falcon_40b,
-        'falcon-7b': falcon_7b,
-        'llama-13b': llama_13b,
-
+        'falcon-40b' : falcon_40b,
+        'falcon-7b'  : falcon_7b,
+        'llama-13b'  : llama_13b,
+
         # Vercel
-        'claude-instant-v1': claude_instant_v1,
-        'claude-v1': claude_v1,
-        'claude-v2': claude_v2,
-        'command-nightly': command_nightly,
-        'gpt-neox-20b': gpt_neox_20b,
-        'santacoder': santacoder,
-        'bloom': bloom,
-        'flan-t5-xxl': flan_t5_xxl,
-        'code-davinci-002': code_davinci_002,
-        'text-ada-001': text_ada_001,
-        'text-babbage-001': text_babbage_001,
-        'text-curie-001': text_curie_001,
-        'text-davinci-002': text_davinci_002,
-        'text-davinci-003': text_davinci_003,
-        'llama13b-v2-chat': llama13b_v2_chat,
-        'llama7b-v2-chat': llama7b_v2_chat,
-
-        'oasst-sft-1-pythia-12b': oasst_sft_1_pythia_12b,
-        'oasst-sft-4-pythia-12b-epoch-3.5': oasst_sft_4_pythia_12b_epoch_35,
-        'command-light-nightly': command_light_nightly,
-    }
\ No newline at end of file
+        'claude-instant-v1' : claude_instant_v1,
+        'claude-v1'         : claude_v1,
+        'claude-v2'         : claude_v2,
+        'command-nightly'   : command_nightly,
+        'gpt-neox-20b'      : gpt_neox_20b,
+        'santacoder'        : santacoder,
+        'bloom'             : bloom,
+        'flan-t5-xxl'       : flan_t5_xxl,
+        'code-davinci-002'  : code_davinci_002,
+        'text-ada-001'      : text_ada_001,
+        'text-babbage-001'  : text_babbage_001,
+        'text-curie-001'    : text_curie_001,
+        'text-davinci-002'  : text_davinci_002,
+        'text-davinci-003'  : text_davinci_003,
+        'llama13b-v2-chat'  : llama13b_v2_chat,
+        'llama7b-v2-chat'   : llama7b_v2_chat,
+
+        'oasst-sft-1-pythia-12b'           : oasst_sft_1_pythia_12b,
+        'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35,
+        'command-light-nightly'            : command_light_nightly,
+    }
-- 
cgit v1.2.3
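A note on the best_provider = RetryProvider([...]) pattern that all three commits rearrange: the intent is a fallback chain that tries each listed provider until one returns a completion. The sketch below only illustrates that idea; it is not g4f's actual RetryProvider, whose implementation this series never shows:

from typing import Any, Callable

def create_with_fallback(providers: list[Any], create: Callable[..., str], **kwargs) -> str:
    """Try each provider in order until one yields a response (sketch only)."""
    last_error: Exception | None = None
    for provider in providers:
        try:
            return create(provider=provider, **kwargs)
        except Exception as error:  # a dead provider is expected here; move on
            last_error = error
    raise RuntimeError("all providers failed") from last_error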