Diffstat
 g4f/Provider/FakeGpt.py     | 2 +-
 g4f/Provider/GeekGpt.py     | 2 +-
 g4f/Provider/HuggingChat.py | 3 ++-
 g4f/models.py               | 9 ++++++++-
 4 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/g4f/Provider/FakeGpt.py b/g4f/Provider/FakeGpt.py
index a88f3682..ee14abf4 100644
--- a/g4f/Provider/FakeGpt.py
+++ b/g4f/Provider/FakeGpt.py
@@ -11,7 +11,7 @@ from .helper import format_prompt, get_random_string
 class FakeGpt(AsyncGeneratorProvider):
     url = "https://chat-shared2.zhile.io"
     supports_gpt_35_turbo = True
-    working = True
+    working = False
     _access_token = None
     _cookie_jar = None
 
diff --git a/g4f/Provider/GeekGpt.py b/g4f/Provider/GeekGpt.py
index f53ec9de..f1dea9b1 100644
--- a/g4f/Provider/GeekGpt.py
+++ b/g4f/Provider/GeekGpt.py
@@ -8,7 +8,7 @@ from json import dumps
 
 class GeekGpt(AbstractProvider):
     url = 'https://chat.geekgpt.org'
-    working = True
+    working = False
     supports_message_history = True
     supports_stream = True
     supports_gpt_35_turbo = True
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 9aa93878..3ea9f306 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -19,7 +19,8 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
         "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
         "codellama/CodeLlama-34b-Instruct-hf",
         "mistralai/Mistral-7B-Instruct-v0.2",
-        "openchat/openchat-3.5-0106"
+        "openchat/openchat-3.5-0106",
+        "codellama/CodeLlama-70b-Instruct-hf"
     ]
     model_aliases = {
         "openchat/openchat_3.5": "openchat/openchat-3.5-1210",
diff --git a/g4f/models.py b/g4f/models.py
index b971cf2d..ed86024e 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -123,6 +123,12 @@ codellama_34b_instruct = Model(
     best_provider = RetryProvider([HuggingChat, PerplexityLabs, DeepInfra])
 )
 
+codellama_70b_instruct = Model(
+    name = "codellama/CodeLlama-70b-Instruct-hf",
+    base_provider = "huggingface",
+    best_provider = DeepInfra
+)
+
 # Mistral
 mixtral_8x7b = Model(
     name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
@@ -256,6 +262,7 @@ class ModelUtils:
         'llama2-13b': llama2_13b,
         'llama2-70b': llama2_70b,
         'codellama-34b-instruct': codellama_34b_instruct,
+        'codellama-70b-instruct': codellama_70b_instruct,
 
         'mixtral-8x7b': mixtral_8x7b,
         'mistral-7b': mistral_7b,
@@ -270,4 +277,4 @@ class ModelUtils:
         'pi': pi
     }
 
-_all_models = list(ModelUtils.convert.keys())
\ No newline at end of file
+_all_models = list(ModelUtils.convert.keys())
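
Net effect of the commit: FakeGpt and GeekGpt are flagged non-working, and CodeLlama-70b-Instruct becomes reachable both through HuggingChat's model list and through the registry in g4f/models.py. A minimal sketch of how the new registry entry is exercised, assuming a g4f checkout at this commit (the prompt string and printed values are illustrative, not part of the change):

import g4f
from g4f.models import ModelUtils, codellama_70b_instruct
from g4f.Provider import FakeGpt, GeekGpt

# The key added to ModelUtils.convert resolves to the Model defined in this commit.
assert ModelUtils.convert["codellama-70b-instruct"] is codellama_70b_instruct
print(codellama_70b_instruct.name)  # codellama/CodeLlama-70b-Instruct-hf

# Both providers are flagged unusable here; selection logic that respects
# the `working` attribute should now skip or reject them.
print(FakeGpt.working, GeekGpt.working)  # False False

# Illustrative request routed via the model's best_provider (DeepInfra here);
# the response text depends on the remote service at call time.
response = g4f.ChatCompletion.create(
    model="codellama-70b-instruct",
    messages=[{"role": "user", "content": "Write FizzBuzz in Python."}],
)
print(response)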