Diffstat (limited to 'g4f/Provider')

 g4f/Provider/Cohere.py                | 2 +-
 g4f/Provider/DuckDuckGo.py            | 7 +++++--
 g4f/Provider/Llama.py                 | 2 +-
 g4f/Provider/PerplexityLabs.py        | 7 +++++--
 g4f/Provider/__init__.py              | 2 +-
 g4f/Provider/needs_auth/OpenaiChat.py | 3 ++-
 6 files changed, 15 insertions(+), 8 deletions(-)
diff --git a/g4f/Provider/Cohere.py b/g4f/Provider/Cohere.py
index 4f9fd30a..eac04ab4 100644
--- a/g4f/Provider/Cohere.py
+++ b/g4f/Provider/Cohere.py
@@ -9,7 +9,7 @@ from .helper import format_prompt
 
 class Cohere(AbstractProvider):
     url = "https://cohereforai-c4ai-command-r-plus.hf.space"
-    working = True
+    working = False
     supports_gpt_35_turbo = False
     supports_gpt_4 = False
     supports_stream = True
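With Cohere (and Llama below) flagged as broken, downstream selection logic should filter on the `working` attribute. A minimal sketch, assuming the `__providers__` list exported by g4f/Provider/__init__.py:

```python
# Skip providers whose maintainers have flagged them as broken.
from g4f.Provider import __providers__  # assumed export

usable = [p for p in __providers__ if p.working]
print([p.__name__ for p in usable])  # Cohere and Llama no longer appear
```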
diff --git a/g4f/Provider/DuckDuckGo.py b/g4f/Provider/DuckDuckGo.py
index 2fa0612a..9379660b 100644
--- a/g4f/Provider/DuckDuckGo.py
+++ b/g4f/Provider/DuckDuckGo.py
@@ -16,8 +16,11 @@ class DuckDuckGo(AsyncGeneratorProvider, ProviderModelMixin):
     supports_message_history = True
 
     default_model = "gpt-3.5-turbo-0125"
-    models = ["gpt-3.5-turbo-0125", "claude-instant-1.2"]
-    model_aliases = {"gpt-3.5-turbo": "gpt-3.5-turbo-0125"}
+    models = ["gpt-3.5-turbo-0125", "claude-3-haiku-20240307"]
+    model_aliases = {
+        "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
+        "claude-3-haiku": "claude-3-haiku-20240307"
+    }
 
     status_url = "https://duckduckgo.com/duckchat/v1/status"
     chat_url = "https://duckduckgo.com/duckchat/v1/chat"
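The retired claude-instant-1.2 slot is replaced by claude-3-haiku-20240307, with a short alias added alongside the existing gpt-3.5-turbo one. A minimal usage sketch, assuming g4f's top-level ChatCompletion API; the alias is resolved to the dated model id before the request is sent:

```python
import g4f

# "claude-3-haiku" is the new alias; DuckDuckGo maps it to
# "claude-3-haiku-20240307" via model_aliases.
response = g4f.ChatCompletion.create(
    model="claude-3-haiku",
    provider=g4f.Provider.DuckDuckGo,
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response)
```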
diff --git a/g4f/Provider/Llama.py b/g4f/Provider/Llama.py
index f2c78b36..235c0994 100644
--- a/g4f/Provider/Llama.py
+++ b/g4f/Provider/Llama.py
@@ -9,7 +9,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
 class Llama(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.llama2.ai"
-    working = True
+    working = False
     supports_message_history = True
     default_model = "meta/meta-llama-3-70b-instruct"
     models = [
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index 91ba63f2..4a2cc9e5 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -21,11 +21,14 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
         "related"
     ]
     model_aliases = {
-        "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
+        "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
+        "mistralai/Mistral-7B-Instruct-v0.2": "mistral-7b-instruct",
         "mistralai/Mixtral-8x7B-Instruct-v0.1": "mixtral-8x7b-instruct",
         "codellama/CodeLlama-70b-Instruct-hf": "codellama-70b-instruct",
         "llava-v1.5-7b": "llava-v1.5-7b-wrapper",
-        'databricks/dbrx-instruct': "dbrx-instruct"
+        "databricks/dbrx-instruct": "dbrx-instruct",
+        "meta-llama/Meta-Llama-3-70B-Instruct": "llama-3-70b-instruct",
+        "meta-llama/Meta-Llama-3-8B-Instruct": "llama-3-8b-instruct"
     }
 
     @classmethod
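Both Meta-Llama-3 checkpoints are now reachable under their Hugging Face-style names, and Mistral-7B v0.2 shares the Labs-side id with v0.1. A quick sketch of the resolution, assuming the get_model() helper from ProviderModelMixin consults model_aliases:

```python
from g4f.Provider import PerplexityLabs

# Hugging Face-style names resolve to Labs-internal model ids
# (get_model() is assumed to apply model_aliases).
print(PerplexityLabs.get_model("meta-llama/Meta-Llama-3-70B-Instruct"))
# -> llama-3-70b-instruct
print(PerplexityLabs.get_model("mistralai/Mistral-7B-Instruct-v0.2"))
# -> mistral-7b-instruct (same id as v0.1)
```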
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index d0c0d8b6..e60e1310 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from ..providers.types import BaseProvider, ProviderType
-from ..providers.retry_provider import RetryProvider, IterProvider
+from ..providers.retry_provider import RetryProvider, IterListProvider
 from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
 from ..providers.create_images import CreateImagesProvider
 
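The import tracks the rename of IterProvider to IterListProvider in providers/retry_provider.py. A hedged sketch of the renamed class in use, assuming it keeps its predecessor's list-of-providers constructor:

```python
from g4f.Provider import DuckDuckGo, PerplexityLabs
from g4f.providers.retry_provider import IterListProvider

# Walk the given providers in order and serve the response from the
# first one that succeeds (constructor signature assumed).
provider = IterListProvider([DuckDuckGo, PerplexityLabs])
```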
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index d8ea4fad..f40ae961 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -373,6 +373,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             except NoValidHarFileError as e:
                 if cls._api_key is None and cls.needs_auth:
                     raise e
+                cls._create_request_args()
 
             if cls.default_model is None:
                 cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
@@ -420,7 +421,7 @@
                     **requirements["proofofwork"],
                     user_agent=cls._headers["user-agent"],
                     proofTokens=proofTokens
-                 )
+                )
             if debug.logging:
                 print(
                     'Arkose:', False if not need_arkose else arkose_token[:12]+"...",
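The first OpenaiChat hunk makes the provider recover when no valid HAR file is found but authentication is optional: rather than continuing with uninitialized request state, it rebuilds default request arguments. An abstract sketch of that fallback pattern; every name below is a hypothetical stand-in, not a g4f internal:

```python
class NoValidHarFileError(Exception):
    """Stand-in for g4f's error when no usable .har capture exists."""

def load_har_args() -> dict:
    # Stand-in for reading cached headers/cookies from a HAR file.
    raise NoValidHarFileError("no .har file found")

def ensure_request_args(api_key: str | None, needs_auth: bool) -> dict:
    try:
        return load_har_args()
    except NoValidHarFileError:
        if api_key is None and needs_auth:
            raise  # auth is required and there is nothing to fall back on
        # The fix: build fresh default request args instead of
        # proceeding with uninitialized headers.
        return {"headers": {"user-agent": "Mozilla/5.0"}}

print(ensure_request_args(api_key="sk-example", needs_auth=True))
```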