From 9def1aa71f5c0340967297a94b7742c8d7c7fd8d Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Fri, 24 Jan 2025 02:47:57 +0000
Subject: Update model configurations, provider implementations, and
 documentation (#2577)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Update model configurations, provider implementations, and documentation

- Updated model names and aliases for Qwen QVQ 72B and Qwen 2 72B (@TheFirstNoob)
- Revised HuggingSpace class configuration, added default_image_model
- Added llama-3.2-70b alias for the Llama 3.2 70B model in AutonomousAI
- Removed the BlackboxCreateAgent class
- Added gpt-4o alias for the Copilot model
- Moved api_key to a Mhystical class attribute
- Added models property with default_model value for Free2GPT
- Simplified the Jmuz class implementation
- Improved image generation and model handling in DeepInfra (see the usage sketch at the end of this patch)
- Standardized default models and removed aliases in Gemini
- Replaced model aliases with a direct model list in GlhfChat (@TheFirstNoob)
- Removed trailing slash from the image generation URL in PollinationsAI (https://github.com/xtekky/gpt4free/issues/2571)
- Updated llama and qwen model configurations
- Enhanced provider documentation and model details

* Removed the 'Yqcloud' provider from Default in g4f/models.py due to the error
  'ResponseStatusError: Response 429: 文字过长,请删减后重试。' ("Text is too long,
  please shorten it and retry.")

* Update docs/providers-and-models.md

* refactor(g4f/Provider/DDG.py): Add error handling and rate limiting to DDG provider

- Add custom exception classes for rate limits, timeouts, and conversation limits
- Implement rate limiting with a sleep between requests (0.75s minimum delay; see the sketch after this message)
- Add a model validation method to check supported models
- Add proper error handling for API responses with custom exceptions
- Improve session cookie handling for conversation persistence
- Clean up the User-Agent string and remove redundant code
- Add proper error propagation through the async generator

Breaking changes:
- New custom exceptions may require updates to error-handling code
- Rate limiting affects request timing and throughput
- Model validation is now stricter

Related:
- Adds error handling similar to standard API clients
- Improves reliability and robustness of chat interactions

* Update g4f/models.py g4f/Provider/PollinationsAI.py

* Update g4f/models.py

* Restored the previously disabled, non-working provider (g4f/Provider/DeepInfraChat.py)

* Fixed a bug with streaming completions

* Update g4f/Provider/PollinationsAI.py

* Update g4f/Provider/Blackbox.py g4f/Provider/DDG.py

* Added another image generation model, 'ImageGeneration2', to the 'Blackbox' provider

* Update docs/providers-and-models.md

* Update g4f/models.py g4f/Provider/Blackbox.py

* Added a new OIVSCode provider with text models and vision (image upload) support

* Update docs/providers-and-models.md

* docs: add Conversation Memory class with context handling, requested by @TheFirstNoob

* Simplified the README.md documentation and added new docs/configuration.md documentation

* Update README.md docs/configuration.md

* Update README.md

* Update docs/providers-and-models.md g4f/models.py g4f/Provider/PollinationsAI.py

* Added the new deepseek-r1 model to the Blackbox provider (@TheFirstNoob)

* Fixed bugs and updated docs/providers-and-models.md, etc/unittest/client.py,
  g4f/models.py, and g4f/Provider/.
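For context, the 0.75s rate limiting and custom exceptions described for DDG
above follow a standard pattern. A minimal sketch only, assuming hypothetical
class and method names (the DDG.py changes themselves are outside this diff,
which is limited to g4f/Provider/needs_auth):

    import asyncio
    import time

    class RateLimitError(Exception):
        """Hypothetical: raised when the API reports too many requests."""

    class ConversationLimitError(Exception):
        """Hypothetical: raised when a conversation limit is reached."""

    class RateLimiter:
        """Enforces a minimum delay between consecutive requests."""

        def __init__(self, min_delay: float = 0.75):
            self.min_delay = min_delay
            self._last_request = 0.0

        async def wait(self) -> None:
            # Sleep only for the remainder of the minimum-delay window.
            elapsed = time.monotonic() - self._last_request
            if elapsed < self.min_delay:
                await asyncio.sleep(self.min_delay - elapsed)
            self._last_request = time.monotonic()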
---------

Co-authored-by: kqlio67 <>
Co-authored-by: H Lohaus
---
 g4f/Provider/needs_auth/Cerebras.py       |   4 +-
 g4f/Provider/needs_auth/DeepInfra.py      |  81 +++++++++++++++------
 g4f/Provider/needs_auth/Gemini.py         |  12 ++--
 g4f/Provider/needs_auth/GigaChat.py       |   2 +-
 g4f/Provider/needs_auth/GlhfChat.py       |  26 ++-----
 g4f/Provider/needs_auth/HuggingChat.py    |   2 +
 g4f/Provider/needs_auth/HuggingFaceAPI.py |   1 +
 g4f/Provider/needs_auth/OpenaiAccount.py  |   8 +--
 g4f/Provider/needs_auth/OpenaiChat.py     |   6 +-
 g4f/Provider/needs_auth/PerplexityApi.py  |   2 +-
 g4f/Provider/needs_auth/Poe.py            | 115 ------------------------
 g4f/Provider/needs_auth/Raycast.py        |  68 ------------------
 g4f/Provider/needs_auth/Replicate.py      |   4 +-
 g4f/Provider/needs_auth/__init__.py       |   3 +-
 14 files changed, 85 insertions(+), 249 deletions(-)
 delete mode 100644 g4f/Provider/needs_auth/Poe.py
 delete mode 100644 g4f/Provider/needs_auth/Raycast.py

(limited to 'g4f/Provider/needs_auth')

diff --git a/g4f/Provider/needs_auth/Cerebras.py b/g4f/Provider/needs_auth/Cerebras.py
index 996e8e11..e91fa8b2 100644
--- a/g4f/Provider/needs_auth/Cerebras.py
+++ b/g4f/Provider/needs_auth/Cerebras.py
@@ -15,11 +15,11 @@ class Cerebras(OpenaiAPI):
     working = True
     default_model = "llama3.1-70b"
     models = [
-        "llama3.1-70b",
+        default_model,
         "llama3.1-8b",
         "llama-3.3-70b"
     ]
-    model_aliases = {"llama-3.1-70b": "llama3.1-70b", "llama-3.1-8b": "llama3.1-8b"}
+    model_aliases = {"llama-3.1-70b": default_model, "llama-3.1-8b": "llama3.1-8b"}
 
     @classmethod
     async def create_async_generator(
diff --git a/g4f/Provider/needs_auth/DeepInfra.py b/g4f/Provider/needs_auth/DeepInfra.py
index 86993314..ea537b3b 100644
--- a/g4f/Provider/needs_auth/DeepInfra.py
+++ b/g4f/Provider/needs_auth/DeepInfra.py
@@ -2,41 +2,59 @@ from __future__ import annotations
 
 import requests
 from ...typing import AsyncResult, Messages
-from .OpenaiAPI import OpenaiAPI
 from ...requests import StreamSession, raise_for_status
 from ...image import ImageResponse
+from .OpenaiAPI import OpenaiAPI
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
-class DeepInfra(OpenaiAPI):
+class DeepInfra(OpenaiAPI, AsyncGeneratorProvider, ProviderModelMixin):
     label = "DeepInfra"
     url = "https://deepinfra.com"
     login_url = "https://deepinfra.com/dash/api_keys"
     working = True
-    api_base = "https://api.deepinfra.com/v1/openai",
+    api_base = "https://api.deepinfra.com/v1/openai"
     needs_auth = True
     supports_stream = True
     supports_message_history = True
     default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
-    default_image_model = ''
-    image_models = [default_image_model]
+    default_image_model = "stabilityai/sd3.5"
+    models = []
+    image_models = []
 
     @classmethod
     def get_models(cls, **kwargs):
         if not cls.models:
             url = 'https://api.deepinfra.com/models/featured'
-            models = requests.get(url).json()
-            cls.models = [model['model_name'] for model in models if model["type"] == "text-generation"]
-            cls.image_models = [model['model_name'] for model in models if model["reported_type"] == "text-to-image"]
+            response = requests.get(url)
+            models = response.json()
+
+            cls.models = []
+            cls.image_models = []
+
+            for model in models:
+                if model["type"] == "text-generation":
+                    cls.models.append(model['model_name'])
+                elif model["reported_type"] == "text-to-image":
+                    cls.image_models.append(model['model_name'])
+
+            cls.models.extend(cls.image_models)
+
         return cls.models
 
+    @classmethod
+    def get_image_models(cls, **kwargs):
+        if not cls.image_models:
+            cls.get_models()
+        return cls.image_models
+
     @classmethod
     def create_async_generator(
         cls,
         model: str,
         messages: Messages,
-        stream: bool = True,
+        stream: bool,
         temperature: float = 0.7,
         max_tokens: int = 1028,
-        prompt: str = None,
         **kwargs
     ) -> AsyncResult:
         headers = {
@@ -47,12 +65,6 @@ class DeepInfra(OpenaiAPI):
             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
             'X-Deepinfra-Source': 'web-embed',
         }
-
-        # Check if the model is an image model
-        if model in cls.image_models:
-            return cls.create_image_generator(messages[-1]["content"] if prompt is None else prompt, model, headers=headers, **kwargs)
-
-        # Text generation
         return super().create_async_generator(
             model, messages,
             stream=stream,
@@ -63,7 +75,7 @@ class DeepInfra(OpenaiAPI):
         )
 
     @classmethod
-    async def create_image_generator(
+    async def create_async_image(
         cls,
         prompt: str,
         model: str,
@@ -71,13 +83,26 @@ class DeepInfra(OpenaiAPI):
         api_base: str = "https://api.deepinfra.com/v1/inference",
         proxy: str = None,
         timeout: int = 180,
-        headers: dict = None,
         extra_data: dict = {},
         **kwargs
-    ) -> AsyncResult:
-        if api_key is not None and headers is not None:
+    ) -> ImageResponse:
+        headers = {
+            'Accept-Encoding': 'gzip, deflate, br',
+            'Accept-Language': 'en-US',
+            'Connection': 'keep-alive',
+            'Origin': 'https://deepinfra.com',
+            'Referer': 'https://deepinfra.com/',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-site',
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+            'X-Deepinfra-Source': 'web-embed',
+            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+        }
+        if api_key is not None:
             headers["Authorization"] = f"Bearer {api_key}"
-
         async with StreamSession(
             proxies={"all": proxy},
             headers=headers,
@@ -85,7 +110,7 @@ class DeepInfra(OpenaiAPI):
         ) as session:
             model = cls.get_model(model)
             data = {"prompt": prompt, **extra_data}
-            data = {"input": data} if model == cls.default_image_model else data
+            data = {"input": data} if model == cls.default_model else data
             async with session.post(f"{api_base.rstrip('/')}/{model}", json=data) as response:
                 await raise_for_status(response)
                 data = await response.json()
@@ -93,4 +118,14 @@ class DeepInfra(OpenaiAPI):
                 if not images:
                     raise RuntimeError(f"Response: {data}")
                 images = images[0] if len(images) == 1 else images
-                yield ImageResponse(images, prompt)
+                return ImageResponse(images, prompt)
+
+    @classmethod
+    async def create_async_image_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        prompt: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        yield await cls.create_async_image(messages[-1]["content"] if prompt is None else prompt, model, **kwargs)
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index 498137e5..0e1a733f 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -60,13 +60,11 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
 
     default_model = 'gemini'
-    image_models = ["gemini"]
-    default_vision_model = "gemini"
-    models = ["gemini", "gemini-1.5-flash", "gemini-1.5-pro"]
-    model_aliases = {
-        "gemini-flash": "gemini-1.5-flash",
-        "gemini-pro": "gemini-1.5-pro",
-    }
+    default_image_model = default_model
+    default_vision_model = default_model
+    image_models = [default_image_model]
+    models = [default_model, "gemini-1.5-flash", "gemini-1.5-pro"]
+
     synthesize_content_type = "audio/vnd.wav"
 
     _cookies: Cookies = None
diff --git a/g4f/Provider/needs_auth/GigaChat.py b/g4f/Provider/needs_auth/GigaChat.py
index 59da21a2..11eb6635 100644
--- a/g4f/Provider/needs_auth/GigaChat.py
+++ b/g4f/Provider/needs_auth/GigaChat.py
@@ -61,7 +61,7 @@ class GigaChat(AsyncGeneratorProvider, ProviderModelMixin):
     supports_stream = True
     needs_auth = True
     default_model = "GigaChat:latest"
-    models = ["GigaChat:latest", "GigaChat-Plus", "GigaChat-Pro"]
+    models = [default_model, "GigaChat-Plus", "GigaChat-Pro"]
 
     @classmethod
     async def create_async_generator(
diff --git a/g4f/Provider/needs_auth/GlhfChat.py b/g4f/Provider/needs_auth/GlhfChat.py
index f3a578af..be56ebb6 100644
--- a/g4f/Provider/needs_auth/GlhfChat.py
+++ b/g4f/Provider/needs_auth/GlhfChat.py
@@ -5,26 +5,10 @@ from .OpenaiAPI import OpenaiAPI
 class GlhfChat(OpenaiAPI):
     label = "GlhfChat"
     url = "https://glhf.chat"
-    login_url = "https://glhf.chat/users/settings/api"
+    login_url = "https://glhf.chat/user-settings/api"
     api_base = "https://glhf.chat/api/openai/v1"
+
     working = True
-    model_aliases = {
-        'Qwen2.5-Coder-32B-Instruct': 'hf:Qwen/Qwen2.5-Coder-32B-Instruct',
-        'Llama-3.1-405B-Instruct': 'hf:meta-llama/Llama-3.1-405B-Instruct',
-        'Llama-3.1-70B-Instruct': 'hf:meta-llama/Llama-3.1-70B-Instruct',
-        'Llama-3.1-8B-Instruct': 'hf:meta-llama/Llama-3.1-8B-Instruct',
-        'Llama-3.2-3B-Instruct': 'hf:meta-llama/Llama-3.2-3B-Instruct',
-        'Llama-3.2-11B-Vision-Instruct': 'hf:meta-llama/Llama-3.2-11B-Vision-Instruct',
-        'Llama-3.2-90B-Vision-Instruct': 'hf:meta-llama/Llama-3.2-90B-Vision-Instruct',
-        'Qwen2.5-72B-Instruct': 'hf:Qwen/Qwen2.5-72B-Instruct',
-        'Llama-3.3-70B-Instruct': 'hf:meta-llama/Llama-3.3-70B-Instruct',
-        'gemma-2-9b-it': 'hf:google/gemma-2-9b-it',
-        'gemma-2-27b-it': 'hf:google/gemma-2-27b-it',
-        'Mistral-7B-Instruct-v0.3': 'hf:mistralai/Mistral-7B-Instruct-v0.3',
-        'Mixtral-8x7B-Instruct-v0.1': 'hf:mistralai/Mixtral-8x7B-Instruct-v0.1',
-        'Mixtral-8x22B-Instruct-v0.1': 'hf:mistralai/Mixtral-8x22B-Instruct-v0.1',
-        'Nous-Hermes-2-Mixtral-8x7B-DPO': 'hf:NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
-        'Qwen2.5-7B-Instruct': 'hf:Qwen/Qwen2.5-7B-Instruct',
-        'SOLAR-10.7B-Instruct-v1.0': 'hf:upstage/SOLAR-10.7B-Instruct-v1.0',
-        'Llama-3.1-Nemotron-70B-Instruct-HF': 'hf:nvidia/Llama-3.1-Nemotron-70B-Instruct-HF'
-    }
+
+    default_model = "hf:meta-llama/Llama-3.3-70B-Instruct"
+    models = ["hf:meta-llama/Llama-3.1-405B-Instruct", default_model, "hf:deepseek-ai/DeepSeek-V3", "hf:Qwen/QwQ-32B-Preview", "hf:huihui-ai/Llama-3.3-70B-Instruct-abliterated", "hf:anthracite-org/magnum-v4-12b", "hf:meta-llama/Llama-3.1-70B-Instruct", "hf:meta-llama/Llama-3.1-8B-Instruct", "hf:meta-llama/Llama-3.2-3B-Instruct", "hf:meta-llama/Llama-3.2-11B-Vision-Instruct", "hf:meta-llama/Llama-3.2-90B-Vision-Instruct", "hf:Qwen/Qwen2.5-72B-Instruct", "hf:Qwen/Qwen2.5-Coder-32B-Instruct", "hf:google/gemma-2-9b-it", "hf:google/gemma-2-27b-it", "hf:mistralai/Mistral-7B-Instruct-v0.3", "hf:mistralai/Mixtral-8x7B-Instruct-v0.1", "hf:mistralai/Mixtral-8x22B-Instruct-v0.1", "hf:NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", "hf:Qwen/Qwen2.5-7B-Instruct", "hf:upstage/SOLAR-10.7B-Instruct-v1.0", "hf:nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"]
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 1bc0ae41..36d292b7 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -45,6 +45,7 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin):
         default_model,
         'meta-llama/Llama-3.3-70B-Instruct',
         'CohereForAI/c4ai-command-r-plus-08-2024',
+        'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
         'Qwen/QwQ-32B-Preview',
         'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
         'Qwen/Qwen2.5-Coder-32B-Instruct',
@@ -57,6 +58,7 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin):
         "qwen-2.5-72b": "Qwen/Qwen2.5-Coder-32B-Instruct",
         "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct",
         "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
+        "deepseek-r1": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
         "qwq-32b": "Qwen/QwQ-32B-Preview",
         "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
         "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct",
diff --git a/g4f/Provider/needs_auth/HuggingFaceAPI.py b/g4f/Provider/needs_auth/HuggingFaceAPI.py
index 1c1466d7..5c329965 100644
--- a/g4f/Provider/needs_auth/HuggingFaceAPI.py
+++ b/g4f/Provider/needs_auth/HuggingFaceAPI.py
@@ -10,6 +10,7 @@ class HuggingFaceAPI(OpenaiAPI):
     url = "https://api-inference.huggingface.com"
     api_base = "https://api-inference.huggingface.co/v1"
     working = True
+
     default_model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
     default_vision_model = default_model
 
diff --git a/g4f/Provider/needs_auth/OpenaiAccount.py b/g4f/Provider/needs_auth/OpenaiAccount.py
index 5e6c9449..b94c5e3b 100644
--- a/g4f/Provider/needs_auth/OpenaiAccount.py
+++ b/g4f/Provider/needs_auth/OpenaiAccount.py
@@ -5,8 +5,8 @@ from .OpenaiChat import OpenaiChat
 class OpenaiAccount(OpenaiChat):
     needs_auth = True
     parent = "OpenaiChat"
-    image_models = ["dall-e-3", "gpt-4", "gpt-4o"]
-    default_vision_model = "gpt-4o"
-    default_image_model = "dall-e-3"
+    default_model = "gpt-4o"
+    default_vision_model = default_model
+    default_image_model = OpenaiChat.default_image_model
+    image_models = [default_model, default_image_model, "gpt-4"]
     fallback_models = [*OpenaiChat.fallback_models, default_image_model]
-    model_aliases = {default_image_model: default_vision_model}
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 75396a87..5b18e759 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -95,7 +95,9 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
     supports_message_history = True
     supports_system_message = True
     default_model = "auto"
-    fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1", "o1-preview", "o1-mini"]
+    default_image_model = "dall-e-3"
+    image_models = [default_image_model]
+    fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1", "o1-preview", "o1-mini"] + image_models
     vision_models = fallback_models
     synthesize_content_type = "audio/mpeg"
 
@@ -734,4 +736,4 @@ def get_cookies(
         'params': params,
     }
     json = yield cmd_dict
-    return {c["name"]: c["value"] for c in json['cookies']} if 'cookies' in json else {}
\ No newline at end of file
+    return {c["name"]: c["value"] for c in json['cookies']} if 'cookies' in json else {}
diff --git a/g4f/Provider/needs_auth/PerplexityApi.py b/g4f/Provider/needs_auth/PerplexityApi.py
index 77d71c21..3d8aa9bc 100644
--- a/g4f/Provider/needs_auth/PerplexityApi.py
+++ b/g4f/Provider/needs_auth/PerplexityApi.py
@@ -11,7 +11,7 @@ class PerplexityApi(OpenaiAPI):
     default_model = "llama-3-sonar-large-32k-online"
     models = [
         "llama-3-sonar-small-32k-chat",
-        "llama-3-sonar-small-32k-online",
+        default_model,
         "llama-3-sonar-large-32k-chat",
         "llama-3-sonar-large-32k-online",
"llama-3-8b-instruct", diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py deleted file mode 100644 index a0ef7453..00000000 --- a/g4f/Provider/needs_auth/Poe.py +++ /dev/null @@ -1,115 +0,0 @@ -from __future__ import annotations - -import time - -from ...typing import CreateResult, Messages -from ..base_provider import AbstractProvider -from ..helper import format_prompt - -models = { - "meta-llama/Llama-2-7b-chat-hf": {"name": "Llama-2-7b"}, - "meta-llama/Llama-2-13b-chat-hf": {"name": "Llama-2-13b"}, - "meta-llama/Llama-2-70b-chat-hf": {"name": "Llama-2-70b"}, - "codellama/CodeLlama-7b-Instruct-hf": {"name": "Code-Llama-7b"}, - "codellama/CodeLlama-13b-Instruct-hf": {"name": "Code-Llama-13b"}, - "codellama/CodeLlama-34b-Instruct-hf": {"name": "Code-Llama-34b"}, - "gpt-3.5-turbo": {"name": "GPT-3.5-Turbo"}, - "gpt-3.5-turbo-instruct": {"name": "GPT-3.5-Turbo-Instruct"}, - "gpt-4": {"name": "GPT-4"}, - "palm": {"name": "Google-PaLM"}, -} - -class Poe(AbstractProvider): - url = "https://poe.com" - working = False - needs_auth = True - supports_stream = True - - models = models.keys() - - @classmethod - def create_completion( - cls, - model: str, - messages: Messages, - stream: bool, - proxy: str = None, - webdriver: WebDriver = None, - user_data_dir: str = None, - headless: bool = True, - **kwargs - ) -> CreateResult: - if not model: - model = "gpt-3.5-turbo" - elif model not in models: - raise ValueError(f"Model are not supported: {model}") - prompt = format_prompt(messages) - - session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy) - with session as driver: - from selenium.webdriver.common.by import By - from selenium.webdriver.support.ui import WebDriverWait - from selenium.webdriver.support import expected_conditions as EC - - driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", { - "source": """ - window._message = window._last_message = ""; - window._message_finished = false; - class ProxiedWebSocket extends WebSocket { - constructor(url, options) { - super(url, options); - this.addEventListener("message", (e) => { - const data = JSON.parse(JSON.parse(e.data)["messages"][0])["payload"]["data"]; - if ("messageAdded" in data) { - if (data["messageAdded"]["author"] != "human") { - window._message = data["messageAdded"]["text"]; - if (data["messageAdded"]["state"] == "complete") { - window._message_finished = true; - } - } - } - }); - } - } - window.WebSocket = ProxiedWebSocket; - """ - }) - - try: - driver.get(f"{cls.url}/{models[model]['name']}") - wait = WebDriverWait(driver, 10 if headless else 240) - wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']"))) - except: - # Reopen browser for login - if not webdriver: - driver = session.reopen() - driver.get(f"{cls.url}/{models[model]['name']}") - wait = WebDriverWait(driver, 240) - wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']"))) - else: - raise RuntimeError("Prompt textarea not found. 
-
-            element_send_text(driver.find_element(By.CSS_SELECTOR, "footer textarea[class^='GrowingTextArea']"), prompt)
-            driver.find_element(By.CSS_SELECTOR, "footer button[class*='ChatMessageSendButton']").click()
-
-            script = """
-if(window._message && window._message != window._last_message) {
-    try {
-        return window._message.substring(window._last_message.length);
-    } finally {
-        window._last_message = window._message;
-    }
-} else if(window._message_finished) {
-    return null;
-} else {
-    return '';
-}
-"""
-            while True:
-                chunk = driver.execute_script(script)
-                if chunk:
-                    yield chunk
-                elif chunk != "":
-                    break
-                else:
-                    time.sleep(0.1)
diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py
deleted file mode 100644
index 008fcad8..00000000
--- a/g4f/Provider/needs_auth/Raycast.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from __future__ import annotations
-
-import json
-
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import AbstractProvider
-
-
-class Raycast(AbstractProvider):
-    url = "https://raycast.com"
-    supports_stream = True
-    needs_auth = True
-    working = True
-
-    models = [
-        "gpt-3.5-turbo",
-        "gpt-4"
-    ]
-
-    @staticmethod
-    def create_completion(
-        model: str,
-        messages: Messages,
-        stream: bool,
-        proxy: str = None,
-        **kwargs,
-    ) -> CreateResult:
-        auth = kwargs.get('auth')
-        if not auth:
-            raise ValueError("Raycast needs an auth token, pass it with the `auth` parameter")
-
-        headers = {
-            'Accept': 'application/json',
-            'Accept-Language': 'en-US,en;q=0.9',
-            'Authorization': f'Bearer {auth}',
-            'Content-Type': 'application/json',
-            'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0',
-        }
-        parsed_messages = [
-            {'author': message['role'], 'content': {'text': message['content']}}
-            for message in messages
-        ]
-        data = {
-            "debug": False,
-            "locale": "en-CN",
-            "messages": parsed_messages,
-            "model": model,
-            "provider": "openai",
-            "source": "ai_chat",
-            "system_instruction": "markdown",
-            "temperature": 0.5
-        }
-        response = requests.post(
-            "https://backend.raycast.com/api/v1/ai/chat_completions",
-            headers=headers,
-            json=data,
-            stream=True,
-            proxies={"https": proxy}
-        )
-        for token in response.iter_lines():
-            if b'data: ' not in token:
-                continue
-            completion_chunk = json.loads(token.decode().replace('data: ', ''))
-            token = completion_chunk['text']
-            if token != None:
-                yield token
diff --git a/g4f/Provider/needs_auth/Replicate.py b/g4f/Provider/needs_auth/Replicate.py
index 3c9b23cd..328f701f 100644
--- a/g4f/Provider/needs_auth/Replicate.py
+++ b/g4f/Provider/needs_auth/Replicate.py
@@ -13,9 +13,7 @@ class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     needs_auth = True
     default_model = "meta/meta-llama-3-70b-instruct"
-    model_aliases = {
-        "meta-llama/Meta-Llama-3-70B-Instruct": default_model
-    }
+    models = [default_model]
 
     @classmethod
     async def create_async_generator(
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 03898013..426c9874 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -1,3 +1,4 @@
+from .Anthropic import Anthropic
 from .BingCreateImages import BingCreateImages
 from .Cerebras import Cerebras
 from .CopilotAccount import CopilotAccount
@@ -20,8 +21,6 @@ from .OpenaiAccount import OpenaiAccount
 from .OpenaiAPI import OpenaiAPI
 from .OpenaiChat import OpenaiChat
 from .PerplexityApi import PerplexityApi
-from .Poe import Poe
-from .Raycast import Raycast
 from .Reka import Reka
 from .Replicate import Replicate
 from .ThebApi import ThebApi
--
cgit v1.2.3
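For reference, the reworked DeepInfra image API in this patch can be exercised
roughly as follows. This is a sketch, not documented usage: it assumes a valid
DeepInfra key, and it assumes create_async_image accepts an api_key parameter
(the method body reads api_key, but that part of the signature falls outside
the hunks shown):

    import asyncio
    from g4f.Provider.needs_auth.DeepInfra import DeepInfra

    async def main():
        # Per the diff, create_async_image now returns a single ImageResponse,
        # while create_async_image_generator wraps it as an async generator.
        image = await DeepInfra.create_async_image(
            "a watercolor fox",
            DeepInfra.default_image_model,  # "stabilityai/sd3.5" in this patch
            api_key="YOUR_DEEPINFRA_KEY",   # hypothetical placeholder
        )
        print(image)

    asyncio.run(main())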