From bb9132bcb42a5f720398b65c721cc77957555863 Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Mon, 9 Dec 2024 15:52:25 +0000
Subject: Updating provider documentation and small fixes in providers (#2469)

* refactor(g4f/Provider/Airforce.py): improve model handling and filtering

- Add hidden_models set to exclude specific models
- Add evil alias for uncensored model handling
- Extend filtering for model-specific response tokens
- Add response buffering for streamed content
- Update model fetching with error handling

* refactor(g4f/Provider/Blackbox.py): improve caching and model handling

- Add caching system for validated values with file-based storage
- Rename 'flux' model to 'ImageGeneration' and update references
- Add temperature, top_p and max_tokens parameters to generator
- Simplify HTTP headers and remove redundant options
- Add model alias mapping for ImageGeneration
- Add file system utilities for cache management

* feat(g4f/Provider/RobocodersAPI.py): add caching and error handling

- Add file-based caching system for access tokens and sessions
- Add robust error handling with specific error messages
- Add automatic dialog continuation on resource limits
- Add HTML parsing with BeautifulSoup for token extraction
- Add debug logging for error tracking
- Add timeout configuration for API requests

* refactor(g4f/Provider/DarkAI.py): update DarkAI default model and aliases

- Change default model from llama-3-405b to llama-3-70b
- Remove llama-3-405b from supported models list
- Remove llama-3.1-405b from model aliases

* feat(g4f/Provider/Blackbox2.py): add image generation support

- Add image model 'flux' with dedicated API endpoint
- Refactor generator to support both text and image outputs
- Extract headers into reusable static method
- Add type hints for AsyncGenerator return type
- Split generation logic into _generate_text and _generate_image methods
- Add ImageResponse handling for image generation results

BREAKING CHANGE: create_async_generator now returns AsyncGenerator instead of AsyncResult

* refactor(g4f/Provider/ChatGptEs.py): update ChatGptEs model configuration

- Update models list to include gpt-3.5-turbo
- Remove chatgpt-4o-latest from supported models
- Remove model_aliases mapping for gpt-4o

* feat(g4f/Provider/DeepInfraChat.py): add Accept-Language header support

- Add Accept-Language header for internationalization
- Maintain existing header configuration
- Improve request compatibility with language preferences

* refactor(g4f/Provider/needs_auth/Gemini.py): add ProviderModelMixin inheritance

- Add ProviderModelMixin to class inheritance
- Import ProviderModelMixin from base_provider
- Move BaseConversation import to base_provider imports

* refactor(g4f/Provider/Liaobots.py): update model details and aliases

- Add version suffix to o1 model IDs
- Update model aliases for o1-preview and o1-mini
- Standardize version format across model definitions

* refactor(g4f/Provider/PollinationsAI.py): enhance model support and generation

- Split generation logic into dedicated image/text methods
- Add additional text models including sur and claude
- Add width/height parameters for image generation
- Add model existence validation
- Add hasattr checks for model lists initialization

* chore(gitignore): add provider cache directory

- Add g4f/Provider/.cache to gitignore patterns

* refactor(g4f/Provider/ReplicateHome.py): update model configuration

- Update default model to gemma-2b-it
- Add default_image_model configuration
- Remove llava-13b from supported models
- Simplify request headers
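
A minimal sketch of the file-based token caching described in the Blackbox and
RobocodersAPI entries above (the file name, layout and helper names here are
illustrative assumptions, not the providers' actual code; only the cache
directory matches the new .gitignore entry):

    import json, time
    from pathlib import Path

    # The PR ignores g4f/Provider/.cache, the natural home for files like this.
    CACHE_FILE = Path("g4f/Provider/.cache/example_auth.json")

    def load_cached_token(max_age: float = 3600.0):
        # Reuse a stored token while it is still fresh, otherwise return None
        # so the caller fetches and re-caches a new one.
        if CACHE_FILE.exists():
            data = json.loads(CACHE_FILE.read_text())
            if time.time() - data.get("created_at", 0) < max_age:
                return data.get("token")
        return None

    def save_token(token: str) -> None:
        # Store the token with a timestamp so freshness can be checked later.
        CACHE_FILE.parent.mkdir(parents=True, exist_ok=True)
        CACHE_FILE.write_text(json.dumps({"token": token, "created_at": time.time()}))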
* feat(g4f/models.py): expand provider and model support

- Add new providers DarkAI and PollinationsAI
- Add new models for Mistral, Flux and image generation
- Update provider lists for existing models
- Add P1 and Evil models with experimental providers

BREAKING CHANGE: Remove llava-13b model support

* refactor(g4f/Provider/Airforce.py): Update type hint for split_message return

- Change return type of 'split_message' from 'list[str]' to 'List[str]' for consistency with import.
- Maintain overall functionality and structure of the 'Airforce' class.
- Ensure compatibility with type hinting standards in Python.

* feat(g4f/Provider/RobocodersAPI.py): Add support for optional BeautifulSoup dependency

- Introduce a check for the BeautifulSoup library and handle its absence gracefully.
- Raise an error if BeautifulSoup is not installed, prompting the user to install it.
- Remove direct import of BeautifulSoup to avoid import errors when the library is missing.

* fix: Updating provider documentation and small fixes in providers

* Disabled the provider (RobocodersAPI)

* Fix: Conflicting file g4f/models.py

* Update g4f/models.py g4f/Provider/Airforce.py

* Update docs/providers-and-models.md g4f/models.py g4f/Provider/Airforce.py g4f/Provider/PollinationsAI.py

* Update docs/providers-and-models.md

* Update .gitignore

* Update g4f/models.py

* Update g4f/Provider/PollinationsAI.py

---------

Co-authored-by: kqlio67 <>
---
 g4f/Provider/needs_auth/Gemini.py         |   8 ++
 g4f/Provider/needs_auth/GeminiPro.py      |  10 +-
 g4f/Provider/needs_auth/GithubCopilot.py  |   6 +-
 g4f/Provider/needs_auth/HuggingChat.py    |  12 ++-
 g4f/Provider/needs_auth/HuggingFace.py    |   2 +-
 g4f/Provider/needs_auth/HuggingFace2.py   |  28 ------
 g4f/Provider/needs_auth/HuggingFaceAPI.py |  28 ++++++
 g4f/Provider/needs_auth/Poe.py            |   4 +-
 g4f/Provider/needs_auth/Raycast.py        |   2 -
 g4f/Provider/needs_auth/Reka.py           | 148 ++++++++++++++++++++++++++++++
 g4f/Provider/needs_auth/Theb.py           |   5 +-
 g4f/Provider/needs_auth/__init__.py       |   3 +-
 12 files changed, 212 insertions(+), 44 deletions(-)
 delete mode 100644 g4f/Provider/needs_auth/HuggingFace2.py
 create mode 100644 g4f/Provider/needs_auth/HuggingFaceAPI.py
 create mode 100644 g4f/Provider/needs_auth/Reka.py

(limited to 'g4f/Provider/needs_auth')

diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index 9127708c..b55a604b 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -51,14 +51,22 @@ UPLOAD_IMAGE_HEADERS = {
 }
 
 class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Google Gemini"
     url = "https://gemini.google.com"
+    needs_auth = True
     working = True
+
     default_model = 'gemini'
     image_models = ["gemini"]
     default_vision_model = "gemini"
     models = ["gemini", "gemini-1.5-flash", "gemini-1.5-pro"]
+    model_aliases = {
+        "gemini-flash": "gemini-1.5-flash",
+        "gemini-pro": "gemini-1.5-pro",
+    }
     synthesize_content_type = "audio/vnd.wav"
+
     _cookies: Cookies = None
     _snlm0e: str = None
     _sid: str = None
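
The model_aliases mapping added to Gemini above maps short public names to
internal model ids. A minimal sketch of how such a mapping is typically
resolved (an illustration of the pattern, not ProviderModelMixin's actual
implementation):

    def resolve_model(model: str, default_model: str,
                      models: list, model_aliases: dict) -> str:
        # Fall back to the provider default when no model was requested.
        if not model:
            return default_model
        # Translate an alias such as "gemini-flash" to its full id.
        if model in model_aliases:
            return model_aliases[model]
        if model in models:
            return model
        raise ValueError(f"Model is not supported: {model}")

    print(resolve_model("gemini-flash", "gemini",
                        ["gemini", "gemini-1.5-flash", "gemini-1.5-pro"],
                        {"gemini-flash": "gemini-1.5-flash"}))  # gemini-1.5-flash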
diff --git a/g4f/Provider/needs_auth/GeminiPro.py b/g4f/Provider/needs_auth/GeminiPro.py
index a7f1e0aa..d9204b25 100644
--- a/g4f/Provider/needs_auth/GeminiPro.py
+++ b/g4f/Provider/needs_auth/GeminiPro.py
@@ -11,14 +11,20 @@
 from ...errors import MissingAuthError
 from ..helper import get_connector
 
 class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "Gemini API"
+    label = "Google Gemini API"
     url = "https://ai.google.dev"
+
     working = True
     supports_message_history = True
     needs_auth = True
+
     default_model = "gemini-1.5-pro"
     default_vision_model = default_model
     models = [default_model, "gemini-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b"]
+    model_aliases = {
+        "gemini-flash": "gemini-1.5-flash",
+        "gemini-flash-8b": "gemini-1.5-flash-8b",
+    }
 
     @classmethod
     async def create_async_generator(
@@ -108,4 +114,4 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
                         if candidate["finishReason"] == "STOP":
                             yield candidate["content"]["parts"][0]["text"]
                         else:
-                            yield candidate["finishReason"] + ' ' + candidate["safetyRatings"]
\ No newline at end of file
+                            yield candidate["finishReason"] + ' ' + candidate["safetyRatings"]
diff --git a/g4f/Provider/needs_auth/GithubCopilot.py b/g4f/Provider/needs_auth/GithubCopilot.py
index 3eb66b5e..754c8d4e 100644
--- a/g4f/Provider/needs_auth/GithubCopilot.py
+++ b/g4f/Provider/needs_auth/GithubCopilot.py
@@ -16,10 +16,12 @@ class Conversation(BaseConversation):
         self.conversation_id = conversation_id
 
 class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://copilot.microsoft.com"
+    url = "https://github.com/copilot"
+
     working = True
     needs_auth = True
     supports_stream = True
+
     default_model = "gpt-4o"
     models = [default_model, "o1-mini", "o1-preview", "claude-3.5-sonnet"]
 
@@ -90,4 +92,4 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
                 if line.startswith(b"data: "):
                     data = json.loads(line[6:])
                     if data.get("type") == "content":
-                        yield data.get("body")
\ No newline at end of file
+                        yield data.get("body")
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 2f3dbb57..431273f6 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -24,16 +24,19 @@ class Conversation(BaseConversation):
 
 class HuggingChat(AbstractProvider, ProviderModelMixin):
     url = "https://huggingface.co/chat"
+
     working = True
     supports_stream = True
     needs_auth = True
+
     default_model = "Qwen/Qwen2.5-72B-Instruct"
+    default_image_model = "black-forest-labs/FLUX.1-dev"
     image_models = [
         "black-forest-labs/FLUX.1-dev"
     ]
     models = [
         default_model,
-        'meta-llama/Meta-Llama-3.1-70B-Instruct',
+        'meta-llama/Llama-3.3-70B-Instruct',
         'CohereForAI/c4ai-command-r-plus-08-2024',
         'Qwen/QwQ-32B-Preview',
         'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
@@ -45,8 +48,9 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
         *image_models
     ]
     model_aliases = {
+        ### Chat ###
         "qwen-2.5-72b": "Qwen/Qwen2.5-72B-Instruct",
-        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+        "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct",
         "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
         "qwq-32b": "Qwen/QwQ-32B-Preview",
         "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
@@ -55,6 +59,8 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
         "hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B",
         "mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407",
         "phi-3.5-mini": "microsoft/Phi-3.5-mini-instruct",
+
+        ### Image ###
         "flux-dev": "black-forest-labs/FLUX.1-dev",
     }
 
@@ -214,4 +220,4 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
 
             return data[message_keys["id"]]
         except (KeyError, IndexError, TypeError) as e:
-            raise RuntimeError(f"Failed to extract message ID: {str(e)}")
\ No newline at end of file
+            raise RuntimeError(f"Failed to extract message ID: {str(e)}")
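
The GithubCopilot loop above shows the SSE convention these streaming
providers share: each event line starts with "data: " followed by JSON. A
self-contained sketch of that parsing step (the "type" and "body" fields come
from the hunk above; the rest is illustrative):

    import json

    def iter_content_events(lines):
        # Yield the "body" of each content event from an SSE byte stream,
        # skipping keep-alives and non-content event types.
        for line in lines:
            if line.startswith(b"data: "):
                data = json.loads(line[6:])
                if data.get("type") == "content":
                    yield data.get("body")

    sample = [b'data: {"type": "content", "body": "Hel"}',
              b'data: {"type": "content", "body": "lo"}']
    print("".join(iter_content_events(sample)))  # Hello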
diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py
index 94530252..fd1da2a7 100644
--- a/g4f/Provider/needs_auth/HuggingFace.py
+++ b/g4f/Provider/needs_auth/HuggingFace.py
@@ -17,7 +17,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     supports_message_history = True
     default_model = HuggingChat.default_model
-    default_image_model = "black-forest-labs/FLUX.1-dev"
+    default_image_model = HuggingChat.default_image_model
     models = [*HuggingChat.models, default_image_model]
     image_models = [default_image_model]
     model_aliases = HuggingChat.model_aliases
diff --git a/g4f/Provider/needs_auth/HuggingFace2.py b/g4f/Provider/needs_auth/HuggingFace2.py
deleted file mode 100644
index 0bde770b..00000000
--- a/g4f/Provider/needs_auth/HuggingFace2.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from __future__ import annotations
-
-from .OpenaiAPI import OpenaiAPI
-from .HuggingChat import HuggingChat
-from ...typing import AsyncResult, Messages
-
-class HuggingFace2(OpenaiAPI):
-    label = "HuggingFace (Inference API)"
-    url = "https://huggingface.co"
-    working = True
-    default_model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
-    default_vision_model = default_model
-    models = [
-        *HuggingChat.models
-    ]
-
-    @classmethod
-    def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        api_base: str = "https://api-inference.huggingface.co/v1",
-        max_tokens: int = 500,
-        **kwargs
-    ) -> AsyncResult:
-        return super().create_async_generator(
-            model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs
-        )
diff --git a/g4f/Provider/needs_auth/HuggingFaceAPI.py b/g4f/Provider/needs_auth/HuggingFaceAPI.py
new file mode 100644
index 00000000..a93ab3a6
--- /dev/null
+++ b/g4f/Provider/needs_auth/HuggingFaceAPI.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+from .OpenaiAPI import OpenaiAPI
+from .HuggingChat import HuggingChat
+from ...typing import AsyncResult, Messages
+
+class HuggingFaceAPI(OpenaiAPI):
+    label = "HuggingFace (Inference API)"
+    url = "https://api-inference.huggingface.co"
+    working = True
+    default_model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
+    default_vision_model = default_model
+    models = [
+        *HuggingChat.models
+    ]
+
+    @classmethod
+    def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        api_base: str = "https://api-inference.huggingface.co/v1",
+        max_tokens: int = 500,
+        **kwargs
+    ) -> AsyncResult:
+        return super().create_async_generator(
+            model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs
+        )
diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py
index 65fdbef9..46b998e8 100644
--- a/g4f/Provider/needs_auth/Poe.py
+++ b/g4f/Provider/needs_auth/Poe.py
@@ -24,8 +24,8 @@ class Poe(AbstractProvider):
     url = "https://poe.com"
     working = True
     needs_auth = True
-    supports_gpt_35_turbo = True
     supports_stream = True
+
     models = models.keys()
 
     @classmethod
@@ -113,4 +113,4 @@ if(window._message && window._message != window._last_message) {
             elif chunk != "":
                 break
             else:
-                time.sleep(0.1)
\ No newline at end of file
+                time.sleep(0.1)
diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py
index b8ec5a97..008fcad8 100644
--- a/g4f/Provider/needs_auth/Raycast.py
+++ b/g4f/Provider/needs_auth/Raycast.py
@@ -10,8 +10,6 @@ from ..base_provider import AbstractProvider
 
 class Raycast(AbstractProvider):
     url = "https://raycast.com"
-    supports_gpt_35_turbo = True
-    supports_gpt_4 = True
     supports_stream = True
     needs_auth = True
     working = True
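
Callers importing HuggingFace2 must switch to HuggingFaceAPI after the rename
above. A hedged usage sketch through g4f's client interface (the model id is
taken from the new file; passing api_key as a keyword is an assumption about
the OpenaiAPI base class):

    from g4f.client import Client
    from g4f.Provider.needs_auth import HuggingFaceAPI

    client = Client(provider=HuggingFaceAPI)
    response = client.chat.completions.create(
        model="meta-llama/Llama-3.2-11B-Vision-Instruct",
        messages=[{"role": "user", "content": "Say hello."}],
        api_key="hf_...",  # your Hugging Face access token
    )
    print(response.choices[0].message.content)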
diff --git a/g4f/Provider/needs_auth/Reka.py b/g4f/Provider/needs_auth/Reka.py
new file mode 100644
index 00000000..780ff31e
--- /dev/null
+++ b/g4f/Provider/needs_auth/Reka.py
@@ -0,0 +1,148 @@
+from __future__ import annotations
+
+import os, requests, time, json
+from ...typing import CreateResult, Messages, ImageType
+from ..base_provider import AbstractProvider
+from ...cookies import get_cookies
+from ...image import to_bytes
+
+class Reka(AbstractProvider):
+    url = "https://chat.reka.ai/"
+    working = True
+    needs_auth = True
+    supports_stream = True
+    default_vision_model = "reka"
+    cookies = {}
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        proxy: str = None,
+        api_key: str = None,
+        image: ImageType = None,
+        **kwargs
+    ) -> CreateResult:
+        cls.proxy = proxy
+
+        if not api_key:
+            cls.cookies = get_cookies("chat.reka.ai")
+            if not cls.cookies:
+                raise ValueError("No cookies found for chat.reka.ai")
+            elif "appSession" not in cls.cookies:
+                raise ValueError("No appSession found in cookies for chat.reka.ai, log in or provide bearer_auth")
+            api_key = cls.get_access_token(cls)
+
+        conversation = []
+        for message in messages:
+            conversation.append({
+                "type": "human",
+                "text": message["content"],
+            })
+
+        if image:
+            image_url = cls.upload_image(cls, api_key, image)
+            conversation[-1]["image_url"] = image_url
+            conversation[-1]["media_type"] = "image"
+
+        headers = {
+            'accept': '*/*',
+            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'authorization': f'Bearer {api_key}',
+            'cache-control': 'no-cache',
+            'content-type': 'application/json',
+            'origin': 'https://chat.reka.ai',
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+        }
+
+        json_data = {
+            'conversation_history': conversation,
+            'stream': True,
+            'use_search_engine': False,
+            'use_code_interpreter': False,
+            'model_name': 'reka-core',
+            'random_seed': int(time.time() * 1000),
+        }
+
+        tokens = ''
+
+        response = requests.post('https://chat.reka.ai/api/chat',
+            cookies=cls.cookies, headers=headers, json=json_data, proxies=cls.proxy, stream=True)
+
+        for completion in response.iter_lines():
+            if b'data' in completion:
+                token_data = json.loads(completion.decode('utf-8')[5:])['text']
+
+                yield (token_data.replace(tokens, ''))
+
+                tokens = token_data
+
+    def upload_image(cls, access_token, image: ImageType) -> str:
+        boundary_token = os.urandom(8).hex()
+
+        headers = {
+            'accept': '*/*',
+            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'cache-control': 'no-cache',
+            'authorization': f'Bearer {access_token}',
+            'content-type': f'multipart/form-data; boundary=----WebKitFormBoundary{boundary_token}',
+            'origin': 'https://chat.reka.ai',
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'referer': 'https://chat.reka.ai/chat/hPReZExtDOPvUfF8vCPC',
+            'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+        }
+
+        image_data = to_bytes(image)
+
+        boundary = f'----WebKitFormBoundary{boundary_token}'
+        data = f'--{boundary}\r\nContent-Disposition: form-data; name="image"; filename="image.png"\r\nContent-Type: image/png\r\n\r\n'
+        data += image_data.decode('latin-1')
+        data += f'\r\n--{boundary}--\r\n'
+
+        response = requests.post('https://chat.reka.ai/api/upload-image',
+            cookies=cls.cookies, headers=headers, proxies=cls.proxy, data=data.encode('latin-1'))
+
+        return response.json()['media_url']
+
+    def get_access_token(cls):
+        headers = {
+            'accept': '*/*',
+            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'cache-control': 'no-cache',
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'referer': 'https://chat.reka.ai/chat',
+            'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+        }
+
+        try:
+            response = requests.get('https://chat.reka.ai/bff/auth/access_token',
+                cookies=cls.cookies, headers=headers, proxies=cls.proxy)
+
+            return response.json()['accessToken']
+
+        except Exception as e:
+            raise ValueError(f"Failed to get access token: {e}, refresh your cookies / log in into chat.reka.ai")
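
Reka's stream above sends the cumulative text on every event and recovers the
delta with token_data.replace(tokens, ''); since str.replace removes every
occurrence, slicing by the length already seen is the safer variant (a sketch,
not part of the patch):

    def cumulative_to_deltas(texts):
        # Each event carries the full text so far; emit only the new suffix.
        seen = ""
        for text in texts:
            yield text[len(seen):]
            seen = text

    print(list(cumulative_to_deltas(["He", "Hello", "Hello!"])))  # ['He', 'llo', '!']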
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py
index c7d7d58e..7d3de027 100644
--- a/g4f/Provider/needs_auth/Theb.py
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -35,9 +35,8 @@ class Theb(AbstractProvider):
     label = "TheB.AI"
     url = "https://beta.theb.ai"
     working = True
-    supports_gpt_35_turbo = True
-    supports_gpt_4 = True
     supports_stream = True
+
     models = models.keys()
 
     @classmethod
@@ -155,4 +154,4 @@ return '';
             elif chunk != "":
                 break
             else:
-                time.sleep(0.1)
\ No newline at end of file
+                time.sleep(0.1)
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index c67dfb56..d79e7e3d 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -11,7 +11,7 @@ from .GithubCopilot import GithubCopilot
 from .Groq import Groq
 from .HuggingChat import HuggingChat
 from .HuggingFace import HuggingFace
-from .HuggingFace2 import HuggingFace2
+from .HuggingFaceAPI import HuggingFaceAPI
 from .MetaAI import MetaAI
 from .MetaAIAccount import MetaAIAccount
 from .MicrosoftDesigner import MicrosoftDesigner
@@ -21,6 +21,7 @@ from .OpenaiChat import OpenaiChat
 from .PerplexityApi import PerplexityApi
 from .Poe import Poe
 from .Raycast import Raycast
+from .Reka import Reka
 from .Replicate import Replicate
 from .Theb import Theb
 from .ThebApi import ThebApi
-- 
cgit v1.2.3
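
With Reka registered in __init__.py above, it can be selected like any other
needs_auth provider. A hedged usage sketch (model_name 'reka-core' is
hardcoded in the provider, the cookie requirement mirrors the checks in
create_completion, and the image keyword pass-through is an assumption):

    from g4f.client import Client
    from g4f.Provider.needs_auth import Reka

    # Requires a logged-in chat.reka.ai session (an appSession cookie)
    # or an explicit api_key, as enforced in create_completion above.
    client = Client(provider=Reka)
    response = client.chat.completions.create(
        model="reka-core",
        messages=[{"role": "user", "content": "What can you see?"}],
        image=open("photo.png", "rb"),  # optional vision input
    )
    print(response.choices[0].message.content)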