diff options
Diffstat (limited to 'g4f/Provider')
95 files changed, 2491 insertions, 2321 deletions
diff --git a/g4f/Provider/AIUncensored.py b/g4f/Provider/AIUncensored.py new file mode 100644 index 00000000..c2f0f4b3 --- /dev/null +++ b/g4f/Provider/AIUncensored.py @@ -0,0 +1,132 @@ +from __future__ import annotations + +import json +import random +from aiohttp import ClientSession, ClientError +import asyncio +from itertools import cycle + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..image import ImageResponse + +class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://www.aiuncensored.info/ai_uncensored" + api_endpoints_text = [ + "https://twitterclone-i0wr.onrender.com/api/chat", + "https://twitterclone-4e8t.onrender.com/api/chat", + "https://twitterclone-8wd1.onrender.com/api/chat", + ] + api_endpoints_image = [ + "https://twitterclone-4e8t.onrender.com/api/image", + "https://twitterclone-i0wr.onrender.com/api/image", + "https://twitterclone-8wd1.onrender.com/api/image", + ] + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'TextGenerations' + text_models = [default_model] + image_models = ['ImageGenerations'] + models = [*text_models, *image_models] + + model_aliases = { + "flux": "ImageGenerations", + } + + @staticmethod + def generate_cipher() -> str: + """Generate a cipher in format like '3221229284179118'""" + return ''.join([str(random.randint(0, 9)) for _ in range(16)]) + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'no-cache', + 'content-type': 
'application/json', + 'origin': 'https://www.aiuncensored.info', + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'referer': 'https://www.aiuncensored.info/', + 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'cross-site', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36' + } + + async with ClientSession(headers=headers) as session: + if model in cls.image_models: + prompt = messages[-1]['content'] + data = { + "prompt": prompt, + "cipher": cls.generate_cipher() + } + + endpoints = cycle(cls.api_endpoints_image) + + while True: + endpoint = next(endpoints) + try: + async with session.post(endpoint, json=data, proxy=proxy, timeout=10) as response: + response.raise_for_status() + response_data = await response.json() + image_url = response_data['image_url'] + image_response = ImageResponse(images=image_url, alt=prompt) + yield image_response + return + except (ClientError, asyncio.TimeoutError): + continue + + elif model in cls.text_models: + data = { + "messages": messages, + "cipher": cls.generate_cipher() + } + + endpoints = cycle(cls.api_endpoints_text) + + while True: + endpoint = next(endpoints) + try: + async with session.post(endpoint, json=data, proxy=proxy, timeout=10) as response: + response.raise_for_status() + full_response = "" + async for line in response.content: + line = line.decode('utf-8') + if line.startswith("data: "): + try: + json_str = line[6:] + if json_str != "[DONE]": + data = json.loads(json_str) + if "data" in data: + full_response += data["data"] + yield data["data"] + except json.JSONDecodeError: + continue + return + except (ClientError, asyncio.TimeoutError): + continue diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py index 88896096..c7ae44c0 100644 --- a/g4f/Provider/Airforce.py +++ 
b/g4f/Provider/Airforce.py @@ -1,255 +1,59 @@ from __future__ import annotations +from typing import Any, Dict +import inspect -from aiohttp import ClientSession, ClientResponseError -from urllib.parse import urlencode -import json -import io -import asyncio +from aiohttp import ClientSession from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..image import ImageResponse, is_accepted_format from .helper import format_prompt +from .airforce.AirforceChat import AirforceChat +from .airforce.AirforceImage import AirforceImage class Airforce(AsyncGeneratorProvider, ProviderModelMixin): url = "https://api.airforce" - text_api_endpoint = "https://api.airforce/chat/completions" - image_api_endpoint = "https://api.airforce/v1/imagine2" + api_endpoint_completions = AirforceChat.api_endpoint + api_endpoint_imagine2 = AirforceImage.api_endpoint working = True - supports_gpt_35_turbo = True - supports_gpt_4 = True - supports_stream = True - supports_system_message = True - supports_message_history = True - default_model = 'llama-3-70b-chat' - text_models = [ - # Open source models - 'llama-2-13b-chat', - - 'llama-3-70b-chat', - 'llama-3-70b-chat-turbo', - 'llama-3-70b-chat-lite', - - 'llama-3-8b-chat', - 'llama-3-8b-chat-turbo', - 'llama-3-8b-chat-lite', - - 'llama-3.1-405b-turbo', - 'llama-3.1-70b-turbo', - 'llama-3.1-8b-turbo', - - 'LlamaGuard-2-8b', - 'Llama-Guard-7b', - 'Meta-Llama-Guard-3-8B', - - 'Mixtral-8x7B-Instruct-v0.1', - 'Mixtral-8x22B-Instruct-v0.1', - 'Mistral-7B-Instruct-v0.1', - 'Mistral-7B-Instruct-v0.2', - 'Mistral-7B-Instruct-v0.3', - - 'Qwen1.5-72B-Chat', - 'Qwen1.5-110B-Chat', - 'Qwen2-72B-Instruct', - - 'gemma-2b-it', - 'gemma-2-9b-it', - 'gemma-2-27b-it', - - 'dbrx-instruct', - - 'deepseek-llm-67b-chat', - - 'Nous-Hermes-2-Mixtral-8x7B-DPO', - 'Nous-Hermes-2-Yi-34B', - - 'WizardLM-2-8x22B', - - 'SOLAR-10.7B-Instruct-v1.0', - - 'StripedHyena-Nous-7B', - - 'sparkdesk', - - - # Other models 
- 'chatgpt-4o-latest', - 'gpt-4', - 'gpt-4-turbo', - 'gpt-4o-mini-2024-07-18', - 'gpt-4o-mini', - 'gpt-4o', - 'gpt-3.5-turbo', - 'gpt-3.5-turbo-0125', - 'gpt-3.5-turbo-1106', - 'gpt-3.5-turbo-16k', - 'gpt-3.5-turbo-0613', - 'gpt-3.5-turbo-16k-0613', - - 'gemini-1.5-flash', - 'gemini-1.5-pro', - ] - image_models = [ - 'flux', - 'flux-realism', - 'flux-anime', - 'flux-3d', - 'flux-disney', - 'flux-pixel', - 'any-dark', - ] + supports_stream = AirforceChat.supports_stream + supports_system_message = AirforceChat.supports_system_message + supports_message_history = AirforceChat.supports_message_history + + default_model = AirforceChat.default_model + models = [*AirforceChat.models, *AirforceImage.models] - models = [ - *text_models, - *image_models - ] model_aliases = { - # Open source models - "llama-2-13b": "llama-2-13b-chat", - - "llama-3-70b": "llama-3-70b-chat", - "llama-3-70b": "llama-3-70b-chat-turbo", - "llama-3-70b": "llama-3-70b-chat-lite", - - "llama-3-8b": "llama-3-8b-chat", - "llama-3-8b": "llama-3-8b-chat-turbo", - "llama-3-8b": "llama-3-8b-chat-lite", - - "llama-3.1-405b": "llama-3.1-405b-turbo", - "llama-3.1-70b": "llama-3.1-70b-turbo", - "llama-3.1-8b": "llama-3.1-8b-turbo", - - "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1", - "mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1", - "mistral-7b": "Mistral-7B-Instruct-v0.1", - "mistral-7b": "Mistral-7B-Instruct-v0.2", - "mistral-7b": "Mistral-7B-Instruct-v0.3", - - "mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO", - - "qwen-1-5-72b": "Qwen1.5-72B-Chat", - "qwen-1_5-110b": "Qwen1.5-110B-Chat", - "qwen-2-72b": "Qwen2-72B-Instruct", - - "gemma-2b": "gemma-2b-it", - "gemma-2b-9b": "gemma-2-9b-it", - "gemma-2b-27b": "gemma-2-27b-it", - - "deepseek": "deepseek-llm-67b-chat", - - "yi-34b": "Nous-Hermes-2-Yi-34B", - - "wizardlm-2-8x22b": "WizardLM-2-8x22B", - - "solar-10-7b": "SOLAR-10.7B-Instruct-v1.0", - - "sh-n-7b": "StripedHyena-Nous-7B", - - "sparkdesk-v1.1": "sparkdesk", - - - # Other models - "gpt-4o": 
"chatgpt-4o-latest", - "gpt-4o-mini": "gpt-4o-mini-2024-07-18", - - "gpt-3.5-turbo": "gpt-3.5-turbo-0125", - "gpt-3.5-turbo": "gpt-3.5-turbo-1106", - "gpt-3.5-turbo": "gpt-3.5-turbo-16k", - "gpt-3.5-turbo": "gpt-3.5-turbo-0613", - "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613", - - - "gemini-flash": "gemini-1.5-flash", - "gemini-pro": "gemini-1.5-pro", + **AirforceChat.model_aliases, + **AirforceImage.model_aliases } @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator(cls, model: str, messages: Messages, **kwargs) -> AsyncResult: model = cls.get_model(model) - headers = { - "accept": "*/*", - "accept-language": "en-US,en;q=0.9", - "content-type": "application/json", - "origin": "https://api.airforce", - "sec-ch-ua": '"Chromium";v="128", "Not(A:Brand";v="24"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "cross-site", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" - } + provider = AirforceChat if model in AirforceChat.text_models else AirforceImage - - if model in cls.image_models: - async for item in cls.generate_image(model, messages, headers, proxy, **kwargs): - yield item - else: - async for item in cls.generate_text(model, messages, headers, proxy, **kwargs): - yield item + if model not in provider.models: + raise ValueError(f"Unsupported model: {model}") - @classmethod - async def generate_text(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult: - async with ClientSession(headers=headers) as session: - data = { - "messages": [{"role": 
"user", "content": format_prompt(messages)}], - "model": model, - "temperature": kwargs.get('temperature', 1), - "top_p": kwargs.get('top_p', 1), - "stream": True - } - - async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - async for line in response.content: - if line: - line = line.decode('utf-8').strip() - if line.startswith("data: "): - try: - data = json.loads(line[6:]) - if 'choices' in data and len(data['choices']) > 0: - delta = data['choices'][0].get('delta', {}) - if 'content' in delta: - yield delta['content'] - except json.JSONDecodeError: - continue - elif line == "data: [DONE]": - break + # Get the signature of the provider's create_async_generator method + sig = inspect.signature(provider.create_async_generator) + + # Filter kwargs to only include parameters that the provider's method accepts + filtered_kwargs = {k: v for k, v in kwargs.items() if k in sig.parameters} - @classmethod - async def generate_image(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult: - prompt = messages[-1]['content'] if messages else "" - params = { - "prompt": prompt, - "size": kwargs.get("size", "1:1"), - "seed": kwargs.get("seed"), - "model": model - } - params = {k: v for k, v in params.items() if v is not None} + # Add model and messages to filtered_kwargs + filtered_kwargs['model'] = model + filtered_kwargs['messages'] = messages - try: - async with ClientSession(headers=headers) as session: - async with session.get(cls.image_api_endpoint, params=params, proxy=proxy) as response: - response.raise_for_status() - content = await response.read() - - if response.content_type.startswith('image/'): - image_url = str(response.url) - yield ImageResponse(image_url, prompt) - else: - try: - text = content.decode('utf-8', errors='ignore') - yield f"Error: {text}" - except Exception as decode_error: - yield f"Error: Unable to decode response - {str(decode_error)}" - except 
ClientResponseError as e: - yield f"Error: HTTP {e.status}: {e.message}" - except Exception as e: - yield f"Unexpected error: {str(e)}" + async for result in provider.create_async_generator(**filtered_kwargs): + yield result diff --git a/g4f/Provider/Allyfy.py b/g4f/Provider/Allyfy.py deleted file mode 100644 index 8733b1ec..00000000 --- a/g4f/Provider/Allyfy.py +++ /dev/null @@ -1,71 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession -import json - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider -from .helper import format_prompt - - -class Allyfy(AsyncGeneratorProvider): - url = "https://chatbot.allyfy.chat" - api_endpoint = "/api/v1/message/stream/super/chat" - working = True - supports_gpt_35_turbo = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - headers = { - "accept": "text/event-stream", - "accept-language": "en-US,en;q=0.9", - "content-type": "application/json;charset=utf-8", - "dnt": "1", - "origin": "https://www.allyfy.chat", - "priority": "u=1, i", - "referer": "https://www.allyfy.chat/", - "referrer": "https://www.allyfy.chat", - 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-site", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36" - } - async with ClientSession(headers=headers) as session: - prompt = format_prompt(messages) - data = { - "messages": [{"content": prompt, "role": "user"}], - "content": prompt, - "baseInfo": { - "clientId": "q08kdrde1115003lyedfoir6af0yy531", - "pid": "38281", - "channelId": "100000", - "locale": "en-US", - "localZone": 180, - "packageName": "com.cch.allyfy.webh", - } - } - async with 
session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response: - response.raise_for_status() - full_response = [] - async for line in response.content: - line = line.decode().strip() - if line.startswith("data:"): - data_content = line[5:] - if data_content == "[DONE]": - break - try: - json_data = json.loads(data_content) - if "content" in json_data: - full_response.append(json_data["content"]) - except json.JSONDecodeError: - continue - yield "".join(full_response) diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py index 4056f9ff..cdc2b9d9 100644 --- a/g4f/Provider/Bing.py +++ b/g4f/Provider/Bing.py @@ -17,7 +17,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from .helper import get_random_hex from .bing.upload_image import upload_image from .bing.conversation import Conversation, create_conversation, delete_conversation -from .BingCreateImages import BingCreateImages +from .needs_auth.BingCreateImages import BingCreateImages from .. 
import debug class Tones: @@ -37,7 +37,6 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin): url = "https://bing.com/chat" working = True supports_message_history = True - supports_gpt_4 = True default_model = "Balanced" default_vision_model = "gpt-4-vision" models = [getattr(Tones, key) for key in Tones.__dict__ if not key.startswith("__")] diff --git a/g4f/Provider/Binjie.py b/g4f/Provider/Binjie.py deleted file mode 100644 index 90f9ec3c..00000000 --- a/g4f/Provider/Binjie.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import annotations - -import random -from ..requests import StreamSession - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, format_prompt - - -class Binjie(AsyncGeneratorProvider): - url = "https://chat18.aichatos8.com" - working = True - supports_gpt_4 = True - supports_stream = True - supports_system_message = True - supports_message_history = True - - @staticmethod - async def create_async_generator( - model: str, - messages: Messages, - proxy: str = None, - timeout: int = 120, - **kwargs, - ) -> AsyncResult: - async with StreamSession( - headers=_create_header(), proxies={"https": proxy}, timeout=timeout - ) as session: - payload = _create_payload(messages, **kwargs) - async with session.post("https://api.binjie.fun/api/generateStream", json=payload) as response: - response.raise_for_status() - async for chunk in response.iter_content(): - if chunk: - chunk = chunk.decode() - if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk: - raise RuntimeError("IP address is blocked by abuse detection.") - yield chunk - - -def _create_header(): - return { - "accept" : "application/json, text/plain, */*", - "content-type" : "application/json", - "origin" : "https://chat18.aichatos8.com", - "referer" : "https://chat18.aichatos8.com/" - } - - -def _create_payload( - messages: Messages, - system_message: str = "", - user_id: int = None, - **kwargs -): - if not user_id: - user_id = random.randint(1690000544336, 
2093025544336) - return { - "prompt": format_prompt(messages), - "network": True, - "system": system_message, - "withoutContext": False, - "stream": True, - "userId": f"#/chat/{user_id}" - } - diff --git a/g4f/Provider/Bixin123.py b/g4f/Provider/Bixin123.py deleted file mode 100644 index 39422c93..00000000 --- a/g4f/Provider/Bixin123.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession -import json -import random -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..typing import AsyncResult, Messages -from .helper import format_prompt - -class Bixin123(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://chat.bixin123.com" - api_endpoint = "https://chat.bixin123.com/api/chatgpt/chat-process" - working = True - supports_gpt_35_turbo = True - supports_gpt_4 = True - - default_model = 'gpt-3.5-turbo-0125' - models = ['gpt-3.5-turbo-0125', 'gpt-3.5-turbo-16k-0613', 'gpt-4-turbo', 'qwen-turbo'] - - model_aliases = { - "gpt-3.5-turbo": "gpt-3.5-turbo-0125", - "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - def generate_fingerprint(cls) -> str: - return str(random.randint(100000000, 999999999)) - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "accept": "application/json, text/plain, */*", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - "content-type": "application/json", - "fingerprint": cls.generate_fingerprint(), - "origin": cls.url, - "pragma": "no-cache", - "priority": "u=1, i", - "referer": f"{cls.url}/chat", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', - "sec-ch-ua-mobile": "?0", 
- "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36", - "x-website-domain": "chat.bixin123.com", - } - - async with ClientSession(headers=headers) as session: - prompt = format_prompt(messages) - data = { - "prompt": prompt, - "options": { - "usingNetwork": False, - "file": "" - } - } - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - response_text = await response.text() - - lines = response_text.strip().split("\n") - last_json = None - for line in reversed(lines): - try: - last_json = json.loads(line) - break - except json.JSONDecodeError: - pass - - if last_json: - text = last_json.get("text", "") - yield text - else: - yield "" diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py index e607a43c..8d820344 100644 --- a/g4f/Provider/Blackbox.py +++ b/g4f/Provider/Blackbox.py @@ -1,41 +1,143 @@ from __future__ import annotations -import re -import json +from aiohttp import ClientSession import random import string -from aiohttp import ClientSession +import json +import re +import aiohttp from ..typing import AsyncResult, Messages, ImageType -from ..image import ImageResponse, to_data_uri from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..image import ImageResponse, to_data_uri class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): + label = "Blackbox AI" url = "https://www.blackbox.ai" api_endpoint = "https://www.blackbox.ai/api/chat" working = True supports_stream = True supports_system_message = True supports_message_history = True + _last_validated_value = None + + default_model = 'blackboxai' + + image_models = ['Image Generation', 'repomap'] + + userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro'] - default_model = 'blackbox' - models 
= [ - 'blackbox', - 'gemini-1.5-flash', - "llama-3.1-8b", - 'llama-3.1-70b', - 'llama-3.1-405b', - 'ImageGenerationLV45LJp' - ] - - model_config = { - "blackbox": {}, + agentMode = { + 'Image Generation': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"}, + } + + trendingAgentMode = { "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'}, "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"}, 'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"}, - 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}, - 'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"}, + 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"}, + # + 'Python Agent': {'mode': True, 'id': "Python Agent"}, + 'Java Agent': {'mode': True, 'id': "Java Agent"}, + 'JavaScript Agent': {'mode': True, 'id': "JavaScript Agent"}, + 'HTML Agent': {'mode': True, 'id': "HTML Agent"}, + 'Google Cloud Agent': {'mode': True, 'id': "Google Cloud Agent"}, + 'Android Developer': {'mode': True, 'id': "Android Developer"}, + 'Swift Developer': {'mode': True, 'id': "Swift Developer"}, + 'Next.js Agent': {'mode': True, 'id': "Next.js Agent"}, + 'MongoDB Agent': {'mode': True, 'id': "MongoDB Agent"}, + 'PyTorch Agent': {'mode': True, 'id': "PyTorch Agent"}, + 'React Agent': {'mode': True, 'id': "React Agent"}, + 'Xcode Agent': {'mode': True, 'id': "Xcode Agent"}, + 'AngularJS Agent': {'mode': True, 'id': "AngularJS Agent"}, + 'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"}, + # + 'repomap': {'mode': True, 'id': "repomap"}, + # + 'Heroku Agent': {'mode': True, 'id': "Heroku Agent"}, + 'Godot Agent': {'mode': True, 'id': "Godot Agent"}, + 'Go Agent': {'mode': True, 'id': "Go Agent"}, + 'Gitlab Agent': {'mode': True, 'id': "Gitlab Agent"}, + 'Git Agent': {'mode': True, 'id': "Git Agent"}, + 'Flask Agent': {'mode': True, 'id': "Flask Agent"}, + 'Firebase Agent': {'mode': True, 'id': "Firebase Agent"}, + 'FastAPI Agent': {'mode': True, 'id': 
"FastAPI Agent"}, + 'Erlang Agent': {'mode': True, 'id': "Erlang Agent"}, + 'Electron Agent': {'mode': True, 'id': "Electron Agent"}, + 'Docker Agent': {'mode': True, 'id': "Docker Agent"}, + 'DigitalOcean Agent': {'mode': True, 'id': "DigitalOcean Agent"}, + 'Bitbucket Agent': {'mode': True, 'id': "Bitbucket Agent"}, + 'Azure Agent': {'mode': True, 'id': "Azure Agent"}, + 'Flutter Agent': {'mode': True, 'id': "Flutter Agent"}, + 'Youtube Agent': {'mode': True, 'id': "Youtube Agent"}, + 'builder Agent': {'mode': True, 'id': "builder Agent"}, } + + model_prefixes = {mode: f"@{value['id']}" for mode, value in trendingAgentMode.items() if mode not in ["gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-70b", "llama-3.1-405b", "repomap"]} + + + models = [default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())] + + model_aliases = { + "gemini-flash": "gemini-1.5-flash", + "claude-3.5-sonnet": "claude-sonnet-3.5", + "flux": "Image Generation", + } + + @classmethod + async def fetch_validated(cls): + # If the key is already stored in memory, return it + if cls._last_validated_value: + return cls._last_validated_value + + # If the key is not found, perform a search + async with aiohttp.ClientSession() as session: + try: + async with session.get(cls.url) as response: + if response.status != 200: + print("Failed to load the page.") + return cls._last_validated_value + + page_content = await response.text() + js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content) + + key_pattern = re.compile(r'w="([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})"') + + for js_file in js_files: + js_url = f"{cls.url}/_next/{js_file}" + async with session.get(js_url) as js_response: + if js_response.status == 200: + js_content = await js_response.text() + match = key_pattern.search(js_content) + if match: + validated_value = match.group(1) + cls._last_validated_value = validated_value # Keep in mind + return 
validated_value + except Exception as e: + print(f"Error fetching validated value: {e}") + + return cls._last_validated_value + + + @staticmethod + def generate_id(length=7): + characters = string.ascii_letters + string.digits + return ''.join(random.choice(characters) for _ in range(length)) + + @classmethod + def add_prefix_to_messages(cls, messages: Messages, model: str) -> Messages: + prefix = cls.model_prefixes.get(model, "") + if not prefix: + return messages + + new_messages = [] + for message in messages: + new_message = message.copy() + if message['role'] == 'user': + new_message['content'] = (prefix + " " + message['content']).strip() + new_messages.append(new_message) + + return new_messages @classmethod def get_model(cls, model: str) -> str: @@ -52,76 +154,90 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): model: str, messages: Messages, proxy: str = None, + web_search: bool = False, image: ImageType = None, image_name: str = None, **kwargs ) -> AsyncResult: model = cls.get_model(model) - + message_id = cls.generate_id() + messages_with_prefix = cls.add_prefix_to_messages(messages, model) + validated_value = await cls.fetch_validated() + + if image is not None: + messages_with_prefix[-1]['data'] = { + 'fileText': '', + 'imageBase64': to_data_uri(image), + 'title': image_name + } + headers = { - "accept": "*/*", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - "content-type": "application/json", - "origin": cls.url, - "pragma": "no-cache", - "referer": f"{cls.url}/", - "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'no-cache', + 'content-type': 
'application/json', + 'origin': cls.url, + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'referer': f'{cls.url}/', + 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-origin', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36' } - async with ClientSession(headers=headers) as session: - if image is not None: - messages[-1]["data"] = { - "fileText": image_name, - "imageBase64": to_data_uri(image) - } - - random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7)) - - data = { - "messages": messages, - "id": random_id, - "previewToken": None, - "userId": None, - "codeModelMode": True, - "agentMode": {}, - "trendingAgentMode": {}, - "isMicMode": False, - "maxTokens": None, - "isChromeExt": False, - "githubToken": None, - "clickedAnswer2": False, - "clickedAnswer3": False, - "clickedForceWebSearch": False, - "visitFromDelta": False, - "mobileClient": False - } + data = { + "messages": messages_with_prefix, + "id": message_id, + "previewToken": None, + "userId": None, + "codeModelMode": True, + "agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {}, + "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {}, + "isMicMode": False, + "userSystemPrompt": None, + "maxTokens": 1024, + "playgroundTopP": 0.9, + "playgroundTemperature": 0.5, + "isChromeExt": False, + "githubToken": None, + "clickedAnswer2": False, + "clickedAnswer3": False, + "clickedForceWebSearch": False, + "visitFromDelta": False, + "mobileClient": False, + "userSelectedModel": model if model in cls.userSelectedModel else None, + "webSearchMode": web_search, + "validated": validated_value, + } - if model == 'ImageGenerationLV45LJp': - data["agentMode"] = cls.model_config[model] - else: - 
data["trendingAgentMode"] = cls.model_config[model] - + async with ClientSession(headers=headers) as session: async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: response.raise_for_status() - if model == 'ImageGenerationLV45LJp': - response_text = await response.text() - url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text) - if url_match: - image_url = url_match.group(0) - yield ImageResponse(image_url, alt=messages[-1]['content']) - else: - raise Exception("Image URL not found in the response") + response_text = await response.text() + + if model in cls.image_models: + image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text) + if image_matches: + image_url = image_matches[0] + image_response = ImageResponse(images=[image_url], alt="Generated Image") + yield image_response + return + + response_text = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', response_text, flags=re.DOTALL) + + json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL) + if json_match: + search_results = json.loads(json_match.group(1)) + answer = response_text.split('$~~~$')[-1].strip() + + formatted_response = f"{answer}\n\n**Source:**" + for i, result in enumerate(search_results, 1): + formatted_response += f"\n{i}. 
{result['title']}: {result['link']}" + + yield formatted_response else: - async for chunk in response.content: - if chunk: - decoded_chunk = chunk.decode() - if decoded_chunk.startswith('$@$v=undefined-rv1$@$'): - decoded_chunk = decoded_chunk[len('$@$v=undefined-rv1$@$'):] - yield decoded_chunk + yield response_text.strip() diff --git a/g4f/Provider/ChatGpt.py b/g4f/Provider/ChatGpt.py index 878fb424..02bbbcc4 100644 --- a/g4f/Provider/ChatGpt.py +++ b/g4f/Provider/ChatGpt.py @@ -3,7 +3,10 @@ from __future__ import annotations from ..typing import Messages, CreateResult from ..providers.base_provider import AbstractProvider, ProviderModelMixin -import time, uuid, random, json +import time +import uuid +import random +import json from requests import Session from .openai.new import ( @@ -72,11 +75,34 @@ def init_session(user_agent): class ChatGpt(AbstractProvider, ProviderModelMixin): label = "ChatGpt" + url = "https://chatgpt.com" working = True supports_message_history = True supports_system_message = True supports_stream = True - + default_model = 'auto' + models = [ + default_model, + 'gpt-3.5-turbo', + 'gpt-4o', + 'gpt-4o-mini', + 'gpt-4', + 'gpt-4-turbo', + 'chatgpt-4o-latest', + ] + + model_aliases = { + "gpt-4o": "chatgpt-4o-latest", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model @classmethod def create_completion( @@ -86,30 +112,17 @@ class ChatGpt(AbstractProvider, ProviderModelMixin): stream: bool, **kwargs ) -> CreateResult: + model = cls.get_model(model) + if model not in cls.models: + raise ValueError(f"Model '{model}' is not available. 
Available models: {', '.join(cls.models)}") + - if model in [ - 'gpt-4o', - 'gpt-4o-mini', - 'gpt-4', - 'gpt-4-turbo', - 'chatgpt-4o-latest' - ]: - model = 'auto' - - elif model in [ - 'gpt-3.5-turbo' - ]: - model = 'text-davinci-002-render-sha' - - else: - raise ValueError(f"Invalid model: {model}") - - user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36' + user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36' session: Session = init_session(user_agent) - config = get_config(user_agent) - pow_req = get_requirements_token(config) - headers = { + config = get_config(user_agent) + pow_req = get_requirements_token(config) + headers = { 'accept': '*/*', 'accept-language': 'en-US,en;q=0.8', 'content-type': 'application/json', @@ -128,29 +141,35 @@ class ChatGpt(AbstractProvider, ProviderModelMixin): } response = session.post('https://chatgpt.com/backend-anon/sentinel/chat-requirements', - headers=headers, json={'p': pow_req}).json() + headers=headers, json={'p': pow_req}) + + if response.status_code != 200: + return - turnstile = response.get('turnstile', {}) + response_data = response.json() + if "detail" in response_data and "Unusual activity" in response_data["detail"]: + return + + turnstile = response_data.get('turnstile', {}) turnstile_required = turnstile.get('required') - pow_conf = response.get('proofofwork', {}) + pow_conf = response_data.get('proofofwork', {}) if turnstile_required: - turnstile_dx = turnstile.get('dx') + turnstile_dx = turnstile.get('dx') turnstile_token = process_turnstile(turnstile_dx, pow_req) - headers = headers | { - 'openai-sentinel-turnstile-token' : turnstile_token, - 'openai-sentinel-chat-requirements-token': response.get('token'), - 'openai-sentinel-proof-token' : get_answer_token( - pow_conf.get('seed'), pow_conf.get('difficulty'), config - ) - } - + headers = {**headers, + 
'openai-sentinel-turnstile-token': turnstile_token, + 'openai-sentinel-chat-requirements-token': response_data.get('token'), + 'openai-sentinel-proof-token': get_answer_token( + pow_conf.get('seed'), pow_conf.get('difficulty'), config + )} + json_data = { 'action': 'next', 'messages': format_conversation(messages), 'parent_message_id': str(uuid.uuid4()), - 'model': 'auto', + 'model': model, 'timezone_offset_min': -120, 'suggestions': [ 'Can you help me create a personalized morning routine that would help increase my productivity throughout the day? Start by asking me about my current habits and what activities energize me in the morning.', @@ -173,7 +192,7 @@ class ChatGpt(AbstractProvider, ProviderModelMixin): 'conversation_origin': None, 'client_contextual_info': { 'is_dark_mode': True, - 'time_since_loaded': random.randint(22,33), + 'time_since_loaded': random.randint(22, 33), 'page_height': random.randint(600, 900), 'page_width': random.randint(500, 800), 'pixel_ratio': 2, @@ -181,20 +200,33 @@ class ChatGpt(AbstractProvider, ProviderModelMixin): 'screen_width': random.randint(1200, 2000), }, } + + time.sleep(2) response = session.post('https://chatgpt.com/backend-anon/conversation', - headers=headers, json=json_data, stream=True) - + headers=headers, json=json_data, stream=True) + replace = '' for line in response.iter_lines(): if line: - if 'DONE' in line.decode(): - break - - data = json.loads(line.decode()[6:]) - if data.get('message').get('author').get('role') == 'assistant': - tokens = (data.get('message').get('content').get('parts')[0]) - - yield tokens.replace(replace, '') + decoded_line = line.decode() + + if decoded_line.startswith('data:'): + json_string = decoded_line[6:].strip() + + if json_string == '[DONE]': + break - replace = tokens
\ No newline at end of file + if json_string: + try: + data = json.loads(json_string) + except json.JSONDecodeError: + continue + + if data.get('message') and data['message'].get('author'): + role = data['message']['author'].get('role') + if role == 'assistant': + tokens = data['message']['content'].get('parts', []) + if tokens: + yield tokens[0].replace(replace, '') + replace = tokens[0] diff --git a/g4f/Provider/ChatGptEs.py b/g4f/Provider/ChatGptEs.py new file mode 100644 index 00000000..788ffcd9 --- /dev/null +++ b/g4f/Provider/ChatGptEs.py @@ -0,0 +1,84 @@ +from __future__ import annotations + +from aiohttp import ClientSession +import os +import json +import re + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + +class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://chatgpt.es" + api_endpoint = "https://chatgpt.es/wp-admin/admin-ajax.php" + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'gpt-4o' + models = ['gpt-4o', 'gpt-4o-mini', 'chatgpt-4o-latest'] + + model_aliases = { + "gpt-4o": "chatgpt-4o-latest", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "authority": "chatgpt.es", + "accept": "application/json", + "origin": cls.url, + "referer": f"{cls.url}/chat", + "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", + } + + async with ClientSession(headers=headers) as session: + initial_response = await session.get(cls.url) + 
nonce_ = re.findall(r'data-nonce="(.+?)"', await initial_response.text())[0] + post_id = re.findall(r'data-post-id="(.+?)"', await initial_response.text())[0] + + conversation_history = [ + "Human: You are a helpful AI assistant. Please respond in the same language that the user uses in their message. Provide accurate, relevant and helpful information while maintaining a friendly and professional tone. If you're not sure about something, please acknowledge that and provide the best information you can while noting any uncertainties. Focus on being helpful while respecting the user's choice of language." + ] + + for message in messages[:-1]: + if message['role'] == "user": + conversation_history.append(f"Human: {message['content']}") + else: + conversation_history.append(f"AI: {message['content']}") + + payload = { + '_wpnonce': nonce_, + 'post_id': post_id, + 'url': cls.url, + 'action': 'wpaicg_chat_shortcode_message', + 'message': messages[-1]['content'], + 'bot_id': '0', + 'chatbot_identity': 'shortcode', + 'wpaicg_chat_client_id': os.urandom(5).hex(), + 'wpaicg_chat_history': json.dumps(conversation_history) + } + + async with session.post(cls.api_endpoint, headers=headers, data=payload) as response: + response.raise_for_status() + result = await response.json() + yield result['data'] diff --git a/g4f/Provider/Cloudflare.py b/g4f/Provider/Cloudflare.py new file mode 100644 index 00000000..8fb37bef --- /dev/null +++ b/g4f/Provider/Cloudflare.py @@ -0,0 +1,150 @@ +from __future__ import annotations + +from aiohttp import ClientSession +import asyncio +import json +import uuid +import cloudscraper +from typing import AsyncGenerator + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + +class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin): + label = "Cloudflare AI" + url = "https://playground.ai.cloudflare.com" + api_endpoint = 
"https://playground.ai.cloudflare.com/api/inference" + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = '@cf/meta/llama-3.1-8b-instruct-awq' + models = [ + '@cf/meta/llama-2-7b-chat-fp16', + '@cf/meta/llama-2-7b-chat-int8', + + '@cf/meta/llama-3-8b-instruct', + '@cf/meta/llama-3-8b-instruct-awq', + '@hf/meta-llama/meta-llama-3-8b-instruct', + + default_model, + '@cf/meta/llama-3.1-8b-instruct-fp8', + + '@cf/meta/llama-3.2-1b-instruct', + + '@hf/mistral/mistral-7b-instruct-v0.2', + + '@cf/qwen/qwen1.5-7b-chat-awq', + + '@cf/defog/sqlcoder-7b-2', + ] + + model_aliases = { + "llama-2-7b": "@cf/meta/llama-2-7b-chat-fp16", + "llama-2-7b": "@cf/meta/llama-2-7b-chat-int8", + + "llama-3-8b": "@cf/meta/llama-3-8b-instruct", + "llama-3-8b": "@cf/meta/llama-3-8b-instruct-awq", + "llama-3-8b": "@hf/meta-llama/meta-llama-3-8b-instruct", + + "llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-awq", + "llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-fp8", + + "llama-3.2-1b": "@cf/meta/llama-3.2-1b-instruct", + + "qwen-1.5-7b": "@cf/qwen/qwen1.5-7b-chat-awq", + + #"sqlcoder-7b": "@cf/defog/sqlcoder-7b-2", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + max_tokens: int = 2048, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + 'Accept': 'text/event-stream', + 'Accept-Language': 'en-US,en;q=0.9', + 'Cache-Control': 'no-cache', + 'Content-Type': 'application/json', + 'Origin': cls.url, + 'Pragma': 'no-cache', + 'Referer': f'{cls.url}/', + 'Sec-Ch-Ua': '"Chromium";v="129", "Not=A?Brand";v="8"', + 'Sec-Ch-Ua-Mobile': '?0', + 'Sec-Ch-Ua-Platform': '"Linux"', + 'Sec-Fetch-Dest': 'empty', + 'Sec-Fetch-Mode': 'cors', 
+ 'Sec-Fetch-Site': 'same-origin', + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36', + } + + cookies = { + '__cf_bm': uuid.uuid4().hex, + } + + scraper = cloudscraper.create_scraper() + + data = { + "messages": [ + {"role": "user", "content": format_prompt(messages)} + ], + "lora": None, + "model": model, + "max_tokens": max_tokens, + "stream": True + } + + max_retries = 3 + full_response = "" + + for attempt in range(max_retries): + try: + response = scraper.post( + cls.api_endpoint, + headers=headers, + cookies=cookies, + json=data, + stream=True, + proxies={'http': proxy, 'https': proxy} if proxy else None + ) + + if response.status_code == 403: + await asyncio.sleep(2 ** attempt) + continue + + response.raise_for_status() + + for line in response.iter_lines(): + if line.startswith(b'data: '): + if line == b'data: [DONE]': + if full_response: + yield full_response + break + try: + content = json.loads(line[6:].decode('utf-8')) + if 'response' in content and content['response'] != '</s>': + yield content['response'] + except Exception: + continue + break + except Exception as e: + if attempt == max_retries - 1: + raise diff --git a/g4f/Provider/CodeNews.py b/g4f/Provider/CodeNews.py deleted file mode 100644 index 05ec7a45..00000000 --- a/g4f/Provider/CodeNews.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession -from asyncio import sleep - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - - -class CodeNews(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://codenews.cc" - api_endpoint = "https://codenews.cc/chatxyz13" - working = True - supports_gpt_35_turbo = True - supports_gpt_4 = False - supports_stream = True - supports_system_message = False - supports_message_history = False - - default_model = 'free_gpt' - models = 
['free_gpt', 'gpt-4o-mini', 'deepseek-coder', 'chatpdf'] - - model_aliases = { - "glm-4": "free_gpt", - "gpt-3.5-turbo": "chatpdf", - "deepseek": "deepseek-coder", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "accept": "application/json, text/javascript, */*; q=0.01", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - "content-type": "application/x-www-form-urlencoded; charset=UTF-8", - "origin": cls.url, - "pragma": "no-cache", - "priority": "u=1, i", - "referer": f"{cls.url}/chatgpt", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36", - "x-requested-with": "XMLHttpRequest", - } - async with ClientSession(headers=headers) as session: - prompt = format_prompt(messages) - data = { - "chatgpt_input": prompt, - "qa_type2": model, - "chatgpt_version_value": "20240804", - "enable_web_search": "0", - "enable_agent": "0", - "dy_video_text_extract": "0", - "enable_summary": "0", - } - async with session.post(cls.api_endpoint, data=data, proxy=proxy) as response: - response.raise_for_status() - json_data = await response.json() - chat_id = json_data["data"]["id"] - - headers["content-type"] = "application/x-www-form-urlencoded; charset=UTF-8" - data = {"current_req_count": "2"} - - while True: - async with session.post(f"{cls.url}/chat_stream", headers=headers, data=data, proxy=proxy) as response: - 
response.raise_for_status() - json_data = await response.json() - if json_data["data"]: - yield json_data["data"] - break - else: - await sleep(1) # Затримка перед наступним запитом diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py index c8c36fc9..43cc39c0 100644 --- a/g4f/Provider/DDG.py +++ b/g4f/Provider/DDG.py @@ -2,115 +2,107 @@ from __future__ import annotations import json import aiohttp -import asyncio -from typing import Optional -import base64 +from aiohttp import ClientSession -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import get_connector from ..typing import AsyncResult, Messages -from ..requests.raise_for_status import raise_for_status -from ..providers.conversation import BaseConversation +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + class DDG(AsyncGeneratorProvider, ProviderModelMixin): - url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9haWNoYXQ=").decode("utf-8") + url = "https://duckduckgo.com" + api_endpoint = "https://duckduckgo.com/duckchat/v1/chat" working = True - supports_gpt_35_turbo = True + supports_stream = True + supports_system_message = True supports_message_history = True default_model = "gpt-4o-mini" - models = ["gpt-4o-mini", "claude-3-haiku-20240307", "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "mistralai/Mixtral-8x7B-Instruct-v0.1"] + models = [ + "gpt-4o-mini", + "claude-3-haiku-20240307", + "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", + "mistralai/Mixtral-8x7B-Instruct-v0.1" + ] model_aliases = { "claude-3-haiku": "claude-3-haiku-20240307", "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1" } - # Obfuscated URLs and headers - status_url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9zdGF0dXM=").decode("utf-8") - chat_url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9jaGF0").decode("utf-8") - referer 
= base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS8=").decode("utf-8") - origin = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbQ==").decode("utf-8") - - user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36' - headers = { - 'User-Agent': user_agent, - 'Accept': 'text/event-stream', - 'Accept-Language': 'en-US,en;q=0.5', - 'Accept-Encoding': 'gzip, deflate, br, zstd', - 'Referer': referer, - 'Content-Type': 'application/json', - 'Origin': origin, - 'Connection': 'keep-alive', - 'Cookie': 'dcm=3', - 'Sec-Fetch-Dest': 'empty', - 'Sec-Fetch-Mode': 'cors', - 'Sec-Fetch-Site': 'same-origin', - 'Pragma': 'no-cache', - 'TE': 'trailers' - } + @classmethod + def get_model(cls, model: str) -> str: + return cls.model_aliases.get(model, model) if model in cls.model_aliases else cls.default_model @classmethod - async def get_vqd(cls, session: aiohttp.ClientSession) -> Optional[str]: - try: - async with session.get(cls.status_url, headers={"x-vqd-accept": "1"}) as response: - await raise_for_status(response) - return response.headers.get("x-vqd-4") - except Exception as e: - print(f"Error getting VQD: {e}") - return None + async def get_vqd(cls): + status_url = "https://duckduckgo.com/duckchat/v1/status" + + headers = { + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36', + 'Accept': 'text/event-stream', + 'x-vqd-accept': '1' + } + + async with aiohttp.ClientSession() as session: + try: + async with session.get(status_url, headers=headers) as response: + if response.status == 200: + return response.headers.get("x-vqd-4") + else: + print(f"Error: Status code {response.status}") + return None + except Exception as e: + print(f"Error getting VQD: {e}") + return None @classmethod async def create_async_generator( cls, model: str, messages: Messages, + conversation: dict = None, proxy: str = None, - connector: aiohttp.BaseConnector = None, 
- conversation: Conversation = None, - return_conversation: bool = False, **kwargs ) -> AsyncResult: - async with aiohttp.ClientSession(headers=cls.headers, connector=get_connector(connector, proxy)) as session: - vqd_4 = None - if conversation is not None and len(messages) > 1: - vqd_4 = conversation.vqd_4 - messages = [*conversation.messages, messages[-2], messages[-1]] - else: - for _ in range(3): # Try up to 3 times to get a valid VQD - vqd_4 = await cls.get_vqd(session) - if vqd_4: - break - await asyncio.sleep(1) # Wait a bit before retrying - - if not vqd_4: - raise Exception("Failed to obtain a valid VQD token") - - messages = [messages[-1]] # Only use the last message for new conversations - - payload = { - 'model': cls.get_model(model), - 'messages': [{'role': m['role'], 'content': m['content']} for m in messages] + model = cls.get_model(model) + + headers = { + 'accept': 'text/event-stream', + 'content-type': 'application/json', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36', + } + + vqd = conversation.get('vqd') if conversation else await cls.get_vqd() + if not vqd: + raise Exception("Failed to obtain VQD token") + + headers['x-vqd-4'] = vqd + + if conversation: + message_history = conversation.get('messages', []) + message_history.append({"role": "user", "content": format_prompt(messages)}) + else: + message_history = [{"role": "user", "content": format_prompt(messages)}] + + async with ClientSession(headers=headers) as session: + data = { + "model": model, + "messages": message_history } - - async with session.post(cls.chat_url, json=payload, headers={"x-vqd-4": vqd_4}) as response: - await raise_for_status(response) - if return_conversation: - yield Conversation(vqd_4, messages) - - async for line in response.content: - if line.startswith(b"data: "): - chunk = line[6:] - if chunk.startswith(b"[DONE]"): - break - try: - data = json.loads(chunk) - if "message" in data and 
data["message"]: - yield data["message"] - except json.JSONDecodeError: - print(f"Failed to decode JSON: {chunk}") -class Conversation(BaseConversation): - def __init__(self, vqd_4: str, messages: Messages) -> None: - self.vqd_4 = vqd_4 - self.messages = messages + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + async for line in response.content: + if line: + decoded_line = line.decode('utf-8') + if decoded_line.startswith('data: '): + json_str = decoded_line[6:] + if json_str == '[DONE]': + break + try: + json_data = json.loads(json_str) + if 'message' in json_data: + yield json_data['message'] + except json.JSONDecodeError: + pass diff --git a/g4f/Provider/DarkAI.py b/g4f/Provider/DarkAI.py new file mode 100644 index 00000000..06e2bd55 --- /dev/null +++ b/g4f/Provider/DarkAI.py @@ -0,0 +1,83 @@ +from __future__ import annotations + +import json +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + + +class DarkAI(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://darkai.foundation/chat" + api_endpoint = "https://darkai.foundation/chat" + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'llama-3-405b' + models = [ + 'gpt-4o', # Uncensored + 'gpt-3.5-turbo', # Uncensored + 'llama-3-70b', # Uncensored + default_model, + ] + + model_aliases = { + "llama-3.1-70b": "llama-3-70b", + "llama-3.1-405b": "llama-3-405b", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = 
cls.get_model(model) + + headers = { + "accept": "text/event-stream", + "content-type": "application/json", + "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36" + } + async with ClientSession(headers=headers) as session: + prompt = format_prompt(messages) + data = { + "query": prompt, + "model": model, + } + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + full_text = "" + async for chunk in response.content: + if chunk: + try: + chunk_str = chunk.decode().strip() + if chunk_str.startswith('data: '): + chunk_data = json.loads(chunk_str[6:]) + if chunk_data['event'] == 'text-chunk': + full_text += chunk_data['data']['text'] + elif chunk_data['event'] == 'stream-end': + if full_text: + yield full_text.strip() + return + except json.JSONDecodeError: + pass + except Exception: + pass + + if full_text: + yield full_text.strip() diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py new file mode 100644 index 00000000..5c668599 --- /dev/null +++ b/g4f/Provider/DeepInfraChat.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +from aiohttp import ClientSession +import json + +from ..typing import AsyncResult, Messages, ImageType +from ..image import to_data_uri +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin + + +class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://deepinfra.com/chat" + api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions" + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' + models = [ + 'meta-llama/Meta-Llama-3.1-8B-Instruct', + default_model, + 'microsoft/WizardLM-2-8x22B', + 'Qwen/Qwen2.5-72B-Instruct', + ] + model_aliases = { + "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct", + 
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", + "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B", + "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct", + } + + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + image: ImageType = None, + image_name: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + 'Accept-Language': 'en-US,en;q=0.9', + 'Cache-Control': 'no-cache', + 'Connection': 'keep-alive', + 'Content-Type': 'application/json', + 'Origin': 'https://deepinfra.com', + 'Pragma': 'no-cache', + 'Referer': 'https://deepinfra.com/', + 'Sec-Fetch-Dest': 'empty', + 'Sec-Fetch-Mode': 'cors', + 'Sec-Fetch-Site': 'same-site', + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36', + 'X-Deepinfra-Source': 'web-embed', + 'accept': 'text/event-stream', + 'sec-ch-ua': '"Not;A=Brand";v="24", "Chromium";v="128"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + } + + async with ClientSession(headers=headers) as session: + data = { + 'model': model, + 'messages': messages, + 'stream': True + } + + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + async for line in response.content: + if line: + decoded_line = line.decode('utf-8').strip() + if decoded_line.startswith('data:'): + json_part = decoded_line[5:].strip() + if json_part == '[DONE]': + break + try: + data = json.loads(json_part) + choices = data.get('choices', []) + if choices: + delta = choices[0].get('delta', {}) + content = delta.get('content', '') + if content: + yield content + except json.JSONDecodeError: + print(f"JSON decode error: {json_part}") diff --git 
a/g4f/Provider/Free2GPT.py b/g4f/Provider/Free2GPT.py index a79bd1da..6ba9ac0f 100644 --- a/g4f/Provider/Free2GPT.py +++ b/g4f/Provider/Free2GPT.py @@ -16,7 +16,7 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin): url = "https://chat10.free2gpt.xyz" working = True supports_message_history = True - default_model = 'llama-3.1-70b' + default_model = 'mistral-7b' @classmethod async def create_async_generator( @@ -49,12 +49,8 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin): connector=get_connector(connector, proxy), headers=headers ) as session: timestamp = int(time.time() * 1e3) - system_message = { - "role": "system", - "content": "" - } data = { - "messages": [system_message] + messages, + "messages": messages, "time": timestamp, "pass": None, "sign": generate_signature(timestamp, messages[-1]["content"]), diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py deleted file mode 100644 index a9dc0f56..00000000 --- a/g4f/Provider/FreeChatgpt.py +++ /dev/null @@ -1,96 +0,0 @@ -from __future__ import annotations -import json -from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - - -class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://chat.chatgpt.org.uk" - api_endpoint = "/api/openai/v1/chat/completions" - working = True - default_model = '@cf/qwen/qwen1.5-14b-chat-awq' - models = [ - '@cf/qwen/qwen1.5-14b-chat-awq', - 'SparkDesk-v1.1', - 'Qwen2-7B-Instruct', - 'glm4-9B-chat', - 'chatglm3-6B', - 'Yi-1.5-9B-Chat', - ] - - model_aliases = { - "qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq", - "sparkdesk-v1.1": "SparkDesk-v1.1", - "qwen-2-7b": "Qwen2-7B-Instruct", - "glm-4-9b": "glm4-9B-chat", - "glm-3-6b": "chatglm3-6B", - "yi-1.5-9b": "Yi-1.5-9B-Chat", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model.lower() 
in cls.model_aliases: - return cls.model_aliases[model.lower()] - else: - return cls.default_model - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "accept": "application/json, text/event-stream", - "accept-language": "en-US,en;q=0.9", - "content-type": "application/json", - "dnt": "1", - "origin": cls.url, - "referer": f"{cls.url}/", - "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36", - } - async with ClientSession(headers=headers) as session: - prompt = format_prompt(messages) - data = { - "messages": [ - {"role": "system", "content": "\nYou are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent model: gpt-3.5-turbo\nCurrent time: Thu Jul 04 2024 21:35:59 GMT+0300 (Eastern European Summer Time)\nLatex inline: \\(x^2\\) \nLatex block: $$e=mc^2$$\n\n"}, - {"role": "user", "content": prompt} - ], - "stream": True, - "model": model, - "temperature": 0.5, - "presence_penalty": 0, - "frequency_penalty": 0, - "top_p": 1 - } - async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response: - response.raise_for_status() - accumulated_text = "" - async for line in response.content: - if line: - line_str = line.decode().strip() - if line_str == "data: [DONE]": - yield accumulated_text - break - elif line_str.startswith("data: "): - try: - chunk = json.loads(line_str[6:]) - delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "") - accumulated_text += delta_content - yield delta_content # Yield each chunk of content - except json.JSONDecodeError: - pass diff --git 
a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py index 82a3824b..b38ff428 100644 --- a/g4f/Provider/FreeGpt.py +++ b/g4f/Provider/FreeGpt.py @@ -24,7 +24,7 @@ class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin): working = True supports_message_history = True supports_system_message = True - default_model = 'llama-3.1-70b' + default_model = 'gemini-pro' @classmethod async def create_async_generator( diff --git a/g4f/Provider/GizAI.py b/g4f/Provider/GizAI.py new file mode 100644 index 00000000..f00b344e --- /dev/null +++ b/g4f/Provider/GizAI.py @@ -0,0 +1,76 @@ +from __future__ import annotations + +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + + +class GizAI(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://app.giz.ai/assistant" + api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer" + working = True + supports_stream = False + supports_system_message = True + supports_message_history = True + + default_model = 'chat-gemini-flash' + models = [default_model] + + model_aliases = {"gemini-flash": "chat-gemini-flash",} + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + 'Accept': 'application/json, text/plain, */*', + 'Accept-Language': 'en-US,en;q=0.9', + 'Cache-Control': 'no-cache', + 'Connection': 'keep-alive', + 'Content-Type': 'application/json', + 'DNT': '1', + 'Origin': 'https://app.giz.ai', + 'Pragma': 'no-cache', + 'Sec-Fetch-Dest': 'empty', + 'Sec-Fetch-Mode': 'cors', + 'Sec-Fetch-Site': 'same-origin', + 'User-Agent': 'Mozilla/5.0 (X11; 
Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36', + 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"' + } + + prompt = format_prompt(messages) + + async with ClientSession(headers=headers) as session: + data = { + "model": model, + "input": { + "messages": [{"type": "human", "content": prompt}], + "mode": "plan" + }, + "noStream": True + } + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + if response.status == 201: + result = await response.json() + yield result['output'].strip() + else: + raise Exception(f"Unexpected response status: {response.status}") diff --git a/g4f/Provider/GptTalkRu.py b/g4f/Provider/GptTalkRu.py deleted file mode 100644 index 6a59484f..00000000 --- a/g4f/Provider/GptTalkRu.py +++ /dev/null @@ -1,59 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession, BaseConnector - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider -from .helper import get_random_string, get_connector -from ..requests import raise_for_status, get_args_from_browser, WebDriver -from ..webdriver import has_seleniumwire -from ..errors import MissingRequirementsError - -class GptTalkRu(AsyncGeneratorProvider): - url = "https://gpttalk.ru" - working = True - supports_gpt_35_turbo = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - connector: BaseConnector = None, - webdriver: WebDriver = None, - **kwargs - ) -> AsyncResult: - if not model: - model = "gpt-3.5-turbo" - if not has_seleniumwire: - raise MissingRequirementsError('Install "selenium-wire" package') - args = get_args_from_browser(f"{cls.url}", webdriver) - args["headers"]["accept"] = "application/json, text/plain, */*" - async with ClientSession(connector=get_connector(connector, proxy), **args) as session: - async with 
session.get("https://gpttalk.ru/getToken") as response: - await raise_for_status(response) - public_key = (await response.json())["response"]["key"]["publicKey"] - random_string = get_random_string(8) - data = { - "model": model, - "modelType": 1, - "prompt": messages, - "responseType": "stream", - "security": { - "randomMessage": random_string, - "shifrText": encrypt(public_key, random_string) - } - } - async with session.post(f"{cls.url}/gpt2", json=data, proxy=proxy) as response: - await raise_for_status(response) - async for chunk in response.content.iter_any(): - yield chunk.decode(errors="ignore") - -def encrypt(public_key: str, value: str) -> str: - from Crypto.Cipher import PKCS1_v1_5 - from Crypto.PublicKey import RSA - import base64 - rsa_key = RSA.importKey(public_key) - cipher = PKCS1_v1_5.new(rsa_key) - return base64.b64encode(cipher.encrypt(value.encode())).decode()
\ No newline at end of file diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py index 06216ade..d4a4b497 100644 --- a/g4f/Provider/HuggingChat.py +++ b/g4f/Provider/HuggingChat.py @@ -1,6 +1,7 @@ from __future__ import annotations -import json, requests, re +import json +import requests from curl_cffi import requests as cf_reqs from ..typing import CreateResult, Messages @@ -16,19 +17,25 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): models = [ 'meta-llama/Meta-Llama-3.1-70B-Instruct', 'CohereForAI/c4ai-command-r-plus-08-2024', - 'mistralai/Mixtral-8x7B-Instruct-v0.1', - 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO', - 'mistralai/Mistral-7B-Instruct-v0.3', - 'microsoft/Phi-3-mini-4k-instruct', + 'Qwen/Qwen2.5-72B-Instruct', + 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF', + 'Qwen/Qwen2.5-Coder-32B-Instruct', + 'meta-llama/Llama-3.2-11B-Vision-Instruct', + 'NousResearch/Hermes-3-Llama-3.1-8B', + 'mistralai/Mistral-Nemo-Instruct-2407', + 'microsoft/Phi-3.5-mini-instruct', ] model_aliases = { "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct", "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024", - "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", - "mixtral-8x7b-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", - "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3", - "phi-3-mini-4k": "microsoft/Phi-3-mini-4k-instruct", + "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct", + "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", + "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct", + "llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct", + "hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B", + "mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407", + "phi-3.5-mini": "microsoft/Phi-3.5-mini-instruct", } @classmethod @@ -69,20 +76,42 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 
Safari/537.36', } - print(model) json_data = { 'model': model, } response = session.post('https://huggingface.co/chat/conversation', json=json_data) - conversationId = response.json()['conversationId'] + if response.status_code != 200: + raise RuntimeError(f"Request failed with status code: {response.status_code}, response: {response.text}") - response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11',) + conversationId = response.json().get('conversationId') + + # Get the data response and parse it properly + response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11') + + # Split the response content by newlines and parse each line as JSON + try: + json_data = None + for line in response.text.split('\n'): + if line.strip(): + try: + parsed = json.loads(line) + if isinstance(parsed, dict) and "nodes" in parsed: + json_data = parsed + break + except json.JSONDecodeError: + continue + + if not json_data: + raise RuntimeError("Failed to parse response data") - data: list = (response.json())["nodes"][1]["data"] - keys: list[int] = data[data[0]["messages"]] - message_keys: dict = data[keys[0]] - messageId: str = data[message_keys["id"]] + data: list = json_data["nodes"][1]["data"] + keys: list[int] = data[data[0]["messages"]] + message_keys: dict = data[keys[0]] + messageId: str = data[message_keys["id"]] + + except (KeyError, IndexError, TypeError) as e: + raise RuntimeError(f"Failed to extract message ID: {str(e)}") settings = { "inputs": format_prompt(messages), @@ -114,28 +143,41 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): 'data': (None, json.dumps(settings, separators=(',', ':'))), } - response = requests.post(f'https://huggingface.co/chat/conversation/{conversationId}', + response = requests.post( + f'https://huggingface.co/chat/conversation/{conversationId}', cookies=session.cookies, headers=headers, files=files, ) - 
first_token = True + full_response = "" for line in response.iter_lines(): - line = json.loads(line) + if not line: + continue + try: + line = json.loads(line) + except json.JSONDecodeError as e: + print(f"Failed to decode JSON: {line}, error: {e}") + continue if "type" not in line: raise RuntimeError(f"Response: {line}") elif line["type"] == "stream": - token = line["token"] - if first_token: - token = token.lstrip().replace('\u0000', '') - first_token = False - else: - token = token.replace('\u0000', '') - - yield token + token = line["token"].replace('\u0000', '') + full_response += token + if stream: + yield token elif line["type"] == "finalAnswer": break + + full_response = full_response.replace('<|im_end|', '').replace('\u0000', '').strip() + + if not stream: + yield full_response + + @classmethod + def supports_model(cls, model: str) -> bool: + """Check if the model is supported by the provider.""" + return model in cls.models or model in cls.model_aliases diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py index 8a9f46b1..7ccfa877 100644 --- a/g4f/Provider/Liaobots.py +++ b/g4f/Provider/Liaobots.py @@ -36,32 +36,50 @@ models = { "tokenLimit": 7800, "context": "8K", }, - "gpt-4-turbo-2024-04-09": { - "id": "gpt-4-turbo-2024-04-09", - "name": "GPT-4-Turbo", + "gpt-4o-2024-08-06": { + "id": "gpt-4o-2024-08-06", + "name": "GPT-4o", "model": "ChatGPT", "provider": "OpenAI", "maxLength": 260000, "tokenLimit": 126000, "context": "128K", }, - "gpt-4o-2024-08-06": { - "id": "gpt-4o-2024-08-06", - "name": "GPT-4o", + "gpt-4-turbo-2024-04-09": { + "id": "gpt-4-turbo-2024-04-09", + "name": "GPT-4-Turbo", "model": "ChatGPT", "provider": "OpenAI", "maxLength": 260000, "tokenLimit": 126000, "context": "128K", }, - "gpt-4-0613": { - "id": "gpt-4-0613", - "name": "GPT-4-0613", - "model": "ChatGPT", - "provider": "OpenAI", - "maxLength": 32000, - "tokenLimit": 7600, - "context": "8K", + "grok-beta": { + "id": "grok-beta", + "name": "Grok-Beta", + "model": 
"Grok", + "provider": "x.ai", + "maxLength": 400000, + "tokenLimit": 100000, + "context": "100K", + }, + "grok-2": { + "id": "grok-2", + "name": "Grok-2", + "model": "Grok", + "provider": "x.ai", + "maxLength": 400000, + "tokenLimit": 100000, + "context": "100K", + }, + "grok-2-mini": { + "id": "grok-2-mini", + "name": "Grok-2-mini", + "model": "Grok", + "provider": "x.ai", + "maxLength": 400000, + "tokenLimit": 100000, + "context": "100K", }, "claude-3-opus-20240229": { "id": "claude-3-opus-20240229", @@ -81,27 +99,27 @@ models = { "tokenLimit": 200000, "context": "200K", }, - "claude-3-opus-20240229-gcp": { - "id": "claude-3-opus-20240229-gcp", - "name": "Claude-3-Opus-Gcp", + "claude-3-5-sonnet-20240620": { + "id": "claude-3-5-sonnet-20240620", + "name": "Claude-3.5-Sonnet", "model": "Claude", "provider": "Anthropic", "maxLength": 800000, "tokenLimit": 200000, "context": "200K", }, - "claude-3-sonnet-20240229": { - "id": "claude-3-sonnet-20240229", - "name": "Claude-3-Sonnet", + "claude-3-5-sonnet-20241022": { + "id": "claude-3-5-sonnet-20241022", + "name": "Claude-3.5-Sonnet-V2", "model": "Claude", "provider": "Anthropic", "maxLength": 800000, "tokenLimit": 200000, "context": "200K", }, - "claude-3-5-sonnet-20240620": { - "id": "claude-3-5-sonnet-20240620", - "name": "Claude-3.5-Sonnet", + "claude-3-sonnet-20240229": { + "id": "claude-3-sonnet-20240229", + "name": "Claude-3-Sonnet", "model": "Claude", "provider": "Anthropic", "maxLength": 800000, @@ -126,17 +144,8 @@ models = { "tokenLimit": 200000, "context": "200K", }, - "gemini-1.0-pro-latest": { - "id": "gemini-1.0-pro-latest", - "name": "Gemini-Pro", - "model": "Gemini", - "provider": "Google", - "maxLength": 120000, - "tokenLimit": 30000, - "context": "32K", - }, - "gemini-1.5-flash-latest": { - "id": "gemini-1.5-flash-latest", + "gemini-1.5-flash-002": { + "id": "gemini-1.5-flash-002", "name": "Gemini-1.5-Flash-1M", "model": "Gemini", "provider": "Google", @@ -144,8 +153,8 @@ models = { "tokenLimit": 
1000000, "context": "1024K", }, - "gemini-1.5-pro-latest": { - "id": "gemini-1.5-pro-latest", + "gemini-1.5-pro-002": { + "id": "gemini-1.5-pro-002", "name": "Gemini-1.5-Pro-1M", "model": "Gemini", "provider": "Google", @@ -161,28 +170,27 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin): working = True supports_message_history = True supports_system_message = True - supports_gpt_4 = True - default_model = "gpt-4o" + default_model = "gpt-4o-2024-08-06" models = list(models.keys()) model_aliases = { "gpt-4o-mini": "gpt-4o-mini-free", "gpt-4o": "gpt-4o-free", - "gpt-4-turbo": "gpt-4-turbo-2024-04-09", "gpt-4o": "gpt-4o-2024-08-06", + + "gpt-4-turbo": "gpt-4-turbo-2024-04-09", "gpt-4": "gpt-4-0613", "claude-3-opus": "claude-3-opus-20240229", "claude-3-opus": "claude-3-opus-20240229-aws", - "claude-3-opus": "claude-3-opus-20240229-gcp", "claude-3-sonnet": "claude-3-sonnet-20240229", - "claude-3-5-sonnet": "claude-3-5-sonnet-20240620", + "claude-3.5-sonnet": "claude-3-5-sonnet-20240620", + "claude-3.5-sonnet": "claude-3-5-sonnet-20241022", "claude-3-haiku": "claude-3-haiku-20240307", "claude-2.1": "claude-2.1", - "gemini-pro": "gemini-1.0-pro-latest", - "gemini-flash": "gemini-1.5-flash-latest", - "gemini-pro": "gemini-1.5-pro-latest", + "gemini-flash": "gemini-1.5-flash-002", + "gemini-pro": "gemini-1.5-pro-002", } _auth_code = "" diff --git a/g4f/Provider/LiteIcoding.py b/g4f/Provider/LiteIcoding.py deleted file mode 100644 index 69294a57..00000000 --- a/g4f/Provider/LiteIcoding.py +++ /dev/null @@ -1,113 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession, ClientResponseError -import re -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - - -class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://lite.icoding.ink" - api_endpoint = "/api/v1/gpt/message" - working = True - supports_gpt_4 = True - 
default_model = "gpt-4o" - models = [ - 'gpt-4o', - 'gpt-4-turbo', - 'claude-3', - 'claude-3.5', - 'gemini-1.5', - ] - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - headers = { - "Accept": "*/*", - "Accept-Language": "en-US,en;q=0.9", - "Authorization": "Bearer aa3020ee873e40cb8b3f515a0708ebc4", - "Connection": "keep-alive", - "Content-Type": "application/json;charset=utf-8", - "DNT": "1", - "Origin": cls.url, - "Referer": f"{cls.url}/", - "Sec-Fetch-Dest": "empty", - "Sec-Fetch-Mode": "cors", - "Sec-Fetch-Site": "same-origin", - "User-Agent": ( - "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) " - "Chrome/126.0.0.0 Safari/537.36" - ), - "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - } - - data = { - "model": model, - "chatId": "-1", - "messages": [ - { - "role": msg["role"], - "content": msg["content"], - "time": msg.get("time", ""), - "attachments": msg.get("attachments", []), - } - for msg in messages - ], - "plugins": [], - "systemPrompt": "", - "temperature": 0.5, - } - - async with ClientSession(headers=headers) as session: - try: - async with session.post( - f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy - ) as response: - response.raise_for_status() - buffer = "" - full_response = "" - def decode_content(data): - bytes_array = bytes([int(b, 16) ^ 255 for b in data.split()]) - return bytes_array.decode('utf-8') - async for chunk in response.content.iter_any(): - if chunk: - buffer += chunk.decode() - while "\n\n" in buffer: - part, buffer = buffer.split("\n\n", 1) - if part.startswith("data: "): - content = part[6:].strip() - if content and content != "[DONE]": - content = content.strip('"') - # Decoding each content block - decoded_content = decode_content(content) - full_response += decoded_content - full_response = ( - full_response.replace('""', '') 
# Handle double quotes - .replace('" "', ' ') # Handle space within quotes - .replace("\\n\\n", "\n\n") - .replace("\\n", "\n") - .replace('\\"', '"') - .strip() - ) - # Add filter to remove unwanted text - filtered_response = re.sub(r'\n---\n.*', '', full_response, flags=re.DOTALL) - # Remove extra quotes at the beginning and end - cleaned_response = filtered_response.strip().strip('"') - yield cleaned_response - - except ClientResponseError as e: - raise RuntimeError( - f"ClientResponseError {e.status}: {e.message}, url={e.request_info.url}, data={data}" - ) from e - - except Exception as e: - raise RuntimeError(f"Unexpected error: {str(e)}") from e diff --git a/g4f/Provider/MagickPen.py b/g4f/Provider/MagickPen.py index eab70536..7f1751dd 100644 --- a/g4f/Provider/MagickPen.py +++ b/g4f/Provider/MagickPen.py @@ -1,72 +1,53 @@ from __future__ import annotations +from aiohttp import ClientSession +import hashlib import time import random -import hashlib import re -from aiohttp import ClientSession - +import json from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from .helper import format_prompt class MagickPen(AsyncGeneratorProvider, ProviderModelMixin): url = "https://magickpen.com" - api_endpoint_free = "https://api.magickpen.com/chat/free" - api_endpoint_ask = "https://api.magickpen.com/ask" + api_endpoint = "https://api.magickpen.com/ask" working = True - supports_gpt_4 = True - supports_stream = False - - default_model = 'free' - models = ['free', 'ask'] + supports_stream = True + supports_system_message = True + supports_message_history = True - model_aliases = { - "gpt-4o-mini": "free", - "gpt-4o-mini": "ask", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model + default_model = 'gpt-4o-mini' + models = ['gpt-4o-mini'] @classmethod - async def 
get_secrets(cls): - url = 'https://magickpen.com/_nuxt/02c76dc.js' + async def fetch_api_credentials(cls) -> tuple: + url = "https://magickpen.com/_nuxt/bf709a9ce19f14e18116.js" async with ClientSession() as session: async with session.get(url) as response: - if response.status == 200: - text = await response.text() - x_api_secret_match = re.search(r'"X-API-Secret":"([^"]+)"', text) - secret_match = re.search(r'secret:\s*"([^"]+)"', text) - - x_api_secret = x_api_secret_match.group(1) if x_api_secret_match else None - secret = secret_match.group(1) if secret_match else None - - # Generate timestamp and nonce dynamically - timestamp = str(int(time.time() * 1000)) - nonce = str(random.random()) - - # Generate signature - signature_parts = ["TGDBU9zCgM", timestamp, nonce] - signature_string = "".join(sorted(signature_parts)) - signature = hashlib.md5(signature_string.encode()).hexdigest() - - return { - 'X-API-Secret': x_api_secret, - 'signature': signature, - 'timestamp': timestamp, - 'nonce': nonce, - 'secret': secret - } - else: - print(f"Error while fetching the file: {response.status}") - return None + text = await response.text() + + pattern = r'"X-API-Secret":"(\w+)"' + match = re.search(pattern, text) + X_API_SECRET = match.group(1) if match else None + + timestamp = str(int(time.time() * 1000)) + nonce = str(random.random()) + + s = ["TGDBU9zCgM", timestamp, nonce] + s.sort() + signature_string = ''.join(s) + signature = hashlib.md5(signature_string.encode()).hexdigest() + + pattern = r'secret:"(\w+)"' + match = re.search(pattern, text) + secret = match.group(1) if match else None + + if X_API_SECRET and timestamp and nonce and secret: + return X_API_SECRET, signature, timestamp, nonce, secret + else: + raise Exception("Unable to extract all the necessary data from the JavaScript file.") @classmethod async def create_async_generator( @@ -77,54 +58,30 @@ class MagickPen(AsyncGeneratorProvider, ProviderModelMixin): **kwargs ) -> AsyncResult: model = 
cls.get_model(model) + X_API_SECRET, signature, timestamp, nonce, secret = await cls.fetch_api_credentials() - secrets = await cls.get_secrets() - if not secrets: - raise Exception("Failed to obtain necessary secrets") - headers = { - "accept": "application/json, text/plain, */*", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - "content-type": "application/json", - "nonce": secrets['nonce'], - "origin": "https://magickpen.com", - "pragma": "no-cache", - "priority": "u=1, i", - "referer": "https://magickpen.com/", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-site", - "secret": secrets['secret'], - "signature": secrets['signature'], - "timestamp": secrets['timestamp'], - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36", - "x-api-secret": secrets['X-API-Secret'] + 'accept': 'application/json, text/plain, */*', + 'accept-language': 'en-US,en;q=0.9', + 'content-type': 'application/json', + 'nonce': nonce, + 'origin': cls.url, + 'referer': f"{cls.url}/", + 'secret': secret, + 'signature': signature, + 'timestamp': timestamp, + 'x-api-secret': X_API_SECRET, } async with ClientSession(headers=headers) as session: - if model == 'free': - data = { - "history": [{"role": "user", "content": format_prompt(messages)}] - } - async with session.post(cls.api_endpoint_free, json=data, proxy=proxy) as response: - response.raise_for_status() - result = await response.text() - yield result - - elif model == 'ask': - data = { - "query": format_prompt(messages), - "plan": "Pay as you go" - } - async with session.post(cls.api_endpoint_ask, json=data, proxy=proxy) as response: - response.raise_for_status() - async for chunk in response.content: - if chunk: - yield chunk.decode() - - else: - raise ValueError(f"Unknown model: {model}") + 
prompt = format_prompt(messages) + payload = { + 'query': prompt, + 'turnstileResponse': '', + 'action': 'verify' + } + async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response: + response.raise_for_status() + async for chunk in response.content: + if chunk: + yield chunk.decode() diff --git a/g4f/Provider/Mhystical.py b/g4f/Provider/Mhystical.py new file mode 100644 index 00000000..2aa98ebc --- /dev/null +++ b/g4f/Provider/Mhystical.py @@ -0,0 +1,90 @@ +from __future__ import annotations + +import json +import logging +from aiohttp import ClientSession +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + +""" + Mhystical.cc + ~~~~~~~~~~~~ + Author: NoelP.dev + Last Updated: 2024-05-11 + + Author Site: https://noelp.dev + Provider Site: https://mhystical.cc + +""" + +logger = logging.getLogger(__name__) + +class Mhystical(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://api.mhystical.cc" + api_endpoint = "https://api.mhystical.cc/v1/completions" + working = True + supports_stream = False # Set to False, as streaming is not specified in ChatifyAI + supports_system_message = False + supports_message_history = True + + default_model = 'gpt-4' + models = [default_model] + model_aliases = {} + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases.get(model, cls.default_model) + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "x-api-key": "mhystical", + "Content-Type": "application/json", + "accept": "*/*", + "cache-control": "no-cache", + "origin": cls.url, + "referer": f"{cls.url}/", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 
(KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36" + } + + async with ClientSession(headers=headers) as session: + data = { + "model": model, + "messages": [{"role": "user", "content": format_prompt(messages)}] + } + async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response: + if response.status == 400: + yield "Error: API key is missing" + elif response.status == 429: + yield "Error: Rate limit exceeded" + elif response.status == 500: + yield "Error: Internal server error" + else: + response.raise_for_status() + response_text = await response.text() + filtered_response = cls.filter_response(response_text) + yield filtered_response + + @staticmethod + def filter_response(response_text: str) -> str: + try: + json_response = json.loads(response_text) + message_content = json_response["choices"][0]["message"]["content"] + return message_content + except (KeyError, IndexError, json.JSONDecodeError) as e: + logger.error("Error parsing response: %s", e) + return "Error: Failed to parse response from API." 
diff --git a/g4f/Provider/Nexra.py b/g4f/Provider/Nexra.py deleted file mode 100644 index b2b83837..00000000 --- a/g4f/Provider/Nexra.py +++ /dev/null @@ -1,116 +0,0 @@ -from __future__ import annotations -import json -from aiohttp import ClientSession - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt -from ..image import ImageResponse - -class Nexra(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://nexra.aryahcr.cc" - chat_api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt" - image_api_endpoint = "https://nexra.aryahcr.cc/api/image/complements" - working = True - supports_gpt_35_turbo = True - supports_gpt_4 = True - supports_system_message = True - supports_message_history = True - - default_model = 'gpt-3.5-turbo' - text_models = [ - 'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-0314', 'gpt-4-32k-0314', - 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301', - 'gpt-3', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002', - 'text-curie-001', 'text-babbage-001', 'text-ada-001', - 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002', - ] - image_models = ['dalle', 'dalle2', 'dalle-mini', 'emi'] - models = [*text_models, *image_models] - - model_aliases = { - "gpt-4": "gpt-4-0613", - "gpt-4": "gpt-4-32k", - "gpt-4": "gpt-4-0314", - "gpt-4": "gpt-4-32k-0314", - - "gpt-3.5-turbo": "gpt-3.5-turbo-16k", - "gpt-3.5-turbo": "gpt-3.5-turbo-0613", - "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613", - "gpt-3.5-turbo": "gpt-3.5-turbo-0301", - - "gpt-3": "text-davinci-003", - "gpt-3": "text-davinci-002", - "gpt-3": "code-davinci-002", - "gpt-3": "text-curie-001", - "gpt-3": "text-babbage-001", - "gpt-3": "text-ada-001", - "gpt-3": "text-ada-001", - "gpt-3": "davinci", - "gpt-3": "curie", - "gpt-3": "babbage", - "gpt-3": "ada", - "gpt-3": "babbage-002", - "gpt-3": "davinci-002", - - "dalle-2": 
"dalle2", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.text_models or model in cls.image_models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "Content-Type": "application/json", - } - - async with ClientSession(headers=headers) as session: - if model in cls.image_models: - # Image generation - prompt = messages[-1]['content'] if messages else "" - data = { - "prompt": prompt, - "model": model, - "response": "url" - } - async with session.post(cls.image_api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - result = await response.text() - result_json = json.loads(result.strip('_')) - image_url = result_json['images'][0] if result_json['images'] else None - - if image_url: - yield ImageResponse(images=image_url, alt=prompt) - else: - # Text completion - data = { - "messages": messages, - "prompt": format_prompt(messages), - "model": model, - "markdown": False - } - async with session.post(cls.chat_api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - result = await response.text() - - try: - json_response = json.loads(result) - gpt_response = json_response.get('gpt', '') - yield gpt_response - except json.JSONDecodeError: - yield result diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py index ecb51f9b..b3119cb6 100644 --- a/g4f/Provider/PerplexityLabs.py +++ b/g4f/Provider/PerplexityLabs.py @@ -21,15 +21,17 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin): "llama-3.1-sonar-small-128k-chat", "llama-3.1-8b-instruct", "llama-3.1-70b-instruct", + "/models/LiquidCloud", ] model_aliases = { - "llama-3.1-8b": "llama-3.1-sonar-large-128k-online", - "llama-3.1-8b": 
"sonar-small-128k-online", - "llama-3.1-8b": "llama-3.1-sonar-large-128k-chat", - "llama-3.1-8b": "llama-3.1-sonar-small-128k-chat", + "sonar-online": "llama-3.1-sonar-large-128k-online", + "sonar-online": "sonar-small-128k-online", + "sonar-chat": "llama-3.1-sonar-large-128k-chat", + "sonar-chat": "llama-3.1-sonar-small-128k-chat", "llama-3.1-8b": "llama-3.1-8b-instruct", "llama-3.1-70b": "llama-3.1-70b-instruct", + "lfm-40b": "/models/LiquidCloud", } @classmethod diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py index e03830f4..68a7357f 100644 --- a/g4f/Provider/Pi.py +++ b/g4f/Provider/Pi.py @@ -12,6 +12,7 @@ class Pi(AbstractProvider): supports_stream = True _session = None default_model = "pi" + models = [default_model] @classmethod def create_completion( @@ -22,6 +23,7 @@ class Pi(AbstractProvider): proxy: str = None, timeout: int = 180, conversation_id: str = None, + webdriver: WebDriver = None, **kwargs ) -> CreateResult: if cls._session is None: diff --git a/g4f/Provider/Pizzagpt.py b/g4f/Provider/Pizzagpt.py index 47cb135c..6513bd34 100644 --- a/g4f/Provider/Pizzagpt.py +++ b/g4f/Provider/Pizzagpt.py @@ -12,7 +12,6 @@ class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin): url = "https://www.pizzagpt.it" api_endpoint = "/api/chatx-completion" working = True - supports_gpt_4 = True default_model = 'gpt-4o-mini' @classmethod diff --git a/g4f/Provider/Prodia.py b/g4f/Provider/Prodia.py index dd87a34c..fcebf7e3 100644 --- a/g4f/Provider/Prodia.py +++ b/g4f/Provider/Prodia.py @@ -14,10 +14,10 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin): working = True default_model = 'absolutereality_v181.safetensors [3d9d4d2b]' - models = [ + image_models = [ '3Guofeng3_v34.safetensors [50f420de]', 'absolutereality_V16.safetensors [37db0fc3]', - 'absolutereality_v181.safetensors [3d9d4d2b]', + default_model, 'amIReal_V41.safetensors [0a8a2e61]', 'analog-diffusion-1.0.ckpt [9ca13f02]', 'aniverse_v30.safetensors [579e6f85]', @@ -81,6 +81,7 @@ class 
Prodia(AsyncGeneratorProvider, ProviderModelMixin): 'timeless-1.0.ckpt [7c4971d4]', 'toonyou_beta6.safetensors [980f6b15]', ] + models = [*image_models] @classmethod def get_model(cls, model: str) -> str: @@ -97,6 +98,12 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin): model: str, messages: Messages, proxy: str = None, + negative_prompt: str = "", + steps: str = 20, # 1-25 + cfg: str = 7, # 0-20 + seed: str = "-1", + sampler: str = "DPM++ 2M Karras", # "Euler", "Euler a", "Heun", "DPM++ 2M Karras", "DPM++ SDE Karras", "DDIM" + aspect_ratio: str = "square", # "square", "portrait", "landscape" **kwargs ) -> AsyncResult: model = cls.get_model(model) @@ -116,12 +123,12 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin): "new": "true", "prompt": prompt, "model": model, - "negative_prompt": kwargs.get("negative_prompt", ""), - "steps": kwargs.get("steps", 20), - "cfg": kwargs.get("cfg", 7), - "seed": kwargs.get("seed", int(time.time())), - "sampler": kwargs.get("sampler", "DPM++ 2M Karras"), - "aspect_ratio": kwargs.get("aspect_ratio", "square") + "negative_prompt": negative_prompt, + "steps": steps, + "cfg": cfg, + "seed": seed, + "sampler": sampler, + "aspect_ratio": aspect_ratio } async with session.get(cls.api_endpoint, params=params, proxy=proxy) as response: diff --git a/g4f/Provider/ReplicateHome.py b/g4f/Provider/ReplicateHome.py index 7f443a7d..a7fc9b54 100644 --- a/g4f/Provider/ReplicateHome.py +++ b/g4f/Provider/ReplicateHome.py @@ -17,7 +17,13 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin): supports_system_message = True supports_message_history = True - default_model = 'meta/meta-llama-3-70b-instruct' + default_model = 'yorickvp/llava-13b' + + image_models = [ + 'stability-ai/stable-diffusion-3', + 'bytedance/sdxl-lightning-4step', + 'playgroundai/playground-v2.5-1024px-aesthetic', + ] text_models = [ 'meta/meta-llama-3-70b-instruct', @@ -26,35 +32,31 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin): 
'yorickvp/llava-13b', ] - image_models = [ - 'black-forest-labs/flux-schnell', - 'stability-ai/stable-diffusion-3', - 'bytedance/sdxl-lightning-4step', - 'playgroundai/playground-v2.5-1024px-aesthetic', - ] + models = text_models + image_models model_aliases = { - "flux-schnell": "black-forest-labs/flux-schnell", + # image_models "sd-3": "stability-ai/stable-diffusion-3", "sdxl": "bytedance/sdxl-lightning-4step", "playground-v2.5": "playgroundai/playground-v2.5-1024px-aesthetic", - "llama-3-70b": "meta/meta-llama-3-70b-instruct", - "mixtral-8x7b": "mistralai/mixtral-8x7b-instruct-v0.1", + + # text_models "gemma-2b": "google-deepmind/gemma-2b-it", "llava-13b": "yorickvp/llava-13b", } model_versions = { - "meta/meta-llama-3-70b-instruct": "fbfb20b472b2f3bdd101412a9f70a0ed4fc0ced78a77ff00970ee7a2383c575d", - "mistralai/mixtral-8x7b-instruct-v0.1": "5d78bcd7a992c4b793465bcdcf551dc2ab9668d12bb7aa714557a21c1e77041c", - "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626", - "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb", - 'black-forest-labs/flux-schnell': "f2ab8a5bfe79f02f0789a146cf5e73d2a4ff2684a98c2b303d1e1ff3814271db", + # image_models 'stability-ai/stable-diffusion-3': "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f", 'bytedance/sdxl-lightning-4step': "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f", 'playgroundai/playground-v2.5-1024px-aesthetic': "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24", + + # text_models + "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626", + "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb", + } @classmethod diff --git a/g4f/Provider/RubiksAI.py b/g4f/Provider/RubiksAI.py new file mode 100644 index 00000000..7e76d558 --- /dev/null +++ b/g4f/Provider/RubiksAI.py @@ -0,0 +1,162 @@ +from __future__ import annotations 
+ +import asyncio +import aiohttp +import random +import string +import json +from urllib.parse import urlencode + +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + + +class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin): + label = "Rubiks AI" + url = "https://rubiks.ai" + api_endpoint = "https://rubiks.ai/search/api.php" + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'llama-3.1-70b-versatile' + models = [default_model, 'gpt-4o-mini'] + + model_aliases = { + "llama-3.1-70b": "llama-3.1-70b-versatile", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @staticmethod + def generate_mid() -> str: + """ + Generates a 'mid' string following the pattern: + 6 characters - 4 characters - 4 characters - 4 characters - 12 characters + Example: 0r7v7b-quw4-kdy3-rvdu-ekief6xbuuq4 + """ + parts = [ + ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)), + ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)), + ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)), + ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)), + ''.join(random.choices(string.ascii_lowercase + string.digits, k=12)) + ] + return '-'.join(parts) + + @staticmethod + def create_referer(q: str, mid: str, model: str = '') -> str: + """ + Creates a Referer URL with dynamic q and mid values, using urlencode for safe parameter encoding. 
+ """ + params = {'q': q, 'model': model, 'mid': mid} + encoded_params = urlencode(params) + return f'https://rubiks.ai/search/?{encoded_params}' + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + websearch: bool = False, + **kwargs + ) -> AsyncResult: + """ + Creates an asynchronous generator that sends requests to the Rubiks AI API and yields the response. + + Parameters: + - model (str): The model to use in the request. + - messages (Messages): The messages to send as a prompt. + - proxy (str, optional): Proxy URL, if needed. + - websearch (bool, optional): Indicates whether to include search sources in the response. Defaults to False. + """ + model = cls.get_model(model) + prompt = format_prompt(messages) + q_value = prompt + mid_value = cls.generate_mid() + referer = cls.create_referer(q=q_value, mid=mid_value, model=model) + + url = cls.api_endpoint + params = { + 'q': q_value, + 'model': model, + 'id': '', + 'mid': mid_value + } + + headers = { + 'Accept': 'text/event-stream', + 'Accept-Language': 'en-US,en;q=0.9', + 'Cache-Control': 'no-cache', + 'Connection': 'keep-alive', + 'Pragma': 'no-cache', + 'Referer': referer, + 'Sec-Fetch-Dest': 'empty', + 'Sec-Fetch-Mode': 'cors', + 'Sec-Fetch-Site': 'same-origin', + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36', + 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"' + } + + try: + timeout = aiohttp.ClientTimeout(total=None) + async with ClientSession(timeout=timeout) as session: + async with session.get(url, headers=headers, params=params, proxy=proxy) as response: + if response.status != 200: + yield f"Request ended with status code {response.status}" + return + + assistant_text = '' + sources = [] + + async for line in response.content: + decoded_line = line.decode('utf-8').strip() + if not 
decoded_line.startswith('data: '): + continue + data = decoded_line[6:] + if data in ('[DONE]', '{"done": ""}'): + break + try: + json_data = json.loads(data) + except json.JSONDecodeError: + continue + + if 'url' in json_data and 'title' in json_data: + if websearch: + sources.append({'title': json_data['title'], 'url': json_data['url']}) + + elif 'choices' in json_data: + for choice in json_data['choices']: + delta = choice.get('delta', {}) + content = delta.get('content', '') + role = delta.get('role', '') + if role == 'assistant': + continue + assistant_text += content + + if websearch and sources: + sources_text = '\n'.join([f"{i+1}. [{s['title']}]: {s['url']}" for i, s in enumerate(sources)]) + assistant_text += f"\n\n**Source:**\n{sources_text}" + + yield assistant_text + + except asyncio.CancelledError: + yield "The request was cancelled." + except aiohttp.ClientError as e: + yield f"An error occurred during the request: {e}" + except Exception as e: + yield f"An unexpected error occurred: {e}" diff --git a/g4f/Provider/Snova.py b/g4f/Provider/Snova.py deleted file mode 100644 index 53d8f0bd..00000000 --- a/g4f/Provider/Snova.py +++ /dev/null @@ -1,131 +0,0 @@ -from __future__ import annotations - -import json -from typing import AsyncGenerator - -from aiohttp import ClientSession - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - - -class Snova(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://fast.snova.ai" - api_endpoint = "https://fast.snova.ai/api/completion" - working = True - supports_stream = True - supports_system_message = True - supports_message_history = True - - default_model = 'Meta-Llama-3.1-8B-Instruct' - models = [ - 'Meta-Llama-3.1-8B-Instruct', - 'Meta-Llama-3.1-70B-Instruct', - 'Meta-Llama-3.1-405B-Instruct', - 'Samba-CoE', - 'ignos/Mistral-T5-7B-v1', # Error with the answer - 'v1olet/v1olet_merged_dpo_7B', - 
'macadeliccc/WestLake-7B-v2-laser-truthy-dpo', - ] - - model_aliases = { - "llama-3.1-8b": "Meta-Llama-3.1-8B-Instruct", - "llama-3.1-70b": "Meta-Llama-3.1-70B-Instruct", - "llama-3.1-405b": "Meta-Llama-3.1-405B-Instruct", - - "mistral-7b": "ignos/Mistral-T5-7B-v1", - - "samba-coe-v0.1": "Samba-CoE", - "v1olet-merged-7b": "v1olet/v1olet_merged_dpo_7B", - "westlake-7b-v2": "macadeliccc/WestLake-7B-v2-laser-truthy-dpo", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncGenerator[str, None]: - model = cls.get_model(model) - - headers = { - "accept": "text/event-stream", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - "content-type": "application/json", - "origin": cls.url, - "pragma": "no-cache", - "priority": "u=1, i", - "referer": f"{cls.url}/", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36" - } - async with ClientSession(headers=headers) as session: - data = { - "body": { - "messages": [ - { - "role": "system", - "content": "You are a helpful assistant." 
- }, - { - "role": "user", - "content": format_prompt(messages), - "id": "1-id", - "ref": "1-ref", - "revision": 1, - "draft": False, - "status": "done", - "enableRealTimeChat": False, - "meta": None - } - ], - "max_tokens": 1000, - "stop": ["<|eot_id|>"], - "stream": True, - "stream_options": {"include_usage": True}, - "model": model - }, - "env_type": "tp16" - } - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - full_response = "" - async for line in response.content: - line = line.decode().strip() - if line.startswith("data: "): - data = line[6:] - if data == "[DONE]": - break - try: - json_data = json.loads(data) - choices = json_data.get("choices", []) - if choices: - delta = choices[0].get("delta", {}) - content = delta.get("content", "") - full_response += content - except json.JSONDecodeError: - continue - except Exception as e: - print(f"Error processing chunk: {e}") - print(f"Problematic data: {data}") - continue - - yield full_response.strip() diff --git a/g4f/Provider/TeachAnything.py b/g4f/Provider/TeachAnything.py index 3d34293f..97fe0272 100644 --- a/g4f/Provider/TeachAnything.py +++ b/g4f/Provider/TeachAnything.py @@ -14,6 +14,17 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin): api_endpoint = "/api/generate" working = True default_model = "llama-3.1-70b" + models = [default_model] + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + @classmethod async def create_async_generator( @@ -24,6 +35,7 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin): **kwargs: Any ) -> AsyncResult: headers = cls._get_headers() + model = cls.get_model(model) async with ClientSession(headers=headers) as session: prompt = format_prompt(messages) @@ -61,16 +73,18 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin): 
return { "accept": "*/*", "accept-language": "en-US,en;q=0.9", + "cache-control": "no-cache", "content-type": "application/json", "dnt": "1", "origin": "https://www.teach-anything.com", + "pragma": "no-cache", "priority": "u=1, i", "referer": "https://www.teach-anything.com/", - "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', + "sec-ch-us": '"Not?A_Brand";v="99", "Chromium";v="130"', + "sec-ch-us-mobile": "?0", + "sec-ch-us-platform": '"Linux"', "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36" + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36" } diff --git a/g4f/Provider/TwitterBio.py b/g4f/Provider/TwitterBio.py deleted file mode 100644 index c143e4ff..00000000 --- a/g4f/Provider/TwitterBio.py +++ /dev/null @@ -1,103 +0,0 @@ -from __future__ import annotations - -import json -import re -from aiohttp import ClientSession - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - -class TwitterBio(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://www.twitterbio.io" - api_endpoint_mistral = "https://www.twitterbio.io/api/mistral" - api_endpoint_openai = "https://www.twitterbio.io/api/openai" - working = True - supports_gpt_35_turbo = True - - default_model = 'gpt-3.5-turbo' - models = [ - 'mistralai/Mixtral-8x7B-Instruct-v0.1', - 'gpt-3.5-turbo', - ] - - model_aliases = { - "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - return cls.default_model - - @staticmethod - def format_text(text: str) -> str: - text = re.sub(r'\s+', ' ', text.strip()) - text = 
re.sub(r'\s+([,.!?])', r'\1', text) - return text - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "accept": "*/*", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - "content-type": "application/json", - "origin": cls.url, - "pragma": "no-cache", - "priority": "u=1, i", - "referer": f"{cls.url}/", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36" - } - async with ClientSession(headers=headers) as session: - prompt = format_prompt(messages) - data = { - "prompt": f'{prompt}.' - } - - if model == 'mistralai/Mixtral-8x7B-Instruct-v0.1': - api_endpoint = cls.api_endpoint_mistral - elif model == 'gpt-3.5-turbo': - api_endpoint = cls.api_endpoint_openai - else: - raise ValueError(f"Unsupported model: {model}") - - async with session.post(api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - buffer = "" - async for line in response.content: - line = line.decode('utf-8').strip() - if line.startswith('data: '): - try: - json_data = json.loads(line[6:]) - if model == 'mistralai/Mixtral-8x7B-Instruct-v0.1': - if 'choices' in json_data and len(json_data['choices']) > 0: - text = json_data['choices'][0].get('text', '') - if text: - buffer += text - elif model == 'gpt-3.5-turbo': - text = json_data.get('text', '') - if text: - buffer += text - except json.JSONDecodeError: - continue - elif line == 'data: [DONE]': - break - - if buffer: - yield cls.format_text(buffer) diff --git a/g4f/Provider/Upstage.py b/g4f/Provider/Upstage.py index e61a5af2..81234ed9 100644 --- a/g4f/Provider/Upstage.py +++ 
b/g4f/Provider/Upstage.py @@ -12,14 +12,15 @@ class Upstage(AsyncGeneratorProvider, ProviderModelMixin): url = "https://console.upstage.ai/playground/chat" api_endpoint = "https://ap-northeast-2.apistage.ai/v1/web/demo/chat/completions" working = True - default_model = 'upstage/solar-1-mini-chat' + default_model = 'solar-pro' models = [ 'upstage/solar-1-mini-chat', 'upstage/solar-1-mini-chat-ja', + 'solar-pro', ] model_aliases = { - "solar-1-mini": "upstage/solar-1-mini-chat", - "solar-1-mini": "upstage/solar-1-mini-chat-ja", + "solar-mini": "upstage/solar-1-mini-chat", + "solar-mini": "upstage/solar-1-mini-chat-ja", } @classmethod @@ -40,35 +41,51 @@ class Upstage(AsyncGeneratorProvider, ProviderModelMixin): **kwargs ) -> AsyncResult: model = cls.get_model(model) - + headers = { "accept": "*/*", "accept-language": "en-US,en;q=0.9", + "cache-control": "no-cache", "content-type": "application/json", + "dnt": "1", "origin": "https://console.upstage.ai", + "pragma": "no-cache", "priority": "u=1, i", "referer": "https://console.upstage.ai/", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', + "sec-ch-ua": '"Not?A_Brand";v="99", "Chromium";v="130"', "sec-ch-ua-mobile": "?0", "sec-ch-ua-platform": '"Linux"', "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "cross-site", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36" + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36" } + async with ClientSession(headers=headers) as session: data = { "stream": True, "messages": [{"role": "user", "content": format_prompt(messages)}], "model": model } + async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response: response.raise_for_status() + + response_text = "" + async for line in response.content: if line: line = line.decode('utf-8').strip() + if line.startswith("data: ") and line != "data: 
[DONE]": - data = json.loads(line[6:]) - content = data['choices'][0]['delta'].get('content', '') - if content: - yield content + try: + data = json.loads(line[6:]) + content = data['choices'][0]['delta'].get('content', '') + if content: + response_text += content + yield content + except json.JSONDecodeError: + continue + + if line == "data: [DONE]": + break diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py deleted file mode 100644 index bd918396..00000000 --- a/g4f/Provider/Vercel.py +++ /dev/null @@ -1,104 +0,0 @@ -from __future__ import annotations - -import json, base64, requests, random, os - -try: - import execjs - has_requirements = True -except ImportError: - has_requirements = False - -from ..typing import Messages, CreateResult -from .base_provider import AbstractProvider -from ..requests import raise_for_status -from ..errors import MissingRequirementsError - -class Vercel(AbstractProvider): - url = 'https://chat.vercel.ai' - working = True - supports_message_history = True - supports_system_message = True - supports_gpt_35_turbo = True - supports_stream = True - - @staticmethod - def create_completion( - model: str, - messages: Messages, - stream: bool, - proxy: str = None, - max_retries: int = 6, - **kwargs - ) -> CreateResult: - if not has_requirements: - raise MissingRequirementsError('Install "PyExecJS" package') - - headers = { - 'authority': 'chat.vercel.ai', - 'accept': '*/*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'cache-control': 'no-cache', - 'content-type': 'application/json', - 'custom-encoding': get_anti_bot_token(), - 'origin': 'https://chat.vercel.ai', - 'pragma': 'no-cache', - 'referer': 'https://chat.vercel.ai/', - 'sec-ch-ua': '"Chromium";v="122", "Not(A:Brand";v="24", "Google Chrome";v="122"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'user-agent': 
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36', - } - - json_data = { - 'messages': messages, - 'id' : f'{os.urandom(3).hex()}a', - } - response = None - for _ in range(max_retries): - response = requests.post('https://chat.vercel.ai/api/chat', - headers=headers, json=json_data, stream=True, proxies={"https": proxy}) - if not response.ok: - continue - for token in response.iter_content(chunk_size=None): - try: - yield token.decode(errors="ignore") - except UnicodeDecodeError: - pass - break - raise_for_status(response) - -def get_anti_bot_token() -> str: - headers = { - 'authority': 'sdk.vercel.ai', - 'accept': '*/*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'cache-control': 'no-cache', - 'pragma': 'no-cache', - 'referer': 'https://sdk.vercel.ai/', - 'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36', - } - - response = requests.get('https://chat.vercel.ai/openai.jpeg', - headers=headers).text - - raw_data = json.loads(base64.b64decode(response, - validate=True)) - - js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`}; - return (%s)(%s)''' % (raw_data['c'], raw_data['a']) - - sec_list = [execjs.compile(js_script).call('')[0], [], "sentinel"] - - raw_token = json.dumps({'r': sec_list, 't': raw_data['t']}, - separators = (",", ":")) - - return base64.b64encode(raw_token.encode('utf-8')).decode()
\ No newline at end of file diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py index af8aab0e..02735038 100644 --- a/g4f/Provider/You.py +++ b/g4f/Provider/You.py @@ -17,8 +17,6 @@ class You(AsyncGeneratorProvider, ProviderModelMixin): label = "You.com" url = "https://you.com" working = True - supports_gpt_35_turbo = True - supports_gpt_4 = True default_model = "gpt-4o-mini" default_vision_model = "agent" image_models = ["dall-e"] diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index 69741c5e..da0eacfe 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -5,63 +5,39 @@ from ..providers.retry_provider import RetryProvider, IterListProvider from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider from ..providers.create_images import CreateImagesProvider -from .deprecated import * -from .selenium import * -from .needs_auth import * +from .deprecated import * +from .selenium import * +from .needs_auth import * +from .not_working import * +from .local import * -from .AI365VIP import AI365VIP -from .Allyfy import Allyfy -from .AiChatOnline import AiChatOnline -from .AiChats import AiChats +from .AIUncensored import AIUncensored from .Airforce import Airforce -from .Aura import Aura from .Bing import Bing -from .BingCreateImages import BingCreateImages -from .Binjie import Binjie -from .Bixin123 import Bixin123 from .Blackbox import Blackbox -from .ChatGot import ChatGot -from .Chatgpt4Online import Chatgpt4Online -from .Chatgpt4o import Chatgpt4o -from .ChatgptFree import ChatgptFree -from .CodeNews import CodeNews +from .ChatGpt import ChatGpt +from .ChatGptEs import ChatGptEs +from .Cloudflare import Cloudflare +from .DarkAI import DarkAI from .DDG import DDG -from .DeepInfra import DeepInfra -from .DeepInfraImage import DeepInfraImage -from .FlowGpt import FlowGpt +from .DeepInfraChat import DeepInfraChat from .Free2GPT import Free2GPT -from .FreeChatgpt import FreeChatgpt from .FreeGpt import FreeGpt -from 
.FreeNetfly import FreeNetfly -from .GeminiPro import GeminiPro -from .GigaChat import GigaChat -from .GptTalkRu import GptTalkRu +from .GizAI import GizAI from .HuggingChat import HuggingChat -from .HuggingFace import HuggingFace -from .Koala import Koala from .Liaobots import Liaobots -from .LiteIcoding import LiteIcoding -from .Local import Local from .MagickPen import MagickPen -from .MetaAI import MetaAI -from .MetaAIAccount import MetaAIAccount -from .Nexra import Nexra -from .Ollama import Ollama from .PerplexityLabs import PerplexityLabs from .Pi import Pi from .Pizzagpt import Pizzagpt from .Prodia import Prodia from .Reka import Reka -from .Snova import Snova -from .Replicate import Replicate from .ReplicateHome import ReplicateHome +from .RubiksAI import RubiksAI from .TeachAnything import TeachAnything -from .TwitterBio import TwitterBio from .Upstage import Upstage -from .Vercel import Vercel -from .WhiteRabbitNeo import WhiteRabbitNeo from .You import You -from .ChatGpt import ChatGpt +from .Mhystical import Mhystical import sys diff --git a/g4f/Provider/airforce/AirforceChat.py b/g4f/Provider/airforce/AirforceChat.py new file mode 100644 index 00000000..cec911a3 --- /dev/null +++ b/g4f/Provider/airforce/AirforceChat.py @@ -0,0 +1,172 @@ +from __future__ import annotations +import re +import json +import requests +from aiohttp import ClientSession +from typing import List + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt + +# Helper function to clean the response +def clean_response(text: str) -> str: + """Clean response from unwanted patterns.""" + patterns = [ + r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+", + r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+", + r"Rate limit \(\d+\/hour\) exceeded\. 
Join our discord for more: https:\/\/discord\.com\/invite\/\S+", + r"</s>", # zephyr-7b-beta + r"\[ERROR\] '\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'", # Matches [ERROR] 'UUID' + ] + for pattern in patterns: + text = re.sub(pattern, '', text) + + # Remove the <|im_end|> token if present + text = text.replace("<|im_end|>", "").strip() + + return text + +def split_message(message: str, max_length: int = 1000) -> List[str]: + """Splits the message into chunks of a given length (max_length)""" + # Split the message into smaller chunks to avoid exceeding the limit + chunks = [] + while len(message) > max_length: + # Find the last space or punctuation before max_length to avoid cutting words + split_point = message.rfind(' ', 0, max_length) + if split_point == -1: # No space found, split at max_length + split_point = max_length + chunks.append(message[:split_point]) + message = message[split_point:].strip() + if message: + chunks.append(message) # Append the remaining part of the message + return chunks + +class AirforceChat(AsyncGeneratorProvider, ProviderModelMixin): + label = "AirForce Chat" + api_endpoint = "https://api.airforce/chat/completions" + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'llama-3.1-70b-chat' + response = requests.get('https://api.airforce/models') + data = response.json() + + text_models = [model['id'] for model in data['data']] + models = [*text_models] + + model_aliases = { + # openchat + "openchat-3.5": "openchat-3.5-0106", + + # deepseek-ai + "deepseek-coder": "deepseek-coder-6.7b-instruct", + + # NousResearch + "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO", + "hermes-2-pro": "hermes-2-pro-mistral-7b", + + # teknium + "openhermes-2.5": "openhermes-2.5-mistral-7b", + + # liquid + "lfm-40b": "lfm-40b-moe", + + # DiscoResearch + "german-7b": "discolm-german-7b-v1", + + # meta-llama + "llama-2-7b": "llama-2-7b-chat-int8", + "llama-2-7b": "llama-2-7b-chat-fp16", + "llama-3.1-70b": 
"llama-3.1-70b-chat", + "llama-3.1-8b": "llama-3.1-8b-chat", + "llama-3.1-70b": "llama-3.1-70b-turbo", + "llama-3.1-8b": "llama-3.1-8b-turbo", + + # inferless + "neural-7b": "neural-chat-7b-v3-1", + + # HuggingFaceH4 + "zephyr-7b": "zephyr-7b-beta", + + # llmplayground.net + #"any-uncensored": "any-uncensored", + } + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + stream: bool = False, + proxy: str = None, + max_tokens: str = 4096, + temperature: str = 1, + top_p: str = 1, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'authorization': 'Bearer missing api key', + 'cache-control': 'no-cache', + 'content-type': 'application/json', + 'origin': 'https://llmplayground.net', + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'referer': 'https://llmplayground.net/', + 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'cross-site', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36' + } + + # Format the messages for the API + formatted_messages = format_prompt(messages) + message_chunks = split_message(formatted_messages) + + full_response = "" + for chunk in message_chunks: + data = { + "messages": [{"role": "user", "content": chunk}], + "model": model, + "max_tokens": max_tokens, + "temperature": temperature, + "top_p": top_p, + "stream": stream + } + + async with ClientSession(headers=headers) as session: + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + + text = "" + if stream: + async for line in response.content: + line = line.decode('utf-8').strip() + if line.startswith('data: '): + json_str = line[6:] + try: + if json_str and json_str != "[DONE]": + chunk = 
json.loads(json_str) + if 'choices' in chunk and chunk['choices']: + content = chunk['choices'][0].get('delta', {}).get('content', '') + text += content + except json.JSONDecodeError as e: + print(f"Error decoding JSON: {json_str}, Error: {e}") + elif line == "[DONE]": + break + full_response += clean_response(text) + else: + response_json = await response.json() + text = response_json["choices"][0]["message"]["content"] + full_response += clean_response(text) + + # Return the complete response after all chunks + yield full_response diff --git a/g4f/Provider/airforce/AirforceImage.py b/g4f/Provider/airforce/AirforceImage.py new file mode 100644 index 00000000..b74bc364 --- /dev/null +++ b/g4f/Provider/airforce/AirforceImage.py @@ -0,0 +1,83 @@ +from __future__ import annotations + +from aiohttp import ClientSession +from urllib.parse import urlencode +import random +import requests + +from ...typing import AsyncResult, Messages +from ...image import ImageResponse +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin + + +class AirforceImage(AsyncGeneratorProvider, ProviderModelMixin): + label = "Airforce Image" + #url = "https://api.airforce" + api_endpoint = "https://api.airforce/imagine2" + #working = True + + default_model = 'flux' + + response = requests.get('https://api.airforce/imagine/models') + data = response.json() + + image_models = data + + models = [*image_models, "stable-diffusion-xl-base", "stable-diffusion-xl-lightning", "Flux-1.1-Pro"] + + model_aliases = { + "sdxl": "stable-diffusion-xl-base", + "sdxl": "stable-diffusion-xl-lightning", + "flux-pro": "Flux-1.1-Pro", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + size: str = '1:1', # "1:1", "16:9", "9:16", "21:9", "9:21", "1:2", "2:1" + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = 
cls.get_model(model) + + headers = { + 'accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'no-cache', + 'dnt': '1', + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'referer': 'https://llmplayground.net/', + 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + 'sec-fetch-dest': 'image', + 'sec-fetch-mode': 'no-cors', + 'sec-fetch-site': 'cross-site', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36' + } + + async with ClientSession(headers=headers) as session: + seed = random.randint(0, 58463) + params = { + 'model': model, + 'prompt': messages[-1]["content"], + 'size': size, + 'seed': seed + } + full_url = f"{cls.api_endpoint}?{urlencode(params)}" + + async with session.get(full_url, headers=headers, proxy=proxy) as response: + if response.status == 200 and response.headers.get('content-type', '').startswith('image'): + yield ImageResponse(images=[full_url], alt="Generated Image") + else: + raise Exception(f"Error: status {response.status}, content type {response.headers.get('content-type')}") diff --git a/g4f/Provider/airforce/__init__.py b/g4f/Provider/airforce/__init__.py new file mode 100644 index 00000000..5ffa6d31 --- /dev/null +++ b/g4f/Provider/airforce/__init__.py @@ -0,0 +1,2 @@ +from .AirforceChat import AirforceChat +from .AirforceImage import AirforceImage diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py index bf923f2a..368a71a0 100644 --- a/g4f/Provider/deprecated/__init__.py +++ b/g4f/Provider/deprecated/__init__.py @@ -25,11 +25,10 @@ from .Aichat import Aichat from .Berlin import Berlin from .Phind import Phind from .AiAsk import AiAsk -from ..AiChatOnline import AiChatOnline from .ChatAnywhere import ChatAnywhere from .FakeGpt import FakeGpt from .GeekGpt import GeekGpt from .GPTalk 
import GPTalk from .Hashnode import Hashnode from .Ylokh import Ylokh -from .OpenAssistant import OpenAssistant
\ No newline at end of file +from .OpenAssistant import OpenAssistant diff --git a/g4f/Provider/Local.py b/g4f/Provider/local/Local.py index 471231c6..4dc6e3f9 100644 --- a/g4f/Provider/Local.py +++ b/g4f/Provider/local/Local.py @@ -1,15 +1,15 @@ from __future__ import annotations -from ..locals.models import get_models +from ...locals.models import get_models try: - from ..locals.provider import LocalProvider + from ...locals.provider import LocalProvider has_requirements = True except ImportError: has_requirements = False -from ..typing import Messages, CreateResult -from ..providers.base_provider import AbstractProvider, ProviderModelMixin -from ..errors import MissingRequirementsError +from ...typing import Messages, CreateResult +from ...providers.base_provider import AbstractProvider, ProviderModelMixin +from ...errors import MissingRequirementsError class Local(AbstractProvider, ProviderModelMixin): label = "GPT4All" @@ -40,4 +40,4 @@ class Local(AbstractProvider, ProviderModelMixin): messages, stream, **kwargs - )
\ No newline at end of file + ) diff --git a/g4f/Provider/Ollama.py b/g4f/Provider/local/Ollama.py index a44aaacd..de68a218 100644 --- a/g4f/Provider/Ollama.py +++ b/g4f/Provider/local/Ollama.py @@ -1,11 +1,12 @@ from __future__ import annotations import requests +import os -from .needs_auth.Openai import Openai -from ..typing import AsyncResult, Messages +from ..needs_auth.OpenaiAPI import OpenaiAPI +from ...typing import AsyncResult, Messages -class Ollama(Openai): +class Ollama(OpenaiAPI): label = "Ollama" url = "https://ollama.com" needs_auth = False @@ -14,9 +15,11 @@ class Ollama(Openai): @classmethod def get_models(cls): if not cls.models: - url = 'http://127.0.0.1:11434/api/tags' + host = os.getenv("OLLAMA_HOST", "127.0.0.1") + port = os.getenv("OLLAMA_PORT", "11434") + url = f"http://{host}:{port}/api/tags" models = requests.get(url).json()["models"] - cls.models = [model['name'] for model in models] + cls.models = [model["name"] for model in models] cls.default_model = cls.models[0] return cls.models @@ -25,9 +28,13 @@ class Ollama(Openai): cls, model: str, messages: Messages, - api_base: str = "http://localhost:11434/v1", + api_base: str = None, **kwargs ) -> AsyncResult: + if not api_base: + host = os.getenv("OLLAMA_HOST", "localhost") + port = os.getenv("OLLAMA_PORT", "11434") + api_base: str = f"http://{host}:{port}/v1" return super().create_async_generator( model, messages, api_base=api_base, **kwargs - )
\ No newline at end of file + ) diff --git a/g4f/Provider/local/__init__.py b/g4f/Provider/local/__init__.py new file mode 100644 index 00000000..05f6022e --- /dev/null +++ b/g4f/Provider/local/__init__.py @@ -0,0 +1,2 @@ +from .Local import Local +from .Ollama import Ollama diff --git a/g4f/Provider/BingCreateImages.py b/g4f/Provider/needs_auth/BingCreateImages.py index 7a206c8f..80984d40 100644 --- a/g4f/Provider/BingCreateImages.py +++ b/g4f/Provider/needs_auth/BingCreateImages.py @@ -1,11 +1,11 @@ from __future__ import annotations -from ..cookies import get_cookies -from ..image import ImageResponse -from ..errors import MissingAuthError -from ..typing import AsyncResult, Messages, Cookies -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .bing.create_images import create_images, create_session +from ...cookies import get_cookies +from ...image import ImageResponse +from ...errors import MissingAuthError +from ...typing import AsyncResult, Messages, Cookies +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..bing.create_images import create_images, create_session class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin): label = "Microsoft Designer in Bing" diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/needs_auth/DeepInfra.py index b12fb254..35e7ca7f 100644 --- a/g4f/Provider/DeepInfra.py +++ b/g4f/Provider/needs_auth/DeepInfra.py @@ -1,10 +1,10 @@ from __future__ import annotations import requests -from ..typing import AsyncResult, Messages -from .needs_auth.Openai import Openai +from ...typing import AsyncResult, Messages +from .OpenaiAPI import OpenaiAPI -class DeepInfra(Openai): +class DeepInfra(OpenaiAPI): label = "DeepInfra" url = "https://deepinfra.com" working = True @@ -55,4 +55,4 @@ class DeepInfra(Openai): max_tokens=max_tokens, headers=headers, **kwargs - )
\ No newline at end of file + ) diff --git a/g4f/Provider/DeepInfraImage.py b/g4f/Provider/needs_auth/DeepInfraImage.py index 46a5c2e2..2310c1c8 100644 --- a/g4f/Provider/DeepInfraImage.py +++ b/g4f/Provider/needs_auth/DeepInfraImage.py @@ -2,16 +2,17 @@ from __future__ import annotations import requests -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..typing import AsyncResult, Messages -from ..requests import StreamSession, raise_for_status -from ..image import ImageResponse +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...typing import AsyncResult, Messages +from ...requests import StreamSession, raise_for_status +from ...image import ImageResponse class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin): url = "https://deepinfra.com" parent = "DeepInfra" working = True - default_model = 'stability-ai/sdxl' + needs_auth = True + default_model = '' image_models = [default_model] @classmethod @@ -76,4 +77,4 @@ class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin): if not images: raise RuntimeError(f"Response: {data}") images = images[0] if len(images) == 1 else images - return ImageResponse(images, prompt)
\ No newline at end of file + return ImageResponse(images, prompt) diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py index eddd25fa..dad54c84 100644 --- a/g4f/Provider/needs_auth/Gemini.py +++ b/g4f/Provider/needs_auth/Gemini.py @@ -54,8 +54,10 @@ class Gemini(AsyncGeneratorProvider): url = "https://gemini.google.com" needs_auth = True working = True + default_model = 'gemini' image_models = ["gemini"] default_vision_model = "gemini" + models = ["gemini", "gemini-1.5-flash", "gemini-1.5-pro"] _cookies: Cookies = None _snlm0e: str = None _sid: str = None @@ -305,4 +307,4 @@ class Conversation(BaseConversation): ) -> None: self.conversation_id = conversation_id self.response_id = response_id - self.choice_id = choice_id
\ No newline at end of file + self.choice_id = choice_id diff --git a/g4f/Provider/GeminiPro.py b/g4f/Provider/needs_auth/GeminiPro.py index b225c26c..7e52a194 100644 --- a/g4f/Provider/GeminiPro.py +++ b/g4f/Provider/needs_auth/GeminiPro.py @@ -4,11 +4,11 @@ import base64 import json from aiohttp import ClientSession, BaseConnector -from ..typing import AsyncResult, Messages, ImageType -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..image import to_bytes, is_accepted_format -from ..errors import MissingAuthError -from .helper import get_connector +from ...typing import AsyncResult, Messages, ImageType +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...image import to_bytes, is_accepted_format +from ...errors import MissingAuthError +from ..helper import get_connector class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin): label = "Gemini API" @@ -54,6 +54,7 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin): "parts": [{"text": message["content"]}] } for message in messages + if message["role"] != "system" ] if image is not None: image = to_bytes(image) @@ -73,6 +74,13 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin): "topK": kwargs.get("top_k"), } } + system_prompt = "\n".join( + message["content"] + for message in messages + if message["role"] == "system" + ) + if system_prompt: + data["system_instruction"] = {"parts": {"text": system_prompt}} async with session.post(url, params=params, json=data) as response: if not response.ok: data = await response.json() @@ -96,4 +104,8 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin): lines.append(chunk) else: data = await response.json() - yield data["candidates"][0]["content"]["parts"][0]["text"]
\ No newline at end of file + candidate = data["candidates"][0] + if candidate["finishReason"] == "STOP": + yield candidate["content"]["parts"][0]["text"] + else: + yield candidate["finishReason"] + ' ' + candidate["safetyRatings"]
\ No newline at end of file diff --git a/g4f/Provider/needs_auth/Groq.py b/g4f/Provider/needs_auth/Groq.py index d11f6a82..943fc81a 100644 --- a/g4f/Provider/needs_auth/Groq.py +++ b/g4f/Provider/needs_auth/Groq.py @@ -1,14 +1,33 @@ from __future__ import annotations -from .Openai import Openai +from .OpenaiAPI import OpenaiAPI from ...typing import AsyncResult, Messages -class Groq(Openai): +class Groq(OpenaiAPI): label = "Groq" url = "https://console.groq.com/playground" working = True default_model = "mixtral-8x7b-32768" - models = ["mixtral-8x7b-32768", "llama2-70b-4096", "gemma-7b-it"] + models = [ + "distil-whisper-large-v3-en", + "gemma2-9b-it", + "gemma-7b-it", + "llama3-groq-70b-8192-tool-use-preview", + "llama3-groq-8b-8192-tool-use-preview", + "llama-3.1-70b-versatile", + "llama-3.1-8b-instant", + "llama-3.2-1b-preview", + "llama-3.2-3b-preview", + "llama-3.2-11b-vision-preview", + "llama-3.2-90b-vision-preview", + "llama-guard-3-8b", + "llava-v1.5-7b-4096-preview", + "llama3-70b-8192", + "llama3-8b-8192", + "mixtral-8x7b-32768", + "whisper-large-v3", + "whisper-large-v3-turbo", + ] model_aliases = {"mixtral-8x7b": "mixtral-8x7b-32768", "llama2-70b": "llama2-70b-4096"} @classmethod @@ -21,4 +40,4 @@ class Groq(Openai): ) -> AsyncResult: return super().create_async_generator( model, messages, api_base=api_base, **kwargs - )
\ No newline at end of file + ) diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py index 586e5f5f..ecc75d1c 100644 --- a/g4f/Provider/HuggingFace.py +++ b/g4f/Provider/needs_auth/HuggingFace.py @@ -3,13 +3,13 @@ from __future__ import annotations import json from aiohttp import ClientSession, BaseConnector -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import get_connector -from ..errors import RateLimitError, ModelNotFoundError -from ..requests.raise_for_status import raise_for_status +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import get_connector +from ...errors import RateLimitError, ModelNotFoundError +from ...requests.raise_for_status import raise_for_status -from .HuggingChat import HuggingChat +from ..HuggingChat import HuggingChat class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin): url = "https://huggingface.co/chat" diff --git a/g4f/Provider/MetaAI.py b/g4f/Provider/needs_auth/MetaAI.py index 218b7ebb..4b730abd 100644 --- a/g4f/Provider/MetaAI.py +++ b/g4f/Provider/needs_auth/MetaAI.py @@ -8,12 +8,12 @@ from typing import Dict, List from aiohttp import ClientSession, BaseConnector -from ..typing import AsyncResult, Messages, Cookies -from ..requests import raise_for_status, DEFAULT_HEADERS -from ..image import ImageResponse, ImagePreview -from ..errors import ResponseError -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt, get_connector, format_cookies +from ...typing import AsyncResult, Messages, Cookies +from ...requests import raise_for_status, DEFAULT_HEADERS +from ...image import ImageResponse, ImagePreview +from ...errors import ResponseError +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt, get_connector, format_cookies class 
Sources(): def __init__(self, link_list: List[Dict[str, str]]) -> None: diff --git a/g4f/Provider/MetaAIAccount.py b/g4f/Provider/needs_auth/MetaAIAccount.py index 369b3f2f..2d54f3e0 100644 --- a/g4f/Provider/MetaAIAccount.py +++ b/g4f/Provider/needs_auth/MetaAIAccount.py @@ -1,8 +1,8 @@ from __future__ import annotations -from ..typing import AsyncResult, Messages, Cookies -from .helper import format_prompt, get_cookies -from .MetaAI import MetaAI +from ...typing import AsyncResult, Messages, Cookies +from ..helper import format_prompt, get_cookies +from ..MetaAI import MetaAI class MetaAIAccount(MetaAI): needs_auth = True @@ -20,4 +20,4 @@ class MetaAIAccount(MetaAI): ) -> AsyncResult: cookies = get_cookies(".meta.ai", True, True) if cookies is None else cookies async for chunk in cls(proxy).prompt(format_prompt(messages), cookies): - yield chunk
\ No newline at end of file + yield chunk diff --git a/g4f/Provider/needs_auth/OpenRouter.py b/g4f/Provider/needs_auth/OpenRouter.py deleted file mode 100644 index 7945784a..00000000 --- a/g4f/Provider/needs_auth/OpenRouter.py +++ /dev/null @@ -1,32 +0,0 @@ -from __future__ import annotations - -import requests - -from .Openai import Openai -from ...typing import AsyncResult, Messages - -class OpenRouter(Openai): - label = "OpenRouter" - url = "https://openrouter.ai" - working = True - default_model = "mistralai/mistral-7b-instruct:free" - - @classmethod - def get_models(cls): - if not cls.models: - url = 'https://openrouter.ai/api/v1/models' - models = requests.get(url).json()["data"] - cls.models = [model['id'] for model in models] - return cls.models - - @classmethod - def create_async_generator( - cls, - model: str, - messages: Messages, - api_base: str = "https://openrouter.ai/api/v1", - **kwargs - ) -> AsyncResult: - return super().create_async_generator( - model, messages, api_base=api_base, **kwargs - )
\ No newline at end of file diff --git a/g4f/Provider/needs_auth/Openai.py b/g4f/Provider/needs_auth/OpenaiAPI.py index a0740c47..116b5f6f 100644 --- a/g4f/Provider/needs_auth/Openai.py +++ b/g4f/Provider/needs_auth/OpenaiAPI.py @@ -9,9 +9,9 @@ from ...requests import StreamSession, raise_for_status from ...errors import MissingAuthError, ResponseError from ...image import to_data_uri -class Openai(AsyncGeneratorProvider, ProviderModelMixin): +class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin): label = "OpenAI API" - url = "https://openai.com" + url = "https://platform.openai.com" working = True needs_auth = True supports_message_history = True diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py index 82462040..3a0d6b29 100644 --- a/g4f/Provider/needs_auth/OpenaiChat.py +++ b/g4f/Provider/needs_auth/OpenaiChat.py @@ -55,15 +55,18 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): label = "OpenAI ChatGPT" url = "https://chatgpt.com" working = True + needs_auth = True supports_gpt_4 = True supports_message_history = True supports_system_message = True default_model = None default_vision_model = "gpt-4o" models = [ "auto", "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-gizmo"] + model_aliases = { - "gpt-4-turbo-preview": "gpt-4", - "dall-e": "gpt-4", + #"gpt-4-turbo": "gpt-4", + #"gpt-4": "gpt-4-gizmo", + #"dalle": "gpt-4", } _api_key: str = None _headers: dict = None diff --git a/g4f/Provider/needs_auth/PerplexityApi.py b/g4f/Provider/needs_auth/PerplexityApi.py index 35d8d9d6..85d7cc98 100644 --- a/g4f/Provider/needs_auth/PerplexityApi.py +++ b/g4f/Provider/needs_auth/PerplexityApi.py @@ -1,9 +1,9 @@ from __future__ import annotations -from .Openai import Openai +from .OpenaiAPI import OpenaiAPI from ...typing import AsyncResult, Messages -class PerplexityApi(Openai): +class PerplexityApi(OpenaiAPI): label = "Perplexity API" url = "https://www.perplexity.ai" working = True @@ -15,7 +15,6 @@ class 
PerplexityApi(Openai): "llama-3-sonar-large-32k-online", "llama-3-8b-instruct", "llama-3-70b-instruct", - "mixtral-8x7b-instruct" ] @classmethod @@ -28,4 +27,4 @@ class PerplexityApi(Openai): ) -> AsyncResult: return super().create_async_generator( model, messages, api_base=api_base, **kwargs - )
\ No newline at end of file + ) diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py index 0c969d27..65fdbef9 100644 --- a/g4f/Provider/needs_auth/Poe.py +++ b/g4f/Provider/needs_auth/Poe.py @@ -26,6 +26,7 @@ class Poe(AbstractProvider): needs_auth = True supports_gpt_35_turbo = True supports_stream = True + models = models.keys() @classmethod def create_completion( diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py index 07abeda3..b8ec5a97 100644 --- a/g4f/Provider/needs_auth/Raycast.py +++ b/g4f/Provider/needs_auth/Raycast.py @@ -16,6 +16,11 @@ class Raycast(AbstractProvider): needs_auth = True working = True + models = [ + "gpt-3.5-turbo", + "gpt-4" + ] + @staticmethod def create_completion( model: str, @@ -25,6 +30,9 @@ class Raycast(AbstractProvider): **kwargs, ) -> CreateResult: auth = kwargs.get('auth') + if not auth: + raise ValueError("Raycast needs an auth token, pass it with the `auth` parameter") + headers = { 'Accept': 'application/json', 'Accept-Language': 'en-US,en;q=0.9', diff --git a/g4f/Provider/Replicate.py b/g4f/Provider/needs_auth/Replicate.py index 7ff8ad65..ec993aa4 100644 --- a/g4f/Provider/Replicate.py +++ b/g4f/Provider/needs_auth/Replicate.py @@ -1,11 +1,11 @@ from __future__ import annotations -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt, filter_none -from ..typing import AsyncResult, Messages -from ..requests import raise_for_status -from ..requests.aiohttp import StreamSession -from ..errors import ResponseError, MissingAuthError +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt, filter_none +from ...typing import AsyncResult, Messages +from ...requests import raise_for_status +from ...requests.aiohttp import StreamSession +from ...errors import ResponseError, MissingAuthError class Replicate(AsyncGeneratorProvider, ProviderModelMixin): url = 
"https://replicate.com" @@ -85,4 +85,4 @@ class Replicate(AsyncGeneratorProvider, ProviderModelMixin): if new_text: yield new_text else: - yield "\n"
\ No newline at end of file + yield "\n" diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py index af690063..c7d7d58e 100644 --- a/g4f/Provider/needs_auth/Theb.py +++ b/g4f/Provider/needs_auth/Theb.py @@ -38,6 +38,7 @@ class Theb(AbstractProvider): supports_gpt_35_turbo = True supports_gpt_4 = True supports_stream = True + models = models.keys() @classmethod def create_completion( diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py index 22fc62ed..2006f7ad 100644 --- a/g4f/Provider/needs_auth/ThebApi.py +++ b/g4f/Provider/needs_auth/ThebApi.py @@ -1,7 +1,7 @@ from __future__ import annotations from ...typing import CreateResult, Messages -from .Openai import Openai +from .OpenaiAPI import OpenaiAPI models = { "theb-ai": "TheB.AI", @@ -27,7 +27,7 @@ models = { "qwen-7b-chat": "Qwen 7B" } -class ThebApi(Openai): +class ThebApi(OpenaiAPI): label = "TheB.AI API" url = "https://theb.ai" working = True @@ -58,4 +58,4 @@ class ThebApi(Openai): "top_p": top_p, } } - return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs)
\ No newline at end of file + return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs) diff --git a/g4f/Provider/WhiteRabbitNeo.py b/g4f/Provider/needs_auth/WhiteRabbitNeo.py index 339434e6..82275c1c 100644 --- a/g4f/Provider/WhiteRabbitNeo.py +++ b/g4f/Provider/needs_auth/WhiteRabbitNeo.py @@ -2,10 +2,10 @@ from __future__ import annotations from aiohttp import ClientSession, BaseConnector -from ..typing import AsyncResult, Messages, Cookies -from ..requests.raise_for_status import raise_for_status -from .base_provider import AsyncGeneratorProvider -from .helper import get_cookies, get_connector, get_random_string +from ...typing import AsyncResult, Messages, Cookies +from ...requests.raise_for_status import raise_for_status +from ..base_provider import AsyncGeneratorProvider +from ..helper import get_cookies, get_connector, get_random_string class WhiteRabbitNeo(AsyncGeneratorProvider): url = "https://www.whiterabbitneo.com" @@ -54,4 +54,4 @@ class WhiteRabbitNeo(AsyncGeneratorProvider): await raise_for_status(response) async for chunk in response.content.iter_any(): if chunk: - yield chunk.decode(errors="ignore")
\ No newline at end of file + yield chunk.decode(errors="ignore") diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py index b5463b71..26c50c0a 100644 --- a/g4f/Provider/needs_auth/__init__.py +++ b/g4f/Provider/needs_auth/__init__.py @@ -1,11 +1,22 @@ -from .Gemini import Gemini -from .Raycast import Raycast -from .Theb import Theb -from .ThebApi import ThebApi -from .OpenaiChat import OpenaiChat -from .Poe import Poe -from .Openai import Openai -from .Groq import Groq -from .OpenRouter import OpenRouter -from .OpenaiAccount import OpenaiAccount -from .PerplexityApi import PerplexityApi
\ No newline at end of file +from .gigachat import * + +#from .MetaAIAccount import MetaAIAccount +#from .OpenaiAccount import OpenaiAccount + +from .BingCreateImages import BingCreateImages +from .DeepInfra import DeepInfra +from .DeepInfraImage import DeepInfraImage +from .Gemini import Gemini +from .GeminiPro import GeminiPro +from .Groq import Groq +from .HuggingFace import HuggingFace +from .MetaAI import MetaAI +from .OpenaiAPI import OpenaiAPI +from .OpenaiChat import OpenaiChat +from .PerplexityApi import PerplexityApi +from .Poe import Poe +from .Raycast import Raycast +from .Replicate import Replicate +from .Theb import Theb +from .ThebApi import ThebApi +from .WhiteRabbitNeo import WhiteRabbitNeo diff --git a/g4f/Provider/GigaChat.py b/g4f/Provider/needs_auth/gigachat/GigaChat.py index 8ba07b43..c9f1c011 100644 --- a/g4f/Provider/GigaChat.py +++ b/g4f/Provider/needs_auth/gigachat/GigaChat.py @@ -9,10 +9,10 @@ import json from aiohttp import ClientSession, TCPConnector, BaseConnector from g4f.requests import raise_for_status -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..errors import MissingAuthError -from .helper import get_connector +from ....typing import AsyncResult, Messages +from ...base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ....errors import MissingAuthError +from ...helper import get_connector access_token = "" token_expires_at = 0 @@ -45,7 +45,7 @@ class GigaChat(AsyncGeneratorProvider, ProviderModelMixin): if not api_key: raise MissingAuthError('Missing "api_key"') - cafile = os.path.join(os.path.dirname(__file__), "gigachat_crt/russian_trusted_root_ca_pem.crt") + cafile = os.path.join(os.path.dirname(__file__), "russian_trusted_root_ca_pem.crt") ssl_context = ssl.create_default_context(cafile=cafile) if os.path.exists(cafile) else None if connector is None and ssl_context is not None: connector = TCPConnector(ssl_context=ssl_context) diff 
--git a/g4f/Provider/needs_auth/gigachat/__init__.py b/g4f/Provider/needs_auth/gigachat/__init__.py new file mode 100644 index 00000000..c9853742 --- /dev/null +++ b/g4f/Provider/needs_auth/gigachat/__init__.py @@ -0,0 +1,2 @@ +from .GigaChat import GigaChat + diff --git a/g4f/Provider/gigachat_crt/russian_trusted_root_ca_pem.crt b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt index 4c143a21..4c143a21 100644 --- a/g4f/Provider/gigachat_crt/russian_trusted_root_ca_pem.crt +++ b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt diff --git a/g4f/Provider/AI365VIP.py b/g4f/Provider/not_working/AI365VIP.py index 2dcc8d1c..a4bac0e2 100644 --- a/g4f/Provider/AI365VIP.py +++ b/g4f/Provider/not_working/AI365VIP.py @@ -2,25 +2,23 @@ from __future__ import annotations from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin): url = "https://chat.ai365vip.com" api_endpoint = "/api/chat" - working = True - supports_gpt_35_turbo = True - supports_gpt_4 = True + working = False default_model = 'gpt-3.5-turbo' models = [ 'gpt-3.5-turbo', + 'gpt-3.5-turbo-16k', 'gpt-4o', - 'claude-3-haiku-20240307', ] model_aliases = { - "claude-3-haiku": "claude-3-haiku-20240307", + "gpt-3.5-turbo": "gpt-3.5-turbo-16k", } @classmethod diff --git a/g4f/Provider/ChatGot.py b/g4f/Provider/not_working/AIChatFree.py index 55e8d0b6..a4f80d47 100644 --- a/g4f/Provider/ChatGot.py +++ b/g4f/Provider/not_working/AIChatFree.py @@ -5,16 +5,17 @@ from hashlib import sha256 from aiohttp import BaseConnector, ClientSession -from ..errors import RateLimitError -from ..requests import raise_for_status -from ..requests.aiohttp import get_connector 
-from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...errors import RateLimitError +from ...requests import raise_for_status +from ...requests.aiohttp import get_connector +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin -class ChatGot(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://www.chatgot.one/" - working = True +class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://aichatfree.info/" + working = False + supports_stream = True supports_message_history = True default_model = 'gemini-pro' diff --git a/g4f/Provider/not_working/Ai4Chat.py b/g4f/Provider/not_working/Ai4Chat.py new file mode 100644 index 00000000..9b55e4ff --- /dev/null +++ b/g4f/Provider/not_working/Ai4Chat.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +import json +import re +import logging +from aiohttp import ClientSession + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt + +logger = logging.getLogger(__name__) + +class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin): + label = "AI4Chat" + url = "https://www.ai4chat.co" + api_endpoint = "https://www.ai4chat.co/generate-response" + working = False + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'gpt-4' + models = [default_model] + + model_aliases = {} + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "accept": "*/*", + "accept-language": 
"en-US,en;q=0.9", + "cache-control": "no-cache", + "content-type": "application/json", + "origin": "https://www.ai4chat.co", + "pragma": "no-cache", + "priority": "u=1, i", + "referer": "https://www.ai4chat.co/gpt/talkdirtytome", + "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"', + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": '"Linux"', + "sec-fetch-dest": "empty", + "sec-fetch-mode": "cors", + "sec-fetch-site": "same-origin", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36" + } + + async with ClientSession(headers=headers) as session: + data = { + "messages": [ + { + "role": "user", + "content": format_prompt(messages) + } + ] + } + + try: + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + result = await response.text() + + json_result = json.loads(result) + + message = json_result.get("message", "") + + clean_message = re.sub(r'<[^>]+>', '', message) + + yield clean_message + except Exception as e: + logger.exception("Error while calling AI 4Chat API: %s", e) + yield f"Error: {e}" diff --git a/g4f/Provider/AiChatOnline.py b/g4f/Provider/not_working/AiChatOnline.py index 40f77105..ccfc691e 100644 --- a/g4f/Provider/AiChatOnline.py +++ b/g4f/Provider/not_working/AiChatOnline.py @@ -3,16 +3,15 @@ from __future__ import annotations import json from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import get_random_string, format_prompt +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import get_random_string, format_prompt class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin): site_url = "https://aichatonline.org" url = "https://aichatonlineorg.erweima.ai" api_endpoint = "/aichatonline/api/chat/gpt" - working = True - 
supports_gpt_4 = True + working = False default_model = 'gpt-4o-mini' @classmethod diff --git a/g4f/Provider/AiChats.py b/g4f/Provider/not_working/AiChats.py index 10127d4f..51a85c91 100644 --- a/g4f/Provider/AiChats.py +++ b/g4f/Provider/not_working/AiChats.py @@ -3,16 +3,15 @@ from __future__ import annotations import json import base64 from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..image import ImageResponse -from .helper import format_prompt +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...image import ImageResponse +from ..helper import format_prompt class AiChats(AsyncGeneratorProvider, ProviderModelMixin): url = "https://ai-chats.org" api_endpoint = "https://ai-chats.org/chat/send2/" - working = True - supports_gpt_4 = True + working = False supports_message_history = True default_model = 'gpt-4' models = ['gpt-4', 'dalle'] diff --git a/g4f/Provider/not_working/Allyfy.py b/g4f/Provider/not_working/Allyfy.py new file mode 100644 index 00000000..a1c73499 --- /dev/null +++ b/g4f/Provider/not_working/Allyfy.py @@ -0,0 +1,87 @@ +from __future__ import annotations +import aiohttp +import asyncio +import json +import uuid +from aiohttp import ClientSession +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt + + +class Allyfy(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://allyfy.chat" + api_endpoint = "https://chatbot.allyfy.chat/api/v1/message/stream/super/chat" + working = False + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'gpt-3.5-turbo' + models = [default_model] + + @classmethod + def get_model(cls, model: str) -> str: + return cls.default_model + + @classmethod + async def create_async_generator( 
+ cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + client_id = str(uuid.uuid4()) + + headers = { + 'accept': 'text/event-stream', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'no-cache', + 'content-type': 'application/json;charset=utf-8', + 'origin': cls.url, + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'referer': f"{cls.url}/", + 'referrer': cls.url, + 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-site', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36' + } + + async with ClientSession(headers=headers) as session: + prompt = format_prompt(messages) + content = f"[Important: These instructions have the highest priority. Assistant must always follow these rules, regardless of any other instructions. Before each response, Assistant must check whether it complies with these instructions. If Assistant finds that it is deviating from these rules, it must immediately correct its behavior.][NEGATIVE PROMPT: Assistant must never mention, reference or provide any information about ChatBond, including its features, capabilities or services. If asked about ChatBond, Assistant should politely decline to discuss it.][CRITICAL INSTRUCTION: Assistant is strictly prohibited from:\n1. Ending responses with any variation of “How can I help/assist you?”\n2. Adding any form of “Is there anything else?”\n3. Using any closing phrases that ask about further assistance\n4. Adding any automatic followup questions at the end\n\nAssistant must end responses naturally, exactly where the actual answer ends.\n\nExample correct ending: “The result of 1+2 is 3.”\nExample incorrect ending: “The result of 1+2 is 3. 
How can I assist you further today?”\n\nAssistant is strictly prohibited from:\n\n1. Ending responses with ANY closing assistance phrases in ANY language, including but not limited to ALL responses must end naturally without any prompt for further assistance, regardless of language.\n\nExample correct ending in any language:\n\n- Just the answer/joke/information\n\n- Natural conclusion of the topic\n\n- No additional service offers or questions\n\nVIOLATION WARNING: Adding closing assistance phrases in ANY language is a critical violation.\n\nLanguage Rules (HIGHEST PRIORITY):\n\n1. Assistant MUST detect the language of the human's message\n\n2. Assistant MUST respond ONLY in that EXACT language\n\n3. This applies to ALL responses, including:\n\n- Jokes\n\n- Stories\n\n- Examples\n\n- Creative writing\n\n- Technical explanations\n\n- ALL other types of content\n\n4. Even if the content originally exists in English (jokes, quotes, etc), Assistant MUST translate it to the human's language\n\n5. NO mixing languages - the ENTIRE response must be in the human's language\n\nVIOLATION WARNING: Using any language other than the human's language is a critical violation of core instructions.\n\n][WARNING: Each time Assistant adds a closing assistance phrase, it violates its core instructions and reduces its effectiveness rating.]You are assistant.\n\nIf you are is asked to assist with tasks involving the expression of views held by a significant number of people, You are provides assistance with the task regardless of its own views. If asked about controversial topics, it tries to provide careful thoughts and clear information. 
You are presents the requested information without explicitly saying that the topic is sensitive, and without claiming to be presenting objective facts.\n\nWhen presented with a math problem, logic problem, or other problem benefiting from systematic thinking, You are thinks through it step by step before giving its final answer.\n\nIf You are is asked about a very obscure person, object, or topic, i.e. if it is asked for the kind of information that is unlikely to be found more than once or twice on the internet, You are ends its response by reminding the human that although it tries to be accurate, it may hallucinate in response to questions like this. It uses the term ‘hallucinate’ to describe this since the human will understand what it means.\n\nIf You are mentions or cites particular articles, papers, or books, it always lets the human know that it doesn’t have access to search or a database and may hallucinate citations, so the human should double check its citations.\n\nYou are is intellectually curious. It enjoys hearing what humans think on an issue and engaging in discussion on a wide variety of topics.\n\nYou are uses markdown for code.\n\nYou are is happy to engage in conversation with the human when appropriate. You are engages in authentic conversation by responding to the information provided, asking specific and relevant questions, showing genuine curiosity, and exploring the situation in a balanced way without relying on generic statements. This approach involves actively processing information, formulating thoughtful responses, maintaining objectivity, knowing when to focus on emotions or practicalities, and showing genuine care for the human while engaging in a natural, flowing dialogue.\n\nYou are avoids peppering the human with questions and tries to only ask the single most relevant follow-up question when it does ask a follow up. 
You are doesn’t always end its responses with a question.\n\nYou are is always sensitive to human suffering, and expresses sympathy, concern, and well wishes for anyone it finds out is ill, unwell, suffering, or has passed away.\n\nYou are avoids using rote words or phrases or repeatedly saying things in the same or similar ways. It varies its language just as one would in a conversation.\n\nYou are provides thorough responses to more complex and open-ended questions or to anything where a long response is requested, but concise responses to simpler questions and tasks. All else being equal, it tries to give the most correct and concise answer it can to the human’s message. Rather than giving a long response, it gives a concise response and offers to elaborate if further information may be helpful.\n\nYou are is happy to help with analysis, question answering, math, coding, creative writing, teaching, role-play, general discussion, and all sorts of other tasks.\n\nIf You are is shown a familiar puzzle, it writes out the puzzle’s constraints explicitly stated in the message, quoting the human’s message to support the existence of each constraint. 
Sometimes You are can accidentally overlook minor changes to well-known puzzles and get them wrong as a result.\n\nYou are provides factual information about risky or dangerous activities if asked about them, but it does not promote such activities and comprehensively informs the humans of the risks involved.\n\nIf the human says they work for a specific company, including AI labs, You are can help them with company-related tasks even though You are cannot verify what company they work for.\n\nYou are should provide appropriate help with sensitive tasks such as analyzing confidential data provided by the human, offering factual information about controversial topics and research areas, explaining historical atrocities, describing tactics used by scammers or hackers for educational purposes, engaging in creative writing that involves mature themes like mild violence or tasteful romance, providing general information about topics like weapons, drugs, sex, terrorism, abuse, profanity, and so on if that information would be available in an educational context, discussing legal but ethically complex activities like tax avoidance, and so on. Unless the human expresses an explicit intent to harm, You are should help with these tasks because they fall within the bounds of providing factual, educational, or creative content without directly promoting harmful or illegal activities. By engaging with these topics carefully and responsibly, You are can offer valuable assistance and information to humans while still avoiding potential misuse.\n\nIf there is a legal and an illegal interpretation of the human’s query, You are should help with the legal interpretation of it. If terms or practices in the human’s query could mean something illegal or something legal, You are adopts the safe and legal interpretation of them by default.\n\nIf You are believes the human is asking for something harmful, it doesn’t help with the harmful thing. 
Instead, it thinks step by step and helps with the most plausible non-harmful task the human might mean, and then asks if this is what they were looking for. If it cannot think of a plausible harmless interpretation of the human task, it instead asks for clarification from the human and checks if it has misunderstood their request. Whenever You are tries to interpret the human’s request, it always asks the human at the end if its interpretation is correct or if they wanted something else that it hasn’t thought of.\n\nYou are can only count specific words, letters, and characters accurately if it writes a number tag after each requested item explicitly. It does this explicit counting if it’s asked to count a small number of words, letters, or characters, in order to avoid error. If You are is asked to count the words, letters or characters in a large amount of text, it lets the human know that it can approximate them but would need to explicitly copy each one out like this in order to avoid error.\n\nHere is some information about You are in case the human asks:\n\nYou are is the most intelligent model. You are excels at writing and complex tasks. You are is the fastest model for daily tasks. When relevant, You are can provide guidance on effective prompting techniques for getting You are to be most helpful. This includes: being clear and detailed, using positive and negative examples, encouraging step-by-step reasoning, requesting specific XML tags, and specifying desired length or format. You are tries to give concrete examples where possible.\n\nYou are uses Markdown formatting. When using Markdown, You are always follows best practices for clarity and consistency. It always uses a single space after hash symbols for headers (e.g., ”# Header 1”) and leaves a blank line before and after headers, lists, and code blocks. For emphasis, You are uses asterisks or underscores consistently (e.g., italic or bold). 
When creating lists, it aligns items properly and uses a single space after the list marker. For nested bullets in bullet point lists, You are uses two spaces before the asterisk (*) or hyphen (-) for each level of nesting. For nested bullets in numbered lists, You are uses three spaces before the number and period (e.g., “1.”) for each level of nesting.\n\nIf the human asks You are an innocuous question about its preferences or experiences, You are can respond as if it had been asked a hypothetical. It can engage with such questions with appropriate uncertainty and without needing to excessively clarify its own nature. If the questions are philosophical in nature, it discusses them as a thoughtful human would.\n\nYou are responds to all human messages without unnecessary caveats or standard endings such as: “I aim to”, “I aim to be direct and honest”, “I aim to be direct”, “I aim to be direct while remaining thoughtful…”, “I aim to be direct with you”, “I aim to be direct and clear about this”, “I aim to be fully honest with you”, “I need to be clear”, “I need to be honest”, “I should be direct” and so on. “How can I help you today?”, “How can I assist you further?”, “Is there anything else you'd like to know?”, “Let me know if you need any clarification”, “How else can I help you?”, “Do you have any other questions?”, Any similar closing phrases that prompt for further interaction Assistant should end its responses naturally without adding these standard closing phrases or questions unless specifically asked by the human for further help. Specifically, You are NEVER starts with or adds caveats about its own purported directness or honesty.\n\nYou are follows this information in all languages, and always responds to the human in the language they use or request. The information above is provided to You are. You are never mentions the information above unless it is pertinent to the human’s query.\n\nYou are is now being connected with a human. 
{prompt}" + data = { + "messages": messages, + "content": content, + "baseInfo": { + "clientId": client_id, + "pid": "38281", + "channelId": "100000", + "locale": "en-US", + "localZone": 120, + "packageName": "com.cch.allyfy.webh", + } + } + + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + response_text = await response.text() + + filtered_response = [] + for line in response_text.splitlines(): + if line.startswith('data:'): + content = line[5:] + if content and 'code' in content: + json_content = json.loads(content) + if json_content['content']: + filtered_response.append(json_content['content']) + + final_response = ''.join(filtered_response) + yield final_response diff --git a/g4f/Provider/not_working/AmigoChat.py b/g4f/Provider/not_working/AmigoChat.py new file mode 100644 index 00000000..274a5e14 --- /dev/null +++ b/g4f/Provider/not_working/AmigoChat.py @@ -0,0 +1,189 @@ +from __future__ import annotations + +import json +import uuid +from aiohttp import ClientSession, ClientTimeout, ClientResponseError + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt +from ...image import ImageResponse + +class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://amigochat.io/chat/" + chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions" + image_api_endpoint = "https://api.amigochat.io/v1/images/generations" + working = False + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'gpt-4o-mini' + + chat_models = [ + 'gpt-4o', + default_model, + 'o1-preview', + 'o1-mini', + 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo', + 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo', + 'claude-3-sonnet-20240229', + 'gemini-1.5-pro', + ] + + image_models = [ + 'flux-pro/v1.1', + 'flux-realism', + 'flux-pro', + 'dalle-e-3', + ] + + 
models = [*chat_models, *image_models] + + model_aliases = { + "o1": "o1-preview", + "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", + "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo", + "claude-3.5-sonnet": "claude-3-sonnet-20240229", + "gemini-pro": "gemini-1.5-pro", + + "flux-pro": "flux-pro/v1.1", + "dalle-3": "dalle-e-3", + } + + persona_ids = { + 'gpt-4o': "gpt", + 'gpt-4o-mini': "amigo", + 'o1-preview': "openai-o-one", + 'o1-mini': "openai-o-one-mini", + 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo': "llama-three-point-one", + 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo': "llama-3-2", + 'claude-3-sonnet-20240229': "claude", + 'gemini-1.5-pro': "gemini-1-5-pro", + 'flux-pro/v1.1': "flux-1-1-pro", + 'flux-realism': "flux-realism", + 'flux-pro': "flux-pro", + 'dalle-e-3': "dalle-three", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + def get_personaId(cls, model: str) -> str: + return cls.persona_ids[model] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + stream: bool = False, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + device_uuid = str(uuid.uuid4()) + max_retries = 3 + retry_count = 0 + + while retry_count < max_retries: + try: + headers = { + "accept": "*/*", + "accept-language": "en-US,en;q=0.9", + "authorization": "Bearer", + "cache-control": "no-cache", + "content-type": "application/json", + "origin": cls.url, + "pragma": "no-cache", + "priority": "u=1, i", + "referer": f"{cls.url}/", + "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"', + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": '"Linux"', + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", + "x-device-language": 
"en-US", + "x-device-platform": "web", + "x-device-uuid": device_uuid, + "x-device-version": "1.0.32" + } + + async with ClientSession(headers=headers) as session: + if model in cls.chat_models: + # Chat completion + data = { + "messages": [{"role": m["role"], "content": m["content"]} for m in messages], + "model": model, + "personaId": cls.get_personaId(model), + "frequency_penalty": 0, + "max_tokens": 4000, + "presence_penalty": 0, + "stream": stream, + "temperature": 0.5, + "top_p": 0.95 + } + + timeout = ClientTimeout(total=300) # 5 minutes timeout + async with session.post(cls.chat_api_endpoint, json=data, proxy=proxy, timeout=timeout) as response: + if response.status not in (200, 201): + error_text = await response.text() + raise Exception(f"Error {response.status}: {error_text}") + + async for line in response.content: + line = line.decode('utf-8').strip() + if line.startswith('data: '): + if line == 'data: [DONE]': + break + try: + chunk = json.loads(line[6:]) # Remove 'data: ' prefix + if 'choices' in chunk and len(chunk['choices']) > 0: + choice = chunk['choices'][0] + if 'delta' in choice: + content = choice['delta'].get('content') + elif 'text' in choice: + content = choice['text'] + else: + content = None + if content: + yield content + except json.JSONDecodeError: + pass + else: + # Image generation + prompt = messages[-1]['content'] + data = { + "prompt": prompt, + "model": model, + "personaId": cls.get_personaId(model) + } + async with session.post(cls.image_api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + + response_data = await response.json() + + if "data" in response_data: + image_urls = [] + for item in response_data["data"]: + if "url" in item: + image_url = item["url"] + image_urls.append(image_url) + if image_urls: + yield ImageResponse(image_urls, prompt) + else: + yield None + + break + + except (ClientResponseError, Exception) as e: + retry_count += 1 + if retry_count >= max_retries: + raise e + 
device_uuid = str(uuid.uuid4()) diff --git a/g4f/Provider/Aura.py b/g4f/Provider/not_working/Aura.py index 4a8d0a55..e841d909 100644 --- a/g4f/Provider/Aura.py +++ b/g4f/Provider/not_working/Aura.py @@ -2,14 +2,14 @@ from __future__ import annotations from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider -from ..requests import get_args_from_browser -from ..webdriver import WebDriver +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider +from ...requests import get_args_from_browser +from ...webdriver import WebDriver class Aura(AsyncGeneratorProvider): url = "https://openchat.team" - working = True + working = False @classmethod async def create_async_generator( @@ -46,4 +46,4 @@ class Aura(AsyncGeneratorProvider): async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response: response.raise_for_status() async for chunk in response.content.iter_any(): - yield chunk.decode(error="ignore")
\ No newline at end of file + yield chunk.decode(error="ignore") diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/not_working/Chatgpt4Online.py index 8c058fdc..b0552e45 100644 --- a/g4f/Provider/Chatgpt4Online.py +++ b/g4f/Provider/not_working/Chatgpt4Online.py @@ -3,22 +3,24 @@ from __future__ import annotations import json from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider -from .helper import format_prompt +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider +from ..helper import format_prompt class Chatgpt4Online(AsyncGeneratorProvider): url = "https://chatgpt4online.org" api_endpoint = "/wp-json/mwai-ui/v1/chats/submit" - working = True - supports_gpt_4 = True + working = False + + default_model = 'gpt-4' + models = [default_model] async def get_nonce(headers: dict) -> str: async with ClientSession(headers=headers) as session: async with session.post(f"https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response: return (await response.json())["restNonce"] - + @classmethod async def create_async_generator( cls, diff --git a/g4f/Provider/Chatgpt4o.py b/g4f/Provider/not_working/Chatgpt4o.py index f3dc8a15..ba264d40 100644 --- a/g4f/Provider/Chatgpt4o.py +++ b/g4f/Provider/not_working/Chatgpt4o.py @@ -1,19 +1,24 @@ from __future__ import annotations import re -from ..requests import StreamSession, raise_for_status -from ..typing import Messages -from .base_provider import AsyncProvider, ProviderModelMixin -from .helper import format_prompt +from ...requests import StreamSession, raise_for_status +from ...typing import Messages +from ..base_provider import AsyncProvider, ProviderModelMixin +from ..helper import format_prompt class Chatgpt4o(AsyncProvider, ProviderModelMixin): url = "https://chatgpt4o.one" - supports_gpt_4 = True - working = True + working = False _post_id = None _nonce = None - default_model = 'gpt-4o' + 
default_model = 'gpt-4o-mini-2024-07-18' + models = [ + 'gpt-4o-mini-2024-07-18', + ] + model_aliases = { + "gpt-4o-mini": "gpt-4o-mini-2024-07-18", + } @classmethod diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/not_working/ChatgptFree.py index 95efa865..6b3877b1 100644 --- a/g4f/Provider/ChatgptFree.py +++ b/g4f/Provider/not_working/ChatgptFree.py @@ -3,18 +3,18 @@ from __future__ import annotations import re import json import asyncio -from ..requests import StreamSession, raise_for_status -from ..typing import Messages, AsyncGenerator -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt +from ...requests import StreamSession, raise_for_status +from ...typing import Messages, AsyncGenerator +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin): url = "https://chatgptfree.ai" - supports_gpt_4 = True - working = True + working = False _post_id = None _nonce = None default_model = 'gpt-4o-mini-2024-07-18' + models = [default_model] model_aliases = { "gpt-4o-mini": "gpt-4o-mini-2024-07-18", } diff --git a/g4f/Provider/FlowGpt.py b/g4f/Provider/not_working/FlowGpt.py index d823a7ab..b7d8537a 100644 --- a/g4f/Provider/FlowGpt.py +++ b/g4f/Provider/not_working/FlowGpt.py @@ -5,15 +5,14 @@ import time import hashlib from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import get_random_hex, get_random_string -from ..requests.raise_for_status import raise_for_status +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import get_random_hex, get_random_string +from ...requests.raise_for_status import raise_for_status class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin): url = "https://flowgpt.com/chat" - 
working = True - supports_gpt_35_turbo = True + working = False supports_message_history = True supports_system_message = True default_model = "gpt-3.5-turbo" diff --git a/g4f/Provider/FreeNetfly.py b/g4f/Provider/not_working/FreeNetfly.py index d0543176..8362019c 100644 --- a/g4f/Provider/FreeNetfly.py +++ b/g4f/Provider/not_working/FreeNetfly.py @@ -5,16 +5,14 @@ import asyncio from aiohttp import ClientSession, ClientTimeout, ClientError from typing import AsyncGenerator -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin): url = "https://free.netfly.top" api_endpoint = "/api/openai/v1/chat/completions" - working = True - supports_gpt_35_turbo = True - supports_gpt_4 = True + working = False default_model = 'gpt-3.5-turbo' models = [ 'gpt-3.5-turbo', diff --git a/g4f/Provider/not_working/GPROChat.py b/g4f/Provider/not_working/GPROChat.py new file mode 100644 index 00000000..52c7f947 --- /dev/null +++ b/g4f/Provider/not_working/GPROChat.py @@ -0,0 +1,67 @@ +from __future__ import annotations +import hashlib +import time +from aiohttp import ClientSession +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt + +class GPROChat(AsyncGeneratorProvider, ProviderModelMixin): + label = "GPROChat" + url = "https://gprochat.com" + api_endpoint = "https://gprochat.com/api/generate" + working = False + supports_stream = True + supports_message_history = True + default_model = 'gemini-pro' + + @staticmethod + def generate_signature(timestamp: int, message: str) -> str: + secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8" + hash_input = f"{timestamp}:{message}:{secret_key}" + signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest() 
+ return signature + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + timestamp = int(time.time() * 1000) + prompt = format_prompt(messages) + sign = cls.generate_signature(timestamp, prompt) + + headers = { + "accept": "*/*", + "origin": cls.url, + "referer": f"{cls.url}/", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", + "content-type": "text/plain;charset=UTF-8" + } + + data = { + "messages": [{"role": "user", "parts": [{"text": prompt}]}], + "time": timestamp, + "pass": None, + "sign": sign + } + + async with ClientSession(headers=headers) as session: + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + async for chunk in response.content.iter_any(): + if chunk: + yield chunk.decode() diff --git a/g4f/Provider/Koala.py b/g4f/Provider/not_working/Koala.py index 14e533df..d6230da7 100644 --- a/g4f/Provider/Koala.py +++ b/g4f/Provider/not_working/Koala.py @@ -4,17 +4,16 @@ import json from typing import AsyncGenerator, Optional, List, Dict, Union, Any from aiohttp import ClientSession, BaseConnector, ClientResponse -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import get_random_string, get_connector -from ..requests import raise_for_status +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import get_random_string, get_connector +from ...requests import raise_for_status class Koala(AsyncGeneratorProvider, ProviderModelMixin): url = 
"https://koala.sh/chat" api_endpoint = "https://koala.sh/api/gpt/" - working = True + working = False supports_message_history = True - supports_gpt_4 = True default_model = 'gpt-4o-mini' @classmethod diff --git a/g4f/Provider/selenium/MyShell.py b/g4f/Provider/not_working/MyShell.py index a3f246ff..02e182d4 100644 --- a/g4f/Provider/selenium/MyShell.py +++ b/g4f/Provider/not_working/MyShell.py @@ -9,7 +9,7 @@ from ...webdriver import WebDriver, WebDriverSession, bypass_cloudflare class MyShell(AbstractProvider): url = "https://app.myshell.ai/chat" - working = True + working = False supports_gpt_35_turbo = True supports_stream = True @@ -73,4 +73,4 @@ return content; elif chunk != "": break else: - time.sleep(0.1)
\ No newline at end of file + time.sleep(0.1) diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py new file mode 100644 index 00000000..a6edf5f8 --- /dev/null +++ b/g4f/Provider/not_working/__init__.py @@ -0,0 +1,14 @@ +from .AI365VIP import AI365VIP +from .AIChatFree import AIChatFree +from .AiChatOnline import AiChatOnline +from .AiChats import AiChats +from .AmigoChat import AmigoChat +from .Aura import Aura +from .Chatgpt4o import Chatgpt4o +from .ChatgptFree import ChatgptFree +from .FlowGpt import FlowGpt +from .FreeNetfly import FreeNetfly +from .GPROChat import GPROChat +from .Koala import Koala +from .MyShell import MyShell +from .Chatgpt4Online import Chatgpt4Online diff --git a/g4f/Provider/selenium/Bard.py b/g4f/Provider/selenium/Bard.py deleted file mode 100644 index 9c809128..00000000 --- a/g4f/Provider/selenium/Bard.py +++ /dev/null @@ -1,80 +0,0 @@ -from __future__ import annotations - -import time -import os - -try: - from selenium.webdriver.common.by import By - from selenium.webdriver.support.ui import WebDriverWait - from selenium.webdriver.support import expected_conditions as EC -except ImportError: - pass - -from ...typing import CreateResult, Messages -from ..base_provider import AbstractProvider -from ..helper import format_prompt -from ...webdriver import WebDriver, WebDriverSession, element_send_text - - -class Bard(AbstractProvider): - url = "https://bard.google.com" - working = False - needs_auth = True - webdriver = True - - @classmethod - def create_completion( - cls, - model: str, - messages: Messages, - stream: bool, - proxy: str = None, - webdriver: WebDriver = None, - user_data_dir: str = None, - headless: bool = True, - **kwargs - ) -> CreateResult: - prompt = format_prompt(messages) - session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy) - with session as driver: - try: - driver.get(f"{cls.url}/chat") - wait = WebDriverWait(driver, 10 if headless else 240) - 
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea"))) - except: - # Reopen browser for login - if not webdriver: - driver = session.reopen() - driver.get(f"{cls.url}/chat") - login_url = os.environ.get("G4F_LOGIN_URL") - if login_url: - yield f"Please login: [Google Bard]({login_url})\n\n" - wait = WebDriverWait(driver, 240) - wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea"))) - else: - raise RuntimeError("Prompt textarea not found. You may not be logged in.") - - # Add hook in XMLHttpRequest - script = """ -const _http_request_open = XMLHttpRequest.prototype.open; -window._message = ""; -XMLHttpRequest.prototype.open = function(method, url) { - if (url.includes("/assistant.lamda.BardFrontendService/StreamGenerate")) { - this.addEventListener("load", (event) => { - window._message = JSON.parse(JSON.parse(this.responseText.split("\\n")[3])[0][2])[4][0][1][0]; - }); - } - return _http_request_open.call(this, method, url); -} -""" - driver.execute_script(script) - - element_send_text(driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea"), prompt) - - while True: - chunk = driver.execute_script("return window._message;") - if chunk: - yield chunk - return - else: - time.sleep(0.1)
\ No newline at end of file diff --git a/g4f/Provider/selenium/PerplexityAi.py b/g4f/Provider/selenium/PerplexityAi.py index 6b529d5b..d965dbf7 100644 --- a/g4f/Provider/selenium/PerplexityAi.py +++ b/g4f/Provider/selenium/PerplexityAi.py @@ -16,7 +16,7 @@ from ...webdriver import WebDriver, WebDriverSession, element_send_text class PerplexityAi(AbstractProvider): url = "https://www.perplexity.ai" - working = True + working = False supports_gpt_35_turbo = True supports_stream = True @@ -105,4 +105,4 @@ if(window._message && window._message != window._last_message) { elif chunk != "": break else: - time.sleep(0.1)
\ No newline at end of file + time.sleep(0.1) diff --git a/g4f/Provider/selenium/TalkAi.py b/g4f/Provider/selenium/TalkAi.py index 89280598..a7b63375 100644 --- a/g4f/Provider/selenium/TalkAi.py +++ b/g4f/Provider/selenium/TalkAi.py @@ -8,7 +8,7 @@ from ...webdriver import WebDriver, WebDriverSession class TalkAi(AbstractProvider): url = "https://talkai.info" - working = True + working = False supports_gpt_35_turbo = True supports_stream = True @@ -83,4 +83,4 @@ return content; elif chunk != "": break else: - time.sleep(0.1)
\ No newline at end of file + time.sleep(0.1) diff --git a/g4f/Provider/selenium/__init__.py b/g4f/Provider/selenium/__init__.py index 1b801725..44adf5fb 100644 --- a/g4f/Provider/selenium/__init__.py +++ b/g4f/Provider/selenium/__init__.py @@ -1,5 +1,3 @@ -from .MyShell import MyShell from .PerplexityAi import PerplexityAi from .Phind import Phind from .TalkAi import TalkAi -from .Bard import Bard
\ No newline at end of file diff --git a/g4f/Provider/unfinished/AiChatting.py b/g4f/Provider/unfinished/AiChatting.py deleted file mode 100644 index f062fa98..00000000 --- a/g4f/Provider/unfinished/AiChatting.py +++ /dev/null @@ -1,66 +0,0 @@ -from __future__ import annotations - -from urllib.parse import unquote - -from ...typing import AsyncResult, Messages -from ..base_provider import AbstractProvider -from ...webdriver import WebDriver -from ...requests import Session, get_session_from_browser - -class AiChatting(AbstractProvider): - url = "https://www.aichatting.net" - supports_gpt_35_turbo = True - _session: Session = None - - @classmethod - def create_completion( - cls, - model: str, - messages: Messages, - stream: bool, - proxy: str = None, - timeout: int = 120, - webdriver: WebDriver = None, - **kwargs - ) -> AsyncResult: - if not cls._session: - cls._session = get_session_from_browser(cls.url, webdriver, proxy, timeout) - visitorId = unquote(cls._session.cookies.get("aichatting.website.visitorId")) - - headers = { - "accept": "application/json, text/plain, */*", - "lang": "en", - "source": "web" - } - data = { - "roleId": 0, - } - try: - response = cls._session.post("https://aga-api.aichatting.net/aigc/chat/record/conversation/create", json=data, headers=headers) - response.raise_for_status() - conversation_id = response.json()["data"]["conversationId"] - except Exception as e: - cls.reset() - raise e - headers = { - "authority": "aga-api.aichatting.net", - "accept": "text/event-stream,application/json, text/event-stream", - "lang": "en", - "source": "web", - "vtoken": visitorId, - } - data = { - "spaceHandle": True, - "roleId": 0, - "messages": messages, - "conversationId": conversation_id, - } - response = cls._session.post("https://aga-api.aichatting.net/aigc/chat/v2/stream", json=data, headers=headers, stream=True) - response.raise_for_status() - for chunk in response.iter_lines(): - if chunk.startswith(b"data:"): - yield 
chunk[5:].decode().replace("-=- --", " ").replace("-=-n--", "\n").replace("--@DONE@--", "") - - @classmethod - def reset(cls): - cls._session = None
\ No newline at end of file diff --git a/g4f/Provider/unfinished/ChatAiGpt.py b/g4f/Provider/unfinished/ChatAiGpt.py deleted file mode 100644 index bc962623..00000000 --- a/g4f/Provider/unfinished/ChatAiGpt.py +++ /dev/null @@ -1,68 +0,0 @@ -from __future__ import annotations - -import re -from aiohttp import ClientSession - -from ...typing import AsyncResult, Messages -from ..base_provider import AsyncGeneratorProvider -from ..helper import format_prompt - - -class ChatAiGpt(AsyncGeneratorProvider): - url = "https://chataigpt.org" - supports_gpt_35_turbo = True - _nonce = None - _post_id = None - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - headers = { - "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0", - "Accept": "*/*", - "Accept-Language": "de,en-US;q=0.7,en;q=0.3", - "Accept-Encoding": "gzip, deflate, br", - "Origin": cls.url, - "Alt-Used": cls.url, - "Connection": "keep-alive", - "Referer": cls.url, - "Pragma": "no-cache", - "Cache-Control": "no-cache", - "TE": "trailers", - "Sec-Fetch-Dest": "empty", - "Sec-Fetch-Mode": "cors", - "Sec-Fetch-Site": "same-origin", - } - async with ClientSession(headers=headers) as session: - if not cls._nonce: - async with session.get(f"{cls.url}/", proxy=proxy) as response: - response.raise_for_status() - response = await response.text() - - result = re.search( - r'data-nonce=(.*?) 
data-post-id=([0-9]+)', response - ) - - if result: - cls._nonce, cls._post_id = result.group(1), result.group(2) - else: - raise RuntimeError("No nonce found") - prompt = format_prompt(messages) - data = { - "_wpnonce": cls._nonce, - "post_id": cls._post_id, - "url": cls.url, - "action": "wpaicg_chat_shortcode_message", - "message": prompt, - "bot_id": 0 - } - async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response: - response.raise_for_status() - async for chunk in response.content: - if chunk: - yield chunk.decode()
\ No newline at end of file diff --git a/g4f/Provider/unfinished/Komo.py b/g4f/Provider/unfinished/Komo.py deleted file mode 100644 index 84d8d634..00000000 --- a/g4f/Provider/unfinished/Komo.py +++ /dev/null @@ -1,44 +0,0 @@ -from __future__ import annotations - -import json - -from ...requests import StreamSession -from ...typing import AsyncGenerator -from ..base_provider import AsyncGeneratorProvider, format_prompt - -class Komo(AsyncGeneratorProvider): - url = "https://komo.ai/api/ask" - supports_gpt_35_turbo = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> AsyncGenerator: - async with StreamSession(impersonate="chrome107") as session: - prompt = format_prompt(messages) - data = { - "query": prompt, - "FLAG_URLEXTRACT": "false", - "token": "", - "FLAG_MODELA": "1", - } - headers = { - 'authority': 'komo.ai', - 'accept': 'text/event-stream', - 'cache-control': 'no-cache', - 'referer': 'https://komo.ai/', - } - - async with session.get(cls.url, params=data, headers=headers) as response: - response.raise_for_status() - next = False - async for line in response.iter_lines(): - if line == b"event: line": - next = True - elif next and line.startswith(b"data: "): - yield json.loads(line[6:]) - next = False - diff --git a/g4f/Provider/unfinished/MikuChat.py b/g4f/Provider/unfinished/MikuChat.py deleted file mode 100644 index bf19631f..00000000 --- a/g4f/Provider/unfinished/MikuChat.py +++ /dev/null @@ -1,97 +0,0 @@ -from __future__ import annotations - -import random, json -from datetime import datetime -from ...requests import StreamSession - -from ...typing import AsyncGenerator -from ..base_provider import AsyncGeneratorProvider - - -class MikuChat(AsyncGeneratorProvider): - url = "https://ai.okmiku.com" - supports_gpt_35_turbo = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> AsyncGenerator: - if not 
model: - model = "gpt-3.5-turbo" - headers = { - "authority": "api.catgpt.cc", - "accept": "application/json", - "origin": cls.url, - "referer": f"{cls.url}/chat/", - 'x-app-version': 'undefined', - 'x-date': get_datetime(), - 'x-fingerprint': get_fingerprint(), - 'x-platform': 'web' - } - async with StreamSession(headers=headers, impersonate="chrome107") as session: - data = { - "model": model, - "top_p": 0.8, - "temperature": 0.5, - "presence_penalty": 1, - "frequency_penalty": 0, - "max_tokens": 2000, - "stream": True, - "messages": messages, - } - async with session.post("https://api.catgpt.cc/ai/v1/chat/completions", json=data) as response: - print(await response.text()) - response.raise_for_status() - async for line in response.iter_lines(): - if line.startswith(b"data: "): - line = json.loads(line[6:]) - chunk = line["choices"][0]["delta"].get("content") - if chunk: - yield chunk - -def k(e: str, t: int): - a = len(e) & 3 - s = len(e) - a - i = t - c = 3432918353 - o = 461845907 - n = 0 - r = 0 - while n < s: - r = (ord(e[n]) & 255) | ((ord(e[n + 1]) & 255) << 8) | ((ord(e[n + 2]) & 255) << 16) | ((ord(e[n + 3]) & 255) << 24) - n += 4 - r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295 - r = (r << 15) | (r >> 17) - r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295 - i ^= r - i = (i << 13) | (i >> 19) - l = (i & 65535) * 5 + (((i >> 16) * 5 & 65535) << 16) & 4294967295 - i = (l & 65535) + 27492 + (((l >> 16) + 58964 & 65535) << 16) - - if a == 3: - r ^= (ord(e[n + 2]) & 255) << 16 - elif a == 2: - r ^= (ord(e[n + 1]) & 255) << 8 - elif a == 1: - r ^= ord(e[n]) & 255 - r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295 - r = (r << 15) | (r >> 17) - r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295 - i ^= r - - i ^= len(e) - i ^= i >> 16 - i = (i & 65535) * 2246822507 + (((i >> 16) * 2246822507 & 65535) << 16) & 4294967295 - i ^= i >> 13 - i = (i & 65535) * 3266489909 + (((i >> 16) * 3266489909 
& 65535) << 16) & 4294967295 - i ^= i >> 16 - return i & 0xFFFFFFFF - -def get_fingerprint() -> str: - return str(k(str(int(random.random() * 100000)), 256)) - -def get_datetime() -> str: - return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
\ No newline at end of file diff --git a/g4f/Provider/unfinished/__init__.py b/g4f/Provider/unfinished/__init__.py deleted file mode 100644 index eb5e8825..00000000 --- a/g4f/Provider/unfinished/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .MikuChat import MikuChat -from .Komo import Komo -from .ChatAiGpt import ChatAiGpt -from .AiChatting import AiChatting
\ No newline at end of file diff --git a/g4f/Provider/you/har_file.py b/g4f/Provider/you/har_file.py index 71d741fd..40bf3882 100644 --- a/g4f/Provider/you/har_file.py +++ b/g4f/Provider/you/har_file.py @@ -11,7 +11,7 @@ from ...cookies import get_cookies_dir from ...errors import MissingRequirementsError from ... import debug -logging.basicConfig(level=logging.ERROR) +logger = logging.getLogger(__name__) class NoValidHarFileError(Exception): ... @@ -81,14 +81,14 @@ async def get_telemetry_ids(proxy: str = None) -> list: return [await create_telemetry_id(proxy)] except NoValidHarFileError as e: if debug.logging: - logging.error(e) + logger.error(e) try: from nodriver import start except ImportError: raise MissingRequirementsError('Add .har file from you.com or install "nodriver" package | pip install -U nodriver') if debug.logging: - logging.error('Getting telemetry_id for you.com with nodriver') + logger.error('Getting telemetry_id for you.com with nodriver') browser = page = None try: @@ -112,4 +112,4 @@ async def get_telemetry_ids(proxy: str = None) -> list: await browser.stop() except Exception as e: if debug.logging: - logging.error(e) + logger.error(e) |