From e98793d0a7af43878cf023fb045dd945a82507cf Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Wed, 6 Nov 2024 17:25:09 +0200
Subject: Update (g4f/models.py g4f/Provider/ docs/providers-and-models.md)

---
 g4f/Provider/Ai4Chat.py                       |  88 --------
 g4f/Provider/AiChatOnline.py                  |  61 ------
 g4f/Provider/AiChats.py                       | 105 ---------
 g4f/Provider/AmigoChat.py                     | 189 ----------------
 g4f/Provider/Aura.py                          |  49 -----
 g4f/Provider/Chatgpt4o.py                     |  88 --------
 g4f/Provider/ChatgptFree.py                   | 106 ---------
 g4f/Provider/DarkAI.py                        |  10 +-
 g4f/Provider/DeepInfraChat.py                 |  57 +----
 g4f/Provider/DeepInfraImage.py                |  80 -------
 g4f/Provider/Editee.py                        |  77 -------
 g4f/Provider/FlowGpt.py                       | 101 ---------
 g4f/Provider/Free2GPT.py                      |   8 +-
 g4f/Provider/FreeChatgpt.py                   |  96 ---------
 g4f/Provider/FreeGpt.py                       |   2 +-
 g4f/Provider/FreeNetfly.py                    | 105 ---------
 g4f/Provider/GPROChat.py                      |  67 ------
 g4f/Provider/HuggingFace.py                   | 104 ---------
 g4f/Provider/Koala.py                         |  79 -------
 g4f/Provider/Liaobots.py                      |  23 +-
 g4f/Provider/Local.py                         |  43 ----
 g4f/Provider/MetaAI.py                        | 238 ---------------------
 g4f/Provider/MetaAIAccount.py                 |  23 --
 g4f/Provider/Ollama.py                        |  40 ----
 g4f/Provider/Replicate.py                     |  88 --------
 g4f/Provider/__init__.py                      |  23 +-
 g4f/Provider/deprecated/__init__.py           |   3 +-
 g4f/Provider/gigachat/GigaChat.py             |  92 --------
 g4f/Provider/gigachat/__init__.py             |   2 -
 .../gigachat/russian_trusted_root_ca_pem.crt  |  33 ---
 g4f/Provider/local/Local.py                   |  43 ++++
 g4f/Provider/local/Ollama.py                  |  40 ++++
 g4f/Provider/local/__init__.py                |   2 +
 g4f/Provider/needs_auth/DeepInfraImage.py     |  80 +++++++
 g4f/Provider/needs_auth/HuggingFace.py        | 104 +++++++++
 g4f/Provider/needs_auth/MetaAI.py             | 238 +++++++++++++++++++++
 g4f/Provider/needs_auth/MetaAIAccount.py      |  23 ++
 g4f/Provider/needs_auth/OpenRouter.py         |  32 ---
 g4f/Provider/needs_auth/Replicate.py          |  88 ++++++++
 g4f/Provider/needs_auth/__init__.py           |   8 +-
 g4f/Provider/needs_auth/gigachat/GigaChat.py  |  92 ++++++++
 g4f/Provider/needs_auth/gigachat/__init__.py  |   2 +
 .../gigachat/russian_trusted_root_ca_pem.crt  |  33 +++
 g4f/Provider/not_working/Ai4Chat.py           |  88 ++++++++
 g4f/Provider/not_working/AiChatOnline.py      |  61 ++++++
 g4f/Provider/not_working/AiChats.py           | 105 +++++++++
 g4f/Provider/not_working/AmigoChat.py         | 189 ++++++++++++++++
 g4f/Provider/not_working/Aura.py              |  49 +++++
 g4f/Provider/not_working/Chatgpt4o.py         |  88 ++++++++
 g4f/Provider/not_working/ChatgptFree.py       | 106 +++++++++
 g4f/Provider/not_working/FlowGpt.py           | 101 +++++++++
 g4f/Provider/not_working/FreeNetfly.py        | 105 +++++++++
 g4f/Provider/not_working/GPROChat.py          |  67 ++++++
 g4f/Provider/not_working/Koala.py             |  79 +++++++
 g4f/Provider/not_working/MyShell.py           |  76 +++++++
 g4f/Provider/not_working/__init__.py          |  12 ++
 g4f/Provider/selenium/MyShell.py              |  76 -------
 g4f/Provider/selenium/__init__.py             |   1 -
 58 files changed, 1911 insertions(+), 2157 deletions(-)
 delete mode 100644 g4f/Provider/Ai4Chat.py
 delete mode 100644 g4f/Provider/AiChatOnline.py
 delete mode 100644 g4f/Provider/AiChats.py
 delete mode 100644 g4f/Provider/AmigoChat.py
 delete mode 100644 g4f/Provider/Aura.py
 delete mode 100644 g4f/Provider/Chatgpt4o.py
 delete mode 100644 g4f/Provider/ChatgptFree.py
 delete mode 100644 g4f/Provider/DeepInfraImage.py
 delete mode 100644 g4f/Provider/Editee.py
 delete mode 100644 g4f/Provider/FlowGpt.py
 delete mode 100644 g4f/Provider/FreeChatgpt.py
 delete mode 100644 g4f/Provider/FreeNetfly.py
 delete mode 100644 g4f/Provider/GPROChat.py
 delete mode 100644 g4f/Provider/HuggingFace.py
 delete mode 100644 g4f/Provider/Koala.py
 delete mode 100644 g4f/Provider/Local.py
 delete mode 100644 g4f/Provider/MetaAI.py
 delete mode 100644 g4f/Provider/MetaAIAccount.py
 delete mode 100644 g4f/Provider/Ollama.py
 delete mode 100644 g4f/Provider/Replicate.py
 delete mode 100644 g4f/Provider/gigachat/GigaChat.py
 delete mode 100644 g4f/Provider/gigachat/__init__.py
 delete mode 100644 g4f/Provider/gigachat/russian_trusted_root_ca_pem.crt
 create mode 100644 g4f/Provider/local/Local.py
 create mode 100644 g4f/Provider/local/Ollama.py
 create mode 100644 g4f/Provider/local/__init__.py
 create mode 100644 g4f/Provider/needs_auth/DeepInfraImage.py
 create mode 100644 g4f/Provider/needs_auth/HuggingFace.py
 create mode 100644 g4f/Provider/needs_auth/MetaAI.py
 create mode 100644 g4f/Provider/needs_auth/MetaAIAccount.py
 delete mode 100644 g4f/Provider/needs_auth/OpenRouter.py
 create mode 100644 g4f/Provider/needs_auth/Replicate.py
 create mode 100644 g4f/Provider/needs_auth/gigachat/GigaChat.py
 create mode 100644 g4f/Provider/needs_auth/gigachat/__init__.py
 create mode 100644 g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt
 create mode 100644 g4f/Provider/not_working/Ai4Chat.py
 create mode 100644 g4f/Provider/not_working/AiChatOnline.py
 create mode 100644 g4f/Provider/not_working/AiChats.py
 create mode 100644 g4f/Provider/not_working/AmigoChat.py
 create mode 100644 g4f/Provider/not_working/Aura.py
 create mode 100644 g4f/Provider/not_working/Chatgpt4o.py
 create mode 100644 g4f/Provider/not_working/ChatgptFree.py
 create mode 100644 g4f/Provider/not_working/FlowGpt.py
 create mode 100644 g4f/Provider/not_working/FreeNetfly.py
 create mode 100644 g4f/Provider/not_working/GPROChat.py
 create mode 100644 g4f/Provider/not_working/Koala.py
 create mode 100644 g4f/Provider/not_working/MyShell.py
 create mode 100644 g4f/Provider/not_working/__init__.py
 delete mode 100644 g4f/Provider/selenium/MyShell.py

diff --git a/g4f/Provider/Ai4Chat.py b/g4f/Provider/Ai4Chat.py
deleted file mode 100644
index 1096279d..00000000
--- a/g4f/Provider/Ai4Chat.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from __future__ import annotations
-
-import json
-import re
-import logging
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "AI4Chat"
-    url = "https://www.ai4chat.co"
-    api_endpoint = "https://www.ai4chat.co/generate-response"
-    working = True
-    supports_stream = True
-    supports_system_message = True
-    supports_message_history = True
-
-    default_model = 'gpt-4'
-    models = [default_model]
-
-    model_aliases = {}
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-
-        headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "cache-control": "no-cache",
-            "content-type": "application/json",
-            "origin": "https://www.ai4chat.co",
-            "pragma": "no-cache",
-            "priority": "u=1, i",
-            "referer": "https://www.ai4chat.co/gpt/talkdirtytome",
-            "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36" - } - - async with ClientSession(headers=headers) as session: - data = { - "messages": [ - { - "role": "user", - "content": format_prompt(messages) - } - ] - } - - try: - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - result = await response.text() - - json_result = json.loads(result) - - message = json_result.get("message", "") - - clean_message = re.sub(r'<[^>]+>', '', message) - - yield clean_message - except Exception as e: - logging.exception("Error while calling AI 4Chat API: %s", e) - yield f"Error: {e}" diff --git a/g4f/Provider/AiChatOnline.py b/g4f/Provider/AiChatOnline.py deleted file mode 100644 index 26aacef6..00000000 --- a/g4f/Provider/AiChatOnline.py +++ /dev/null @@ -1,61 +0,0 @@ -from __future__ import annotations - -import json -from aiohttp import ClientSession - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import get_random_string, format_prompt - -class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin): - site_url = "https://aichatonline.org" - url = "https://aichatonlineorg.erweima.ai" - api_endpoint = "/aichatonline/api/chat/gpt" - working = True - default_model = 'gpt-4o-mini' - - @classmethod - async def grab_token( - cls, - session: ClientSession, - proxy: str - ): - async with session.get(f'https://aichatonlineorg.erweima.ai/api/v1/user/getUniqueId?canvas=-{get_random_string()}', proxy=proxy) as response: - response.raise_for_status() - return (await response.json())['data'] - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - headers = { - "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0", - "Accept-Language": "de,en-US;q=0.7,en;q=0.3", - "Accept-Encoding": "gzip, deflate, br", - "Referer": f"{cls.url}/chatgpt/chat/", - "Content-Type": "application/json", - "Origin": cls.url, - "Alt-Used": "aichatonline.org", - "Connection": "keep-alive", - "Sec-Fetch-Dest": "empty", - "Sec-Fetch-Mode": "cors", - "Sec-Fetch-Site": "same-origin", - "TE": "trailers" - } - async with ClientSession(headers=headers) as session: - data = { - "conversationId": get_random_string(), - "prompt": format_prompt(messages), - } - headers['UniqueId'] = await cls.grab_token(session, proxy) - async with session.post(f"{cls.url}{cls.api_endpoint}", headers=headers, json=data, proxy=proxy) as response: - response.raise_for_status() - async for chunk in response.content: - try: - yield json.loads(chunk)['data']['message'] - except: - continue \ No newline at end of file diff --git a/g4f/Provider/AiChats.py b/g4f/Provider/AiChats.py deleted file mode 100644 index 7ff25639..00000000 --- a/g4f/Provider/AiChats.py +++ /dev/null @@ -1,105 +0,0 @@ -from __future__ import annotations - -import json -import base64 -from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..image import ImageResponse -from .helper import format_prompt - -class AiChats(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://ai-chats.org" - api_endpoint = "https://ai-chats.org/chat/send2/" - working = False - supports_message_history = True - default_model = 'gpt-4' - models = ['gpt-4', 'dalle'] - - @classmethod - 
async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - headers = { - "accept": "application/json, text/event-stream", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - "content-type": "application/json", - "origin": cls.url, - "pragma": "no-cache", - "referer": f"{cls.url}/{'image' if model == 'dalle' else 'chat'}/", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36", - 'cookie': 'muVyak=LSFNvUWqdgKkGprbDBsfieIoEMzjOQ; LSFNvUWqdgKkGprbDBsfieIoEMzjOQ=ac28831b98143847e83dbe004404e619-1725548624-1725548621; muVyak_hits=9; ai-chat-front=9d714d5dc46a6b47607c9a55e7d12a95; _csrf-front=76c23dc0a013e5d1e21baad2e6ba2b5fdab8d3d8a1d1281aa292353f8147b057a%3A2%3A%7Bi%3A0%3Bs%3A11%3A%22_csrf-front%22%3Bi%3A1%3Bs%3A32%3A%22K9lz0ezsNPMNnfpd_8gT5yEeh-55-cch%22%3B%7D', - } - - async with ClientSession(headers=headers) as session: - if model == 'dalle': - prompt = messages[-1]['content'] if messages else "" - else: - prompt = format_prompt(messages) - - data = { - "type": "image" if model == 'dalle' else "chat", - "messagesHistory": [ - { - "from": "you", - "content": prompt - } - ] - } - - try: - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - - if model == 'dalle': - response_json = await response.json() - - if 'data' in response_json and response_json['data']: - image_url = response_json['data'][0].get('url') - if image_url: - async with session.get(image_url) as img_response: - img_response.raise_for_status() - image_data = await img_response.read() - - base64_image = base64.b64encode(image_data).decode('utf-8') - base64_url = f"data:image/png;base64,{base64_image}" - yield ImageResponse(base64_url, prompt) - else: - yield f"Error: No image URL found in the response. Full response: {response_json}" - else: - yield f"Error: Unexpected response format. 
Full response: {response_json}" - else: - full_response = await response.text() - message = "" - for line in full_response.split('\n'): - if line.startswith('data: ') and line != 'data: ': - message += line[6:] - - message = message.strip() - yield message - except Exception as e: - yield f"Error occurred: {str(e)}" - - @classmethod - async def create_async( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> str: - async for response in cls.create_async_generator(model, messages, proxy, **kwargs): - if isinstance(response, ImageResponse): - return response.images[0] - return response diff --git a/g4f/Provider/AmigoChat.py b/g4f/Provider/AmigoChat.py deleted file mode 100644 index b086d5e1..00000000 --- a/g4f/Provider/AmigoChat.py +++ /dev/null @@ -1,189 +0,0 @@ -from __future__ import annotations - -import json -import uuid -from aiohttp import ClientSession, ClientTimeout, ClientResponseError - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt -from ..image import ImageResponse - -class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://amigochat.io/chat/" - chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions" - image_api_endpoint = "https://api.amigochat.io/v1/images/generations" - working = False - supports_stream = True - supports_system_message = True - supports_message_history = True - - default_model = 'gpt-4o-mini' - - chat_models = [ - 'gpt-4o', - default_model, - 'o1-preview', - 'o1-mini', - 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo', - 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo', - 'claude-3-sonnet-20240229', - 'gemini-1.5-pro', - ] - - image_models = [ - 'flux-pro/v1.1', - 'flux-realism', - 'flux-pro', - 'dalle-e-3', - ] - - models = [*chat_models, *image_models] - - model_aliases = { - "o1": "o1-preview", - "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", - "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo", - "claude-3.5-sonnet": "claude-3-sonnet-20240229", - "gemini-pro": "gemini-1.5-pro", - - "flux-pro": "flux-pro/v1.1", - "dalle-3": "dalle-e-3", - } - - persona_ids = { - 'gpt-4o': "gpt", - 'gpt-4o-mini': "amigo", - 'o1-preview': "openai-o-one", - 'o1-mini': "openai-o-one-mini", - 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo': "llama-three-point-one", - 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo': "llama-3-2", - 'claude-3-sonnet-20240229': "claude", - 'gemini-1.5-pro': "gemini-1-5-pro", - 'flux-pro/v1.1': "flux-1-1-pro", - 'flux-realism': "flux-realism", - 'flux-pro': "flux-pro", - 'dalle-e-3': "dalle-three", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - def get_personaId(cls, model: str) -> str: - return cls.persona_ids[model] - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - stream: bool = False, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - device_uuid = str(uuid.uuid4()) - max_retries = 3 - retry_count = 0 - - while retry_count < max_retries: - try: - headers = { - "accept": "*/*", - "accept-language": "en-US,en;q=0.9", - "authorization": "Bearer", - "cache-control": "no-cache", - "content-type": "application/json", - "origin": cls.url, - "pragma": "no-cache", - "priority": "u=1, i", - 
"referer": f"{cls.url}/", - "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", - "x-device-language": "en-US", - "x-device-platform": "web", - "x-device-uuid": device_uuid, - "x-device-version": "1.0.32" - } - - async with ClientSession(headers=headers) as session: - if model in cls.chat_models: - # Chat completion - data = { - "messages": [{"role": m["role"], "content": m["content"]} for m in messages], - "model": model, - "personaId": cls.get_personaId(model), - "frequency_penalty": 0, - "max_tokens": 4000, - "presence_penalty": 0, - "stream": stream, - "temperature": 0.5, - "top_p": 0.95 - } - - timeout = ClientTimeout(total=300) # 5 minutes timeout - async with session.post(cls.chat_api_endpoint, json=data, proxy=proxy, timeout=timeout) as response: - if response.status not in (200, 201): - error_text = await response.text() - raise Exception(f"Error {response.status}: {error_text}") - - async for line in response.content: - line = line.decode('utf-8').strip() - if line.startswith('data: '): - if line == 'data: [DONE]': - break - try: - chunk = json.loads(line[6:]) # Remove 'data: ' prefix - if 'choices' in chunk and len(chunk['choices']) > 0: - choice = chunk['choices'][0] - if 'delta' in choice: - content = choice['delta'].get('content') - elif 'text' in choice: - content = choice['text'] - else: - content = None - if content: - yield content - except json.JSONDecodeError: - pass - else: - # Image generation - prompt = messages[-1]['content'] - data = { - "prompt": prompt, - "model": model, - "personaId": cls.get_personaId(model) - } - async with session.post(cls.image_api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - - response_data = await response.json() - - if "data" in response_data: - image_urls = [] - for item in response_data["data"]: - if "url" in item: - image_url = item["url"] - image_urls.append(image_url) - if image_urls: - yield ImageResponse(image_urls, prompt) - else: - yield None - - break - - except (ClientResponseError, Exception) as e: - retry_count += 1 - if retry_count >= max_retries: - raise e - device_uuid = str(uuid.uuid4()) diff --git a/g4f/Provider/Aura.py b/g4f/Provider/Aura.py deleted file mode 100644 index e2c56754..00000000 --- a/g4f/Provider/Aura.py +++ /dev/null @@ -1,49 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider -from ..requests import get_args_from_browser -from ..webdriver import WebDriver - -class Aura(AsyncGeneratorProvider): - url = "https://openchat.team" - working = False - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - temperature: float = 0.5, - max_tokens: int = 8192, - webdriver: WebDriver = None, - **kwargs - ) -> AsyncResult: - args = get_args_from_browser(cls.url, webdriver, proxy) - async with ClientSession(**args) as session: - new_messages = [] - system_message = [] - for message in messages: - if message["role"] == "system": - system_message.append(message["content"]) - else: - new_messages.append(message) - data = { - "model": { - "id": "openchat_3.6", - "name": "OpenChat 3.6 (latest)", - "maxLength": 24576, - "tokenLimit": max_tokens - }, - "messages": new_messages, - "key": "", - "prompt": 
"\n".join(system_message), - "temperature": temperature - } - async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response: - response.raise_for_status() - async for chunk in response.content.iter_any(): - yield chunk.decode(error="ignore") diff --git a/g4f/Provider/Chatgpt4o.py b/g4f/Provider/Chatgpt4o.py deleted file mode 100644 index 7730fc84..00000000 --- a/g4f/Provider/Chatgpt4o.py +++ /dev/null @@ -1,88 +0,0 @@ -from __future__ import annotations - -import re -from ..requests import StreamSession, raise_for_status -from ..typing import Messages -from .base_provider import AsyncProvider, ProviderModelMixin -from .helper import format_prompt - - -class Chatgpt4o(AsyncProvider, ProviderModelMixin): - url = "https://chatgpt4o.one" - working = True - _post_id = None - _nonce = None - default_model = 'gpt-4o-mini-2024-07-18' - models = [ - 'gpt-4o-mini-2024-07-18', - ] - model_aliases = { - "gpt-4o-mini": "gpt-4o-mini-2024-07-18", - } - - - @classmethod - async def create_async( - cls, - model: str, - messages: Messages, - proxy: str = None, - timeout: int = 120, - cookies: dict = None, - **kwargs - ) -> str: - headers = { - 'authority': 'chatgpt4o.one', - 'accept': '*/*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'origin': 'https://chatgpt4o.one', - 'referer': 'https://chatgpt4o.one', - 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36', - } - - async with StreamSession( - headers=headers, - cookies=cookies, - impersonate="chrome", - proxies={"all": proxy}, - timeout=timeout - ) as session: - - if not cls._post_id or not cls._nonce: - async with session.get(f"{cls.url}/") as response: - await raise_for_status(response) - response_text = await response.text() - - post_id_match = re.search(r'data-post-id="([0-9]+)"', response_text) - nonce_match = re.search(r'data-nonce="(.*?)"', response_text) - - if not post_id_match: - raise RuntimeError("No post ID found") - cls._post_id = post_id_match.group(1) - - if not nonce_match: - raise RuntimeError("No nonce found") - cls._nonce = nonce_match.group(1) - - prompt = format_prompt(messages) - data = { - "_wpnonce": cls._nonce, - "post_id": cls._post_id, - "url": cls.url, - "action": "wpaicg_chat_shortcode_message", - "message": prompt, - "bot_id": "0" - } - - async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response: - await raise_for_status(response) - response_json = await response.json() - if "data" not in response_json: - raise RuntimeError("Unexpected response structure: 'data' field missing") - return response_json["data"] diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py deleted file mode 100644 index d1222efb..00000000 --- a/g4f/Provider/ChatgptFree.py +++ /dev/null @@ -1,106 +0,0 @@ -from __future__ import annotations - -import re -import json -import asyncio -from ..requests import StreamSession, raise_for_status -from ..typing import Messages, AsyncGenerator -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - -class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://chatgptfree.ai" - working = False - _post_id = None 
- _nonce = None - default_model = 'gpt-4o-mini-2024-07-18' - models = [default_model] - model_aliases = { - "gpt-4o-mini": "gpt-4o-mini-2024-07-18", - } - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - timeout: int = 120, - cookies: dict = None, - **kwargs - ) -> AsyncGenerator[str, None]: - headers = { - 'authority': 'chatgptfree.ai', - 'accept': '*/*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'origin': 'https://chatgptfree.ai', - 'referer': 'https://chatgptfree.ai/chat/', - 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36', - } - - async with StreamSession( - headers=headers, - cookies=cookies, - impersonate="chrome", - proxies={"all": proxy}, - timeout=timeout - ) as session: - - if not cls._nonce: - async with session.get(f"{cls.url}/") as response: - await raise_for_status(response) - response = await response.text() - - result = re.search(r'data-post-id="([0-9]+)"', response) - if not result: - raise RuntimeError("No post id found") - cls._post_id = result.group(1) - - result = re.search(r'data-nonce="(.*?)"', response) - if result: - cls._nonce = result.group(1) - else: - raise RuntimeError("No nonce found") - - prompt = format_prompt(messages) - data = { - "_wpnonce": cls._nonce, - "post_id": cls._post_id, - "url": cls.url, - "action": "wpaicg_chat_shortcode_message", - "message": prompt, - "bot_id": "0" - } - - async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response: - await raise_for_status(response) - buffer = "" - async for line in response.iter_lines(): - line = line.decode('utf-8').strip() - if line.startswith('data: '): - data = line[6:] - if data == '[DONE]': - break - try: - json_data = json.loads(data) - content = json_data['choices'][0]['delta'].get('content', '') - if content: - yield content - except json.JSONDecodeError: - continue - elif line: - buffer += line - - if buffer: - try: - json_response = json.loads(buffer) - if 'data' in json_response: - yield json_response['data'] - except json.JSONDecodeError: - print(f"Failed to decode final JSON. 
Buffer content: {buffer}") diff --git a/g4f/Provider/DarkAI.py b/g4f/Provider/DarkAI.py index 6ffb615e..54f456fe 100644 --- a/g4f/Provider/DarkAI.py +++ b/g4f/Provider/DarkAI.py @@ -9,19 +9,19 @@ from .helper import format_prompt class DarkAI(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://www.aiuncensored.info" + url = "https://darkai.foundation/chat" api_endpoint = "https://darkai.foundation/chat" working = True supports_stream = True supports_system_message = True supports_message_history = True - default_model = 'gpt-4o' + default_model = 'llama-3-405b' models = [ - default_model, # Uncensored + 'gpt-4o', # Uncensored 'gpt-3.5-turbo', # Uncensored 'llama-3-70b', # Uncensored - 'llama-3-405b', + default_model, ] model_aliases = { @@ -51,8 +51,6 @@ class DarkAI(AsyncGeneratorProvider, ProviderModelMixin): headers = { "accept": "text/event-stream", "content-type": "application/json", - "origin": "https://www.aiuncensored.info", - "referer": "https://www.aiuncensored.info/", "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36" } async with ClientSession(headers=headers) as session: diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py index b8cc6ab8..5c668599 100644 --- a/g4f/Provider/DeepInfraChat.py +++ b/g4f/Provider/DeepInfraChat.py @@ -6,7 +6,6 @@ import json from ..typing import AsyncResult, Messages, ImageType from ..image import to_data_uri from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin): @@ -17,42 +16,18 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin): supports_system_message = True supports_message_history = True - default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct' + default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' models = [ - 'meta-llama/Meta-Llama-3.1-405B-Instruct', - 'meta-llama/Meta-Llama-3.1-70B-Instruct', 'meta-llama/Meta-Llama-3.1-8B-Instruct', - 'mistralai/Mixtral-8x22B-Instruct-v0.1', - 'mistralai/Mixtral-8x7B-Instruct-v0.1', + default_model, 'microsoft/WizardLM-2-8x22B', - 'microsoft/WizardLM-2-7B', - 'Qwen/Qwen2-72B-Instruct', - 'microsoft/Phi-3-medium-4k-instruct', - 'google/gemma-2-27b-it', - 'openbmb/MiniCPM-Llama3-V-2_5', # Image upload is available - 'mistralai/Mistral-7B-Instruct-v0.3', - 'lizpreciatior/lzlv_70b_fp16_hf', - 'openchat/openchat-3.6-8b', - 'Phind/Phind-CodeLlama-34B-v2', - 'cognitivecomputations/dolphin-2.9.1-llama-3-70b', + 'Qwen/Qwen2.5-72B-Instruct', ] model_aliases = { - "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct", - "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct", - "llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct", - "mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1", - "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", + "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct", + "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B", - "wizardlm-2-7b": "microsoft/WizardLM-2-7B", - "qwen-2-72b": "Qwen/Qwen2-72B-Instruct", - "phi-3-medium-4k": "microsoft/Phi-3-medium-4k-instruct", - "gemma-2b-27b": "google/gemma-2-27b-it", - "minicpm-llama-3-v2.5": "openbmb/MiniCPM-Llama3-V-2_5", # Image upload is available - "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3", - "lzlv-70b": "lizpreciatior/lzlv_70b_fp16_hf", - "openchat-3.6-8b": "openchat/openchat-3.6-8b", - 
"phind-codellama-34b-v2": "Phind/Phind-CodeLlama-34B-v2", - "dolphin-2.9.1-llama-3-70b": "cognitivecomputations/dolphin-2.9.1-llama-3-70b", + "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct", } @@ -97,30 +72,12 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin): } async with ClientSession(headers=headers) as session: - prompt = format_prompt(messages) data = { 'model': model, - 'messages': [ - {'role': 'system', 'content': 'Be a helpful assistant'}, - {'role': 'user', 'content': prompt} - ], + 'messages': messages, 'stream': True } - if model == 'openbmb/MiniCPM-Llama3-V-2_5' and image is not None: - data['messages'][-1]['content'] = [ - { - 'type': 'image_url', - 'image_url': { - 'url': to_data_uri(image) - } - }, - { - 'type': 'text', - 'text': messages[-1]['content'] - } - ] - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: response.raise_for_status() async for line in response.content: diff --git a/g4f/Provider/DeepInfraImage.py b/g4f/Provider/DeepInfraImage.py deleted file mode 100644 index cee608ce..00000000 --- a/g4f/Provider/DeepInfraImage.py +++ /dev/null @@ -1,80 +0,0 @@ -from __future__ import annotations - -import requests - -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..typing import AsyncResult, Messages -from ..requests import StreamSession, raise_for_status -from ..image import ImageResponse - -class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://deepinfra.com" - parent = "DeepInfra" - working = True - needs_auth = True - default_model = '' - image_models = [default_model] - - @classmethod - def get_models(cls): - if not cls.models: - url = 'https://api.deepinfra.com/models/featured' - models = requests.get(url).json() - cls.models = [model['model_name'] for model in models if model["reported_type"] == "text-to-image"] - cls.image_models = cls.models - return cls.models - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - **kwargs - ) -> AsyncResult: - yield await cls.create_async(messages[-1]["content"], model, **kwargs) - - @classmethod - async def create_async( - cls, - prompt: str, - model: str, - api_key: str = None, - api_base: str = "https://api.deepinfra.com/v1/inference", - proxy: str = None, - timeout: int = 180, - extra_data: dict = {}, - **kwargs - ) -> ImageResponse: - headers = { - 'Accept-Encoding': 'gzip, deflate, br', - 'Accept-Language': 'en-US', - 'Connection': 'keep-alive', - 'Origin': 'https://deepinfra.com', - 'Referer': 'https://deepinfra.com/', - 'Sec-Fetch-Dest': 'empty', - 'Sec-Fetch-Mode': 'cors', - 'Sec-Fetch-Site': 'same-site', - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36', - 'X-Deepinfra-Source': 'web-embed', - 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', - } - if api_key is not None: - headers["Authorization"] = f"Bearer {api_key}" - async with StreamSession( - proxies={"all": proxy}, - headers=headers, - timeout=timeout - ) as session: - model = cls.get_model(model) - data = {"prompt": prompt, **extra_data} - data = {"input": data} if model == cls.default_model else data - async with session.post(f"{api_base.rstrip('/')}/{model}", json=data) as response: - await raise_for_status(response) - data = await response.json() - images = data["output"] if "output" in data else data["images"] - if not images: - raise 
RuntimeError(f"Response: {data}") - images = images[0] if len(images) == 1 else images - return ImageResponse(images, prompt) diff --git a/g4f/Provider/Editee.py b/g4f/Provider/Editee.py deleted file mode 100644 index 8ac2324a..00000000 --- a/g4f/Provider/Editee.py +++ /dev/null @@ -1,77 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - - -class Editee(AsyncGeneratorProvider, ProviderModelMixin): - label = "Editee" - url = "https://editee.com" - api_endpoint = "https://editee.com/submit/chatgptfree" - working = True - supports_stream = True - supports_system_message = True - supports_message_history = True - - default_model = 'claude' - models = ['claude', 'gpt4', 'gemini' 'mistrallarge'] - - model_aliases = { - "claude-3.5-sonnet": "claude", - "gpt-4o": "gpt4", - "gemini-pro": "gemini", - "mistral-large": "mistrallarge", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "Accept": "application/json, text/plain, */*", - "Accept-Language": "en-US,en;q=0.9", - "Cache-Control": "no-cache", - "Content-Type": "application/json", - "Origin": cls.url, - "Pragma": "no-cache", - "Priority": "u=1, i", - "Referer": f"{cls.url}/chat-gpt", - "Sec-CH-UA": '"Chromium";v="129", "Not=A?Brand";v="8"', - "Sec-CH-UA-Mobile": '?0', - "Sec-CH-UA-Platform": '"Linux"', - "Sec-Fetch-Dest": 'empty', - "Sec-Fetch-Mode": 'cors', - "Sec-Fetch-Site": 'same-origin', - "User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36', - "X-Requested-With": 'XMLHttpRequest', - } - - async with ClientSession(headers=headers) as session: - prompt = format_prompt(messages) - data = { - "user_input": prompt, - "context": " ", - "template_id": "", - "selected_model": model - } - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - response_data = await response.json() - yield response_data['text'] diff --git a/g4f/Provider/FlowGpt.py b/g4f/Provider/FlowGpt.py deleted file mode 100644 index 1a45997b..00000000 --- a/g4f/Provider/FlowGpt.py +++ /dev/null @@ -1,101 +0,0 @@ -from __future__ import annotations - -import json -import time -import hashlib -from aiohttp import ClientSession - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import get_random_hex, get_random_string -from ..requests.raise_for_status import raise_for_status - -class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://flowgpt.com/chat" - working = False - supports_message_history = True - supports_system_message = True - default_model = "gpt-3.5-turbo" - models = [ - "gpt-3.5-turbo", - "gpt-3.5-long", - "gpt-4-turbo", - "google-gemini", - "claude-instant", - "claude-v1", - "claude-v2", - "llama2-13b", - "mythalion-13b", - "pygmalion-13b", - "chronos-hermes-13b", - "Mixtral-8x7B", - "Dolphin-2.6-8x7B", - ] - model_aliases = { - "gemini": "google-gemini", - "gemini-pro": "google-gemini" - } - - @classmethod - async def 
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        temperature: float = 0.7,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-        timestamp = str(int(time.time()))
-        auth = "Bearer null"
-        nonce = get_random_hex()
-        data = f"{timestamp}-{nonce}-{auth}"
-        signature = hashlib.md5(data.encode()).hexdigest()
-
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
-            "Accept": "*/*",
-            "Accept-Language": "en-US;q=0.7,en;q=0.3",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Referer": "https://flowgpt.com/",
-            "Content-Type": "application/json",
-            "Authorization": "Bearer null",
-            "Origin": "https://flowgpt.com",
-            "Connection": "keep-alive",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-site",
-            "TE": "trailers",
-            "Authorization": auth,
-            "x-flow-device-id": f"f-{get_random_string(19)}",
-            "x-nonce": nonce,
-            "x-signature": signature,
-            "x-timestamp": timestamp
-        }
-        async with ClientSession(headers=headers) as session:
-            history = [message for message in messages[:-1] if message["role"] != "system"]
-            system_message = "\n".join([message["content"] for message in messages if message["role"] == "system"])
-            if not system_message:
-                system_message = "You are helpful assistant. Follow the user's instructions carefully."
-            data = {
-                "model": model,
-                "nsfw": False,
-                "question": messages[-1]["content"],
-                "history": [{"role": "assistant", "content": "Hello, how can I help you today?"}, *history],
-                "system": system_message,
-                "temperature": temperature,
-                "promptId": f"model-{model}",
-                "documentIds": [],
-                "chatFileDocumentIds": [],
-                "generateImage": False,
-                "generateAudio": False
-            }
-            async with session.post("https://prod-backend-k8s.flowgpt.com/v3/chat-anonymous", json=data, proxy=proxy) as response:
-                await raise_for_status(response)
-                async for chunk in response.content:
-                    if chunk.strip():
-                        message = json.loads(chunk)
-                        if "event" not in message:
-                            continue
-                        if message["event"] == "text":
-                            yield message["data"]
diff --git a/g4f/Provider/Free2GPT.py b/g4f/Provider/Free2GPT.py
index a79bd1da..6ba9ac0f 100644
--- a/g4f/Provider/Free2GPT.py
+++ b/g4f/Provider/Free2GPT.py
@@ -16,7 +16,7 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chat10.free2gpt.xyz"
     working = True
     supports_message_history = True
-    default_model = 'llama-3.1-70b'
+    default_model = 'mistral-7b'
 
     @classmethod
     async def create_async_generator(
@@ -49,12 +49,8 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
             connector=get_connector(connector, proxy), headers=headers
         ) as session:
             timestamp = int(time.time() * 1e3)
-            system_message = {
-                "role": "system",
-                "content": ""
-            }
             data = {
-                "messages": [system_message] + messages,
+                "messages": messages,
                 "time": timestamp,
                 "pass": None,
                 "sign": generate_signature(timestamp, messages[-1]["content"]),
diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py
deleted file mode 100644
index a9dc0f56..00000000
--- a/g4f/Provider/FreeChatgpt.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from __future__ import annotations
-import json
-from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://chat.chatgpt.org.uk"
-    api_endpoint = "/api/openai/v1/chat/completions"
-    working = True
-    default_model = '@cf/qwen/qwen1.5-14b-chat-awq'
-    models = [
-        '@cf/qwen/qwen1.5-14b-chat-awq',
-        'SparkDesk-v1.1',
-        'Qwen2-7B-Instruct',
-        'glm4-9B-chat',
-        'chatglm3-6B',
-        'Yi-1.5-9B-Chat',
-    ]
-
-    model_aliases = {
-        "qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
-        "sparkdesk-v1.1": "SparkDesk-v1.1",
-        "qwen-2-7b": "Qwen2-7B-Instruct",
-        "glm-4-9b": "glm4-9B-chat",
-        "glm-3-6b": "chatglm3-6B",
-        "yi-1.5-9b": "Yi-1.5-9B-Chat",
-    }
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model.lower() in cls.model_aliases:
-            return cls.model_aliases[model.lower()]
-        else:
-            return cls.default_model
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-
-        headers = {
-            "accept": "application/json, text/event-stream",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/json",
-            "dnt": "1",
-            "origin": cls.url,
-            "referer": f"{cls.url}/",
-            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-        }
-        async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
-            data = {
-                "messages": [
-                    {"role": "system", "content": "\nYou are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent model: gpt-3.5-turbo\nCurrent time: Thu Jul 04 2024 21:35:59 GMT+0300 (Eastern European Summer Time)\nLatex inline: \\(x^2\\) \nLatex block: $$e=mc^2$$\n\n"},
-                    {"role": "user", "content": prompt}
-                ],
-                "stream": True,
-                "model": model,
-                "temperature": 0.5,
-                "presence_penalty": 0,
-                "frequency_penalty": 0,
-                "top_p": 1
-            }
-            async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                accumulated_text = ""
-                async for line in response.content:
-                    if line:
-                        line_str = line.decode().strip()
-                        if line_str == "data: [DONE]":
-                            yield accumulated_text
-                            break
-                        elif line_str.startswith("data: "):
-                            try:
-                                chunk = json.loads(line_str[6:])
-                                delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
-                                accumulated_text += delta_content
-                                yield delta_content # Yield each chunk of content
-                            except json.JSONDecodeError:
-                                pass
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 82a3824b..b38ff428 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -24,7 +24,7 @@ class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     supports_message_history = True
     supports_system_message = True
-    default_model = 'llama-3.1-70b'
+    default_model = 'gemini-pro'
 
     @classmethod
     async def create_async_generator(
diff --git a/g4f/Provider/FreeNetfly.py b/g4f/Provider/FreeNetfly.py
deleted file mode 100644
index ada5d51a..00000000
--- a/g4f/Provider/FreeNetfly.py
+++ /dev/null
@@ -1,105 +0,0 @@
-from __future__ import annotations
-
-import json
-import asyncio
-from aiohttp import ClientSession, ClientTimeout, ClientError
-from typing import AsyncGenerator
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-
-
-class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://free.netfly.top"
-    api_endpoint = "/api/openai/v1/chat/completions"
-    working = True
-    default_model = 'gpt-3.5-turbo'
-    models = [
-        'gpt-3.5-turbo',
-        'gpt-4',
-    ]
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "accept": "application/json, text/event-stream",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/json",
-            "dnt": "1",
-            "origin": cls.url,
-            "referer": f"{cls.url}/",
-            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-        }
-        data = {
-            "messages": messages,
-            "stream": True,
-            "model": model,
-            "temperature": 0.5,
-            "presence_penalty": 0,
-            "frequency_penalty": 0,
-            "top_p": 1
-        }
-
-        max_retries = 5
-        retry_delay = 2
-
-        for attempt in range(max_retries):
-            try:
-                async with ClientSession(headers=headers) as session:
-                    timeout = ClientTimeout(total=60)
-                    async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy, timeout=timeout) as response:
-                        response.raise_for_status()
-                        async for chunk in cls._process_response(response):
-                            yield chunk
-                        return # If successful, exit the function
-            except (ClientError, asyncio.TimeoutError) as e:
-                if attempt == max_retries - 1:
-                    raise # If all retries failed, raise the last exception
-                await asyncio.sleep(retry_delay)
-                retry_delay *= 2 # Exponential backoff
-
-    @classmethod
-    async def _process_response(cls, response) -> AsyncGenerator[str, None]:
-        buffer = ""
-        async for line in response.content:
-            buffer += line.decode('utf-8')
-            if buffer.endswith('\n\n'):
-                for subline in buffer.strip().split('\n'):
-                    if subline.startswith('data: '):
-                        if subline == 'data: [DONE]':
-                            return
-                        try:
-                            data = json.loads(subline[6:])
-                            content = data['choices'][0]['delta'].get('content')
-                            if content:
-                                yield content
-                        except json.JSONDecodeError:
-                            print(f"Failed to parse JSON: {subline}")
-                        except KeyError:
-                            print(f"Unexpected JSON structure: {data}")
-                buffer = ""
-
-        # Process any remaining data in the buffer
-        if buffer:
-            for subline in buffer.strip().split('\n'):
-                if subline.startswith('data: ') and subline != 'data: [DONE]':
-                    try:
-                        data = json.loads(subline[6:])
-                        content = data['choices'][0]['delta'].get('content')
-                        if content:
-                            yield content
-                    except (json.JSONDecodeError, KeyError):
-                        pass
-
diff --git a/g4f/Provider/GPROChat.py b/g4f/Provider/GPROChat.py
deleted file mode 100644
index a33c9571..00000000
--- a/g4f/Provider/GPROChat.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from __future__ import annotations
-import hashlib
-import time
-from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-class GPROChat(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "GPROChat"
-    url = "https://gprochat.com"
-    api_endpoint = "https://gprochat.com/api/generate"
-    working = True
-    supports_stream = True
-    supports_message_history = True
-    default_model = 'gemini-pro'
-
-    @staticmethod
-    def generate_signature(timestamp: int, message: str) -> str:
-        secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8"
-        hash_input = f"{timestamp}:{message}:{secret_key}"
-        signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest()
-        return signature
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-        timestamp = int(time.time() * 1000)
-        prompt = format_prompt(messages)
-        sign = cls.generate_signature(timestamp, prompt)
-
-        headers = {
-            "accept": "*/*",
-            "origin": cls.url,
-            "referer": f"{cls.url}/",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
-            "content-type": "text/plain;charset=UTF-8"
-        }
-
-        data = {
-            "messages": [{"role": "user", "parts": [{"text": prompt}]}],
-            "time": timestamp,
-            "pass": None,
-            "sign": sign
-        }
-
-        async with ClientSession(headers=headers) as session:
-            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for chunk in response.content.iter_any():
-                    if chunk:
-                        yield chunk.decode()
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
deleted file mode 100644
index 586e5f5f..00000000
--- a/g4f/Provider/HuggingFace.py
+++ /dev/null
@@ -1,104 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession, BaseConnector
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_connector
-from ..errors import RateLimitError, ModelNotFoundError
-from ..requests.raise_for_status import raise_for_status
-
-from .HuggingChat import HuggingChat
-
-class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://huggingface.co/chat"
-    working = True
-    needs_auth = True
-    supports_message_history = True
-    default_model = HuggingChat.default_model
-    models = HuggingChat.models
-    model_aliases = HuggingChat.model_aliases
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool = True,
-        proxy: str = None,
-        connector: BaseConnector = None,
-        api_base: str = "https://api-inference.huggingface.co",
-        api_key: str = None,
-        max_new_tokens: int = 1024,
-        temperature: float = 0.7,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-        headers = {
-            'accept': '*/*',
-            'accept-language': 'en',
-            'cache-control': 'no-cache',
-            'origin': 'https://huggingface.co',
-            'pragma': 'no-cache',
-            'priority': 'u=1, i',
-            'referer': 'https://huggingface.co/chat/',
-            'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
-        }
-        if api_key is not None:
-            headers["Authorization"] = f"Bearer {api_key}"
-
-        params = {
-            "return_full_text": False,
-            "max_new_tokens": max_new_tokens,
-            "temperature": temperature,
-            **kwargs
-        }
-        payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream}
-
-        async with ClientSession(
-            headers=headers,
-            connector=get_connector(connector, proxy)
-        ) as session:
-            async with session.post(f"{api_base.rstrip('/')}/models/{model}", json=payload) as response:
-                if response.status == 404:
-                    raise ModelNotFoundError(f"Model is not supported: {model}")
-                await raise_for_status(response)
-                if stream:
-                    first = True
-                    async for line in response.content:
-                        if line.startswith(b"data:"):
-                            data = json.loads(line[5:])
-                            if not data["token"]["special"]:
-                                chunk = data["token"]["text"]
-                                if first:
-                                    first = False
-                                    chunk = chunk.lstrip()
-                                yield chunk
-                else:
-                    yield (await response.json())[0]["generated_text"].strip()
-
-def format_prompt(messages: Messages) -> str:
-    system_messages = [message["content"] for message in messages if message["role"] == "system"]
-    question = " ".join([messages[-1]["content"], *system_messages])
-    history = "".join([
-        f"[INST]{messages[idx-1]['content']} [/INST] {message['content']}"
-        for idx, message in enumerate(messages)
-        if message["role"] == "assistant"
-    ])
-    return f"{history}[INST] {question} [/INST]"
diff --git a/g4f/Provider/Koala.py b/g4f/Provider/Koala.py
deleted file mode 100644
index 0dd76b71..00000000
--- a/g4f/Provider/Koala.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from __future__ import annotations
-
-import json
-from typing import AsyncGenerator, Optional, List, Dict, Union, Any
-from aiohttp import ClientSession, BaseConnector, ClientResponse
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_random_string, get_connector
-from ..requests import raise_for_status
-
-class Koala(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://koala.sh/chat"
-    api_endpoint = "https://koala.sh/api/gpt/"
-    working = True
-    supports_message_history = True
-    default_model = 'gpt-4o-mini'
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: Optional[str] = None,
-        connector: Optional[BaseConnector] = None,
-        **kwargs: Any
-    ) -> AsyncGenerator[Dict[str, Union[str, int, float, List[Dict[str, Any]], None]], None]:
-        if not model:
-            model = "gpt-4o-mini"
-
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
-            "Accept": "text/event-stream",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Referer": f"{cls.url}",
-            "Flag-Real-Time-Data": "false",
-            "Visitor-ID": get_random_string(20),
-            "Origin": "https://koala.sh",
-            "Alt-Used": "koala.sh",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "TE": "trailers",
-        }
-
-        async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
-            input_text = messages[-1]["content"]
-            system_messages = " ".join(
-                message["content"] for message in messages if message["role"] == "system"
-            )
-            if system_messages:
-                input_text += f" {system_messages}"
-
-            data = {
-                "input": input_text,
-                "inputHistory": [
-                    message["content"]
-                    for message in messages[:-1]
-                    if message["role"] == "user"
-                ],
-                "outputHistory": [
-                    message["content"]
-                    for message in messages
-                    if message["role"] == "assistant"
-                ],
-                "model": model,
-            }
-
-            async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
-                await raise_for_status(response)
-                async for chunk in cls._parse_event_stream(response):
-                    yield chunk
-
-    @staticmethod
-    async def _parse_event_stream(response: ClientResponse) -> AsyncGenerator[Dict[str, Any], None]:
-        async for chunk in response.content:
-            if chunk.startswith(b"data: "):
-                yield json.loads(chunk[6:])
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 56f765de..addd3ed7 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -63,6 +63,15 @@ models = {
         "tokenLimit": 126000,
         "context": "128K",
     },
+    "grok-beta": {
+        "id": "grok-beta",
+        "name": "Grok-Beta",
+        "model": "Grok",
+        "provider": "x.ai",
+        "maxLength": 400000,
+        "tokenLimit": 100000,
+        "context": "100K",
+    },
     "grok-2": {
         "id": "grok-2",
         "name": "Grok-2",
@@ -99,18 +108,18 @@ models = {
         "tokenLimit": 200000,
         "context": "200K",
     },
-    "claude-3-opus-20240229-gcp": {
-        "id": "claude-3-opus-20240229-gcp",
-        "name": "Claude-3-Opus-Gcp",
+    "claude-3-5-sonnet-20240620": {
+        "id": "claude-3-5-sonnet-20240620",
+        "name": "Claude-3.5-Sonnet",
         "model": "Claude",
         "provider": "Anthropic",
         "maxLength": 800000,
         "tokenLimit": 200000,
         "context": "200K",
     },
-    "claude-3-5-sonnet-20240620": {
-        "id": "claude-3-5-sonnet-20240620",
-        "name": "Claude-3.5-Sonnet",
+    "claude-3-5-sonnet-20241022": {
+        "id": "claude-3-5-sonnet-20241022",
+        "name": "Claude-3.5-Sonnet-V2",
         "model": "Claude",
         "provider": "Anthropic",
         "maxLength": 800000,
@@ -183,9 +192,9 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
 
         "claude-3-opus": "claude-3-opus-20240229",
         "claude-3-opus": "claude-3-opus-20240229-aws",
-        "claude-3-opus": "claude-3-opus-20240229-gcp",
         "claude-3-sonnet": "claude-3-sonnet-20240229",
         "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
+        "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
         "claude-3-haiku": "claude-3-haiku-20240307",
         "claude-2.1": "claude-2.1",
 
diff --git a/g4f/Provider/Local.py b/g4f/Provider/Local.py
deleted file mode 100644
index 471231c6..00000000
--- a/g4f/Provider/Local.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from __future__ import annotations
-
-from ..locals.models import get_models
-try:
-    from ..locals.provider import LocalProvider
-    has_requirements = True
-except ImportError:
-    has_requirements = False
-
-from ..typing import Messages, CreateResult
-from ..providers.base_provider import AbstractProvider, ProviderModelMixin
-from ..errors import MissingRequirementsError
-
-class Local(AbstractProvider, ProviderModelMixin):
-    label = "GPT4All"
-    working = True
-    supports_message_history = True
-    supports_system_message = True
-    supports_stream = True
-
-    @classmethod
-    def get_models(cls):
-        if not cls.models:
-            cls.models = list(get_models())
-            cls.default_model = cls.models[0]
-        return cls.models
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool,
-        **kwargs
-    ) -> CreateResult:
-        if not has_requirements:
-            raise MissingRequirementsError('Install "gpt4all" package | pip install -U g4f[local]')
-        return LocalProvider.create_completion(
-            cls.get_model(model),
-            messages,
-            stream,
-            **kwargs
-        )
\ No newline at end of file
diff --git a/g4f/Provider/MetaAI.py b/g4f/Provider/MetaAI.py
deleted file mode 100644
index 218b7ebb..00000000
--- a/g4f/Provider/MetaAI.py
+++ /dev/null
@@ -1,238 +0,0 @@
-from __future__ import annotations
-
-import json
-import uuid
-import random
-import time
-from typing import Dict, List
-
-from aiohttp import ClientSession, BaseConnector
-
-from ..typing import AsyncResult, Messages, Cookies
-from ..requests import raise_for_status, DEFAULT_HEADERS
-from ..image import ImageResponse, ImagePreview
-from ..errors import ResponseError
AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt, get_connector, format_cookies - -class Sources(): - def __init__(self, link_list: List[Dict[str, str]]) -> None: - self.list = link_list - - def __str__(self) -> str: - return "\n\n" + ("\n".join([f"[{link['title']}]({link['link']})" for link in self.list])) - -class AbraGeoBlockedError(Exception): - pass - -class MetaAI(AsyncGeneratorProvider, ProviderModelMixin): - label = "Meta AI" - url = "https://www.meta.ai" - working = True - default_model = '' - - def __init__(self, proxy: str = None, connector: BaseConnector = None): - self.session = ClientSession(connector=get_connector(connector, proxy), headers=DEFAULT_HEADERS) - self.cookies: Cookies = None - self.access_token: str = None - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - async for chunk in cls(proxy).prompt(format_prompt(messages)): - yield chunk - - async def update_access_token(self, birthday: str = "1999-01-01"): - url = "https://www.meta.ai/api/graphql/" - payload = { - "lsd": self.lsd, - "fb_api_caller_class": "RelayModern", - "fb_api_req_friendly_name": "useAbraAcceptTOSForTempUserMutation", - "variables": json.dumps({ - "dob": birthday, - "icebreaker_type": "TEXT", - "__relay_internal__pv__WebPixelRatiorelayprovider": 1, - }), - "doc_id": "7604648749596940", - } - headers = { - "x-fb-friendly-name": "useAbraAcceptTOSForTempUserMutation", - "x-fb-lsd": self.lsd, - "x-asbd-id": "129477", - "alt-used": "www.meta.ai", - "sec-fetch-site": "same-origin" - } - async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response: - await raise_for_status(response, "Fetch access_token failed") - auth_json = await response.json(content_type=None) - self.access_token = auth_json["data"]["xab_abra_accept_terms_of_service"]["new_temp_user_auth"]["access_token"] - - async def prompt(self, message: str, cookies: Cookies = None) -> AsyncResult: - if self.cookies is None: - await self.update_cookies(cookies) - if cookies is not None: - self.access_token = None - if self.access_token is None and cookies is None: - await self.update_access_token() - - if self.access_token is None: - url = "https://www.meta.ai/api/graphql/" - payload = {"lsd": self.lsd, 'fb_dtsg': self.dtsg} - headers = {'x-fb-lsd': self.lsd} - else: - url = "https://graph.meta.ai/graphql?locale=user" - payload = {"access_token": self.access_token} - headers = {} - headers = { - 'content-type': 'application/x-www-form-urlencoded', - 'cookie': format_cookies(self.cookies), - 'origin': 'https://www.meta.ai', - 'referer': 'https://www.meta.ai/', - 'x-asbd-id': '129477', - 'x-fb-friendly-name': 'useAbraSendMessageMutation', - **headers - } - payload = { - **payload, - 'fb_api_caller_class': 'RelayModern', - 'fb_api_req_friendly_name': 'useAbraSendMessageMutation', - "variables": json.dumps({ - "message": {"sensitive_string_value": message}, - "externalConversationId": str(uuid.uuid4()), - "offlineThreadingId": generate_offline_threading_id(), - "suggestedPromptIndex": None, - "flashVideoRecapInput": {"images": []}, - "flashPreviewInput": None, - "promptPrefix": None, - "entrypoint": "ABRA__CHAT__TEXT", - "icebreaker_type": "TEXT", - "__relay_internal__pv__AbraDebugDevOnlyrelayprovider": False, - "__relay_internal__pv__WebPixelRatiorelayprovider": 1, - }), - 'server_timestamps': 'true', - 'doc_id': '7783822248314888' - } - async with self.session.post(url, headers=headers, 
data=payload) as response: - await raise_for_status(response, "Fetch response failed") - last_snippet_len = 0 - fetch_id = None - async for line in response.content: - if b"
<h1>Something Went Wrong</h1>
" in line: - raise ResponseError("Response: Something Went Wrong") - try: - json_line = json.loads(line) - except json.JSONDecodeError: - continue - bot_response_message = json_line.get("data", {}).get("node", {}).get("bot_response_message", {}) - streaming_state = bot_response_message.get("streaming_state") - fetch_id = bot_response_message.get("fetch_id") or fetch_id - if streaming_state in ("STREAMING", "OVERALL_DONE"): - imagine_card = bot_response_message.get("imagine_card") - if imagine_card is not None: - imagine_session = imagine_card.get("session") - if imagine_session is not None: - imagine_medias = imagine_session.get("media_sets", {}).pop().get("imagine_media") - if imagine_medias is not None: - image_class = ImageResponse if streaming_state == "OVERALL_DONE" else ImagePreview - yield image_class([media["uri"] for media in imagine_medias], imagine_medias[0]["prompt"]) - snippet = bot_response_message["snippet"] - new_snippet_len = len(snippet) - if new_snippet_len > last_snippet_len: - yield snippet[last_snippet_len:] - last_snippet_len = new_snippet_len - #if last_streamed_response is None: - # if attempts > 3: - # raise Exception("MetaAI is having issues and was not able to respond (Server Error)") - # access_token = await self.get_access_token() - # return await self.prompt(message=message, attempts=attempts + 1) - if fetch_id is not None: - sources = await self.fetch_sources(fetch_id) - if sources is not None: - yield sources - - async def update_cookies(self, cookies: Cookies = None): - async with self.session.get("https://www.meta.ai/", cookies=cookies) as response: - await raise_for_status(response, "Fetch home failed") - text = await response.text() - if "AbraGeoBlockedError" in text: - raise AbraGeoBlockedError("Meta AI isn't available yet in your country") - if cookies is None: - cookies = { - "_js_datr": self.extract_value(text, "_js_datr"), - "abra_csrf": self.extract_value(text, "abra_csrf"), - "datr": self.extract_value(text, "datr"), - } - self.lsd = self.extract_value(text, start_str='"LSD",[],{"token":"', end_str='"}') - self.dtsg = self.extract_value(text, start_str='"DTSGInitialData",[],{"token":"', end_str='"}') - self.cookies = cookies - - async def fetch_sources(self, fetch_id: str) -> Sources: - if self.access_token is None: - url = "https://www.meta.ai/api/graphql/" - payload = {"lsd": self.lsd, 'fb_dtsg': self.dtsg} - headers = {'x-fb-lsd': self.lsd} - else: - url = "https://graph.meta.ai/graphql?locale=user" - payload = {"access_token": self.access_token} - headers = {} - payload = { - **payload, - "fb_api_caller_class": "RelayModern", - "fb_api_req_friendly_name": "AbraSearchPluginDialogQuery", - "variables": json.dumps({"abraMessageFetchID": fetch_id}), - "server_timestamps": "true", - "doc_id": "6946734308765963", - } - headers = { - "authority": "graph.meta.ai", - "x-fb-friendly-name": "AbraSearchPluginDialogQuery", - **headers - } - async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response: - await raise_for_status(response, "Fetch sources failed") - text = await response.text() - if "
<h1>Something Went Wrong</h1>
" in text: - raise ResponseError("Response: Something Went Wrong") - try: - response_json = json.loads(text) - message = response_json["data"]["message"] - if message is not None: - searchResults = message["searchResults"] - if searchResults is not None: - return Sources(searchResults["references"]) - except (KeyError, TypeError, json.JSONDecodeError): - raise RuntimeError(f"Response: {text}") - - @staticmethod - def extract_value(text: str, key: str = None, start_str = None, end_str = '",') -> str: - if start_str is None: - start_str = f'{key}":{{"value":"' - start = text.find(start_str) - if start >= 0: - start+= len(start_str) - end = text.find(end_str, start) - if end >= 0: - return text[start:end] - -def generate_offline_threading_id() -> str: - """ - Generates an offline threading ID. - - Returns: - str: The generated offline threading ID. - """ - # Generate a random 64-bit integer - random_value = random.getrandbits(64) - - # Get the current timestamp in milliseconds - timestamp = int(time.time() * 1000) - - # Combine timestamp and random value - threading_id = (timestamp << 22) | (random_value & ((1 << 22) - 1)) - - return str(threading_id) diff --git a/g4f/Provider/MetaAIAccount.py b/g4f/Provider/MetaAIAccount.py deleted file mode 100644 index 369b3f2f..00000000 --- a/g4f/Provider/MetaAIAccount.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import annotations - -from ..typing import AsyncResult, Messages, Cookies -from .helper import format_prompt, get_cookies -from .MetaAI import MetaAI - -class MetaAIAccount(MetaAI): - needs_auth = True - parent = "MetaAI" - image_models = ["meta"] - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - cookies: Cookies = None, - **kwargs - ) -> AsyncResult: - cookies = get_cookies(".meta.ai", True, True) if cookies is None else cookies - async for chunk in cls(proxy).prompt(format_prompt(messages), cookies): - yield chunk \ No newline at end of file diff --git a/g4f/Provider/Ollama.py b/g4f/Provider/Ollama.py deleted file mode 100644 index f9116541..00000000 --- a/g4f/Provider/Ollama.py +++ /dev/null @@ -1,40 +0,0 @@ -from __future__ import annotations - -import requests -import os - -from .needs_auth.Openai import Openai -from ..typing import AsyncResult, Messages - -class Ollama(Openai): - label = "Ollama" - url = "https://ollama.com" - needs_auth = False - working = True - - @classmethod - def get_models(cls): - if not cls.models: - host = os.getenv("OLLAMA_HOST", "127.0.0.1") - port = os.getenv("OLLAMA_PORT", "11434") - url = f"http://{host}:{port}/api/tags" - models = requests.get(url).json()["models"] - cls.models = [model["name"] for model in models] - cls.default_model = cls.models[0] - return cls.models - - @classmethod - def create_async_generator( - cls, - model: str, - messages: Messages, - api_base: str = None, - **kwargs - ) -> AsyncResult: - if not api_base: - host = os.getenv("OLLAMA_HOST", "localhost") - port = os.getenv("OLLAMA_PORT", "11434") - api_base: str = f"http://{host}:{port}/v1" - return super().create_async_generator( - model, messages, api_base=api_base, **kwargs - ) \ No newline at end of file diff --git a/g4f/Provider/Replicate.py b/g4f/Provider/Replicate.py deleted file mode 100644 index 7ff8ad65..00000000 --- a/g4f/Provider/Replicate.py +++ /dev/null @@ -1,88 +0,0 @@ -from __future__ import annotations - -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt, filter_none -from ..typing import 
AsyncResult, Messages -from ..requests import raise_for_status -from ..requests.aiohttp import StreamSession -from ..errors import ResponseError, MissingAuthError - -class Replicate(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://replicate.com" - working = True - needs_auth = True - default_model = "meta/meta-llama-3-70b-instruct" - model_aliases = { - "meta-llama/Meta-Llama-3-70B-Instruct": default_model - } - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - api_key: str = None, - proxy: str = None, - timeout: int = 180, - system_prompt: str = None, - max_new_tokens: int = None, - temperature: float = None, - top_p: float = None, - top_k: float = None, - stop: list = None, - extra_data: dict = {}, - headers: dict = { - "accept": "application/json", - }, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - if cls.needs_auth and api_key is None: - raise MissingAuthError("api_key is missing") - if api_key is not None: - headers["Authorization"] = f"Bearer {api_key}" - api_base = "https://api.replicate.com/v1/models/" - else: - api_base = "https://replicate.com/api/models/" - async with StreamSession( - proxy=proxy, - headers=headers, - timeout=timeout - ) as session: - data = { - "stream": True, - "input": { - "prompt": format_prompt(messages), - **filter_none( - system_prompt=system_prompt, - max_new_tokens=max_new_tokens, - temperature=temperature, - top_p=top_p, - top_k=top_k, - stop_sequences=",".join(stop) if stop else None - ), - **extra_data - }, - } - url = f"{api_base.rstrip('/')}/{model}/predictions" - async with session.post(url, json=data) as response: - message = "Model not found" if response.status == 404 else None - await raise_for_status(response, message) - result = await response.json() - if "id" not in result: - raise ResponseError(f"Invalid response: {result}") - async with session.get(result["urls"]["stream"], headers={"Accept": "text/event-stream"}) as response: - await raise_for_status(response) - event = None - async for line in response.iter_lines(): - if line.startswith(b"event: "): - event = line[7:] - if event == b"done": - break - elif event == b"output": - if line.startswith(b"data: "): - new_text = line[6:].decode() - if new_text: - yield new_text - else: - yield "\n" \ No newline at end of file diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index 55fabd25..f297f4dc 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -8,59 +8,40 @@ from ..providers.create_images import CreateImagesProvider from .deprecated import * from .selenium import * from .needs_auth import * +from .not_working import * +from .local import * -from .gigachat import * from .nexra import * -from .Ai4Chat import Ai4Chat from .AI365VIP import AI365VIP from .AIChatFree import AIChatFree from .AIUncensored import AIUncensored from .Allyfy import Allyfy -from .AmigoChat import AmigoChat -from .AiChatOnline import AiChatOnline -from .AiChats import AiChats from .AiMathGPT import AiMathGPT from .Airforce import Airforce -from .Aura import Aura from .Bing import Bing from .BingCreateImages import BingCreateImages from .Blackbox import Blackbox from .ChatGpt import ChatGpt from .Chatgpt4Online import Chatgpt4Online -from .Chatgpt4o import Chatgpt4o from .ChatGptEs import ChatGptEs -from .ChatgptFree import ChatgptFree from .ChatifyAI import ChatifyAI from .Cloudflare import Cloudflare from .DarkAI import DarkAI from .DDG import DDG from .DeepInfraChat import DeepInfraChat -from .DeepInfraImage 
import DeepInfraImage -from .Editee import Editee -from .FlowGpt import FlowGpt from .Free2GPT import Free2GPT -from .FreeChatgpt import FreeChatgpt from .FreeGpt import FreeGpt -from .FreeNetfly import FreeNetfly from .GeminiPro import GeminiPro from .GizAI import GizAI -from .GPROChat import GPROChat from .HuggingChat import HuggingChat -from .HuggingFace import HuggingFace -from .Koala import Koala from .Liaobots import Liaobots -from .Local import Local from .MagickPen import MagickPen -from .MetaAI import MetaAI -#from .MetaAIAccount import MetaAIAccount -from .Ollama import Ollama from .PerplexityLabs import PerplexityLabs from .Pi import Pi from .Pizzagpt import Pizzagpt from .Prodia import Prodia from .Reka import Reka -from .Replicate import Replicate from .ReplicateHome import ReplicateHome from .RubiksAI import RubiksAI from .TeachAnything import TeachAnything diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py index bf923f2a..368a71a0 100644 --- a/g4f/Provider/deprecated/__init__.py +++ b/g4f/Provider/deprecated/__init__.py @@ -25,11 +25,10 @@ from .Aichat import Aichat from .Berlin import Berlin from .Phind import Phind from .AiAsk import AiAsk -from ..AiChatOnline import AiChatOnline from .ChatAnywhere import ChatAnywhere from .FakeGpt import FakeGpt from .GeekGpt import GeekGpt from .GPTalk import GPTalk from .Hashnode import Hashnode from .Ylokh import Ylokh -from .OpenAssistant import OpenAssistant \ No newline at end of file +from .OpenAssistant import OpenAssistant diff --git a/g4f/Provider/gigachat/GigaChat.py b/g4f/Provider/gigachat/GigaChat.py deleted file mode 100644 index b1b293e3..00000000 --- a/g4f/Provider/gigachat/GigaChat.py +++ /dev/null @@ -1,92 +0,0 @@ -from __future__ import annotations - -import os -import ssl -import time -import uuid - -import json -from aiohttp import ClientSession, TCPConnector, BaseConnector -from g4f.requests import raise_for_status - -from ...typing import AsyncResult, Messages -from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ...errors import MissingAuthError -from ..helper import get_connector - -access_token = "" -token_expires_at = 0 - -class GigaChat(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://developers.sber.ru/gigachat" - working = True - supports_message_history = True - supports_system_message = True - supports_stream = True - needs_auth = True - default_model = "GigaChat:latest" - models = ["GigaChat:latest", "GigaChat-Plus", "GigaChat-Pro"] - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - stream: bool = True, - proxy: str = None, - api_key: str = None, - connector: BaseConnector = None, - scope: str = "GIGACHAT_API_PERS", - update_interval: float = 0, - **kwargs - ) -> AsyncResult: - global access_token, token_expires_at - model = cls.get_model(model) - if not api_key: - raise MissingAuthError('Missing "api_key"') - - cafile = os.path.join(os.path.dirname(__file__), "russian_trusted_root_ca_pem.crt") - ssl_context = ssl.create_default_context(cafile=cafile) if os.path.exists(cafile) else None - if connector is None and ssl_context is not None: - connector = TCPConnector(ssl_context=ssl_context) - async with ClientSession(connector=get_connector(connector, proxy)) as session: - if token_expires_at - int(time.time() * 1000) < 60000: - async with session.post(url="https://ngw.devices.sberbank.ru:9443/api/v2/oauth", - headers={"Authorization": f"Bearer {api_key}", - "RqUID": str(uuid.uuid4()), - 
"Content-Type": "application/x-www-form-urlencoded"}, - data={"scope": scope}) as response: - await raise_for_status(response) - data = await response.json() - access_token = data['access_token'] - token_expires_at = data['expires_at'] - - async with session.post(url="https://gigachat.devices.sberbank.ru/api/v1/chat/completions", - headers={"Authorization": f"Bearer {access_token}"}, - json={ - "model": model, - "messages": messages, - "stream": stream, - "update_interval": update_interval, - **kwargs - }) as response: - await raise_for_status(response) - - async for line in response.content: - if not stream: - yield json.loads(line.decode("utf-8"))['choices'][0]['message']['content'] - return - - if line and line.startswith(b"data:"): - line = line[6:-1] # remove "data: " prefix and "\n" suffix - if line.strip() == b"[DONE]": - return - else: - msg = json.loads(line.decode("utf-8"))['choices'][0] - content = msg['delta']['content'] - - if content: - yield content - - if 'finish_reason' in msg: - return diff --git a/g4f/Provider/gigachat/__init__.py b/g4f/Provider/gigachat/__init__.py deleted file mode 100644 index c9853742..00000000 --- a/g4f/Provider/gigachat/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .GigaChat import GigaChat - diff --git a/g4f/Provider/gigachat/russian_trusted_root_ca_pem.crt b/g4f/Provider/gigachat/russian_trusted_root_ca_pem.crt deleted file mode 100644 index 4c143a21..00000000 --- a/g4f/Provider/gigachat/russian_trusted_root_ca_pem.crt +++ /dev/null @@ -1,33 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFwjCCA6qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwcDELMAkGA1UEBhMCUlUx -PzA9BgNVBAoMNlRoZSBNaW5pc3RyeSBvZiBEaWdpdGFsIERldmVsb3BtZW50IGFu -ZCBDb21tdW5pY2F0aW9uczEgMB4GA1UEAwwXUnVzc2lhbiBUcnVzdGVkIFJvb3Qg -Q0EwHhcNMjIwMzAxMjEwNDE1WhcNMzIwMjI3MjEwNDE1WjBwMQswCQYDVQQGEwJS -VTE/MD0GA1UECgw2VGhlIE1pbmlzdHJ5IG9mIERpZ2l0YWwgRGV2ZWxvcG1lbnQg -YW5kIENvbW11bmljYXRpb25zMSAwHgYDVQQDDBdSdXNzaWFuIFRydXN0ZWQgUm9v -dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMfFOZ8pUAL3+r2n -qqE0Zp52selXsKGFYoG0GM5bwz1bSFtCt+AZQMhkWQheI3poZAToYJu69pHLKS6Q -XBiwBC1cvzYmUYKMYZC7jE5YhEU2bSL0mX7NaMxMDmH2/NwuOVRj8OImVa5s1F4U -zn4Kv3PFlDBjjSjXKVY9kmjUBsXQrIHeaqmUIsPIlNWUnimXS0I0abExqkbdrXbX -YwCOXhOO2pDUx3ckmJlCMUGacUTnylyQW2VsJIyIGA8V0xzdaeUXg0VZ6ZmNUr5Y -Ber/EAOLPb8NYpsAhJe2mXjMB/J9HNsoFMBFJ0lLOT/+dQvjbdRZoOT8eqJpWnVD -U+QL/qEZnz57N88OWM3rabJkRNdU/Z7x5SFIM9FrqtN8xewsiBWBI0K6XFuOBOTD -4V08o4TzJ8+Ccq5XlCUW2L48pZNCYuBDfBh7FxkB7qDgGDiaftEkZZfApRg2E+M9 -G8wkNKTPLDc4wH0FDTijhgxR3Y4PiS1HL2Zhw7bD3CbslmEGgfnnZojNkJtcLeBH -BLa52/dSwNU4WWLubaYSiAmA9IUMX1/RpfpxOxd4Ykmhz97oFbUaDJFipIggx5sX -ePAlkTdWnv+RWBxlJwMQ25oEHmRguNYf4Zr/Rxr9cS93Y+mdXIZaBEE0KS2iLRqa -OiWBki9IMQU4phqPOBAaG7A+eP8PAgMBAAGjZjBkMB0GA1UdDgQWBBTh0YHlzlpf -BKrS6badZrHF+qwshzAfBgNVHSMEGDAWgBTh0YHlzlpfBKrS6badZrHF+qwshzAS -BgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsF -AAOCAgEAALIY1wkilt/urfEVM5vKzr6utOeDWCUczmWX/RX4ljpRdgF+5fAIS4vH -tmXkqpSCOVeWUrJV9QvZn6L227ZwuE15cWi8DCDal3Ue90WgAJJZMfTshN4OI8cq -W9E4EG9wglbEtMnObHlms8F3CHmrw3k6KmUkWGoa+/ENmcVl68u/cMRl1JbW2bM+ -/3A+SAg2c6iPDlehczKx2oa95QW0SkPPWGuNA/CE8CpyANIhu9XFrj3RQ3EqeRcS -AQQod1RNuHpfETLU/A2gMmvn/w/sx7TB3W5BPs6rprOA37tutPq9u6FTZOcG1Oqj -C/B7yTqgI7rbyvox7DEXoX7rIiEqyNNUguTk/u3SZ4VXE2kmxdmSh3TQvybfbnXV -4JbCZVaqiZraqc7oZMnRoWrXRG3ztbnbes/9qhRGI7PqXqeKJBztxRTEVj8ONs1d -WN5szTwaPIvhkhO3CO5ErU2rVdUr89wKpNXbBODFKRtgxUT70YpmJ46VVaqdAhOZ -D9EUUn4YaeLaS8AjSF/h7UkjOibNc4qVDiPP+rkehFWM66PVnP1Msh93tc+taIfC -EYVMxjh8zNbFuoc7fzvvrFILLe7ifvEIUqSVIC/AzplM/Jxw7buXFeGP1qVCBEHq 
-391d/9RAfaZ12zkwFsl+IKwE/OZxW8AHa9i1p4GO0YSNuczzEm4= ------END CERTIFICATE----- \ No newline at end of file diff --git a/g4f/Provider/local/Local.py b/g4f/Provider/local/Local.py new file mode 100644 index 00000000..4dc6e3f9 --- /dev/null +++ b/g4f/Provider/local/Local.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from ...locals.models import get_models +try: + from ...locals.provider import LocalProvider + has_requirements = True +except ImportError: + has_requirements = False + +from ...typing import Messages, CreateResult +from ...providers.base_provider import AbstractProvider, ProviderModelMixin +from ...errors import MissingRequirementsError + +class Local(AbstractProvider, ProviderModelMixin): + label = "GPT4All" + working = True + supports_message_history = True + supports_system_message = True + supports_stream = True + + @classmethod + def get_models(cls): + if not cls.models: + cls.models = list(get_models()) + cls.default_model = cls.models[0] + return cls.models + + @classmethod + def create_completion( + cls, + model: str, + messages: Messages, + stream: bool, + **kwargs + ) -> CreateResult: + if not has_requirements: + raise MissingRequirementsError('Install "gpt4all" package | pip install -U g4f[local]') + return LocalProvider.create_completion( + cls.get_model(model), + messages, + stream, + **kwargs + ) diff --git a/g4f/Provider/local/Ollama.py b/g4f/Provider/local/Ollama.py new file mode 100644 index 00000000..c503a46a --- /dev/null +++ b/g4f/Provider/local/Ollama.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +import requests +import os + +from ..needs_auth.Openai import Openai +from ...typing import AsyncResult, Messages + +class Ollama(Openai): + label = "Ollama" + url = "https://ollama.com" + needs_auth = False + working = True + + @classmethod + def get_models(cls): + if not cls.models: + host = os.getenv("OLLAMA_HOST", "127.0.0.1") + port = os.getenv("OLLAMA_PORT", "11434") + url = f"http://{host}:{port}/api/tags" + models = requests.get(url).json()["models"] + cls.models = [model["name"] for model in models] + cls.default_model = cls.models[0] + return cls.models + + @classmethod + def create_async_generator( + cls, + model: str, + messages: Messages, + api_base: str = None, + **kwargs + ) -> AsyncResult: + if not api_base: + host = os.getenv("OLLAMA_HOST", "localhost") + port = os.getenv("OLLAMA_PORT", "11434") + api_base: str = f"http://{host}:{port}/v1" + return super().create_async_generator( + model, messages, api_base=api_base, **kwargs + ) diff --git a/g4f/Provider/local/__init__.py b/g4f/Provider/local/__init__.py new file mode 100644 index 00000000..05f6022e --- /dev/null +++ b/g4f/Provider/local/__init__.py @@ -0,0 +1,2 @@ +from .Local import Local +from .Ollama import Ollama diff --git a/g4f/Provider/needs_auth/DeepInfraImage.py b/g4f/Provider/needs_auth/DeepInfraImage.py new file mode 100644 index 00000000..2310c1c8 --- /dev/null +++ b/g4f/Provider/needs_auth/DeepInfraImage.py @@ -0,0 +1,80 @@ +from __future__ import annotations + +import requests + +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...typing import AsyncResult, Messages +from ...requests import StreamSession, raise_for_status +from ...image import ImageResponse + +class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://deepinfra.com" + parent = "DeepInfra" + working = True + needs_auth = True + default_model = '' + image_models = [default_model] + + @classmethod + def get_models(cls): + if not cls.models: + 
url = 'https://api.deepinfra.com/models/featured' + models = requests.get(url).json() + cls.models = [model['model_name'] for model in models if model["reported_type"] == "text-to-image"] + cls.image_models = cls.models + return cls.models + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + **kwargs + ) -> AsyncResult: + yield await cls.create_async(messages[-1]["content"], model, **kwargs) + + @classmethod + async def create_async( + cls, + prompt: str, + model: str, + api_key: str = None, + api_base: str = "https://api.deepinfra.com/v1/inference", + proxy: str = None, + timeout: int = 180, + extra_data: dict = {}, + **kwargs + ) -> ImageResponse: + headers = { + 'Accept-Encoding': 'gzip, deflate, br', + 'Accept-Language': 'en-US', + 'Connection': 'keep-alive', + 'Origin': 'https://deepinfra.com', + 'Referer': 'https://deepinfra.com/', + 'Sec-Fetch-Dest': 'empty', + 'Sec-Fetch-Mode': 'cors', + 'Sec-Fetch-Site': 'same-site', + 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36', + 'X-Deepinfra-Source': 'web-embed', + 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"macOS"', + } + if api_key is not None: + headers["Authorization"] = f"Bearer {api_key}" + async with StreamSession( + proxies={"all": proxy}, + headers=headers, + timeout=timeout + ) as session: + model = cls.get_model(model) + data = {"prompt": prompt, **extra_data} + data = {"input": data} if model == cls.default_model else data + async with session.post(f"{api_base.rstrip('/')}/{model}", json=data) as response: + await raise_for_status(response) + data = await response.json() + images = data["output"] if "output" in data else data["images"] + if not images: + raise RuntimeError(f"Response: {data}") + images = images[0] if len(images) == 1 else images + return ImageResponse(images, prompt) diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py new file mode 100644 index 00000000..ecc75d1c --- /dev/null +++ b/g4f/Provider/needs_auth/HuggingFace.py @@ -0,0 +1,104 @@ +from __future__ import annotations + +import json +from aiohttp import ClientSession, BaseConnector + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import get_connector +from ...errors import RateLimitError, ModelNotFoundError +from ...requests.raise_for_status import raise_for_status + +from ..HuggingChat import HuggingChat + +class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://huggingface.co/chat" + working = True + needs_auth = True + supports_message_history = True + default_model = HuggingChat.default_model + models = HuggingChat.models + model_aliases = HuggingChat.model_aliases + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + stream: bool = True, + proxy: str = None, + connector: BaseConnector = None, + api_base: str = "https://api-inference.huggingface.co", + api_key: str = None, + max_new_tokens: int = 1024, + temperature: float = 0.7, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + headers = { + 'accept': '*/*', + 'accept-language': 
'en', + 'cache-control': 'no-cache', + 'origin': 'https://huggingface.co', + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'referer': 'https://huggingface.co/chat/', + 'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"macOS"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-origin', + 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36', + } + if api_key is not None: + headers["Authorization"] = f"Bearer {api_key}" + + params = { + "return_full_text": False, + "max_new_tokens": max_new_tokens, + "temperature": temperature, + **kwargs + } + payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream} + + async with ClientSession( + headers=headers, + connector=get_connector(connector, proxy) + ) as session: + async with session.post(f"{api_base.rstrip('/')}/models/{model}", json=payload) as response: + if response.status == 404: + raise ModelNotFoundError(f"Model is not supported: {model}") + await raise_for_status(response) + if stream: + first = True + async for line in response.content: + if line.startswith(b"data:"): + data = json.loads(line[5:]) + if not data["token"]["special"]: + chunk = data["token"]["text"] + if first: + first = False + chunk = chunk.lstrip() + yield chunk + else: + yield (await response.json())[0]["generated_text"].strip() + +def format_prompt(messages: Messages) -> str: + system_messages = [message["content"] for message in messages if message["role"] == "system"] + question = " ".join([messages[-1]["content"], *system_messages]) + history = "".join([ + f"[INST]{messages[idx-1]['content']} [/INST] {message['content']}" + for idx, message in enumerate(messages) + if message["role"] == "assistant" + ]) + return f"{history}[INST] {question} [/INST]" diff --git a/g4f/Provider/needs_auth/MetaAI.py b/g4f/Provider/needs_auth/MetaAI.py new file mode 100644 index 00000000..4b730abd --- /dev/null +++ b/g4f/Provider/needs_auth/MetaAI.py @@ -0,0 +1,238 @@ +from __future__ import annotations + +import json +import uuid +import random +import time +from typing import Dict, List + +from aiohttp import ClientSession, BaseConnector + +from ...typing import AsyncResult, Messages, Cookies +from ...requests import raise_for_status, DEFAULT_HEADERS +from ...image import ImageResponse, ImagePreview +from ...errors import ResponseError +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt, get_connector, format_cookies + +class Sources(): + def __init__(self, link_list: List[Dict[str, str]]) -> None: + self.list = link_list + + def __str__(self) -> str: + return "\n\n" + ("\n".join([f"[{link['title']}]({link['link']})" for link in self.list])) + +class AbraGeoBlockedError(Exception): + pass + +class MetaAI(AsyncGeneratorProvider, ProviderModelMixin): + label = "Meta AI" + url = "https://www.meta.ai" + working = True + default_model = '' + + def __init__(self, proxy: str = None, connector: BaseConnector = None): + self.session = ClientSession(connector=get_connector(connector, proxy), headers=DEFAULT_HEADERS) + self.cookies: Cookies = None + self.access_token: str = None + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + async for chunk in cls(proxy).prompt(format_prompt(messages)): + yield chunk + + async def 
update_access_token(self, birthday: str = "1999-01-01"): + url = "https://www.meta.ai/api/graphql/" + payload = { + "lsd": self.lsd, + "fb_api_caller_class": "RelayModern", + "fb_api_req_friendly_name": "useAbraAcceptTOSForTempUserMutation", + "variables": json.dumps({ + "dob": birthday, + "icebreaker_type": "TEXT", + "__relay_internal__pv__WebPixelRatiorelayprovider": 1, + }), + "doc_id": "7604648749596940", + } + headers = { + "x-fb-friendly-name": "useAbraAcceptTOSForTempUserMutation", + "x-fb-lsd": self.lsd, + "x-asbd-id": "129477", + "alt-used": "www.meta.ai", + "sec-fetch-site": "same-origin" + } + async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response: + await raise_for_status(response, "Fetch access_token failed") + auth_json = await response.json(content_type=None) + self.access_token = auth_json["data"]["xab_abra_accept_terms_of_service"]["new_temp_user_auth"]["access_token"] + + async def prompt(self, message: str, cookies: Cookies = None) -> AsyncResult: + if self.cookies is None: + await self.update_cookies(cookies) + if cookies is not None: + self.access_token = None + if self.access_token is None and cookies is None: + await self.update_access_token() + + if self.access_token is None: + url = "https://www.meta.ai/api/graphql/" + payload = {"lsd": self.lsd, 'fb_dtsg': self.dtsg} + headers = {'x-fb-lsd': self.lsd} + else: + url = "https://graph.meta.ai/graphql?locale=user" + payload = {"access_token": self.access_token} + headers = {} + headers = { + 'content-type': 'application/x-www-form-urlencoded', + 'cookie': format_cookies(self.cookies), + 'origin': 'https://www.meta.ai', + 'referer': 'https://www.meta.ai/', + 'x-asbd-id': '129477', + 'x-fb-friendly-name': 'useAbraSendMessageMutation', + **headers + } + payload = { + **payload, + 'fb_api_caller_class': 'RelayModern', + 'fb_api_req_friendly_name': 'useAbraSendMessageMutation', + "variables": json.dumps({ + "message": {"sensitive_string_value": message}, + "externalConversationId": str(uuid.uuid4()), + "offlineThreadingId": generate_offline_threading_id(), + "suggestedPromptIndex": None, + "flashVideoRecapInput": {"images": []}, + "flashPreviewInput": None, + "promptPrefix": None, + "entrypoint": "ABRA__CHAT__TEXT", + "icebreaker_type": "TEXT", + "__relay_internal__pv__AbraDebugDevOnlyrelayprovider": False, + "__relay_internal__pv__WebPixelRatiorelayprovider": 1, + }), + 'server_timestamps': 'true', + 'doc_id': '7783822248314888' + } + async with self.session.post(url, headers=headers, data=payload) as response: + await raise_for_status(response, "Fetch response failed") + last_snippet_len = 0 + fetch_id = None + async for line in response.content: + if b"
<h1>Something Went Wrong</h1>
" in line: + raise ResponseError("Response: Something Went Wrong") + try: + json_line = json.loads(line) + except json.JSONDecodeError: + continue + bot_response_message = json_line.get("data", {}).get("node", {}).get("bot_response_message", {}) + streaming_state = bot_response_message.get("streaming_state") + fetch_id = bot_response_message.get("fetch_id") or fetch_id + if streaming_state in ("STREAMING", "OVERALL_DONE"): + imagine_card = bot_response_message.get("imagine_card") + if imagine_card is not None: + imagine_session = imagine_card.get("session") + if imagine_session is not None: + imagine_medias = imagine_session.get("media_sets", {}).pop().get("imagine_media") + if imagine_medias is not None: + image_class = ImageResponse if streaming_state == "OVERALL_DONE" else ImagePreview + yield image_class([media["uri"] for media in imagine_medias], imagine_medias[0]["prompt"]) + snippet = bot_response_message["snippet"] + new_snippet_len = len(snippet) + if new_snippet_len > last_snippet_len: + yield snippet[last_snippet_len:] + last_snippet_len = new_snippet_len + #if last_streamed_response is None: + # if attempts > 3: + # raise Exception("MetaAI is having issues and was not able to respond (Server Error)") + # access_token = await self.get_access_token() + # return await self.prompt(message=message, attempts=attempts + 1) + if fetch_id is not None: + sources = await self.fetch_sources(fetch_id) + if sources is not None: + yield sources + + async def update_cookies(self, cookies: Cookies = None): + async with self.session.get("https://www.meta.ai/", cookies=cookies) as response: + await raise_for_status(response, "Fetch home failed") + text = await response.text() + if "AbraGeoBlockedError" in text: + raise AbraGeoBlockedError("Meta AI isn't available yet in your country") + if cookies is None: + cookies = { + "_js_datr": self.extract_value(text, "_js_datr"), + "abra_csrf": self.extract_value(text, "abra_csrf"), + "datr": self.extract_value(text, "datr"), + } + self.lsd = self.extract_value(text, start_str='"LSD",[],{"token":"', end_str='"}') + self.dtsg = self.extract_value(text, start_str='"DTSGInitialData",[],{"token":"', end_str='"}') + self.cookies = cookies + + async def fetch_sources(self, fetch_id: str) -> Sources: + if self.access_token is None: + url = "https://www.meta.ai/api/graphql/" + payload = {"lsd": self.lsd, 'fb_dtsg': self.dtsg} + headers = {'x-fb-lsd': self.lsd} + else: + url = "https://graph.meta.ai/graphql?locale=user" + payload = {"access_token": self.access_token} + headers = {} + payload = { + **payload, + "fb_api_caller_class": "RelayModern", + "fb_api_req_friendly_name": "AbraSearchPluginDialogQuery", + "variables": json.dumps({"abraMessageFetchID": fetch_id}), + "server_timestamps": "true", + "doc_id": "6946734308765963", + } + headers = { + "authority": "graph.meta.ai", + "x-fb-friendly-name": "AbraSearchPluginDialogQuery", + **headers + } + async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response: + await raise_for_status(response, "Fetch sources failed") + text = await response.text() + if "
<h1>Something Went Wrong</h1>
" in text: + raise ResponseError("Response: Something Went Wrong") + try: + response_json = json.loads(text) + message = response_json["data"]["message"] + if message is not None: + searchResults = message["searchResults"] + if searchResults is not None: + return Sources(searchResults["references"]) + except (KeyError, TypeError, json.JSONDecodeError): + raise RuntimeError(f"Response: {text}") + + @staticmethod + def extract_value(text: str, key: str = None, start_str = None, end_str = '",') -> str: + if start_str is None: + start_str = f'{key}":{{"value":"' + start = text.find(start_str) + if start >= 0: + start+= len(start_str) + end = text.find(end_str, start) + if end >= 0: + return text[start:end] + +def generate_offline_threading_id() -> str: + """ + Generates an offline threading ID. + + Returns: + str: The generated offline threading ID. + """ + # Generate a random 64-bit integer + random_value = random.getrandbits(64) + + # Get the current timestamp in milliseconds + timestamp = int(time.time() * 1000) + + # Combine timestamp and random value + threading_id = (timestamp << 22) | (random_value & ((1 << 22) - 1)) + + return str(threading_id) diff --git a/g4f/Provider/needs_auth/MetaAIAccount.py b/g4f/Provider/needs_auth/MetaAIAccount.py new file mode 100644 index 00000000..2d54f3e0 --- /dev/null +++ b/g4f/Provider/needs_auth/MetaAIAccount.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +from ...typing import AsyncResult, Messages, Cookies +from ..helper import format_prompt, get_cookies +from ..MetaAI import MetaAI + +class MetaAIAccount(MetaAI): + needs_auth = True + parent = "MetaAI" + image_models = ["meta"] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + cookies: Cookies = None, + **kwargs + ) -> AsyncResult: + cookies = get_cookies(".meta.ai", True, True) if cookies is None else cookies + async for chunk in cls(proxy).prompt(format_prompt(messages), cookies): + yield chunk diff --git a/g4f/Provider/needs_auth/OpenRouter.py b/g4f/Provider/needs_auth/OpenRouter.py deleted file mode 100644 index 5e0bf336..00000000 --- a/g4f/Provider/needs_auth/OpenRouter.py +++ /dev/null @@ -1,32 +0,0 @@ -from __future__ import annotations - -import requests - -from .Openai import Openai -from ...typing import AsyncResult, Messages - -class OpenRouter(Openai): - label = "OpenRouter" - url = "https://openrouter.ai" - working = False - default_model = "mistralai/mistral-7b-instruct:free" - - @classmethod - def get_models(cls): - if not cls.models: - url = 'https://openrouter.ai/api/v1/models' - models = requests.get(url).json()["data"] - cls.models = [model['id'] for model in models] - return cls.models - - @classmethod - def create_async_generator( - cls, - model: str, - messages: Messages, - api_base: str = "https://openrouter.ai/api/v1", - **kwargs - ) -> AsyncResult: - return super().create_async_generator( - model, messages, api_base=api_base, **kwargs - ) diff --git a/g4f/Provider/needs_auth/Replicate.py b/g4f/Provider/needs_auth/Replicate.py new file mode 100644 index 00000000..ec993aa4 --- /dev/null +++ b/g4f/Provider/needs_auth/Replicate.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt, filter_none +from ...typing import AsyncResult, Messages +from ...requests import raise_for_status +from ...requests.aiohttp import StreamSession +from ...errors import ResponseError, MissingAuthError + +class 
Replicate(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://replicate.com" + working = True + needs_auth = True + default_model = "meta/meta-llama-3-70b-instruct" + model_aliases = { + "meta-llama/Meta-Llama-3-70B-Instruct": default_model + } + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + api_key: str = None, + proxy: str = None, + timeout: int = 180, + system_prompt: str = None, + max_new_tokens: int = None, + temperature: float = None, + top_p: float = None, + top_k: float = None, + stop: list = None, + extra_data: dict = {}, + headers: dict = { + "accept": "application/json", + }, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + if cls.needs_auth and api_key is None: + raise MissingAuthError("api_key is missing") + if api_key is not None: + headers["Authorization"] = f"Bearer {api_key}" + api_base = "https://api.replicate.com/v1/models/" + else: + api_base = "https://replicate.com/api/models/" + async with StreamSession( + proxy=proxy, + headers=headers, + timeout=timeout + ) as session: + data = { + "stream": True, + "input": { + "prompt": format_prompt(messages), + **filter_none( + system_prompt=system_prompt, + max_new_tokens=max_new_tokens, + temperature=temperature, + top_p=top_p, + top_k=top_k, + stop_sequences=",".join(stop) if stop else None + ), + **extra_data + }, + } + url = f"{api_base.rstrip('/')}/{model}/predictions" + async with session.post(url, json=data) as response: + message = "Model not found" if response.status == 404 else None + await raise_for_status(response, message) + result = await response.json() + if "id" not in result: + raise ResponseError(f"Invalid response: {result}") + async with session.get(result["urls"]["stream"], headers={"Accept": "text/event-stream"}) as response: + await raise_for_status(response) + event = None + async for line in response.iter_lines(): + if line.startswith(b"event: "): + event = line[7:] + if event == b"done": + break + elif event == b"output": + if line.startswith(b"data: "): + new_text = line[6:].decode() + if new_text: + yield new_text + else: + yield "\n" diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py index aa3547a5..0626a837 100644 --- a/g4f/Provider/needs_auth/__init__.py +++ b/g4f/Provider/needs_auth/__init__.py @@ -1,4 +1,7 @@ +from .gigachat import * + from .DeepInfra import DeepInfra +from .DeepInfraImage import DeepInfraImage from .Gemini import Gemini from .Raycast import Raycast from .Theb import Theb @@ -7,6 +10,9 @@ from .OpenaiChat import OpenaiChat from .Poe import Poe from .Openai import Openai from .Groq import Groq -from .OpenRouter import OpenRouter #from .OpenaiAccount import OpenaiAccount from .PerplexityApi import PerplexityApi +from .Replicate import Replicate +from .MetaAI import MetaAI +#from .MetaAIAccount import MetaAIAccount +from .HuggingFace import HuggingFace diff --git a/g4f/Provider/needs_auth/gigachat/GigaChat.py b/g4f/Provider/needs_auth/gigachat/GigaChat.py new file mode 100644 index 00000000..c9f1c011 --- /dev/null +++ b/g4f/Provider/needs_auth/gigachat/GigaChat.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +import os +import ssl +import time +import uuid + +import json +from aiohttp import ClientSession, TCPConnector, BaseConnector +from g4f.requests import raise_for_status + +from ....typing import AsyncResult, Messages +from ...base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ....errors import MissingAuthError +from ...helper import 
get_connector + +access_token = "" +token_expires_at = 0 + +class GigaChat(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://developers.sber.ru/gigachat" + working = True + supports_message_history = True + supports_system_message = True + supports_stream = True + needs_auth = True + default_model = "GigaChat:latest" + models = ["GigaChat:latest", "GigaChat-Plus", "GigaChat-Pro"] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + stream: bool = True, + proxy: str = None, + api_key: str = None, + connector: BaseConnector = None, + scope: str = "GIGACHAT_API_PERS", + update_interval: float = 0, + **kwargs + ) -> AsyncResult: + global access_token, token_expires_at + model = cls.get_model(model) + if not api_key: + raise MissingAuthError('Missing "api_key"') + + cafile = os.path.join(os.path.dirname(__file__), "russian_trusted_root_ca_pem.crt") + ssl_context = ssl.create_default_context(cafile=cafile) if os.path.exists(cafile) else None + if connector is None and ssl_context is not None: + connector = TCPConnector(ssl_context=ssl_context) + async with ClientSession(connector=get_connector(connector, proxy)) as session: + if token_expires_at - int(time.time() * 1000) < 60000: + async with session.post(url="https://ngw.devices.sberbank.ru:9443/api/v2/oauth", + headers={"Authorization": f"Bearer {api_key}", + "RqUID": str(uuid.uuid4()), + "Content-Type": "application/x-www-form-urlencoded"}, + data={"scope": scope}) as response: + await raise_for_status(response) + data = await response.json() + access_token = data['access_token'] + token_expires_at = data['expires_at'] + + async with session.post(url="https://gigachat.devices.sberbank.ru/api/v1/chat/completions", + headers={"Authorization": f"Bearer {access_token}"}, + json={ + "model": model, + "messages": messages, + "stream": stream, + "update_interval": update_interval, + **kwargs + }) as response: + await raise_for_status(response) + + async for line in response.content: + if not stream: + yield json.loads(line.decode("utf-8"))['choices'][0]['message']['content'] + return + + if line and line.startswith(b"data:"): + line = line[6:-1] # remove "data: " prefix and "\n" suffix + if line.strip() == b"[DONE]": + return + else: + msg = json.loads(line.decode("utf-8"))['choices'][0] + content = msg['delta']['content'] + + if content: + yield content + + if 'finish_reason' in msg: + return diff --git a/g4f/Provider/needs_auth/gigachat/__init__.py b/g4f/Provider/needs_auth/gigachat/__init__.py new file mode 100644 index 00000000..c9853742 --- /dev/null +++ b/g4f/Provider/needs_auth/gigachat/__init__.py @@ -0,0 +1,2 @@ +from .GigaChat import GigaChat + diff --git a/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt new file mode 100644 index 00000000..4c143a21 --- /dev/null +++ b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFwjCCA6qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwcDELMAkGA1UEBhMCUlUx +PzA9BgNVBAoMNlRoZSBNaW5pc3RyeSBvZiBEaWdpdGFsIERldmVsb3BtZW50IGFu +ZCBDb21tdW5pY2F0aW9uczEgMB4GA1UEAwwXUnVzc2lhbiBUcnVzdGVkIFJvb3Qg +Q0EwHhcNMjIwMzAxMjEwNDE1WhcNMzIwMjI3MjEwNDE1WjBwMQswCQYDVQQGEwJS +VTE/MD0GA1UECgw2VGhlIE1pbmlzdHJ5IG9mIERpZ2l0YWwgRGV2ZWxvcG1lbnQg +YW5kIENvbW11bmljYXRpb25zMSAwHgYDVQQDDBdSdXNzaWFuIFRydXN0ZWQgUm9v +dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMfFOZ8pUAL3+r2n +qqE0Zp52selXsKGFYoG0GM5bwz1bSFtCt+AZQMhkWQheI3poZAToYJu69pHLKS6Q 
+XBiwBC1cvzYmUYKMYZC7jE5YhEU2bSL0mX7NaMxMDmH2/NwuOVRj8OImVa5s1F4U +zn4Kv3PFlDBjjSjXKVY9kmjUBsXQrIHeaqmUIsPIlNWUnimXS0I0abExqkbdrXbX +YwCOXhOO2pDUx3ckmJlCMUGacUTnylyQW2VsJIyIGA8V0xzdaeUXg0VZ6ZmNUr5Y +Ber/EAOLPb8NYpsAhJe2mXjMB/J9HNsoFMBFJ0lLOT/+dQvjbdRZoOT8eqJpWnVD +U+QL/qEZnz57N88OWM3rabJkRNdU/Z7x5SFIM9FrqtN8xewsiBWBI0K6XFuOBOTD +4V08o4TzJ8+Ccq5XlCUW2L48pZNCYuBDfBh7FxkB7qDgGDiaftEkZZfApRg2E+M9 +G8wkNKTPLDc4wH0FDTijhgxR3Y4PiS1HL2Zhw7bD3CbslmEGgfnnZojNkJtcLeBH +BLa52/dSwNU4WWLubaYSiAmA9IUMX1/RpfpxOxd4Ykmhz97oFbUaDJFipIggx5sX +ePAlkTdWnv+RWBxlJwMQ25oEHmRguNYf4Zr/Rxr9cS93Y+mdXIZaBEE0KS2iLRqa +OiWBki9IMQU4phqPOBAaG7A+eP8PAgMBAAGjZjBkMB0GA1UdDgQWBBTh0YHlzlpf +BKrS6badZrHF+qwshzAfBgNVHSMEGDAWgBTh0YHlzlpfBKrS6badZrHF+qwshzAS +BgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsF +AAOCAgEAALIY1wkilt/urfEVM5vKzr6utOeDWCUczmWX/RX4ljpRdgF+5fAIS4vH +tmXkqpSCOVeWUrJV9QvZn6L227ZwuE15cWi8DCDal3Ue90WgAJJZMfTshN4OI8cq +W9E4EG9wglbEtMnObHlms8F3CHmrw3k6KmUkWGoa+/ENmcVl68u/cMRl1JbW2bM+ +/3A+SAg2c6iPDlehczKx2oa95QW0SkPPWGuNA/CE8CpyANIhu9XFrj3RQ3EqeRcS +AQQod1RNuHpfETLU/A2gMmvn/w/sx7TB3W5BPs6rprOA37tutPq9u6FTZOcG1Oqj +C/B7yTqgI7rbyvox7DEXoX7rIiEqyNNUguTk/u3SZ4VXE2kmxdmSh3TQvybfbnXV +4JbCZVaqiZraqc7oZMnRoWrXRG3ztbnbes/9qhRGI7PqXqeKJBztxRTEVj8ONs1d +WN5szTwaPIvhkhO3CO5ErU2rVdUr89wKpNXbBODFKRtgxUT70YpmJ46VVaqdAhOZ +D9EUUn4YaeLaS8AjSF/h7UkjOibNc4qVDiPP+rkehFWM66PVnP1Msh93tc+taIfC +EYVMxjh8zNbFuoc7fzvvrFILLe7ifvEIUqSVIC/AzplM/Jxw7buXFeGP1qVCBEHq +391d/9RAfaZ12zkwFsl+IKwE/OZxW8AHa9i1p4GO0YSNuczzEm4= +-----END CERTIFICATE----- \ No newline at end of file diff --git a/g4f/Provider/not_working/Ai4Chat.py b/g4f/Provider/not_working/Ai4Chat.py new file mode 100644 index 00000000..584c878a --- /dev/null +++ b/g4f/Provider/not_working/Ai4Chat.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +import json +import re +import logging +from aiohttp import ClientSession + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt + + +class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin): + label = "AI4Chat" + url = "https://www.ai4chat.co" + api_endpoint = "https://www.ai4chat.co/generate-response" + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'gpt-4' + models = [default_model] + + model_aliases = {} + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "accept": "*/*", + "accept-language": "en-US,en;q=0.9", + "cache-control": "no-cache", + "content-type": "application/json", + "origin": "https://www.ai4chat.co", + "pragma": "no-cache", + "priority": "u=1, i", + "referer": "https://www.ai4chat.co/gpt/talkdirtytome", + "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"', + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": '"Linux"', + "sec-fetch-dest": "empty", + "sec-fetch-mode": "cors", + "sec-fetch-site": "same-origin", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36" + } + + async with ClientSession(headers=headers) as session: + data = { + "messages": [ + { + "role": "user", + "content": format_prompt(messages) + } + 
] + } + + try: + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + result = await response.text() + + json_result = json.loads(result) + + message = json_result.get("message", "") + + clean_message = re.sub(r'<[^>]+>', '', message) + + yield clean_message + except Exception as e: + logging.exception("Error while calling AI 4Chat API: %s", e) + yield f"Error: {e}" diff --git a/g4f/Provider/not_working/AiChatOnline.py b/g4f/Provider/not_working/AiChatOnline.py new file mode 100644 index 00000000..02574501 --- /dev/null +++ b/g4f/Provider/not_working/AiChatOnline.py @@ -0,0 +1,61 @@ +from __future__ import annotations + +import json +from aiohttp import ClientSession + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import get_random_string, format_prompt + +class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin): + site_url = "https://aichatonline.org" + url = "https://aichatonlineorg.erweima.ai" + api_endpoint = "/aichatonline/api/chat/gpt" + working = True + default_model = 'gpt-4o-mini' + + @classmethod + async def grab_token( + cls, + session: ClientSession, + proxy: str + ): + async with session.get(f'https://aichatonlineorg.erweima.ai/api/v1/user/getUniqueId?canvas=-{get_random_string()}', proxy=proxy) as response: + response.raise_for_status() + return (await response.json())['data'] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + headers = { + "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0", + "Accept-Language": "de,en-US;q=0.7,en;q=0.3", + "Accept-Encoding": "gzip, deflate, br", + "Referer": f"{cls.url}/chatgpt/chat/", + "Content-Type": "application/json", + "Origin": cls.url, + "Alt-Used": "aichatonline.org", + "Connection": "keep-alive", + "Sec-Fetch-Dest": "empty", + "Sec-Fetch-Mode": "cors", + "Sec-Fetch-Site": "same-origin", + "TE": "trailers" + } + async with ClientSession(headers=headers) as session: + data = { + "conversationId": get_random_string(), + "prompt": format_prompt(messages), + } + headers['UniqueId'] = await cls.grab_token(session, proxy) + async with session.post(f"{cls.url}{cls.api_endpoint}", headers=headers, json=data, proxy=proxy) as response: + response.raise_for_status() + async for chunk in response.content: + try: + yield json.loads(chunk)['data']['message'] + except: + continue \ No newline at end of file diff --git a/g4f/Provider/not_working/AiChats.py b/g4f/Provider/not_working/AiChats.py new file mode 100644 index 00000000..51a85c91 --- /dev/null +++ b/g4f/Provider/not_working/AiChats.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +import json +import base64 +from aiohttp import ClientSession +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...image import ImageResponse +from ..helper import format_prompt + +class AiChats(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://ai-chats.org" + api_endpoint = "https://ai-chats.org/chat/send2/" + working = False + supports_message_history = True + default_model = 'gpt-4' + models = ['gpt-4', 'dalle'] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + headers = { + "accept": "application/json, text/event-stream", + 
"accept-language": "en-US,en;q=0.9", + "cache-control": "no-cache", + "content-type": "application/json", + "origin": cls.url, + "pragma": "no-cache", + "referer": f"{cls.url}/{'image' if model == 'dalle' else 'chat'}/", + "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": '"Linux"', + "sec-fetch-dest": "empty", + "sec-fetch-mode": "cors", + "sec-fetch-site": "same-origin", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36", + 'cookie': 'muVyak=LSFNvUWqdgKkGprbDBsfieIoEMzjOQ; LSFNvUWqdgKkGprbDBsfieIoEMzjOQ=ac28831b98143847e83dbe004404e619-1725548624-1725548621; muVyak_hits=9; ai-chat-front=9d714d5dc46a6b47607c9a55e7d12a95; _csrf-front=76c23dc0a013e5d1e21baad2e6ba2b5fdab8d3d8a1d1281aa292353f8147b057a%3A2%3A%7Bi%3A0%3Bs%3A11%3A%22_csrf-front%22%3Bi%3A1%3Bs%3A32%3A%22K9lz0ezsNPMNnfpd_8gT5yEeh-55-cch%22%3B%7D', + } + + async with ClientSession(headers=headers) as session: + if model == 'dalle': + prompt = messages[-1]['content'] if messages else "" + else: + prompt = format_prompt(messages) + + data = { + "type": "image" if model == 'dalle' else "chat", + "messagesHistory": [ + { + "from": "you", + "content": prompt + } + ] + } + + try: + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + + if model == 'dalle': + response_json = await response.json() + + if 'data' in response_json and response_json['data']: + image_url = response_json['data'][0].get('url') + if image_url: + async with session.get(image_url) as img_response: + img_response.raise_for_status() + image_data = await img_response.read() + + base64_image = base64.b64encode(image_data).decode('utf-8') + base64_url = f"data:image/png;base64,{base64_image}" + yield ImageResponse(base64_url, prompt) + else: + yield f"Error: No image URL found in the response. Full response: {response_json}" + else: + yield f"Error: Unexpected response format. 
Full response: {response_json}" + else: + full_response = await response.text() + message = "" + for line in full_response.split('\n'): + if line.startswith('data: ') and line != 'data: ': + message += line[6:] + + message = message.strip() + yield message + except Exception as e: + yield f"Error occurred: {str(e)}" + + @classmethod + async def create_async( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> str: + async for response in cls.create_async_generator(model, messages, proxy, **kwargs): + if isinstance(response, ImageResponse): + return response.images[0] + return response diff --git a/g4f/Provider/not_working/AmigoChat.py b/g4f/Provider/not_working/AmigoChat.py new file mode 100644 index 00000000..274a5e14 --- /dev/null +++ b/g4f/Provider/not_working/AmigoChat.py @@ -0,0 +1,189 @@ +from __future__ import annotations + +import json +import uuid +from aiohttp import ClientSession, ClientTimeout, ClientResponseError + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt +from ...image import ImageResponse + +class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://amigochat.io/chat/" + chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions" + image_api_endpoint = "https://api.amigochat.io/v1/images/generations" + working = False + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'gpt-4o-mini' + + chat_models = [ + 'gpt-4o', + default_model, + 'o1-preview', + 'o1-mini', + 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo', + 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo', + 'claude-3-sonnet-20240229', + 'gemini-1.5-pro', + ] + + image_models = [ + 'flux-pro/v1.1', + 'flux-realism', + 'flux-pro', + 'dalle-e-3', + ] + + models = [*chat_models, *image_models] + + model_aliases = { + "o1": "o1-preview", + "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", + "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo", + "claude-3.5-sonnet": "claude-3-sonnet-20240229", + "gemini-pro": "gemini-1.5-pro", + + "flux-pro": "flux-pro/v1.1", + "dalle-3": "dalle-e-3", + } + + persona_ids = { + 'gpt-4o': "gpt", + 'gpt-4o-mini': "amigo", + 'o1-preview': "openai-o-one", + 'o1-mini': "openai-o-one-mini", + 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo': "llama-three-point-one", + 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo': "llama-3-2", + 'claude-3-sonnet-20240229': "claude", + 'gemini-1.5-pro': "gemini-1-5-pro", + 'flux-pro/v1.1': "flux-1-1-pro", + 'flux-realism': "flux-realism", + 'flux-pro': "flux-pro", + 'dalle-e-3': "dalle-three", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + def get_personaId(cls, model: str) -> str: + return cls.persona_ids[model] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + stream: bool = False, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + device_uuid = str(uuid.uuid4()) + max_retries = 3 + retry_count = 0 + + while retry_count < max_retries: + try: + headers = { + "accept": "*/*", + "accept-language": "en-US,en;q=0.9", + "authorization": "Bearer", + "cache-control": "no-cache", + "content-type": "application/json", + "origin": cls.url, + "pragma": 
"no-cache", + "priority": "u=1, i", + "referer": f"{cls.url}/", + "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"', + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": '"Linux"', + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", + "x-device-language": "en-US", + "x-device-platform": "web", + "x-device-uuid": device_uuid, + "x-device-version": "1.0.32" + } + + async with ClientSession(headers=headers) as session: + if model in cls.chat_models: + # Chat completion + data = { + "messages": [{"role": m["role"], "content": m["content"]} for m in messages], + "model": model, + "personaId": cls.get_personaId(model), + "frequency_penalty": 0, + "max_tokens": 4000, + "presence_penalty": 0, + "stream": stream, + "temperature": 0.5, + "top_p": 0.95 + } + + timeout = ClientTimeout(total=300) # 5 minutes timeout + async with session.post(cls.chat_api_endpoint, json=data, proxy=proxy, timeout=timeout) as response: + if response.status not in (200, 201): + error_text = await response.text() + raise Exception(f"Error {response.status}: {error_text}") + + async for line in response.content: + line = line.decode('utf-8').strip() + if line.startswith('data: '): + if line == 'data: [DONE]': + break + try: + chunk = json.loads(line[6:]) # Remove 'data: ' prefix + if 'choices' in chunk and len(chunk['choices']) > 0: + choice = chunk['choices'][0] + if 'delta' in choice: + content = choice['delta'].get('content') + elif 'text' in choice: + content = choice['text'] + else: + content = None + if content: + yield content + except json.JSONDecodeError: + pass + else: + # Image generation + prompt = messages[-1]['content'] + data = { + "prompt": prompt, + "model": model, + "personaId": cls.get_personaId(model) + } + async with session.post(cls.image_api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + + response_data = await response.json() + + if "data" in response_data: + image_urls = [] + for item in response_data["data"]: + if "url" in item: + image_url = item["url"] + image_urls.append(image_url) + if image_urls: + yield ImageResponse(image_urls, prompt) + else: + yield None + + break + + except (ClientResponseError, Exception) as e: + retry_count += 1 + if retry_count >= max_retries: + raise e + device_uuid = str(uuid.uuid4()) diff --git a/g4f/Provider/not_working/Aura.py b/g4f/Provider/not_working/Aura.py new file mode 100644 index 00000000..e841d909 --- /dev/null +++ b/g4f/Provider/not_working/Aura.py @@ -0,0 +1,49 @@ +from __future__ import annotations + +from aiohttp import ClientSession + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider +from ...requests import get_args_from_browser +from ...webdriver import WebDriver + +class Aura(AsyncGeneratorProvider): + url = "https://openchat.team" + working = False + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + temperature: float = 0.5, + max_tokens: int = 8192, + webdriver: WebDriver = None, + **kwargs + ) -> AsyncResult: + args = get_args_from_browser(cls.url, webdriver, proxy) + async with ClientSession(**args) as session: + new_messages = [] + system_message = [] + for message in messages: + if message["role"] == "system": + system_message.append(message["content"]) + else: + new_messages.append(message) + data = { + "model": { + "id": "openchat_3.6", + "name": "OpenChat 3.6 (latest)", + "maxLength": 24576, + "tokenLimit": max_tokens + }, 
+ "messages": new_messages, + "key": "", + "prompt": "\n".join(system_message), + "temperature": temperature + } + async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response: + response.raise_for_status() + async for chunk in response.content.iter_any(): + yield chunk.decode(error="ignore") diff --git a/g4f/Provider/not_working/Chatgpt4o.py b/g4f/Provider/not_working/Chatgpt4o.py new file mode 100644 index 00000000..61ccaa16 --- /dev/null +++ b/g4f/Provider/not_working/Chatgpt4o.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +import re +from ...requests import StreamSession, raise_for_status +from ...typing import Messages +from ..base_provider import AsyncProvider, ProviderModelMixin +from ..helper import format_prompt + + +class Chatgpt4o(AsyncProvider, ProviderModelMixin): + url = "https://chatgpt4o.one" + working = True + _post_id = None + _nonce = None + default_model = 'gpt-4o-mini-2024-07-18' + models = [ + 'gpt-4o-mini-2024-07-18', + ] + model_aliases = { + "gpt-4o-mini": "gpt-4o-mini-2024-07-18", + } + + + @classmethod + async def create_async( + cls, + model: str, + messages: Messages, + proxy: str = None, + timeout: int = 120, + cookies: dict = None, + **kwargs + ) -> str: + headers = { + 'authority': 'chatgpt4o.one', + 'accept': '*/*', + 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', + 'origin': 'https://chatgpt4o.one', + 'referer': 'https://chatgpt4o.one', + 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"macOS"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-origin', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36', + } + + async with StreamSession( + headers=headers, + cookies=cookies, + impersonate="chrome", + proxies={"all": proxy}, + timeout=timeout + ) as session: + + if not cls._post_id or not cls._nonce: + async with session.get(f"{cls.url}/") as response: + await raise_for_status(response) + response_text = await response.text() + + post_id_match = re.search(r'data-post-id="([0-9]+)"', response_text) + nonce_match = re.search(r'data-nonce="(.*?)"', response_text) + + if not post_id_match: + raise RuntimeError("No post ID found") + cls._post_id = post_id_match.group(1) + + if not nonce_match: + raise RuntimeError("No nonce found") + cls._nonce = nonce_match.group(1) + + prompt = format_prompt(messages) + data = { + "_wpnonce": cls._nonce, + "post_id": cls._post_id, + "url": cls.url, + "action": "wpaicg_chat_shortcode_message", + "message": prompt, + "bot_id": "0" + } + + async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response: + await raise_for_status(response) + response_json = await response.json() + if "data" not in response_json: + raise RuntimeError("Unexpected response structure: 'data' field missing") + return response_json["data"] diff --git a/g4f/Provider/not_working/ChatgptFree.py b/g4f/Provider/not_working/ChatgptFree.py new file mode 100644 index 00000000..6b3877b1 --- /dev/null +++ b/g4f/Provider/not_working/ChatgptFree.py @@ -0,0 +1,106 @@ +from __future__ import annotations + +import re +import json +import asyncio +from ...requests import StreamSession, raise_for_status +from ...typing import Messages, AsyncGenerator +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt + +class 
ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://chatgptfree.ai" + working = False + _post_id = None + _nonce = None + default_model = 'gpt-4o-mini-2024-07-18' + models = [default_model] + model_aliases = { + "gpt-4o-mini": "gpt-4o-mini-2024-07-18", + } + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + timeout: int = 120, + cookies: dict = None, + **kwargs + ) -> AsyncGenerator[str, None]: + headers = { + 'authority': 'chatgptfree.ai', + 'accept': '*/*', + 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', + 'origin': 'https://chatgptfree.ai', + 'referer': 'https://chatgptfree.ai/chat/', + 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"macOS"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-origin', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36', + } + + async with StreamSession( + headers=headers, + cookies=cookies, + impersonate="chrome", + proxies={"all": proxy}, + timeout=timeout + ) as session: + + if not cls._nonce: + async with session.get(f"{cls.url}/") as response: + await raise_for_status(response) + response = await response.text() + + result = re.search(r'data-post-id="([0-9]+)"', response) + if not result: + raise RuntimeError("No post id found") + cls._post_id = result.group(1) + + result = re.search(r'data-nonce="(.*?)"', response) + if result: + cls._nonce = result.group(1) + else: + raise RuntimeError("No nonce found") + + prompt = format_prompt(messages) + data = { + "_wpnonce": cls._nonce, + "post_id": cls._post_id, + "url": cls.url, + "action": "wpaicg_chat_shortcode_message", + "message": prompt, + "bot_id": "0" + } + + async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response: + await raise_for_status(response) + buffer = "" + async for line in response.iter_lines(): + line = line.decode('utf-8').strip() + if line.startswith('data: '): + data = line[6:] + if data == '[DONE]': + break + try: + json_data = json.loads(data) + content = json_data['choices'][0]['delta'].get('content', '') + if content: + yield content + except json.JSONDecodeError: + continue + elif line: + buffer += line + + if buffer: + try: + json_response = json.loads(buffer) + if 'data' in json_response: + yield json_response['data'] + except json.JSONDecodeError: + print(f"Failed to decode final JSON. 
Buffer content: {buffer}") diff --git a/g4f/Provider/not_working/FlowGpt.py b/g4f/Provider/not_working/FlowGpt.py new file mode 100644 index 00000000..b7d8537a --- /dev/null +++ b/g4f/Provider/not_working/FlowGpt.py @@ -0,0 +1,100 @@ +from __future__ import annotations + +import json +import time +import hashlib +from aiohttp import ClientSession + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import get_random_hex, get_random_string +from ...requests.raise_for_status import raise_for_status + +class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://flowgpt.com/chat" + working = False + supports_message_history = True + supports_system_message = True + default_model = "gpt-3.5-turbo" + models = [ + "gpt-3.5-turbo", + "gpt-3.5-long", + "gpt-4-turbo", + "google-gemini", + "claude-instant", + "claude-v1", + "claude-v2", + "llama2-13b", + "mythalion-13b", + "pygmalion-13b", + "chronos-hermes-13b", + "Mixtral-8x7B", + "Dolphin-2.6-8x7B", + ] + model_aliases = { + "gemini": "google-gemini", + "gemini-pro": "google-gemini" + } + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + temperature: float = 0.7, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + timestamp = str(int(time.time())) + auth = "Bearer null" + nonce = get_random_hex() + data = f"{timestamp}-{nonce}-{auth}" + signature = hashlib.md5(data.encode()).hexdigest() + + headers = { + "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0", + "Accept": "*/*", + "Accept-Language": "en-US;q=0.7,en;q=0.3", + "Accept-Encoding": "gzip, deflate, br", + "Referer": "https://flowgpt.com/", + "Content-Type": "application/json", + "Origin": "https://flowgpt.com", + "Connection": "keep-alive", + "Sec-Fetch-Dest": "empty", + "Sec-Fetch-Mode": "cors", + "Sec-Fetch-Site": "same-site", + "TE": "trailers", + "Authorization": auth, + "x-flow-device-id": f"f-{get_random_string(19)}", + "x-nonce": nonce, + "x-signature": signature, + "x-timestamp": timestamp + } + async with ClientSession(headers=headers) as session: + history = [message for message in messages[:-1] if message["role"] != "system"] + system_message = "\n".join([message["content"] for message in messages if message["role"] == "system"]) + if not system_message: + system_message = "You are a helpful assistant. Follow the user's instructions carefully."
+ data = { + "model": model, + "nsfw": False, + "question": messages[-1]["content"], + "history": [{"role": "assistant", "content": "Hello, how can I help you today?"}, *history], + "system": system_message, + "temperature": temperature, + "promptId": f"model-{model}", + "documentIds": [], + "chatFileDocumentIds": [], + "generateImage": False, + "generateAudio": False + } + async with session.post("https://prod-backend-k8s.flowgpt.com/v3/chat-anonymous", json=data, proxy=proxy) as response: + await raise_for_status(response) + async for chunk in response.content: + if chunk.strip(): + message = json.loads(chunk) + if "event" not in message: + continue + if message["event"] == "text": + yield message["data"] diff --git a/g4f/Provider/not_working/FreeNetfly.py b/g4f/Provider/not_working/FreeNetfly.py new file mode 100644 index 00000000..8362019c --- /dev/null +++ b/g4f/Provider/not_working/FreeNetfly.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +import json +import asyncio +from aiohttp import ClientSession, ClientTimeout, ClientError +from typing import AsyncGenerator + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin + + +class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://free.netfly.top" + api_endpoint = "/api/openai/v1/chat/completions" + working = False + default_model = 'gpt-3.5-turbo' + models = [ + 'gpt-3.5-turbo', + 'gpt-4', + ] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + headers = { + "accept": "application/json, text/event-stream", + "accept-language": "en-US,en;q=0.9", + "content-type": "application/json", + "dnt": "1", + "origin": cls.url, + "referer": f"{cls.url}/", + "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"', + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": '"Linux"', + "sec-fetch-dest": "empty", + "sec-fetch-mode": "cors", + "sec-fetch-site": "same-origin", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36", + } + data = { + "messages": messages, + "stream": True, + "model": model, + "temperature": 0.5, + "presence_penalty": 0, + "frequency_penalty": 0, + "top_p": 1 + } + + max_retries = 5 + retry_delay = 2 + + for attempt in range(max_retries): + try: + async with ClientSession(headers=headers) as session: + timeout = ClientTimeout(total=60) + async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy, timeout=timeout) as response: + response.raise_for_status() + async for chunk in cls._process_response(response): + yield chunk + return # If successful, exit the function + except (ClientError, asyncio.TimeoutError) as e: + if attempt == max_retries - 1: + raise # If all retries failed, raise the last exception + await asyncio.sleep(retry_delay) + retry_delay *= 2 # Exponential backoff + + @classmethod + async def _process_response(cls, response) -> AsyncGenerator[str, None]: + buffer = "" + async for line in response.content: + buffer += line.decode('utf-8') + if buffer.endswith('\n\n'): + for subline in buffer.strip().split('\n'): + if subline.startswith('data: '): + if subline == 'data: [DONE]': + return + try: + data = json.loads(subline[6:]) + content = data['choices'][0]['delta'].get('content') + if content: + yield content + except json.JSONDecodeError: + print(f"Failed to parse JSON: {subline}") + except KeyError: + print(f"Unexpected JSON structure: 
{data}") + buffer = "" + + # Process any remaining data in the buffer + if buffer: + for subline in buffer.strip().split('\n'): + if subline.startswith('data: ') and subline != 'data: [DONE]': + try: + data = json.loads(subline[6:]) + content = data['choices'][0]['delta'].get('content') + if content: + yield content + except (json.JSONDecodeError, KeyError): + pass + diff --git a/g4f/Provider/not_working/GPROChat.py b/g4f/Provider/not_working/GPROChat.py new file mode 100644 index 00000000..88cb2c03 --- /dev/null +++ b/g4f/Provider/not_working/GPROChat.py @@ -0,0 +1,67 @@ +from __future__ import annotations +import hashlib +import time +from aiohttp import ClientSession +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt + +class GPROChat(AsyncGeneratorProvider, ProviderModelMixin): + label = "GPROChat" + url = "https://gprochat.com" + api_endpoint = "https://gprochat.com/api/generate" + working = True + supports_stream = True + supports_message_history = True + default_model = 'gemini-pro' + + @staticmethod + def generate_signature(timestamp: int, message: str) -> str: + secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8" + hash_input = f"{timestamp}:{message}:{secret_key}" + signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest() + return signature + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + timestamp = int(time.time() * 1000) + prompt = format_prompt(messages) + sign = cls.generate_signature(timestamp, prompt) + + headers = { + "accept": "*/*", + "origin": cls.url, + "referer": f"{cls.url}/", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", + "content-type": "text/plain;charset=UTF-8" + } + + data = { + "messages": [{"role": "user", "parts": [{"text": prompt}]}], + "time": timestamp, + "pass": None, + "sign": sign + } + + async with ClientSession(headers=headers) as session: + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + async for chunk in response.content.iter_any(): + if chunk: + yield chunk.decode() diff --git a/g4f/Provider/not_working/Koala.py b/g4f/Provider/not_working/Koala.py new file mode 100644 index 00000000..d6230da7 --- /dev/null +++ b/g4f/Provider/not_working/Koala.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +import json +from typing import AsyncGenerator, Optional, List, Dict, Union, Any +from aiohttp import ClientSession, BaseConnector, ClientResponse + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import get_random_string, get_connector +from ...requests import raise_for_status + +class Koala(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://koala.sh/chat" + api_endpoint = "https://koala.sh/api/gpt/" + working = False + supports_message_history = True + default_model = 'gpt-4o-mini' + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: Optional[str] = None, + connector: Optional[BaseConnector] = None, + **kwargs: Any + ) -> 
AsyncGenerator[Dict[str, Union[str, int, float, List[Dict[str, Any]], None]], None]: + if not model: + model = "gpt-4o-mini" + + headers = { + "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0", + "Accept": "text/event-stream", + "Accept-Language": "de,en-US;q=0.7,en;q=0.3", + "Accept-Encoding": "gzip, deflate, br", + "Referer": f"{cls.url}", + "Flag-Real-Time-Data": "false", + "Visitor-ID": get_random_string(20), + "Origin": "https://koala.sh", + "Alt-Used": "koala.sh", + "Sec-Fetch-Dest": "empty", + "Sec-Fetch-Mode": "cors", + "Sec-Fetch-Site": "same-origin", + "TE": "trailers", + } + + async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session: + input_text = messages[-1]["content"] + system_messages = " ".join( + message["content"] for message in messages if message["role"] == "system" + ) + if system_messages: + input_text += f" {system_messages}" + + data = { + "input": input_text, + "inputHistory": [ + message["content"] + for message in messages[:-1] + if message["role"] == "user" + ], + "outputHistory": [ + message["content"] + for message in messages + if message["role"] == "assistant" + ], + "model": model, + } + + async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response: + await raise_for_status(response) + async for chunk in cls._parse_event_stream(response): + yield chunk + + @staticmethod + async def _parse_event_stream(response: ClientResponse) -> AsyncGenerator[Dict[str, Any], None]: + async for chunk in response.content: + if chunk.startswith(b"data: "): + yield json.loads(chunk[6:]) diff --git a/g4f/Provider/not_working/MyShell.py b/g4f/Provider/not_working/MyShell.py new file mode 100644 index 00000000..02e182d4 --- /dev/null +++ b/g4f/Provider/not_working/MyShell.py @@ -0,0 +1,76 @@ +from __future__ import annotations + +import time, json + +from ...typing import CreateResult, Messages +from ..base_provider import AbstractProvider +from ..helper import format_prompt +from ...webdriver import WebDriver, WebDriverSession, bypass_cloudflare + +class MyShell(AbstractProvider): + url = "https://app.myshell.ai/chat" + working = False + supports_gpt_35_turbo = True + supports_stream = True + + @classmethod + def create_completion( + cls, + model: str, + messages: Messages, + stream: bool, + proxy: str = None, + timeout: int = 120, + webdriver: WebDriver = None, + **kwargs + ) -> CreateResult: + with WebDriverSession(webdriver, "", proxy=proxy) as driver: + bypass_cloudflare(driver, cls.url, timeout) + + # Send request with message + data = { + "botId": "4738", + "conversation_scenario": 3, + "message": format_prompt(messages), + "messageType": 1 + } + script = """ +response = await fetch("https://api.myshell.ai/v1/bot/chat/send_message", { + "headers": { + "accept": "application/json", + "content-type": "application/json", + "myshell-service-name": "organics-api", + "visitor-id": localStorage.getItem("mix_visitorId") + }, + "body": '{body}', + "method": "POST" +}) +window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader(); +""" + driver.execute_script(script.replace("{body}", json.dumps(data))) + script = """ +chunk = await window._reader.read(); +if (chunk.done) { + return null; +} +content = ''; +chunk.value.split('\\n').forEach((line, index) => { + if (line.startsWith('data: ')) { + try { + const data = JSON.parse(line.substring('data: '.length)); + if ('content' in data) { + content += data['content']; + } + } catch(e) {} + } +}); +return 
content; +""" + while True: + chunk = driver.execute_script(script) + if chunk: + yield chunk + elif chunk != "": + break + else: + time.sleep(0.1) diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py new file mode 100644 index 00000000..52c5c538 --- /dev/null +++ b/g4f/Provider/not_working/__init__.py @@ -0,0 +1,12 @@ +from .Ai4Chat import Ai4Chat +from .AiChatOnline import AiChatOnline +from .AiChats import AiChats +from .AmigoChat import AmigoChat +from .Aura import Aura +from .Chatgpt4o import Chatgpt4o +from .ChatgptFree import ChatgptFree +from .FlowGpt import FlowGpt +from .FreeNetfly import FreeNetfly +from .GPROChat import GPROChat +from .Koala import Koala +from .MyShell import MyShell diff --git a/g4f/Provider/selenium/MyShell.py b/g4f/Provider/selenium/MyShell.py deleted file mode 100644 index 02e182d4..00000000 --- a/g4f/Provider/selenium/MyShell.py +++ /dev/null @@ -1,76 +0,0 @@ -from __future__ import annotations - -import time, json - -from ...typing import CreateResult, Messages -from ..base_provider import AbstractProvider -from ..helper import format_prompt -from ...webdriver import WebDriver, WebDriverSession, bypass_cloudflare - -class MyShell(AbstractProvider): - url = "https://app.myshell.ai/chat" - working = False - supports_gpt_35_turbo = True - supports_stream = True - - @classmethod - def create_completion( - cls, - model: str, - messages: Messages, - stream: bool, - proxy: str = None, - timeout: int = 120, - webdriver: WebDriver = None, - **kwargs - ) -> CreateResult: - with WebDriverSession(webdriver, "", proxy=proxy) as driver: - bypass_cloudflare(driver, cls.url, timeout) - - # Send request with message - data = { - "botId": "4738", - "conversation_scenario": 3, - "message": format_prompt(messages), - "messageType": 1 - } - script = """ -response = await fetch("https://api.myshell.ai/v1/bot/chat/send_message", { - "headers": { - "accept": "application/json", - "content-type": "application/json", - "myshell-service-name": "organics-api", - "visitor-id": localStorage.getItem("mix_visitorId") - }, - "body": '{body}', - "method": "POST" -}) -window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader(); -""" - driver.execute_script(script.replace("{body}", json.dumps(data))) - script = """ -chunk = await window._reader.read(); -if (chunk.done) { - return null; -} -content = ''; -chunk.value.split('\\n').forEach((line, index) => { - if (line.startsWith('data: ')) { - try { - const data = JSON.parse(line.substring('data: '.length)); - if ('content' in data) { - content += data['content']; - } - } catch(e) {} - } -}); -return content; -""" - while True: - chunk = driver.execute_script(script) - if chunk: - yield chunk - elif chunk != "": - break - else: - time.sleep(0.1) diff --git a/g4f/Provider/selenium/__init__.py b/g4f/Provider/selenium/__init__.py index 3a59ea58..44adf5fb 100644 --- a/g4f/Provider/selenium/__init__.py +++ b/g4f/Provider/selenium/__init__.py @@ -1,4 +1,3 @@ -from .MyShell import MyShell from .PerplexityAi import PerplexityAi from .Phind import Phind from .TalkAi import TalkAi -- cgit v1.2.3