Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/AIUncensored.py  132
-rw-r--r--  g4f/Provider/Airforce.py  299
-rw-r--r--  g4f/Provider/Allyfy.py  71
-rw-r--r--  g4f/Provider/AmigoChat.py  176
-rw-r--r--  g4f/Provider/Bing.py  3
-rw-r--r--  g4f/Provider/Binjie.py  65
-rw-r--r--  g4f/Provider/Bixin123.py  94
-rw-r--r--  g4f/Provider/Blackbox.py  290
-rw-r--r--  g4f/Provider/ChatGot.py  75
-rw-r--r--  g4f/Provider/ChatGpt.py  120
-rw-r--r--  g4f/Provider/ChatGptEs.py  3
-rw-r--r--  g4f/Provider/ChatHub.py  84
-rw-r--r--  g4f/Provider/Cloudflare.py  97
-rw-r--r--  g4f/Provider/Copilot.py  156
-rw-r--r--  g4f/Provider/DDG.py  109
-rw-r--r--  g4f/Provider/DarkAI.py  83
-rw-r--r--  g4f/Provider/DeepInfraChat.py  59
-rw-r--r--  g4f/Provider/Free2GPT.py  8
-rw-r--r--  g4f/Provider/FreeChatgpt.py  96
-rw-r--r--  g4f/Provider/FreeGpt.py  2
-rw-r--r--  g4f/Provider/GizAI.py  76
-rw-r--r--  g4f/Provider/HuggingChat.py  103
-rw-r--r--  g4f/Provider/Liaobots.py  28
-rw-r--r--  g4f/Provider/LiteIcoding.py  130
-rw-r--r--  g4f/Provider/MagickPen.py  8
-rw-r--r--  g4f/Provider/Mhystical.py  90
-rw-r--r--  g4f/Provider/Nexra.py  118
-rw-r--r--  g4f/Provider/PerplexityLabs.py  2
-rw-r--r--  g4f/Provider/Pi.py  81
-rw-r--r--  g4f/Provider/Pizzagpt.py  1
-rw-r--r--  g4f/Provider/Prodia.py  23
-rw-r--r--  g4f/Provider/ReplicateHome.py  32
-rw-r--r--  g4f/Provider/RubiksAI.py  162
-rw-r--r--  g4f/Provider/TeachAnything.py  22
-rw-r--r--  g4f/Provider/Upstage.py  34
-rw-r--r--  g4f/Provider/You.py  2
-rw-r--r--  g4f/Provider/__init__.py  48
-rw-r--r--  g4f/Provider/airforce/AirforceChat.py  174
-rw-r--r--  g4f/Provider/airforce/AirforceImage.py  83
-rw-r--r--  g4f/Provider/airforce/__init__.py  2
-rw-r--r--  g4f/Provider/bing/create_images.py  2
-rw-r--r--  g4f/Provider/deprecated/__init__.py  3
-rw-r--r--  g4f/Provider/local/Local.py (renamed from g4f/Provider/Local.py)  12
-rw-r--r--  g4f/Provider/local/Ollama.py (renamed from g4f/Provider/Ollama.py)  21
-rw-r--r--  g4f/Provider/local/__init__.py  2
-rw-r--r--  g4f/Provider/needs_auth/BingCreateImages.py (renamed from g4f/Provider/BingCreateImages.py)  12
-rw-r--r--  g4f/Provider/needs_auth/CopilotAccount.py  8
-rw-r--r--  g4f/Provider/needs_auth/DeepInfra.py (renamed from g4f/Provider/DeepInfra.py)  8
-rw-r--r--  g4f/Provider/needs_auth/DeepInfraImage.py (renamed from g4f/Provider/DeepInfraImage.py)  8
-rw-r--r--  g4f/Provider/needs_auth/Gemini.py  81
-rw-r--r--  g4f/Provider/needs_auth/GeminiPro.py (renamed from g4f/Provider/GeminiPro.py)  20
-rw-r--r--  g4f/Provider/needs_auth/Groq.py  27
-rw-r--r--  g4f/Provider/needs_auth/HuggingFace.py (renamed from g4f/Provider/HuggingFace.py)  35
-rw-r--r--  g4f/Provider/needs_auth/MetaAI.py (renamed from g4f/Provider/MetaAI.py)  15
-rw-r--r--  g4f/Provider/needs_auth/MetaAIAccount.py (renamed from g4f/Provider/MetaAIAccount.py)  6
-rw-r--r--  g4f/Provider/needs_auth/OpenRouter.py  32
-rw-r--r--  g4f/Provider/needs_auth/OpenaiAPI.py (renamed from g4f/Provider/needs_auth/Openai.py)  2
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py  360
-rw-r--r--  g4f/Provider/needs_auth/PerplexityApi.py  4
-rw-r--r--  g4f/Provider/needs_auth/Poe.py  1
-rw-r--r--  g4f/Provider/needs_auth/Raycast.py  8
-rw-r--r--  g4f/Provider/needs_auth/Replicate.py (renamed from g4f/Provider/Replicate.py)  14
-rw-r--r--  g4f/Provider/needs_auth/Theb.py  1
-rw-r--r--  g4f/Provider/needs_auth/ThebApi.py  6
-rw-r--r--  g4f/Provider/needs_auth/WhiteRabbitNeo.py (renamed from g4f/Provider/WhiteRabbitNeo.py)  10
-rw-r--r--  g4f/Provider/needs_auth/__init__.py  32
-rw-r--r--  g4f/Provider/needs_auth/gigachat/GigaChat.py (renamed from g4f/Provider/GigaChat.py)  10
-rw-r--r--  g4f/Provider/needs_auth/gigachat/__init__.py  2
-rw-r--r--  g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt (renamed from g4f/Provider/gigachat_crt/russian_trusted_root_ca_pem.crt)  0
-rw-r--r--  g4f/Provider/nexra/NexraBing.py  82
-rw-r--r--  g4f/Provider/nexra/NexraChatGPT.py  66
-rw-r--r--  g4f/Provider/nexra/NexraChatGPT4o.py  52
-rw-r--r--  g4f/Provider/nexra/NexraChatGPTWeb.py  53
-rw-r--r--  g4f/Provider/nexra/NexraGeminiPro.py  52
-rw-r--r--  g4f/Provider/nexra/NexraImageURL.py  46
-rw-r--r--  g4f/Provider/nexra/NexraLlama.py  52
-rw-r--r--  g4f/Provider/nexra/NexraQwen.py  52
-rw-r--r--  g4f/Provider/nexra/__init__.py  1
-rw-r--r--  g4f/Provider/not_working/AI365VIP.py (renamed from g4f/Provider/AI365VIP.py)  10
-rw-r--r--  g4f/Provider/not_working/AIChatFree.py (renamed from g4f/Provider/AIChatFree.py)  12
-rw-r--r--  g4f/Provider/not_working/Ai4Chat.py  89
-rw-r--r--  g4f/Provider/not_working/AiChatOnline.py (renamed from g4f/Provider/AiChatOnline.py)  9
-rw-r--r--  g4f/Provider/not_working/AiChats.py (renamed from g4f/Provider/AiChats.py)  11
-rw-r--r--  g4f/Provider/not_working/Allyfy.py  87
-rw-r--r--  g4f/Provider/not_working/Aura.py (renamed from g4f/Provider/Aura.py)  8
-rw-r--r--  g4f/Provider/not_working/Chatgpt4Online.py (renamed from g4f/Provider/Chatgpt4Online.py)  14
-rw-r--r--  g4f/Provider/not_working/Chatgpt4o.py (renamed from g4f/Provider/Chatgpt4o.py)  11
-rw-r--r--  g4f/Provider/not_working/ChatgptFree.py (renamed from g4f/Provider/ChatgptFree.py)  12
-rw-r--r--  g4f/Provider/not_working/FlowGpt.py (renamed from g4f/Provider/FlowGpt.py)  9
-rw-r--r--  g4f/Provider/not_working/FreeNetfly.py (renamed from g4f/Provider/FreeNetfly.py)  8
-rw-r--r--  g4f/Provider/not_working/GPROChat.py (renamed from g4f/Provider/GPROChat.py)  8
-rw-r--r--  g4f/Provider/not_working/Koala.py (renamed from g4f/Provider/Koala.py)  11
-rw-r--r--  g4f/Provider/not_working/MyShell.py (renamed from g4f/Provider/selenium/MyShell.py)  0
-rw-r--r--  g4f/Provider/not_working/__init__.py  13
-rw-r--r--  g4f/Provider/openai/har_file.py  74
-rw-r--r--  g4f/Provider/openai/proofofwork.py  13
-rw-r--r--  g4f/Provider/selenium/__init__.py  1
-rw-r--r--  g4f/Provider/you/har_file.py  8
98 files changed, 2521 insertions, 2326 deletions
diff --git a/g4f/Provider/AIUncensored.py b/g4f/Provider/AIUncensored.py
new file mode 100644
index 00000000..c2f0f4b3
--- /dev/null
+++ b/g4f/Provider/AIUncensored.py
@@ -0,0 +1,132 @@
+from __future__ import annotations
+
+import json
+import random
+from aiohttp import ClientSession, ClientError
+import asyncio
+from itertools import cycle
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse
+
+class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.aiuncensored.info/ai_uncensored"
+ api_endpoints_text = [
+ "https://twitterclone-i0wr.onrender.com/api/chat",
+ "https://twitterclone-4e8t.onrender.com/api/chat",
+ "https://twitterclone-8wd1.onrender.com/api/chat",
+ ]
+ api_endpoints_image = [
+ "https://twitterclone-4e8t.onrender.com/api/image",
+ "https://twitterclone-i0wr.onrender.com/api/image",
+ "https://twitterclone-8wd1.onrender.com/api/image",
+ ]
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'TextGenerations'
+ text_models = [default_model]
+ image_models = ['ImageGenerations']
+ models = [*text_models, *image_models]
+
+ model_aliases = {
+ "flux": "ImageGenerations",
+ }
+
+ @staticmethod
+ def generate_cipher() -> str:
+ """Generate a cipher in format like '3221229284179118'"""
+ return ''.join([str(random.randint(0, 9)) for _ in range(16)])
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': 'https://www.aiuncensored.info',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://www.aiuncensored.info/',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ if model in cls.image_models:
+ prompt = messages[-1]['content']
+ data = {
+ "prompt": prompt,
+ "cipher": cls.generate_cipher()
+ }
+
+ endpoints = cycle(cls.api_endpoints_image)
+
+ while True:
+ endpoint = next(endpoints)
+ try:
+ async with session.post(endpoint, json=data, proxy=proxy, timeout=10) as response:
+ response.raise_for_status()
+ response_data = await response.json()
+ image_url = response_data['image_url']
+ image_response = ImageResponse(images=image_url, alt=prompt)
+ yield image_response
+ return
+ except (ClientError, asyncio.TimeoutError):
+ continue
+
+ elif model in cls.text_models:
+ data = {
+ "messages": messages,
+ "cipher": cls.generate_cipher()
+ }
+
+ endpoints = cycle(cls.api_endpoints_text)
+
+ while True:
+ endpoint = next(endpoints)
+ try:
+ async with session.post(endpoint, json=data, proxy=proxy, timeout=10) as response:
+ response.raise_for_status()
+ full_response = ""
+ async for line in response.content:
+ line = line.decode('utf-8')
+ if line.startswith("data: "):
+ try:
+ json_str = line[6:]
+ if json_str != "[DONE]":
+ data = json.loads(json_str)
+ if "data" in data:
+ full_response += data["data"]
+ yield data["data"]
+ except json.JSONDecodeError:
+ continue
+ return
+ except (ClientError, asyncio.TimeoutError):
+ continue
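
The provider above cycles through its three mirror endpoints and retries on any connection error or timeout. A minimal standalone sketch of that failover pattern, with a placeholder payload and endpoint list rather than the provider's own:

import asyncio
from itertools import cycle
from aiohttp import ClientError, ClientSession, ClientTimeout

async def post_with_failover(payload: dict, endpoints: list[str], timeout: int = 10) -> dict:
    pool = cycle(endpoints)
    async with ClientSession() as session:
        # One full pass over the mirrors, then give up (the provider's own
        # loop is unbounded and would keep retrying forever).
        for _ in range(len(endpoints)):
            endpoint = next(pool)
            try:
                async with session.post(
                    endpoint, json=payload, timeout=ClientTimeout(total=timeout)
                ) as response:
                    response.raise_for_status()
                    return await response.json()
            except (ClientError, asyncio.TimeoutError):
                continue
    raise RuntimeError("all endpoints failed")

Bounding the attempts to one full pass is a deliberate deviation: the provider's `while True` loop never gives up if every mirror is down.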
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 51f8ba55..6254e160 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -1,76 +1,30 @@
from __future__ import annotations
-from aiohttp import ClientSession, ClientResponseError
+import random
import json
+import re
+
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse
-from .helper import format_prompt
-from ..errors import ResponseStatusError
+from ..requests import StreamSession, raise_for_status
+from .airforce.AirforceChat import AirforceChat
+from .airforce.AirforceImage import AirforceImage
class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://api.airforce"
- text_api_endpoint = "https://api.airforce/chat/completions"
- image_api_endpoint = "https://api.airforce/imagine2"
+ api_endpoint_completions = AirforceChat.api_endpoint
+ api_endpoint_imagine = AirforceImage.api_endpoint
working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
- supports_stream = True
+ default_model = "gpt-4o-mini"
supports_system_message = True
supports_message_history = True
- default_model = 'llama-3-70b-chat'
text_models = [
- # Open source models
- 'llama-2-13b-chat',
- 'llama-3-70b-chat',
- 'llama-3-70b-chat-turbo',
- 'llama-3-70b-chat-lite',
- 'llama-3-8b-chat',
- 'llama-3-8b-chat-turbo',
- 'llama-3-8b-chat-lite',
- 'llama-3.1-405b-turbo',
+ 'gpt-4-turbo',
+ default_model,
'llama-3.1-70b-turbo',
'llama-3.1-8b-turbo',
- 'LlamaGuard-2-8b',
- 'Llama-Guard-7b',
- 'Meta-Llama-Guard-3-8B',
- 'Mixtral-8x7B-Instruct-v0.1',
- 'Mixtral-8x22B-Instruct-v0.1',
- 'Mistral-7B-Instruct-v0.1',
- 'Mistral-7B-Instruct-v0.2',
- 'Mistral-7B-Instruct-v0.3',
- 'Qwen1.5-72B-Chat',
- 'Qwen1.5-110B-Chat',
- 'Qwen2-72B-Instruct',
- 'gemma-2b-it',
- 'gemma-2-9b-it',
- 'gemma-2-27b-it',
- 'dbrx-instruct',
- 'deepseek-llm-67b-chat',
- 'Nous-Hermes-2-Mixtral-8x7B-DPO',
- 'Nous-Hermes-2-Yi-34B',
- 'WizardLM-2-8x22B',
- 'SOLAR-10.7B-Instruct-v1.0',
- 'StripedHyena-Nous-7B',
- 'sparkdesk',
-
- # Other models
- 'chatgpt-4o-latest',
- 'gpt-4',
- 'gpt-4-turbo',
- 'gpt-4o-mini-2024-07-18',
- 'gpt-4o-mini',
- 'gpt-4o',
- 'gpt-3.5-turbo',
- 'gpt-3.5-turbo-0125',
- 'gpt-3.5-turbo-1106',
- 'gpt-3.5-turbo-16k',
- 'gpt-3.5-turbo-0613',
- 'gpt-3.5-turbo-16k-0613',
- 'gemini-1.5-flash',
- 'gemini-1.5-pro',
]
-
image_models = [
'flux',
'flux-realism',
@@ -80,163 +34,138 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
'flux-pixel',
'flux-4o',
'any-dark',
- 'dall-e-3',
]
-
models = [
*text_models,
- *image_models
+ *image_models,
]
model_aliases = {
- # Open source models
- "llama-2-13b": "llama-2-13b-chat",
- "llama-3-70b": "llama-3-70b-chat",
- "llama-3-70b": "llama-3-70b-chat-turbo",
- "llama-3-70b": "llama-3-70b-chat-lite",
- "llama-3-8b": "llama-3-8b-chat",
- "llama-3-8b": "llama-3-8b-chat-turbo",
- "llama-3-8b": "llama-3-8b-chat-lite",
- "llama-3.1-405b": "llama-3.1-405b-turbo",
+ "gpt-4o": "chatgpt-4o-latest",
"llama-3.1-70b": "llama-3.1-70b-turbo",
"llama-3.1-8b": "llama-3.1-8b-turbo",
- "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
- "mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1",
- "mistral-7b": "Mistral-7B-Instruct-v0.1",
- "mistral-7b": "Mistral-7B-Instruct-v0.2",
- "mistral-7b": "Mistral-7B-Instruct-v0.3",
- "mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
- "qwen-1.5-72b": "Qwen1.5-72B-Chat",
- "qwen-1.5-110b": "Qwen1.5-110B-Chat",
- "qwen-2-72b": "Qwen2-72B-Instruct",
- "gemma-2b": "gemma-2b-it",
- "gemma-2b-9b": "gemma-2-9b-it",
- "gemma-2b-27b": "gemma-2-27b-it",
- "deepseek": "deepseek-llm-67b-chat",
- "yi-34b": "Nous-Hermes-2-Yi-34B",
- "wizardlm-2-8x22b": "WizardLM-2-8x22B",
- "solar-10-7b": "SOLAR-10.7B-Instruct-v1.0",
- "sh-n-7b": "StripedHyena-Nous-7B",
- "sparkdesk-v1.1": "sparkdesk",
-
- # Other models
- "gpt-4o": "chatgpt-4o-latest",
- "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
- "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo": "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
- "gemini-flash": "gemini-1.5-flash",
- "gemini-pro": "gemini-1.5-pro",
-
- # Image models
- "dalle-3": "dall-e-3",
+ "gpt-4": "gpt-4-turbo",
}
@classmethod
- async def create_async_generator(
+ def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
+ seed: int = None,
+ size: str = "1:1",
+ stream: bool = False,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
-
+
+ if model in cls.image_models:
+ return cls._generate_image(model, messages, proxy, seed, size)
+ else:
+ return cls._generate_text(model, messages, proxy, stream, **kwargs)
+
+ @classmethod
+ async def _generate_image(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ seed: int = None,
+ size: str = "1:1",
+ **kwargs
+ ) -> AsyncResult:
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
- "content-type": "application/json",
- "origin": "https://api.airforce",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36",
- "authorization": "Bearer null",
"cache-control": "no-cache",
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": "https://llmplayground.net/",
- "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "cross-site",
+ "origin": "https://llmplayground.net",
+ "user-agent": "Mozilla/5.0"
}
+ if seed is None:
+ seed = random.randint(0, 100000)
+ prompt = messages[-1]['content']
- if model in cls.image_models:
- async for item in cls.generate_image(model, messages, headers, proxy, **kwargs):
- yield item
- else:
- async for item in cls.generate_text(model, messages, headers, proxy, **kwargs):
- yield item
+ async with StreamSession(headers=headers, proxy=proxy) as session:
+ params = {
+ "model": model,
+ "prompt": prompt,
+ "size": size,
+ "seed": seed
+ }
+ async with session.get(f"{cls.api_endpoint_imagine}", params=params) as response:
+ await raise_for_status(response)
+ content_type = response.headers.get('Content-Type', '').lower()
+
+ if 'application/json' in content_type:
+ raise RuntimeError((await response.json()).get("error", {}).get("message"))
+ elif 'image' in content_type:
+ image_data = b""
+ async for chunk in response.iter_content():
+ if chunk:
+ image_data += chunk
+ image_url = f"{cls.api_endpoint_imagine}?model={model}&prompt={prompt}&size={size}&seed={seed}"
+ yield ImageResponse(images=image_url, alt=prompt)
@classmethod
- async def generate_text(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
- async with ClientSession() as session:
+ async def _generate_text(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ stream: bool = False,
+ max_tokens: int = 4096,
+ temperature: float = 1,
+ top_p: float = 1,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "authorization": "Bearer missing api key",
+ "content-type": "application/json",
+ "user-agent": "Mozilla/5.0"
+ }
+ async with StreamSession(headers=headers, proxy=proxy) as session:
data = {
- "messages": [{"role": "user", "content": message['content']} for message in messages],
+ "messages": messages,
"model": model,
- "max_tokens": kwargs.get('max_tokens', 4096),
- "temperature": kwargs.get('temperature', 1),
- "top_p": kwargs.get('top_p', 1),
- "stream": True
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stream": stream
}
-
- try:
- async with session.post(cls.text_api_endpoint, json=data, headers=headers, proxy=proxy) as response:
- response.raise_for_status()
- async for line in response.content:
+ async with session.post(cls.api_endpoint_completions, json=data) as response:
+ await raise_for_status(response)
+ content_type = response.headers.get('Content-Type', '').lower()
+ if 'application/json' in content_type:
+ json_data = await response.json()
+ if json_data.get("model") == "error":
+ raise RuntimeError(json_data['choices'][0]['message'].get('content', ''))
+ if stream:
+ async for line in response.iter_lines():
if line:
line = line.decode('utf-8').strip()
- if line.startswith("data: "):
- if line == "data: [DONE]":
- break
- try:
- data = json.loads(line[6:])
- if 'choices' in data and len(data['choices']) > 0:
- delta = data['choices'][0].get('delta', {})
- if 'content' in delta:
- content = delta['content']
- if "One message exceeds the 1000chars per message limit" in content:
- raise ResponseStatusError(
- "Message too long",
- 400,
- "Please try a shorter message."
- )
- yield content
- except json.JSONDecodeError:
- continue
- except ResponseStatusError as e:
- raise e
- except Exception as e:
- raise ResponseStatusError(str(e), 500, "An unexpected error occurred")
+ if line.startswith("data: ") and line != "data: [DONE]":
+ json_data = json.loads(line[6:])
+ content = json_data['choices'][0]['delta'].get('content', '')
+ if content:
+ yield cls._filter_content(content)
+ else:
+ json_data = await response.json()
+ content = json_data['choices'][0]['message']['content']
+ yield cls._filter_content(content)
@classmethod
- async def generate_image(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
- prompt = messages[-1]['content'] if messages else ""
- params = {
- "prompt": prompt,
- "size": kwargs.get("size", "1:1"),
- "seed": kwargs.get("seed"),
- "model": model
- }
- params = {k: v for k, v in params.items() if v is not None}
-
- try:
- async with ClientSession(headers=headers) as session:
- async with session.get(cls.image_api_endpoint, params=params, proxy=proxy) as response:
- response.raise_for_status()
- content = await response.read()
-
- if response.content_type.startswith('image/'):
- image_url = str(response.url)
- yield ImageResponse(image_url, prompt)
- else:
- try:
- text = content.decode('utf-8', errors='ignore')
- raise ResponseStatusError("Image generation failed", response.status, text)
- except Exception as decode_error:
- raise ResponseStatusError("Decoding error", 500, str(decode_error))
- except ClientResponseError as e:
- raise ResponseStatusError(f"HTTP {e.status}", e.status, e.message)
- except Exception as e:
- raise ResponseStatusError("Unexpected error", 500, str(e))
+ def _filter_content(cls, part_response: str) -> str:
+ part_response = re.sub(
+ r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
+ '',
+ part_response
+ )
+
+ part_response = re.sub(
+ r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
+ '',
+ part_response
+ )
+ return part_response \ No newline at end of file
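
The new `_filter_content` strips rate-limit and message-length banners that the API splices into streamed text. The regexes can be exercised in isolation; the sample chunk below is invented, and the `\/` escapes from the original (redundant in Python) are dropped:

import re

RATE_LIMIT = re.compile(
    r"Rate limit \(\d+/minute\) exceeded\. "
    r"Join our discord for more: .+https://discord\.com/invite/\S+"
)

chunk = ("Hello!Rate limit (10/minute) exceeded. "
         "Join our discord for more: see https://discord.com/invite/abc123")
print(RATE_LIMIT.sub("", chunk))  # -> Hello!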
diff --git a/g4f/Provider/Allyfy.py b/g4f/Provider/Allyfy.py
deleted file mode 100644
index 8733b1ec..00000000
--- a/g4f/Provider/Allyfy.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import json
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
-
-
-class Allyfy(AsyncGeneratorProvider):
- url = "https://chatbot.allyfy.chat"
- api_endpoint = "/api/v1/message/stream/super/chat"
- working = True
- supports_gpt_35_turbo = True
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "accept": "text/event-stream",
- "accept-language": "en-US,en;q=0.9",
- "content-type": "application/json;charset=utf-8",
- "dnt": "1",
- "origin": "https://www.allyfy.chat",
- "priority": "u=1, i",
- "referer": "https://www.allyfy.chat/",
- "referrer": "https://www.allyfy.chat",
- 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-site",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
- }
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "messages": [{"content": prompt, "role": "user"}],
- "content": prompt,
- "baseInfo": {
- "clientId": "q08kdrde1115003lyedfoir6af0yy531",
- "pid": "38281",
- "channelId": "100000",
- "locale": "en-US",
- "localZone": 180,
- "packageName": "com.cch.allyfy.webh",
- }
- }
- async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
- response.raise_for_status()
- full_response = []
- async for line in response.content:
- line = line.decode().strip()
- if line.startswith("data:"):
- data_content = line[5:]
- if data_content == "[DONE]":
- break
- try:
- json_data = json.loads(data_content)
- if "content" in json_data:
- full_response.append(json_data["content"])
- except json.JSONDecodeError:
- continue
- yield "".join(full_response)
diff --git a/g4f/Provider/AmigoChat.py b/g4f/Provider/AmigoChat.py
new file mode 100644
index 00000000..2e66dccf
--- /dev/null
+++ b/g4f/Provider/AmigoChat.py
@@ -0,0 +1,176 @@
+from __future__ import annotations
+
+import json
+import uuid
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse
+from ..requests import StreamSession, raise_for_status
+from ..errors import ResponseStatusError
+
+class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://amigochat.io/chat/"
+ chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions"
+ image_api_endpoint = "https://api.amigochat.io/v1/images/generations"
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-4o-mini'
+
+ chat_models = [
+ 'gpt-4o',
+ default_model,
+ 'o1-preview',
+ 'o1-mini',
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo',
+ 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
+ 'claude-3-sonnet-20240229',
+ 'gemini-1.5-pro',
+ ]
+
+ image_models = [
+ 'flux-pro/v1.1',
+ 'flux-realism',
+ 'flux-pro',
+ 'dalle-e-3',
+ ]
+
+ models = [*chat_models, *image_models]
+
+ model_aliases = {
+ "o1": "o1-preview",
+ "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+ "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+ "claude-3.5-sonnet": "claude-3-sonnet-20240229",
+ "gemini-pro": "gemini-1.5-pro",
+
+ "flux-pro": "flux-pro/v1.1",
+ "dalle-3": "dalle-e-3",
+ }
+
+ persona_ids = {
+ 'gpt-4o': "gpt",
+ 'gpt-4o-mini': "amigo",
+ 'o1-preview': "openai-o-one",
+ 'o1-mini': "openai-o-one-mini",
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo': "llama-three-point-one",
+ 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo': "llama-3-2",
+ 'claude-3-sonnet-20240229': "claude",
+ 'gemini-1.5-pro': "gemini-1-5-pro",
+ 'flux-pro/v1.1': "flux-1-1-pro",
+ 'flux-realism': "flux-realism",
+ 'flux-pro': "flux-pro",
+ 'dalle-e-3': "dalle-three",
+ }
+
+ @classmethod
+ def get_personaId(cls, model: str) -> str:
+ return cls.persona_ids[model]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ stream: bool = False,
+ timeout: int = 300,
+ frequency_penalty: float = 0,
+ max_tokens: int = 4000,
+ presence_penalty: float = 0,
+ temperature: float = 0.5,
+ top_p: float = 0.95,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ device_uuid = str(uuid.uuid4())
+ max_retries = 3
+ retry_count = 0
+
+ while retry_count < max_retries:
+ try:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "authorization": "Bearer",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
+ "x-device-language": "en-US",
+ "x-device-platform": "web",
+ "x-device-uuid": device_uuid,
+ "x-device-version": "1.0.41"
+ }
+
+ async with StreamSession(headers=headers, proxy=proxy) as session:
+ if model not in cls.image_models:
+ data = {
+ "messages": messages,
+ "model": model,
+ "personaId": cls.get_personaId(model),
+ "frequency_penalty": frequency_penalty,
+ "max_tokens": max_tokens,
+ "presence_penalty": presence_penalty,
+ "stream": stream,
+ "temperature": temperature,
+ "top_p": top_p
+ }
+ async with session.post(cls.chat_api_endpoint, json=data, timeout=timeout) as response:
+ await raise_for_status(response)
+ async for line in response.iter_lines():
+ line = line.decode('utf-8').strip()
+ if line.startswith('data: '):
+ if line == 'data: [DONE]':
+ break
+ try:
+ chunk = json.loads(line[6:]) # Remove 'data: ' prefix
+ if 'choices' in chunk and len(chunk['choices']) > 0:
+ choice = chunk['choices'][0]
+ if 'delta' in choice:
+ content = choice['delta'].get('content')
+ elif 'text' in choice:
+ content = choice['text']
+ else:
+ content = None
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ pass
+ else:
+ # Image generation
+ prompt = messages[-1]['content']
+ data = {
+ "prompt": prompt,
+ "model": model,
+ "personaId": cls.get_personaId(model)
+ }
+ async with session.post(cls.image_api_endpoint, json=data) as response:
+ await raise_for_status(response)
+ response_data = await response.json()
+ if "data" in response_data:
+ image_urls = []
+ for item in response_data["data"]:
+ if "url" in item:
+ image_url = item["url"]
+ image_urls.append(image_url)
+ if image_urls:
+ yield ImageResponse(image_urls, prompt)
+ else:
+ yield None
+ break
+ except (ResponseStatusError, Exception) as e:
+ retry_count += 1
+ if retry_count >= max_retries:
+ raise e
+ device_uuid = str(uuid.uuid4())
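
The retry loop above mints a fresh x-device-uuid after every failure, so each attempt presents itself as a new device. The scheme reduced to a reusable sketch, where `do_request` is a hypothetical stand-in for the actual API call:

import uuid
from typing import Awaitable, Callable

async def with_fresh_device_id(
    do_request: Callable[[str], Awaitable[str]],
    max_retries: int = 3,
) -> str:
    last_error = None
    for _ in range(max_retries):
        # Mirror the loop above: a brand-new device UUID per attempt.
        device_uuid = str(uuid.uuid4())
        try:
            return await do_request(device_uuid)
        except Exception as error:
            last_error = error
    raise last_error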
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index 4056f9ff..cdc2b9d9 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -17,7 +17,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_random_hex
from .bing.upload_image import upload_image
from .bing.conversation import Conversation, create_conversation, delete_conversation
-from .BingCreateImages import BingCreateImages
+from .needs_auth.BingCreateImages import BingCreateImages
from .. import debug
class Tones:
@@ -37,7 +37,6 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://bing.com/chat"
working = True
supports_message_history = True
- supports_gpt_4 = True
default_model = "Balanced"
default_vision_model = "gpt-4-vision"
models = [getattr(Tones, key) for key in Tones.__dict__ if not key.startswith("__")]
diff --git a/g4f/Provider/Binjie.py b/g4f/Provider/Binjie.py
deleted file mode 100644
index 90f9ec3c..00000000
--- a/g4f/Provider/Binjie.py
+++ /dev/null
@@ -1,65 +0,0 @@
-from __future__ import annotations
-
-import random
-from ..requests import StreamSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, format_prompt
-
-
-class Binjie(AsyncGeneratorProvider):
- url = "https://chat18.aichatos8.com"
- working = True
- supports_gpt_4 = True
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- @staticmethod
- async def create_async_generator(
- model: str,
- messages: Messages,
- proxy: str = None,
- timeout: int = 120,
- **kwargs,
- ) -> AsyncResult:
- async with StreamSession(
- headers=_create_header(), proxies={"https": proxy}, timeout=timeout
- ) as session:
- payload = _create_payload(messages, **kwargs)
- async with session.post("https://api.binjie.fun/api/generateStream", json=payload) as response:
- response.raise_for_status()
- async for chunk in response.iter_content():
- if chunk:
- chunk = chunk.decode()
- if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
- raise RuntimeError("IP address is blocked by abuse detection.")
- yield chunk
-
-
-def _create_header():
- return {
- "accept" : "application/json, text/plain, */*",
- "content-type" : "application/json",
- "origin" : "https://chat18.aichatos8.com",
- "referer" : "https://chat18.aichatos8.com/"
- }
-
-
-def _create_payload(
- messages: Messages,
- system_message: str = "",
- user_id: int = None,
- **kwargs
-):
- if not user_id:
- user_id = random.randint(1690000544336, 2093025544336)
- return {
- "prompt": format_prompt(messages),
- "network": True,
- "system": system_message,
- "withoutContext": False,
- "stream": True,
- "userId": f"#/chat/{user_id}"
- }
-
diff --git a/g4f/Provider/Bixin123.py b/g4f/Provider/Bixin123.py
deleted file mode 100644
index 081064f9..00000000
--- a/g4f/Provider/Bixin123.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import json
-import random
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..typing import AsyncResult, Messages
-from .helper import format_prompt
-
-class Bixin123(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://chat.bixin123.com"
- api_endpoint = "https://chat.bixin123.com/api/chatgpt/chat-process"
- working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
-
- default_model = 'gpt-3.5-turbo-0125'
- models = ['gpt-3.5-turbo', 'gpt-3.5-turbo-0125', 'gpt-3.5-turbo-16k-0613', 'gpt-4-turbo', 'qwen-turbo']
-
- model_aliases = {
- "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- def generate_fingerprint(cls) -> str:
- return str(random.randint(100000000, 999999999))
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "accept": "application/json, text/plain, */*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "fingerprint": cls.generate_fingerprint(),
- "origin": cls.url,
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": f"{cls.url}/chat",
- "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
- "x-website-domain": "chat.bixin123.com",
- }
-
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "prompt": prompt,
- "options": {
- "usingNetwork": False,
- "file": ""
- }
- }
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- response_text = await response.text()
-
- lines = response_text.strip().split("\n")
- last_json = None
- for line in reversed(lines):
- try:
- last_json = json.loads(line)
- break
- except json.JSONDecodeError:
- pass
-
- if last_json:
- text = last_json.get("text", "")
- yield text
- else:
- yield ""
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 3e183076..97466c04 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -1,71 +1,133 @@
from __future__ import annotations
-import re
+from aiohttp import ClientSession
import random
import string
-from aiohttp import ClientSession
+import json
+import re
+import aiohttp
from ..typing import AsyncResult, Messages, ImageType
-from ..image import ImageResponse, to_data_uri
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse, to_data_uri
+from .helper import get_random_string
class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Blackbox AI"
url = "https://www.blackbox.ai"
api_endpoint = "https://www.blackbox.ai/api/chat"
working = True
supports_stream = True
supports_system_message = True
supports_message_history = True
-
- default_model = 'blackbox'
- models = [
- 'blackbox',
- 'gemini-1.5-flash',
- "llama-3.1-8b",
- 'llama-3.1-70b',
- 'llama-3.1-405b',
- 'ImageGenerationLV45LJp',
- 'GPT-4o',
- 'Gemini-PRO',
- 'Claude-Sonnet-3.5',
- ]
-
- model_aliases = {
- "gemini-flash": "gemini-1.5-flash",
- "flux": "ImageGenerationLV45LJp",
- "gpt-4o": "GPT-4o",
- "gemini-pro": "Gemini-PRO",
- "claude-3.5-sonnet": "Claude-Sonnet-3.5",
- }
+ _last_validated_value = None
+ default_model = 'blackboxai'
+ default_vision_model = default_model
+ default_image_model = 'generate_image'
+ image_models = [default_image_model, 'repomap']
+ text_models = [default_model, 'gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']
+ vision_models = [default_model, 'gpt-4o', 'gemini-pro', 'blackboxai-pro']
agentMode = {
- 'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
+ default_image_model: {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
}
-
trendingAgentMode = {
- "blackbox": {},
"gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
"llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
- 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
+ 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"},
+ #
+ 'Python Agent': {'mode': True, 'id': "Python Agent"},
+ 'Java Agent': {'mode': True, 'id': "Java Agent"},
+ 'JavaScript Agent': {'mode': True, 'id': "JavaScript Agent"},
+ 'HTML Agent': {'mode': True, 'id': "HTML Agent"},
+ 'Google Cloud Agent': {'mode': True, 'id': "Google Cloud Agent"},
+ 'Android Developer': {'mode': True, 'id': "Android Developer"},
+ 'Swift Developer': {'mode': True, 'id': "Swift Developer"},
+ 'Next.js Agent': {'mode': True, 'id': "Next.js Agent"},
+ 'MongoDB Agent': {'mode': True, 'id': "MongoDB Agent"},
+ 'PyTorch Agent': {'mode': True, 'id': "PyTorch Agent"},
+ 'React Agent': {'mode': True, 'id': "React Agent"},
+ 'Xcode Agent': {'mode': True, 'id': "Xcode Agent"},
+ 'AngularJS Agent': {'mode': True, 'id': "AngularJS Agent"},
+ 'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
+ #
+ 'repomap': {'mode': True, 'id': "repomap"},
+ #
+ 'Heroku Agent': {'mode': True, 'id': "Heroku Agent"},
+ 'Godot Agent': {'mode': True, 'id': "Godot Agent"},
+ 'Go Agent': {'mode': True, 'id': "Go Agent"},
+ 'Gitlab Agent': {'mode': True, 'id': "Gitlab Agent"},
+ 'Git Agent': {'mode': True, 'id': "Git Agent"},
+ 'Flask Agent': {'mode': True, 'id': "Flask Agent"},
+ 'Firebase Agent': {'mode': True, 'id': "Firebase Agent"},
+ 'FastAPI Agent': {'mode': True, 'id': "FastAPI Agent"},
+ 'Erlang Agent': {'mode': True, 'id': "Erlang Agent"},
+ 'Electron Agent': {'mode': True, 'id': "Electron Agent"},
+ 'Docker Agent': {'mode': True, 'id': "Docker Agent"},
+ 'DigitalOcean Agent': {'mode': True, 'id': "DigitalOcean Agent"},
+ 'Bitbucket Agent': {'mode': True, 'id': "Bitbucket Agent"},
+ 'Azure Agent': {'mode': True, 'id': "Azure Agent"},
+ 'Flutter Agent': {'mode': True, 'id': "Flutter Agent"},
+ 'Youtube Agent': {'mode': True, 'id': "Youtube Agent"},
+ 'builder Agent': {'mode': True, 'id': "builder Agent"},
}
-
- userSelectedModel = {
- "GPT-4o": "GPT-4o",
- "Gemini-PRO": "Gemini-PRO",
- 'Claude-Sonnet-3.5': "Claude-Sonnet-3.5",
+ model_prefixes = {mode: f"@{value['id']}" for mode, value in trendingAgentMode.items() if mode not in ["gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-70b", "llama-3.1-405b", "repomap"]}
+ models = [*text_models, default_image_model, *list(trendingAgentMode.keys())]
+ model_aliases = {
+ "gemini-flash": "gemini-1.5-flash",
+ "claude-3.5-sonnet": "claude-sonnet-3.5",
+ "flux": "Image Generation",
}
@classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.userSelectedModel:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
+ async def fetch_validated(cls):
+ # If the key is already stored in memory, return it
+ if cls._last_validated_value:
+ return cls._last_validated_value
+
+ # If the key is not found, perform a search
+ async with aiohttp.ClientSession() as session:
+ try:
+ async with session.get(cls.url) as response:
+ if response.status != 200:
+ print("Failed to load the page.")
+ return cls._last_validated_value
+
+ page_content = await response.text()
+ js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content)
+
+ key_pattern = re.compile(r'w="([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})"')
+
+ for js_file in js_files:
+ js_url = f"{cls.url}/_next/{js_file}"
+ async with session.get(js_url) as js_response:
+ if js_response.status == 200:
+ js_content = await js_response.text()
+ match = key_pattern.search(js_content)
+ if match:
+ validated_value = match.group(1)
+ cls._last_validated_value = validated_value # Cache for subsequent calls
+ return validated_value
+ except Exception as e:
+ print(f"Error fetching validated value: {e}")
+
+ return cls._last_validated_value
+
+ @classmethod
+ def add_prefix_to_messages(cls, messages: Messages, model: str) -> Messages:
+ prefix = cls.model_prefixes.get(model, "")
+ if not prefix:
+ return messages
+
+ new_messages = []
+ for message in messages:
+ new_message = message.copy()
+ if message['role'] == 'user':
+ new_message['content'] = (prefix + " " + message['content']).strip()
+ new_messages.append(new_message)
+
+ return new_messages
@classmethod
async def create_async_generator(
@@ -73,87 +135,87 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
+ web_search: bool = False,
image: ImageType = None,
image_name: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
-
+ message_id = get_random_string(7)
+ messages = cls.add_prefix_to_messages(messages, model)
+ validated_value = await cls.fetch_validated()
+
+ if image is not None:
+ messages[-1]['data'] = {
+ 'fileText': '',
+ 'imageBase64': to_data_uri(image),
+ 'title': image_name
+ }
+
headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "origin": cls.url,
- "pragma": "no-cache",
- "referer": f"{cls.url}/",
- "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': cls.url,
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': f'{cls.url}/',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
}
- if model in cls.userSelectedModel:
- prefix = f"@{cls.userSelectedModel[model]}"
- if not messages[0]['content'].startswith(prefix):
- messages[0]['content'] = f"{prefix} {messages[0]['content']}"
-
- async with ClientSession(headers=headers) as session:
- if image is not None:
- messages[-1]["data"] = {
- "fileText": image_name,
- "imageBase64": to_data_uri(image)
- }
-
- random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
-
- data = {
- "messages": messages,
- "id": random_id,
- "previewToken": None,
- "userId": None,
- "codeModelMode": True,
- "agentMode": {},
- "trendingAgentMode": {},
- "userSelectedModel": None,
- "isMicMode": False,
- "maxTokens": 99999999,
- "playgroundTopP": 0.9,
- "playgroundTemperature": 0.5,
- "isChromeExt": False,
- "githubToken": None,
- "clickedAnswer2": False,
- "clickedAnswer3": False,
- "clickedForceWebSearch": False,
- "visitFromDelta": False,
- "mobileClient": False,
- "webSearchMode": False,
- }
+ data = {
+ "messages": messages,
+ "id": message_id,
+ "previewToken": None,
+ "userId": None,
+ "codeModelMode": True,
+ "agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {},
+ "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {},
+ "isMicMode": False,
+ "userSystemPrompt": None,
+ "maxTokens": 1024,
+ "playgroundTopP": 0.9,
+ "playgroundTemperature": 0.5,
+ "isChromeExt": False,
+ "githubToken": None,
+ "clickedAnswer2": False,
+ "clickedAnswer3": False,
+ "clickedForceWebSearch": False,
+ "visitFromDelta": False,
+ "mobileClient": False,
+ "userSelectedModel": model if model in cls.text_models else None,
+ "webSearchMode": web_search,
+ "validated": validated_value,
+ }
- if model in cls.agentMode:
- data["agentMode"] = cls.agentMode[model]
- elif model in cls.trendingAgentMode:
- data["trendingAgentMode"] = cls.trendingAgentMode[model]
- elif model in cls.userSelectedModel:
- data["userSelectedModel"] = cls.userSelectedModel[model]
-
+ async with ClientSession(headers=headers) as session:
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
- if model == 'ImageGenerationLV45LJp':
- response_text = await response.text()
- url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text)
- if url_match:
- image_url = url_match.group(0)
- yield ImageResponse(image_url, alt=messages[-1]['content'])
+ async for chunk in response.content.iter_any():
+ text_chunk = chunk.decode(errors="ignore")
+ if model in cls.image_models:
+ image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', text_chunk)
+ if image_matches:
+ image_url = image_matches[0]
+ image_response = ImageResponse(images=[image_url])
+ yield image_response
+ continue
+
+ text_chunk = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', text_chunk, flags=re.DOTALL)
+ json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', text_chunk, re.DOTALL)
+ if json_match:
+ search_results = json.loads(json_match.group(1))
+ answer = text_chunk.split('$~~~$')[-1].strip()
+ formatted_response = f"{answer}\n\n**Source:**"
+ for i, result in enumerate(search_results, 1):
+ formatted_response += f"\n{i}. {result['title']}: {result['link']}"
+ yield formatted_response
else:
- raise Exception("Image URL not found in the response")
- else:
- async for chunk in response.content.iter_any():
- if chunk:
- decoded_chunk = chunk.decode()
- decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
- if decoded_chunk.strip():
- yield decoded_chunk
+ yield text_chunk.strip()
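
The new `fetch_validated` obtains the request's `validated` field by scraping a UUID literal of the form `w="..."` out of the site's Next.js chunk files and caching it on the class. Stripped of the caching, the scrape reduces to the sketch below; the chunk-path and key patterns are copied from the code above and will break whenever the site redeploys:

import re
from typing import Optional
import aiohttp

KEY_RE = re.compile(
    r'w="([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}'
    r'-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})"'
)

async def scrape_validated(base_url: str = "https://www.blackbox.ai") -> Optional[str]:
    async with aiohttp.ClientSession() as session:
        async with session.get(base_url) as response:
            page = await response.text()
        # Any JS chunk referenced by the page may embed the key literal.
        for js_file in re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page):
            async with session.get(f"{base_url}/_next/{js_file}") as js_response:
                match = KEY_RE.search(await js_response.text())
                if match:
                    return match.group(1)
    return None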
diff --git a/g4f/Provider/ChatGot.py b/g4f/Provider/ChatGot.py
deleted file mode 100644
index 55e8d0b6..00000000
--- a/g4f/Provider/ChatGot.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from __future__ import annotations
-
-import time
-from hashlib import sha256
-
-from aiohttp import BaseConnector, ClientSession
-
-from ..errors import RateLimitError
-from ..requests import raise_for_status
-from ..requests.aiohttp import get_connector
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-
-
-class ChatGot(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://www.chatgot.one/"
- working = True
- supports_message_history = True
- default_model = 'gemini-pro'
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- connector: BaseConnector = None,
- **kwargs,
- ) -> AsyncResult:
- headers = {
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
- "Accept": "*/*",
- "Accept-Language": "en-US,en;q=0.5",
- "Accept-Encoding": "gzip, deflate, br",
- "Content-Type": "text/plain;charset=UTF-8",
- "Referer": f"{cls.url}/",
- "Origin": cls.url,
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "Connection": "keep-alive",
- "TE": "trailers",
- }
- async with ClientSession(
- connector=get_connector(connector, proxy), headers=headers
- ) as session:
- timestamp = int(time.time() * 1e3)
- data = {
- "messages": [
- {
- "role": "model" if message["role"] == "assistant" else "user",
- "parts": [{"text": message["content"]}],
- }
- for message in messages
- ],
- "time": timestamp,
- "pass": None,
- "sign": generate_signature(timestamp, messages[-1]["content"]),
- }
- async with session.post(
- f"{cls.url}/api/generate", json=data, proxy=proxy
- ) as response:
- if response.status == 500:
- if "Quota exceeded" in await response.text():
- raise RateLimitError(
- f"Response {response.status}: Rate limit reached"
- )
- await raise_for_status(response)
- async for chunk in response.content.iter_any():
- yield chunk.decode(errors="ignore")
-
-
-def generate_signature(time: int, text: str, secret: str = ""):
- message = f"{time}:{text}:{secret}"
- return sha256(message.encode()).hexdigest()
diff --git a/g4f/Provider/ChatGpt.py b/g4f/Provider/ChatGpt.py
index fc34fc2b..02bbbcc4 100644
--- a/g4f/Provider/ChatGpt.py
+++ b/g4f/Provider/ChatGpt.py
@@ -3,7 +3,10 @@ from __future__ import annotations
from ..typing import Messages, CreateResult
from ..providers.base_provider import AbstractProvider, ProviderModelMixin
-import time, uuid, random, json
+import time
+import uuid
+import random
+import json
from requests import Session
from .openai.new import (
@@ -72,17 +75,34 @@ def init_session(user_agent):
class ChatGpt(AbstractProvider, ProviderModelMixin):
label = "ChatGpt"
+ url = "https://chatgpt.com"
working = True
supports_message_history = True
supports_system_message = True
supports_stream = True
+ default_model = 'auto'
models = [
+ default_model,
+ 'gpt-3.5-turbo',
'gpt-4o',
'gpt-4o-mini',
'gpt-4',
'gpt-4-turbo',
'chatgpt-4o-latest',
]
+
+ model_aliases = {
+ "gpt-4o": "chatgpt-4o-latest",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
@classmethod
def create_completion(
@@ -92,30 +112,17 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
stream: bool,
**kwargs
) -> CreateResult:
+ model = cls.get_model(model)
+ if model not in cls.models:
+ raise ValueError(f"Model '{model}' is not available. Available models: {', '.join(cls.models)}")
+
- if model in [
- 'gpt-4o',
- 'gpt-4o-mini',
- 'gpt-4',
- 'gpt-4-turbo',
- 'chatgpt-4o-latest'
- ]:
- model = 'auto'
-
- elif model in [
- 'gpt-3.5-turbo'
- ]:
- model = 'text-davinci-002-render-sha'
-
- else:
- raise ValueError(f"Invalid model: {model}")
-
- user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
+ user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
session: Session = init_session(user_agent)
- config = get_config(user_agent)
- pow_req = get_requirements_token(config)
- headers = {
+ config = get_config(user_agent)
+ pow_req = get_requirements_token(config)
+ headers = {
'accept': '*/*',
'accept-language': 'en-US,en;q=0.8',
'content-type': 'application/json',
@@ -134,29 +141,35 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
}
response = session.post('https://chatgpt.com/backend-anon/sentinel/chat-requirements',
- headers=headers, json={'p': pow_req}).json()
+ headers=headers, json={'p': pow_req})
+
+ if response.status_code != 200:
+ return
- turnstile = response.get('turnstile', {})
+ response_data = response.json()
+ if "detail" in response_data and "Unusual activity" in response_data["detail"]:
+ return
+
+ turnstile = response_data.get('turnstile', {})
turnstile_required = turnstile.get('required')
- pow_conf = response.get('proofofwork', {})
+ pow_conf = response_data.get('proofofwork', {})
if turnstile_required:
- turnstile_dx = turnstile.get('dx')
+ turnstile_dx = turnstile.get('dx')
turnstile_token = process_turnstile(turnstile_dx, pow_req)
- headers = headers | {
- 'openai-sentinel-turnstile-token' : turnstile_token,
- 'openai-sentinel-chat-requirements-token': response.get('token'),
- 'openai-sentinel-proof-token' : get_answer_token(
- pow_conf.get('seed'), pow_conf.get('difficulty'), config
- )
- }
-
+ headers = {**headers,
+ 'openai-sentinel-turnstile-token': turnstile_token,
+ 'openai-sentinel-chat-requirements-token': response_data.get('token'),
+ 'openai-sentinel-proof-token': get_answer_token(
+ pow_conf.get('seed'), pow_conf.get('difficulty'), config
+ )}
+
json_data = {
'action': 'next',
'messages': format_conversation(messages),
'parent_message_id': str(uuid.uuid4()),
- 'model': 'auto',
+ 'model': model,
'timezone_offset_min': -120,
'suggestions': [
'Can you help me create a personalized morning routine that would help increase my productivity throughout the day? Start by asking me about my current habits and what activities energize me in the morning.',
@@ -179,7 +192,7 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
'conversation_origin': None,
'client_contextual_info': {
'is_dark_mode': True,
- 'time_since_loaded': random.randint(22,33),
+ 'time_since_loaded': random.randint(22, 33),
'page_height': random.randint(600, 900),
'page_width': random.randint(500, 800),
'pixel_ratio': 2,
@@ -187,20 +200,33 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
'screen_width': random.randint(1200, 2000),
},
}
+
+ time.sleep(2)
response = session.post('https://chatgpt.com/backend-anon/conversation',
- headers=headers, json=json_data, stream=True)
-
+ headers=headers, json=json_data, stream=True)
+
replace = ''
for line in response.iter_lines():
if line:
- if 'DONE' in line.decode():
- break
-
- data = json.loads(line.decode()[6:])
- if data.get('message').get('author').get('role') == 'assistant':
- tokens = (data.get('message').get('content').get('parts')[0])
-
- yield tokens.replace(replace, '')
+ decoded_line = line.decode()
+
+ if decoded_line.startswith('data:'):
+ json_string = decoded_line[6:].strip()
+
+ if json_string == '[DONE]':
+ break
- replace = tokens
+ if json_string:
+ try:
+ data = json.loads(json_string)
+ except json.JSONDecodeError:
+ continue
+
+ if data.get('message') and data['message'].get('author'):
+ role = data['message']['author'].get('role')
+ if role == 'assistant':
+ tokens = data['message']['content'].get('parts', [])
+ if tokens:
+ yield tokens[0].replace(replace, '')
+ replace = tokens[0]
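
The rewritten streaming loop relies on the backend sending cumulative message parts; `tokens[0].replace(replace, '')` emits only the text added since the previous event. The trick in isolation, on made-up parts:

parts = ["Hel", "Hello", "Hello, wor", "Hello, world!"]
previous = ""
for part in parts:
    print(part.replace(previous, ""), end="")  # only the newly added suffix
    previous = part
print()  # -> Hello, world!

Because `str.replace` removes every occurrence, the scheme quietly assumes the previous part appears exactly once, as a prefix of the new one.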
diff --git a/g4f/Provider/ChatGptEs.py b/g4f/Provider/ChatGptEs.py
index 0e7062e5..788ffcd9 100644
--- a/g4f/Provider/ChatGptEs.py
+++ b/g4f/Provider/ChatGptEs.py
@@ -13,7 +13,6 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chatgpt.es"
api_endpoint = "https://chatgpt.es/wp-admin/admin-ajax.php"
working = True
- supports_gpt_4 = True
supports_stream = True
supports_system_message = True
supports_message_history = True
@@ -58,7 +57,7 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
post_id = re.findall(r'data-post-id="(.+?)"', await initial_response.text())[0]
conversation_history = [
- "Human: strictly respond in the same language as my prompt, preferably English"
+ "Human: You are a helpful AI assistant. Please respond in the same language that the user uses in their message. Provide accurate, relevant and helpful information while maintaining a friendly and professional tone. If you're not sure about something, please acknowledge that and provide the best information you can while noting any uncertainties. Focus on being helpful while respecting the user's choice of language."
]
for message in messages[:-1]:
diff --git a/g4f/Provider/ChatHub.py b/g4f/Provider/ChatHub.py
deleted file mode 100644
index 3b762687..00000000
--- a/g4f/Provider/ChatHub.py
+++ /dev/null
@@ -1,84 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-class ChatHub(AsyncGeneratorProvider, ProviderModelMixin):
- label = "ChatHub"
- url = "https://app.chathub.gg"
- api_endpoint = "https://app.chathub.gg/api/v3/chat/completions"
- working = True
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- default_model = 'meta/llama3.1-8b'
- models = [
- 'meta/llama3.1-8b',
- 'mistral/mixtral-8x7b',
- 'google/gemma-2',
- 'perplexity/sonar-online',
- ]
-
- model_aliases = {
- "llama-3.1-8b": "meta/llama3.1-8b",
- "mixtral-8x7b": "mistral/mixtral-8x7b",
- "gemma-2": "google/gemma-2",
- "sonar-online": "perplexity/sonar-online",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- 'accept': '*/*',
- 'accept-language': 'en-US,en;q=0.9',
- 'content-type': 'application/json',
- 'origin': cls.url,
- 'referer': f"{cls.url}/chat/cloud-llama3.1-8b",
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
- 'x-app-id': 'web'
- }
-
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "model": model,
- "messages": [{"role": "user", "content": prompt}],
- "tools": []
- }
-
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- async for line in response.content:
- if line:
- decoded_line = line.decode('utf-8')
- if decoded_line.startswith('data:'):
- try:
- data = json.loads(decoded_line[5:])
- if data['type'] == 'text-delta':
- yield data['textDelta']
- elif data['type'] == 'done':
- break
- except json.JSONDecodeError:
- continue
diff --git a/g4f/Provider/Cloudflare.py b/g4f/Provider/Cloudflare.py
new file mode 100644
index 00000000..7d477d57
--- /dev/null
+++ b/g4f/Provider/Cloudflare.py
@@ -0,0 +1,97 @@
+from __future__ import annotations
+
+import asyncio
+import json
+import uuid
+
+from ..typing import AsyncResult, Messages, Cookies
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, get_running_loop
+from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
+from ..errors import ResponseStatusError
+
+class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Cloudflare AI"
+ url = "https://playground.ai.cloudflare.com"
+ api_endpoint = "https://playground.ai.cloudflare.com/api/inference"
+ models_url = "https://playground.ai.cloudflare.com/api/models"
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+ default_model = "@cf/meta/llama-3.1-8b-instruct"
+ model_aliases = {
+ # one backend variant per alias; Python would silently keep only the last value for a duplicated dict key
+ "llama-2-7b": "@cf/meta/llama-2-7b-chat-int8",
+ "llama-3-8b": "@hf/meta-llama/meta-llama-3-8b-instruct",
+ "llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-fp8",
+ "llama-3.2-1b": "@cf/meta/llama-3.2-1b-instruct",
+ "qwen-1.5-7b": "@cf/qwen/qwen1.5-7b-chat-awq",
+ }
+ _args: dict = None
+
+ @classmethod
+ def get_models(cls) -> list[str]:
+ if not cls.models:
+ if cls._args is None:
+ get_running_loop(check_nested=True)
+ args = get_args_from_nodriver(cls.url, cookies={
+ '__cf_bm': uuid.uuid4().hex,
+ })
+ cls._args = asyncio.run(args)
+ with Session(**cls._args) as session:
+ response = session.get(cls.models_url)
+ cls._args["cookies"] = merge_cookies(cls._args["cookies"] , response)
+ try:
+ raise_for_status(response)
+ except ResponseStatusError as e:
+ cls._args = None
+ raise e
+ json_data = response.json()
+ cls.models = [model.get("name") for model in json_data.get("models")]
+ return cls.models
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ max_tokens: int = 2048,
+ cookies: Cookies = None,
+ timeout: int = 300,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ if cls._args is None:
+ cls._args = await get_args_from_nodriver(cls.url, proxy, timeout, cookies)
+ data = {
+ "messages": messages,
+ "lora": None,
+ "model": model,
+ "max_tokens": max_tokens,
+ "stream": True
+ }
+ async with StreamSession(**cls._args) as session:
+ async with session.post(
+ cls.api_endpoint,
+ json=data,
+ ) as response:
+ cls._args["cookies"] = merge_cookies(cls._args["cookies"] , response)
+ try:
+ await raise_for_status(response)
+ except ResponseStatusError as e:
+ cls._args = None
+ raise e
+ async for line in response.iter_lines():
+ if line.startswith(b'data: '):
+ if line == b'data: [DONE]':
+ break
+ try:
+ content = json.loads(line[6:].decode())
+ if content.get("response") and content.get("response") != '</s>':
+ yield content['response']
+ except Exception:
+ continue \ No newline at end of file
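The provider caches browser-derived request arguments in cls._args and streams SSE deltas from the inference endpoint. A minimal usage sketch, assuming Cloudflare is re-exported from g4f.Provider as the updated __init__.py suggests:

import asyncio
from g4f.Provider import Cloudflare

async def main():
    messages = [{"role": "user", "content": "Say hello"}]
    # create_async_generator yields response chunks as they arrive
    async for chunk in Cloudflare.create_async_generator(
        model=Cloudflare.default_model, messages=messages
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())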
diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
new file mode 100644
index 00000000..f10202bf
--- /dev/null
+++ b/g4f/Provider/Copilot.py
@@ -0,0 +1,156 @@
+from __future__ import annotations
+
+import json
+import asyncio
+from http.cookiejar import CookieJar
+from urllib.parse import quote
+
+try:
+ from curl_cffi.requests import Session, CurlWsFlag
+ has_curl_cffi = True
+except ImportError:
+ has_curl_cffi = False
+try:
+ import nodriver
+ has_nodriver = True
+except ImportError:
+ has_nodriver = False
+try:
+ from platformdirs import user_config_dir
+ has_platformdirs = True
+except ImportError:
+ has_platformdirs = False
+
+from .base_provider import AbstractProvider, BaseConversation
+from .helper import format_prompt
+from ..typing import CreateResult, Messages
+from ..errors import MissingRequirementsError
+from ..requests.raise_for_status import raise_for_status
+from .. import debug
+
+class Conversation(BaseConversation):
+ conversation_id: str
+ cookie_jar: CookieJar
+ access_token: str
+
+ def __init__(self, conversation_id: str, cookie_jar: CookieJar, access_token: str = None):
+ self.conversation_id = conversation_id
+ self.cookie_jar = cookie_jar
+ self.access_token = access_token
+
+class Copilot(AbstractProvider):
+ label = "Microsoft Copilot"
+ url = "https://copilot.microsoft.com"
+ working = True
+ supports_stream = True
+
+ websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"
+ conversation_url = f"{url}/c/api/conversations"
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ proxy: str = None,
+ timeout: int = 900,
+ conversation: Conversation = None,
+ return_conversation: bool = False,
+ **kwargs
+ ) -> CreateResult:
+ if not has_curl_cffi:
+ raise MissingRequirementsError('Install or update "curl_cffi" package | pip install -U curl_cffi')
+
+ websocket_url = cls.websocket_url
+ access_token = None
+ headers = None
+ cookies = conversation.cookie_jar if conversation is not None else None
+ if cls.needs_auth:
+ if conversation is None or conversation.access_token is None:
+ access_token, cookies = asyncio.run(cls.get_access_token_and_cookies(proxy))
+ else:
+ access_token = conversation.access_token
+ websocket_url = f"{websocket_url}&acessToken={quote(access_token)}"
+ headers = {"Authorization": f"Bearer {access_token}"}
+
+ with Session(
+ timeout=timeout,
+ proxy=proxy,
+ impersonate="chrome",
+ headers=headers,
+ cookies=cookies
+ ) as session:
+ response = session.get(f"{cls.url}/")
+ raise_for_status(response)
+ if conversation is None:
+ response = session.post(cls.conversation_url)
+ raise_for_status(response)
+ conversation_id = response.json().get("id")
+ if return_conversation:
+ yield Conversation(conversation_id, session.cookies.jar, access_token)
+ prompt = format_prompt(messages)
+ if debug.logging:
+ print(f"Copilot: Created conversation: {conversation_id}")
+ else:
+ conversation_id = conversation.conversation_id
+ prompt = messages[-1]["content"]
+ if debug.logging:
+ print(f"Copilot: Use conversation: {conversation_id}")
+
+ wss = session.ws_connect(websocket_url)  # use the (possibly token-augmented) URL built above
+ wss.send(json.dumps({
+ "event": "send",
+ "conversationId": conversation_id,
+ "content": [{
+ "type": "text",
+ "text": prompt,
+ }],
+ "mode": "chat"
+ }).encode(), CurlWsFlag.TEXT)
+ while True:
+ try:
+ msg = json.loads(wss.recv()[0])
+ except Exception:
+ break
+ if msg.get("event") == "appendText":
+ yield msg.get("text")
+ elif msg.get("event") in ["done", "partCompleted"]:
+ break
+
+ @classmethod
+ async def get_access_token_and_cookies(cls, proxy: str = None):
+ if not has_nodriver:
+ raise MissingRequirementsError('Install "nodriver" package | pip install -U nodriver')
+ if has_platformdirs:
+ user_data_dir = user_config_dir("g4f-nodriver")
+ else:
+ user_data_dir = None
+ if debug.logging:
+ print(f"Copilot: Open nodriver with user_dir: {user_data_dir}")
+ browser = await nodriver.start(
+ user_data_dir=user_data_dir,
+ browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
+ )
+ page = await browser.get(cls.url)
+ while True:
+ access_token = await page.evaluate("""
+ (() => {
+ for (var i = 0; i < localStorage.length; i++) {
+ try {
+ item = JSON.parse(localStorage.getItem(localStorage.key(i)));
+ if (item.credentialType == "AccessToken") {
+ return item.secret;
+ }
+ } catch(e) {}
+ }
+ })()
+ """)
+ if access_token:
+ break
+ await asyncio.sleep(1)
+ cookies = {}
+ for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
+ cookies[c.name] = c.value
+ await page.close()
+ return access_token, cookies \ No newline at end of file
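With return_conversation=True the generator yields a Conversation (conversation id, cookie jar, optional token) before any text, and a follow-up call can pass it back in. A rough sketch, assuming Copilot is exported from g4f.Provider:

from g4f.Provider import Copilot
from g4f.Provider.Copilot import Conversation

messages = [{"role": "user", "content": "Hi"}]
conversation = None
for item in Copilot.create_completion("", messages, stream=True, return_conversation=True):
    if isinstance(item, Conversation):
        conversation = item  # keep for the next turn
    else:
        print(item, end="")

# the follow-up turn sends only the last message into the same conversation
for item in Copilot.create_completion("", [{"role": "user", "content": "Shorter, please"}], conversation=conversation):
    print(item, end="")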
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
index 1eae7b39..c4be0ea8 100644
--- a/g4f/Provider/DDG.py
+++ b/g4f/Provider/DDG.py
@@ -2,98 +2,109 @@ from __future__ import annotations
import json
import aiohttp
-from aiohttp import ClientSession
+from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation
from .helper import format_prompt
+from ..requests.aiohttp import get_connector
+from ..requests.raise_for_status import raise_for_status
+from .. import debug
+MODELS = [
+ {"model":"gpt-4o","modelName":"GPT-4o","modelVariant":None,"modelStyleId":"gpt-4o-mini","createdBy":"OpenAI","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"4"},
+ {"model":"gpt-4o-mini","modelName":"GPT-4o","modelVariant":"mini","modelStyleId":"gpt-4o-mini","createdBy":"OpenAI","moderationLevel":"HIGH","isAvailable":0,"inputCharLimit":16e3,"settingId":"3"},
+ {"model":"claude-3-5-sonnet-20240620","modelName":"Claude 3.5","modelVariant":"Sonnet","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"7"},
+ {"model":"claude-3-opus-20240229","modelName":"Claude 3","modelVariant":"Opus","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"2"},
+ {"model":"claude-3-haiku-20240307","modelName":"Claude 3","modelVariant":"Haiku","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":0,"inputCharLimit":16e3,"settingId":"1"},
+ {"model":"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo","modelName":"Llama 3.1","modelVariant":"70B","modelStyleId":"llama-3","createdBy":"Meta","moderationLevel":"MEDIUM","isAvailable":0,"isOpenSource":0,"inputCharLimit":16e3,"settingId":"5"},
+ {"model":"mistralai/Mixtral-8x7B-Instruct-v0.1","modelName":"Mixtral","modelVariant":"8x7B","modelStyleId":"mixtral","createdBy":"Mistral AI","moderationLevel":"LOW","isAvailable":0,"isOpenSource":0,"inputCharLimit":16e3,"settingId":"6"}
+]
+
+class Conversation(BaseConversation):
+ vqd: str = None
+ message_history: Messages = []
+
+ def __init__(self, model: str):
+ self.model = model
class DDG(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://duckduckgo.com"
api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
working = True
- supports_gpt_4 = True
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = "gpt-4o-mini"
- models = [
- "gpt-4o-mini",
- "claude-3-haiku-20240307",
- "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
- "mistralai/Mixtral-8x7B-Instruct-v0.1"
- ]
+ models = [model.get("model") for model in MODELS]
model_aliases = {
"claude-3-haiku": "claude-3-haiku-20240307",
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
- "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1"
+ "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ "gpt-4": "gpt-4o-mini"
}
@classmethod
- def get_model(cls, model: str) -> str:
- return cls.model_aliases.get(model, model) if model in cls.model_aliases else cls.default_model
-
- @classmethod
- async def get_vqd(cls):
+ async def get_vqd(cls, proxy: str, connector: BaseConnector = None):
status_url = "https://duckduckgo.com/duckchat/v1/status"
-
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
'Accept': 'text/event-stream',
'x-vqd-accept': '1'
}
-
- async with aiohttp.ClientSession() as session:
- try:
- async with session.get(status_url, headers=headers) as response:
- if response.status == 200:
- return response.headers.get("x-vqd-4")
- else:
- print(f"Error: Status code {response.status}")
- return None
- except Exception as e:
- print(f"Error getting VQD: {e}")
- return None
+ async with aiohttp.ClientSession(connector=get_connector(connector, proxy)) as session:
+ async with session.get(status_url, headers=headers) as response:
+ await raise_for_status(response)
+ return response.headers.get("x-vqd-4")
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
- conversation: dict = None,
+ conversation: Conversation = None,
+ return_conversation: bool = False,
proxy: str = None,
+ connector: BaseConnector = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
+ is_new_conversation = False
+ if conversation is None:
+ conversation = Conversation(model)
+ is_new_conversation = True
+ debug.last_model = model
+ if conversation.vqd is None:
+ conversation.vqd = await cls.get_vqd(proxy, connector)
+ if not conversation.vqd:
+ raise Exception("Failed to obtain VQD token")
+
headers = {
'accept': 'text/event-stream',
'content-type': 'application/json',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
+ 'x-vqd-4': conversation.vqd,
}
-
- vqd = conversation.get('vqd') if conversation else await cls.get_vqd()
- if not vqd:
- raise Exception("Failed to obtain VQD token")
-
- headers['x-vqd-4'] = vqd
-
- if conversation:
- message_history = conversation.get('messages', [])
- message_history.append({"role": "user", "content": format_prompt(messages)})
- else:
- message_history = [{"role": "user", "content": format_prompt(messages)}]
-
- async with ClientSession(headers=headers) as session:
+ async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
+ if is_new_conversation:
+ conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
+ else:
+ conversation.message_history = [
+ *conversation.message_history,
+ messages[-2],
+ messages[-1]
+ ]
+ if return_conversation:
+ yield conversation
data = {
- "model": model,
- "messages": message_history
+ "model": conversation.model,
+ "messages": conversation.message_history
}
-
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
+ async with session.post(cls.api_endpoint, json=data) as response:
+ conversation.vqd = response.headers.get("x-vqd-4")
+ await raise_for_status(response)
async for line in response.content:
if line:
decoded_line = line.decode('utf-8')
@@ -106,4 +117,4 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
if 'message' in json_data:
yield json_data['message']
except json.JSONDecodeError:
- pass
+ pass \ No newline at end of file
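The new Conversation object carries the rotating x-vqd-4 token and the message history across calls; request it with return_conversation=True and pass it back to continue the chat. A minimal sketch:

import asyncio
from g4f.Provider import DDG

async def main():
    conversation = None
    async for item in DDG.create_async_generator(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Hello"}],
        return_conversation=True,
    ):
        if isinstance(item, str):
            print(item, end="")
        else:
            conversation = item  # holds the vqd token and history for the next call

asyncio.run(main())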
diff --git a/g4f/Provider/DarkAI.py b/g4f/Provider/DarkAI.py
new file mode 100644
index 00000000..06e2bd55
--- /dev/null
+++ b/g4f/Provider/DarkAI.py
@@ -0,0 +1,83 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://darkai.foundation/chat"
+ api_endpoint = "https://darkai.foundation/chat"
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'llama-3-405b'
+ models = [
+ 'gpt-4o', # Uncensored
+ 'gpt-3.5-turbo', # Uncensored
+ 'llama-3-70b', # Uncensored
+ default_model,
+ ]
+
+ model_aliases = {
+ "llama-3.1-70b": "llama-3-70b",
+ "llama-3.1-405b": "llama-3-405b",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "accept": "text/event-stream",
+ "content-type": "application/json",
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "query": prompt,
+ "model": model,
+ }
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ full_text = ""
+ async for chunk in response.content:
+ if chunk:
+ try:
+ chunk_str = chunk.decode().strip()
+ if chunk_str.startswith('data: '):
+ chunk_data = json.loads(chunk_str[6:])
+ if chunk_data['event'] == 'text-chunk':
+ full_text += chunk_data['data']['text']
+ elif chunk_data['event'] == 'stream-end':
+ if full_text:
+ yield full_text.strip()
+ return
+ except json.JSONDecodeError:
+ pass
+ except Exception:
+ pass
+
+ if full_text:
+ yield full_text.strip()
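Note that although the endpoint is consumed as an SSE stream, the generator buffers text-chunk events and yields the full text once at stream-end. A quick sketch:

import asyncio
from g4f.Provider import DarkAI

async def main():
    async for text in DarkAI.create_async_generator(
        model="llama-3-405b",
        messages=[{"role": "user", "content": "One-line joke, please"}],
    ):
        print(text)

asyncio.run(main())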
diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py
index b8cc6ab8..d8cb072a 100644
--- a/g4f/Provider/DeepInfraChat.py
+++ b/g4f/Provider/DeepInfraChat.py
@@ -4,10 +4,7 @@ from aiohttp import ClientSession
import json
from ..typing import AsyncResult, Messages, ImageType
-from ..image import to_data_uri
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://deepinfra.com/chat"
@@ -17,42 +14,18 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_message_history = True
- default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct'
+ default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
models = [
- 'meta-llama/Meta-Llama-3.1-405B-Instruct',
- 'meta-llama/Meta-Llama-3.1-70B-Instruct',
'meta-llama/Meta-Llama-3.1-8B-Instruct',
- 'mistralai/Mixtral-8x22B-Instruct-v0.1',
- 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+ default_model,
'microsoft/WizardLM-2-8x22B',
- 'microsoft/WizardLM-2-7B',
- 'Qwen/Qwen2-72B-Instruct',
- 'microsoft/Phi-3-medium-4k-instruct',
- 'google/gemma-2-27b-it',
- 'openbmb/MiniCPM-Llama3-V-2_5', # Image upload is available
- 'mistralai/Mistral-7B-Instruct-v0.3',
- 'lizpreciatior/lzlv_70b_fp16_hf',
- 'openchat/openchat-3.6-8b',
- 'Phind/Phind-CodeLlama-34B-v2',
- 'cognitivecomputations/dolphin-2.9.1-llama-3-70b',
+ 'Qwen/Qwen2.5-72B-Instruct',
]
model_aliases = {
- "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct",
- "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
- "llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
- "mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1",
- "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+ "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
"wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
- "wizardlm-2-7b": "microsoft/WizardLM-2-7B",
- "qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
- "phi-3-medium-4k": "microsoft/Phi-3-medium-4k-instruct",
- "gemma-2b-27b": "google/gemma-2-27b-it",
- "minicpm-llama-3-v2.5": "openbmb/MiniCPM-Llama3-V-2_5", # Image upload is available
- "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
- "lzlv-70b": "lizpreciatior/lzlv_70b_fp16_hf",
- "openchat-3.6-8b": "openchat/openchat-3.6-8b",
- "phind-codellama-34b-v2": "Phind/Phind-CodeLlama-34B-v2",
- "dolphin-2.9.1-llama-3-70b": "cognitivecomputations/dolphin-2.9.1-llama-3-70b",
+ "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
}
@@ -97,30 +70,12 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
}
async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
data = {
'model': model,
- 'messages': [
- {'role': 'system', 'content': 'Be a helpful assistant'},
- {'role': 'user', 'content': prompt}
- ],
+ 'messages': messages,
'stream': True
}
- if model == 'openbmb/MiniCPM-Llama3-V-2_5' and image is not None:
- data['messages'][-1]['content'] = [
- {
- 'type': 'image_url',
- 'image_url': {
- 'url': to_data_uri(image)
- }
- },
- {
- 'type': 'text',
- 'text': messages[-1]['content']
- }
- ]
-
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
diff --git a/g4f/Provider/Free2GPT.py b/g4f/Provider/Free2GPT.py
index a79bd1da..6ba9ac0f 100644
--- a/g4f/Provider/Free2GPT.py
+++ b/g4f/Provider/Free2GPT.py
@@ -16,7 +16,7 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chat10.free2gpt.xyz"
working = True
supports_message_history = True
- default_model = 'llama-3.1-70b'
+ default_model = 'mistral-7b'
@classmethod
async def create_async_generator(
@@ -49,12 +49,8 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
connector=get_connector(connector, proxy), headers=headers
) as session:
timestamp = int(time.time() * 1e3)
- system_message = {
- "role": "system",
- "content": ""
- }
data = {
- "messages": [system_message] + messages,
+ "messages": messages,
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, messages[-1]["content"]),
diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py
deleted file mode 100644
index a9dc0f56..00000000
--- a/g4f/Provider/FreeChatgpt.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from __future__ import annotations
-import json
-from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://chat.chatgpt.org.uk"
- api_endpoint = "/api/openai/v1/chat/completions"
- working = True
- default_model = '@cf/qwen/qwen1.5-14b-chat-awq'
- models = [
- '@cf/qwen/qwen1.5-14b-chat-awq',
- 'SparkDesk-v1.1',
- 'Qwen2-7B-Instruct',
- 'glm4-9B-chat',
- 'chatglm3-6B',
- 'Yi-1.5-9B-Chat',
- ]
-
- model_aliases = {
- "qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
- "sparkdesk-v1.1": "SparkDesk-v1.1",
- "qwen-2-7b": "Qwen2-7B-Instruct",
- "glm-4-9b": "glm4-9B-chat",
- "glm-3-6b": "chatglm3-6B",
- "yi-1.5-9b": "Yi-1.5-9B-Chat",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model.lower() in cls.model_aliases:
- return cls.model_aliases[model.lower()]
- else:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "accept": "application/json, text/event-stream",
- "accept-language": "en-US,en;q=0.9",
- "content-type": "application/json",
- "dnt": "1",
- "origin": cls.url,
- "referer": f"{cls.url}/",
- "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
- }
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "messages": [
- {"role": "system", "content": "\nYou are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent model: gpt-3.5-turbo\nCurrent time: Thu Jul 04 2024 21:35:59 GMT+0300 (Eastern European Summer Time)\nLatex inline: \\(x^2\\) \nLatex block: $$e=mc^2$$\n\n"},
- {"role": "user", "content": prompt}
- ],
- "stream": True,
- "model": model,
- "temperature": 0.5,
- "presence_penalty": 0,
- "frequency_penalty": 0,
- "top_p": 1
- }
- async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
- response.raise_for_status()
- accumulated_text = ""
- async for line in response.content:
- if line:
- line_str = line.decode().strip()
- if line_str == "data: [DONE]":
- yield accumulated_text
- break
- elif line_str.startswith("data: "):
- try:
- chunk = json.loads(line_str[6:])
- delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
- accumulated_text += delta_content
- yield delta_content # Yield each chunk of content
- except json.JSONDecodeError:
- pass
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 82a3824b..b38ff428 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -24,7 +24,7 @@ class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin):
working = True
supports_message_history = True
supports_system_message = True
- default_model = 'llama-3.1-70b'
+ default_model = 'gemini-pro'
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/GizAI.py b/g4f/Provider/GizAI.py
new file mode 100644
index 00000000..f00b344e
--- /dev/null
+++ b/g4f/Provider/GizAI.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://app.giz.ai/assistant"
+ api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
+ working = True
+ supports_stream = False
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'chat-gemini-flash'
+ models = [default_model]
+
+ model_aliases = {"gemini-flash": "chat-gemini-flash",}
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Accept': 'application/json, text/plain, */*',
+ 'Accept-Language': 'en-US,en;q=0.9',
+ 'Cache-Control': 'no-cache',
+ 'Connection': 'keep-alive',
+ 'Content-Type': 'application/json',
+ 'DNT': '1',
+ 'Origin': 'https://app.giz.ai',
+ 'Pragma': 'no-cache',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"'
+ }
+
+ prompt = format_prompt(messages)
+
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "model": model,
+ "input": {
+ "messages": [{"type": "human", "content": prompt}],
+ "mode": "plan"
+ },
+ "noStream": True
+ }
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ if response.status == 201:
+ result = await response.json()
+ yield result['output'].strip()
+ else:
+ raise Exception(f"Unexpected response status: {response.status}")
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index e6f70bed..f8e6a8dd 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -1,9 +1,16 @@
from __future__ import annotations
-import json, requests, re
+import json
+import requests
-from curl_cffi import requests as cf_reqs
+try:
+ from curl_cffi.requests import Session
+ has_curl_cffi = True
+except ImportError:
+ has_curl_cffi = False
from ..typing import CreateResult, Messages
+from ..errors import MissingRequirementsError
+from ..requests.raise_for_status import raise_for_status
from .base_provider import ProviderModelMixin, AbstractProvider
from .helper import format_prompt
@@ -12,35 +19,32 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
working = True
supports_stream = True
default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
-
+
models = [
'meta-llama/Meta-Llama-3.1-70B-Instruct',
'CohereForAI/c4ai-command-r-plus-08-2024',
'Qwen/Qwen2.5-72B-Instruct',
+ 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
+ 'Qwen/Qwen2.5-Coder-32B-Instruct',
+ 'meta-llama/Llama-3.2-11B-Vision-Instruct',
'NousResearch/Hermes-3-Llama-3.1-8B',
'mistralai/Mistral-Nemo-Instruct-2407',
'microsoft/Phi-3.5-mini-instruct',
]
-
+
model_aliases = {
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
"command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
"qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
+ "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+ "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct",
+ "llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct",
"hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B",
"mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407",
"phi-3.5-mini": "microsoft/Phi-3.5-mini-instruct",
}
@classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
def create_completion(
cls,
model: str,
@@ -48,10 +52,12 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
stream: bool,
**kwargs
) -> CreateResult:
+ if not has_curl_cffi:
+ raise MissingRequirementsError('Install "curl_cffi" package | pip install -U curl_cffi')
model = cls.get_model(model)
if model in cls.models:
- session = cf_reqs.Session()
+ session = Session()
session.headers = {
'accept': '*/*',
'accept-language': 'en',
@@ -68,21 +74,41 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
}
-
- print(model)
json_data = {
'model': model,
}
-
response = session.post('https://huggingface.co/chat/conversation', json=json_data)
- conversationId = response.json()['conversationId']
+ raise_for_status(response)
+
+ conversationId = response.json().get('conversationId')
+
+ # Get the data response and parse it properly
+ response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11')
+ raise_for_status(response)
- response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11',)
+ # Split the response content by newlines and parse each line as JSON
+ try:
+ json_data = None
+ for line in response.text.split('\n'):
+ if line.strip():
+ try:
+ parsed = json.loads(line)
+ if isinstance(parsed, dict) and "nodes" in parsed:
+ json_data = parsed
+ break
+ except json.JSONDecodeError:
+ continue
+
+ if not json_data:
+ raise RuntimeError("Failed to parse response data")
- data: list = (response.json())["nodes"][1]["data"]
- keys: list[int] = data[data[0]["messages"]]
- message_keys: dict = data[keys[0]]
- messageId: str = data[message_keys["id"]]
+ data: list = json_data["nodes"][1]["data"]
+ keys: list[int] = data[data[0]["messages"]]
+ message_keys: dict = data[keys[0]]
+ messageId: str = data[message_keys["id"]]
+
+ except (KeyError, IndexError, TypeError) as e:
+ raise RuntimeError(f"Failed to extract message ID: {str(e)}")
settings = {
"inputs": format_prompt(messages),
@@ -114,28 +140,37 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
'data': (None, json.dumps(settings, separators=(',', ':'))),
}
- response = requests.post(f'https://huggingface.co/chat/conversation/{conversationId}',
+ response = requests.post(
+ f'https://huggingface.co/chat/conversation/{conversationId}',
cookies=session.cookies,
headers=headers,
files=files,
)
+ raise_for_status(response)
- first_token = True
+ full_response = ""
for line in response.iter_lines():
- line = json.loads(line)
+ if not line:
+ continue
+ try:
+ line = json.loads(line)
+ except json.JSONDecodeError as e:
+ print(f"Failed to decode JSON: {line}, error: {e}")
+ continue
if "type" not in line:
raise RuntimeError(f"Response: {line}")
elif line["type"] == "stream":
- token = line["token"]
- if first_token:
- token = token.lstrip().replace('\u0000', '')
- first_token = False
- else:
- token = token.replace('\u0000', '')
-
- yield token
+ token = line["token"].replace('\u0000', '')
+ full_response += token
+ if stream:
+ yield token
elif line["type"] == "finalAnswer":
break
+
+ full_response = full_response.replace('<|im_end|', '').replace('\u0000', '').strip()
+
+ if not stream:
+ yield full_response \ No newline at end of file
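After this rework, stream=True yields tokens as they arrive while stream=False buffers everything and yields one cleaned-up string. A sketch of both modes, assuming the inherited ProviderModelMixin.get_model resolves the alias:

from g4f.Provider import HuggingChat

messages = [{"role": "user", "content": "Hello"}]

# streaming: tokens as they arrive
for token in HuggingChat.create_completion("llama-3.1-70b", messages, True):
    print(token, end="")

# non-streaming: a single cleaned-up response
for text in HuggingChat.create_completion("llama-3.1-70b", messages, False):
    print(text)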
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index b292020e..fc50bdee 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -54,6 +54,15 @@ models = {
"tokenLimit": 126000,
"context": "128K",
},
+ "grok-beta": {
+ "id": "grok-beta",
+ "name": "Grok-Beta",
+ "model": "Grok",
+ "provider": "x.ai",
+ "maxLength": 400000,
+ "tokenLimit": 100000,
+ "context": "100K",
+ },
"grok-2": {
"id": "grok-2",
"name": "Grok-2",
@@ -90,18 +99,18 @@ models = {
"tokenLimit": 200000,
"context": "200K",
},
- "claude-3-opus-20240229-gcp": {
- "id": "claude-3-opus-20240229-gcp",
- "name": "Claude-3-Opus-Gcp",
+ "claude-3-5-sonnet-20240620": {
+ "id": "claude-3-5-sonnet-20240620",
+ "name": "Claude-3.5-Sonnet",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
- "claude-3-5-sonnet-20240620": {
- "id": "claude-3-5-sonnet-20240620",
- "name": "Claude-3.5-Sonnet",
+ "claude-3-5-sonnet-20241022": {
+ "id": "claude-3-5-sonnet-20241022",
+ "name": "Claude-3.5-Sonnet-V2",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
@@ -161,8 +170,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
working = True
supports_message_history = True
supports_system_message = True
- supports_gpt_4 = True
- default_model = "gpt-3.5-turbo"
+ default_model = "gpt-4o-2024-08-06"
models = list(models.keys())
model_aliases = {
@@ -171,13 +179,13 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
"gpt-4o": "gpt-4o-2024-08-06",
"gpt-4-turbo": "gpt-4-turbo-2024-04-09",
- "gpt-4": "gpt-4-0613",
+ "gpt-4": "gpt-4o-mini-free",
"claude-3-opus": "claude-3-opus-20240229",
"claude-3-opus": "claude-3-opus-20240229-aws",
- "claude-3-opus": "claude-3-opus-20240229-gcp",
"claude-3-sonnet": "claude-3-sonnet-20240229",
"claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
+ "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
"claude-3-haiku": "claude-3-haiku-20240307",
"claude-2.1": "claude-2.1",
diff --git a/g4f/Provider/LiteIcoding.py b/g4f/Provider/LiteIcoding.py
deleted file mode 100644
index 1b568e80..00000000
--- a/g4f/Provider/LiteIcoding.py
+++ /dev/null
@@ -1,130 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession, ClientResponseError
-import re
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://lite.icoding.ink"
- api_endpoint = "/api/v1/gpt/message"
- working = True
- supports_gpt_4 = True
- default_model = "gpt-4o"
- models = [
- 'gpt-4o',
- 'gpt-4-turbo',
- 'claude-3',
- 'claude-3.5',
- 'gemini-1.5',
- ]
-
- model_aliases = {
- "gpt-4o-mini": "gpt-4o",
- "gemini-pro": "gemini-1.5",
- }
-
- bearer_tokens = [
- "aa3020ee873e40cb8b3f515a0708ebc4",
- "5d69cd271b144226ac1199b3c849a566",
- "62977f48a95844f8853a953679401850",
- "d815b091959e42dd8b7871dfaf879485"
- ]
- current_token_index = 0
-
- @classmethod
- def get_next_bearer_token(cls):
- token = cls.bearer_tokens[cls.current_token_index]
- cls.current_token_index = (cls.current_token_index + 1) % len(cls.bearer_tokens)
- return token
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- bearer_token = cls.get_next_bearer_token()
- headers = {
- "Accept": "*/*",
- "Accept-Language": "en-US,en;q=0.9",
- "Authorization": f"Bearer {bearer_token}",
- "Connection": "keep-alive",
- "Content-Type": "application/json;charset=utf-8",
- "DNT": "1",
- "Origin": cls.url,
- "Referer": f"{cls.url}/",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "User-Agent": (
- "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
- "Chrome/126.0.0.0 Safari/537.36"
- ),
- "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- }
-
- data = {
- "model": model,
- "chatId": "-1",
- "messages": [
- {
- "role": msg["role"],
- "content": msg["content"],
- "time": msg.get("time", ""),
- "attachments": msg.get("attachments", []),
- }
- for msg in messages
- ],
- "plugins": [],
- "systemPrompt": "",
- "temperature": 0.5,
- }
-
- async with ClientSession(headers=headers) as session:
- try:
- async with session.post(
- f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy
- ) as response:
- response.raise_for_status()
- buffer = ""
- full_response = ""
- def decode_content(data):
- bytes_array = bytes([int(b, 16) ^ 255 for b in data.split()])
- return bytes_array.decode('utf-8')
- async for chunk in response.content.iter_any():
- if chunk:
- buffer += chunk.decode()
- while "\n\n" in buffer:
- part, buffer = buffer.split("\n\n", 1)
- if part.startswith("data: "):
- content = part[6:].strip()
- if content and content != "[DONE]":
- content = content.strip('"')
- decoded_content = decode_content(content)
- full_response += decoded_content
- full_response = (
- full_response.replace('""', '')
- .replace('" "', ' ')
- .replace("\\n\\n", "\n\n")
- .replace("\\n", "\n")
- .replace('\\"', '"')
- .strip()
- )
- filtered_response = re.sub(r'\n---\n.*', '', full_response, flags=re.DOTALL)
- cleaned_response = filtered_response.strip().strip('"')
- yield cleaned_response
-
- except ClientResponseError as e:
- raise RuntimeError(
- f"ClientResponseError {e.status}: {e.message}, url={e.request_info.url}, data={data}"
- ) from e
-
- except Exception as e:
- raise RuntimeError(f"Unexpected error: {str(e)}") from e
diff --git a/g4f/Provider/MagickPen.py b/g4f/Provider/MagickPen.py
index b6a47417..7f1751dd 100644
--- a/g4f/Provider/MagickPen.py
+++ b/g4f/Provider/MagickPen.py
@@ -14,7 +14,6 @@ class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://magickpen.com"
api_endpoint = "https://api.magickpen.com/ask"
working = True
- supports_gpt_4 = True
supports_stream = True
supports_system_message = True
supports_message_history = True
@@ -24,21 +23,18 @@ class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
@classmethod
async def fetch_api_credentials(cls) -> tuple:
- url = "https://magickpen.com/_nuxt/9e47cd7579e60a9d1f13.js"
+ url = "https://magickpen.com/_nuxt/bf709a9ce19f14e18116.js"
async with ClientSession() as session:
async with session.get(url) as response:
text = await response.text()
- # Extract the necessary values from the file
pattern = r'"X-API-Secret":"(\w+)"'
match = re.search(pattern, text)
X_API_SECRET = match.group(1) if match else None
- # Generate timestamp and nonce
- timestamp = str(int(time.time() * 1000)) # in milliseconds
+ timestamp = str(int(time.time() * 1000))
nonce = str(random.random())
- # Generate the signature
s = ["TGDBU9zCgM", timestamp, nonce]
s.sort()
signature_string = ''.join(s)
diff --git a/g4f/Provider/Mhystical.py b/g4f/Provider/Mhystical.py
new file mode 100644
index 00000000..2aa98ebc
--- /dev/null
+++ b/g4f/Provider/Mhystical.py
@@ -0,0 +1,90 @@
+from __future__ import annotations
+
+import json
+import logging
+from aiohttp import ClientSession
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+"""
+ Mhystical.cc
+ ~~~~~~~~~~~~
+ Author: NoelP.dev
+ Last Updated: 2024-05-11
+
+ Author Site: https://noelp.dev
+ Provider Site: https://mhystical.cc
+
+"""
+
+logger = logging.getLogger(__name__)
+
+class Mhystical(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://api.mhystical.cc"
+ api_endpoint = "https://api.mhystical.cc/v1/completions"
+ working = True
+ supports_stream = False # the API returns one complete JSON response; streaming is not supported
+ supports_system_message = False
+ supports_message_history = True
+
+ default_model = 'gpt-4'
+ models = [default_model]
+ model_aliases = {}
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases.get(model, cls.default_model)
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "x-api-key": "mhystical",
+ "Content-Type": "application/json",
+ "accept": "*/*",
+ "cache-control": "no-cache",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
+ }
+
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "model": model,
+ "messages": [{"role": "user", "content": format_prompt(messages)}]
+ }
+ async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
+ if response.status == 400:
+ yield "Error: API key is missing"
+ elif response.status == 429:
+ yield "Error: Rate limit exceeded"
+ elif response.status == 500:
+ yield "Error: Internal server error"
+ else:
+ response.raise_for_status()
+ response_text = await response.text()
+ filtered_response = cls.filter_response(response_text)
+ yield filtered_response
+
+ @staticmethod
+ def filter_response(response_text: str) -> str:
+ try:
+ json_response = json.loads(response_text)
+ message_content = json_response["choices"][0]["message"]["content"]
+ return message_content
+ except (KeyError, IndexError, json.JSONDecodeError) as e:
+ logger.error("Error parsing response: %s", e)
+ return "Error: Failed to parse response from API."
diff --git a/g4f/Provider/Nexra.py b/g4f/Provider/Nexra.py
deleted file mode 100644
index 33e794f6..00000000
--- a/g4f/Provider/Nexra.py
+++ /dev/null
@@ -1,118 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-from .nexra.NexraBing import NexraBing
-from .nexra.NexraChatGPT import NexraChatGPT
-from .nexra.NexraChatGPT4o import NexraChatGPT4o
-from .nexra.NexraChatGPTWeb import NexraChatGPTWeb
-from .nexra.NexraGeminiPro import NexraGeminiPro
-from .nexra.NexraImageURL import NexraImageURL
-from .nexra.NexraLlama import NexraLlama
-from .nexra.NexraQwen import NexraQwen
-
-class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://nexra.aryahcr.cc"
- working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
- default_model = 'gpt-3.5-turbo'
- image_model = 'sdxl-turbo'
-
- models = (
- *NexraBing.models,
- *NexraChatGPT.models,
- *NexraChatGPT4o.models,
- *NexraChatGPTWeb.models,
- *NexraGeminiPro.models,
- *NexraImageURL.models,
- *NexraLlama.models,
- *NexraQwen.models,
- )
-
- model_to_provider = {
- **{model: NexraChatGPT for model in NexraChatGPT.models},
- **{model: NexraChatGPT4o for model in NexraChatGPT4o.models},
- **{model: NexraChatGPTWeb for model in NexraChatGPTWeb.models},
- **{model: NexraGeminiPro for model in NexraGeminiPro.models},
- **{model: NexraImageURL for model in NexraImageURL.models},
- **{model: NexraLlama for model in NexraLlama.models},
- **{model: NexraQwen for model in NexraQwen.models},
- **{model: NexraBing for model in NexraBing.models},
- }
-
- model_aliases = {
- "gpt-4": "gpt-4-0613",
- "gpt-4": "gpt-4-32k",
- "gpt-4": "gpt-4-0314",
- "gpt-4": "gpt-4-32k-0314",
-
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
- "gpt-3.5-turbo": "gpt-3.5-turbo-0301",
-
- "gpt-3": "text-davinci-003",
- "gpt-3": "text-davinci-002",
- "gpt-3": "code-davinci-002",
- "gpt-3": "text-curie-001",
- "gpt-3": "text-babbage-001",
- "gpt-3": "text-ada-001",
- "gpt-3": "text-ada-001",
- "gpt-3": "davinci",
- "gpt-3": "curie",
- "gpt-3": "babbage",
- "gpt-3": "ada",
- "gpt-3": "babbage-002",
- "gpt-3": "davinci-002",
-
- "gpt-4": "gptweb",
-
- "gpt-4": "Bing (Balanced)",
- "gpt-4": "Bing (Creative)",
- "gpt-4": "Bing (Precise)",
-
- "dalle-2": "dalle2",
- "sdxl": "sdxl-turbo",
- }
-
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- def get_api_endpoint(cls, model: str) -> str:
- provider_class = cls.model_to_provider.get(model)
-
- if provider_class:
- return provider_class.api_endpoint
- raise ValueError(f"API endpoint for model {model} not found.")
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
- api_endpoint = cls.get_api_endpoint(model)
-
- provider_class = cls.model_to_provider.get(model)
-
- if provider_class:
- async for response in provider_class.create_async_generator(model, messages, proxy, **kwargs):
- yield response
- else:
- raise ValueError(f"Provider for model {model} not found.")
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index b776e96a..b3119cb6 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -21,6 +21,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
"llama-3.1-sonar-small-128k-chat",
"llama-3.1-8b-instruct",
"llama-3.1-70b-instruct",
+ "/models/LiquidCloud",
]
model_aliases = {
@@ -30,6 +31,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
"sonar-chat": "llama-3.1-sonar-small-128k-chat",
"llama-3.1-8b": "llama-3.1-8b-instruct",
"llama-3.1-70b": "llama-3.1-70b-instruct",
+ "lfm-40b": "/models/LiquidCloud",
}
@classmethod
diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py
index 266647ba..6aabe7b1 100644
--- a/g4f/Provider/Pi.py
+++ b/g4f/Provider/Pi.py
@@ -2,19 +2,21 @@ from __future__ import annotations
import json
-from ..typing import CreateResult, Messages
-from .base_provider import AbstractProvider, format_prompt
-from ..requests import Session, get_session_from_browser, raise_for_status
+from ..typing import AsyncResult, Messages, Cookies
+from .base_provider import AsyncGeneratorProvider, format_prompt
+from ..requests import StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
-class Pi(AbstractProvider):
+class Pi(AsyncGeneratorProvider):
url = "https://pi.ai/talk"
working = True
supports_stream = True
- _session = None
default_model = "pi"
+ models = [default_model]
+ _headers: dict = None
+ _cookies: Cookies = {}
@classmethod
- def create_completion(
+ async def create_async_generator(
cls,
model: str,
messages: Messages,
@@ -22,49 +24,52 @@ class Pi(AbstractProvider):
proxy: str = None,
timeout: int = 180,
conversation_id: str = None,
- webdriver: WebDriver = None,
**kwargs
- ) -> CreateResult:
- if cls._session is None:
- cls._session = get_session_from_browser(url=cls.url, proxy=proxy, timeout=timeout)
- if not conversation_id:
- conversation_id = cls.start_conversation(cls._session)
- prompt = format_prompt(messages)
- else:
- prompt = messages[-1]["content"]
- answer = cls.ask(cls._session, prompt, conversation_id)
- for line in answer:
- if "text" in line:
- yield line["text"]
-
+ ) -> AsyncResult:
+ if cls._headers is None:
+ args = await get_args_from_nodriver(cls.url, proxy=proxy, timeout=timeout)
+ cls._cookies = args.get("cookies", {})
+ cls._headers = args.get("headers")
+ async with StreamSession(headers=cls._headers, cookies=cls._cookies, proxy=proxy) as session:
+ if not conversation_id:
+ conversation_id = await cls.start_conversation(session)
+ prompt = format_prompt(messages)
+ else:
+ prompt = messages[-1]["content"]
+ answer = cls.ask(session, prompt, conversation_id)
+ async for line in answer:
+ if "text" in line:
+ yield line["text"]
+
@classmethod
- def start_conversation(cls, session: Session) -> str:
- response = session.post('https://pi.ai/api/chat/start', data="{}", headers={
+ async def start_conversation(cls, session: StreamSession) -> str:
+ async with session.post('https://pi.ai/api/chat/start', data="{}", headers={
'accept': 'application/json',
'x-api-version': '3'
- })
- raise_for_status(response)
- return response.json()['conversations'][0]['sid']
+ }) as response:
+ await raise_for_status(response)
+ return (await response.json())['conversations'][0]['sid']
- def get_chat_history(session: Session, conversation_id: str):
+ async def get_chat_history(session: StreamSession, conversation_id: str):
params = {
'conversation': conversation_id,
}
- response = session.get('https://pi.ai/api/chat/history', params=params)
- raise_for_status(response)
- return response.json()
+ async with session.get('https://pi.ai/api/chat/history', params=params) as response:
+ await raise_for_status(response)
+ return await response.json()
- def ask(session: Session, prompt: str, conversation_id: str):
+ @classmethod
+ async def ask(cls, session: StreamSession, prompt: str, conversation_id: str):
json_data = {
'text': prompt,
'conversation': conversation_id,
'mode': 'BASE',
}
- response = session.post('https://pi.ai/api/chat', json=json_data, stream=True)
- raise_for_status(response)
- for line in response.iter_lines():
- if line.startswith(b'data: {"text":'):
- yield json.loads(line.split(b'data: ')[1])
- elif line.startswith(b'data: {"title":'):
- yield json.loads(line.split(b'data: ')[1])
-
+ async with session.post('https://pi.ai/api/chat', json=json_data) as response:
+ await raise_for_status(response)
+ cls._cookies = merge_cookies(cls._cookies, response)
+ async for line in response.iter_lines():
+ if line.startswith(b'data: {"text":'):
+ yield json.loads(line.split(b'data: ')[1])
+ elif line.startswith(b'data: {"title":'):
+ yield json.loads(line.split(b'data: ')[1])
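On first use the provider bootstraps headers and cookies through nodriver, starts a conversation, and then streams text events; later calls can pass conversation_id to stay in the same thread. A sketch:

import asyncio
from g4f.Provider import Pi

async def main():
    async for text in Pi.create_async_generator(
        model="pi",
        messages=[{"role": "user", "content": "Hi Pi!"}],
        stream=True,
    ):
        print(text, end="")

asyncio.run(main())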
diff --git a/g4f/Provider/Pizzagpt.py b/g4f/Provider/Pizzagpt.py
index 47cb135c..6513bd34 100644
--- a/g4f/Provider/Pizzagpt.py
+++ b/g4f/Provider/Pizzagpt.py
@@ -12,7 +12,6 @@ class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.pizzagpt.it"
api_endpoint = "/api/chatx-completion"
working = True
- supports_gpt_4 = True
default_model = 'gpt-4o-mini'
@classmethod
diff --git a/g4f/Provider/Prodia.py b/g4f/Provider/Prodia.py
index dd87a34c..fcebf7e3 100644
--- a/g4f/Provider/Prodia.py
+++ b/g4f/Provider/Prodia.py
@@ -14,10 +14,10 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
working = True
default_model = 'absolutereality_v181.safetensors [3d9d4d2b]'
- models = [
+ image_models = [
'3Guofeng3_v34.safetensors [50f420de]',
'absolutereality_V16.safetensors [37db0fc3]',
- 'absolutereality_v181.safetensors [3d9d4d2b]',
+ default_model,
'amIReal_V41.safetensors [0a8a2e61]',
'analog-diffusion-1.0.ckpt [9ca13f02]',
'aniverse_v30.safetensors [579e6f85]',
@@ -81,6 +81,7 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
'timeless-1.0.ckpt [7c4971d4]',
'toonyou_beta6.safetensors [980f6b15]',
]
+ models = [*image_models]
@classmethod
def get_model(cls, model: str) -> str:
@@ -97,6 +98,12 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
+ negative_prompt: str = "",
+ steps: int = 20, # 1-25
+ cfg: int = 7, # 0-20
+ seed: str = "-1",
+ sampler: str = "DPM++ 2M Karras", # "Euler", "Euler a", "Heun", "DPM++ 2M Karras", "DPM++ SDE Karras", "DDIM"
+ aspect_ratio: str = "square", # "square", "portrait", "landscape"
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
@@ -116,12 +123,12 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
"new": "true",
"prompt": prompt,
"model": model,
- "negative_prompt": kwargs.get("negative_prompt", ""),
- "steps": kwargs.get("steps", 20),
- "cfg": kwargs.get("cfg", 7),
- "seed": kwargs.get("seed", int(time.time())),
- "sampler": kwargs.get("sampler", "DPM++ 2M Karras"),
- "aspect_ratio": kwargs.get("aspect_ratio", "square")
+ "negative_prompt": negative_prompt,
+ "steps": steps,
+ "cfg": cfg,
+ "seed": seed,
+ "sampler": sampler,
+ "aspect_ratio": aspect_ratio
}
async with session.get(cls.api_endpoint, params=params, proxy=proxy) as response:
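Promoting these knobs to named parameters makes them discoverable in the signature instead of hiding in **kwargs. A sketch of an image request with explicit settings (the generator presumably yields the finished image once polling completes):

import asyncio
from g4f.Provider import Prodia

async def main():
    async for image in Prodia.create_async_generator(
        model=Prodia.default_model,
        messages=[{"role": "user", "content": "a watercolor lighthouse at dawn"}],
        negative_prompt="blurry, low quality",
        steps=25,  # 1-25
        cfg=7,  # 0-20
        sampler="DPM++ 2M Karras",
        aspect_ratio="portrait",
    ):
        print(image)

asyncio.run(main())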
diff --git a/g4f/Provider/ReplicateHome.py b/g4f/Provider/ReplicateHome.py
index 7f443a7d..a7fc9b54 100644
--- a/g4f/Provider/ReplicateHome.py
+++ b/g4f/Provider/ReplicateHome.py
@@ -17,7 +17,13 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_message_history = True
- default_model = 'meta/meta-llama-3-70b-instruct'
+ default_model = 'yorickvp/llava-13b'
+
+ image_models = [
+ 'stability-ai/stable-diffusion-3',
+ 'bytedance/sdxl-lightning-4step',
+ 'playgroundai/playground-v2.5-1024px-aesthetic',
+ ]
text_models = [
'meta/meta-llama-3-70b-instruct',
@@ -26,35 +32,31 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
'yorickvp/llava-13b',
]
- image_models = [
- 'black-forest-labs/flux-schnell',
- 'stability-ai/stable-diffusion-3',
- 'bytedance/sdxl-lightning-4step',
- 'playgroundai/playground-v2.5-1024px-aesthetic',
- ]
+
models = text_models + image_models
model_aliases = {
- "flux-schnell": "black-forest-labs/flux-schnell",
+ # image_models
"sd-3": "stability-ai/stable-diffusion-3",
"sdxl": "bytedance/sdxl-lightning-4step",
"playground-v2.5": "playgroundai/playground-v2.5-1024px-aesthetic",
- "llama-3-70b": "meta/meta-llama-3-70b-instruct",
- "mixtral-8x7b": "mistralai/mixtral-8x7b-instruct-v0.1",
+
+ # text_models
"gemma-2b": "google-deepmind/gemma-2b-it",
"llava-13b": "yorickvp/llava-13b",
}
model_versions = {
- "meta/meta-llama-3-70b-instruct": "fbfb20b472b2f3bdd101412a9f70a0ed4fc0ced78a77ff00970ee7a2383c575d",
- "mistralai/mixtral-8x7b-instruct-v0.1": "5d78bcd7a992c4b793465bcdcf551dc2ab9668d12bb7aa714557a21c1e77041c",
- "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626",
- "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb",
- 'black-forest-labs/flux-schnell': "f2ab8a5bfe79f02f0789a146cf5e73d2a4ff2684a98c2b303d1e1ff3814271db",
+ # image_models
'stability-ai/stable-diffusion-3': "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f",
'bytedance/sdxl-lightning-4step': "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f",
'playgroundai/playground-v2.5-1024px-aesthetic': "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24",
+
+ # text_models
+ "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626",
+ "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb",
+
}
@classmethod
diff --git a/g4f/Provider/RubiksAI.py b/g4f/Provider/RubiksAI.py
new file mode 100644
index 00000000..7e76d558
--- /dev/null
+++ b/g4f/Provider/RubiksAI.py
@@ -0,0 +1,162 @@
+from __future__ import annotations
+
+import asyncio
+import aiohttp
+import random
+import string
+import json
+from urllib.parse import urlencode
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Rubiks AI"
+ url = "https://rubiks.ai"
+ api_endpoint = "https://rubiks.ai/search/api.php"
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'llama-3.1-70b-versatile'
+ models = [default_model, 'gpt-4o-mini']
+
+ model_aliases = {
+ "llama-3.1-70b": "llama-3.1-70b-versatile",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @staticmethod
+ def generate_mid() -> str:
+ """
+ Generates a 'mid' string following the pattern:
+ 6 characters - 4 characters - 4 characters - 4 characters - 12 characters
+ Example: 0r7v7b-quw4-kdy3-rvdu-ekief6xbuuq4
+ """
+ parts = [
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)),
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=12))
+ ]
+ return '-'.join(parts)
+
+ @staticmethod
+ def create_referer(q: str, mid: str, model: str = '') -> str:
+ """
+ Creates a Referer URL with dynamic q and mid values, using urlencode for safe parameter encoding.
+ """
+ params = {'q': q, 'model': model, 'mid': mid}
+ encoded_params = urlencode(params)
+ return f'https://rubiks.ai/search/?{encoded_params}'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ websearch: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ """
+ Creates an asynchronous generator that sends requests to the Rubiks AI API and yields the response.
+
+ Parameters:
+ - model (str): The model to use in the request.
+ - messages (Messages): The messages to send as a prompt.
+ - proxy (str, optional): Proxy URL, if needed.
+ - websearch (bool, optional): Indicates whether to include search sources in the response. Defaults to False.
+ """
+ model = cls.get_model(model)
+ prompt = format_prompt(messages)
+ q_value = prompt
+ mid_value = cls.generate_mid()
+ referer = cls.create_referer(q=q_value, mid=mid_value, model=model)
+
+ url = cls.api_endpoint
+ params = {
+ 'q': q_value,
+ 'model': model,
+ 'id': '',
+ 'mid': mid_value
+ }
+
+ headers = {
+ 'Accept': 'text/event-stream',
+ 'Accept-Language': 'en-US,en;q=0.9',
+ 'Cache-Control': 'no-cache',
+ 'Connection': 'keep-alive',
+ 'Pragma': 'no-cache',
+ 'Referer': referer,
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
+ 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"'
+ }
+
+ try:
+ timeout = aiohttp.ClientTimeout(total=None)
+ async with ClientSession(timeout=timeout) as session:
+ async with session.get(url, headers=headers, params=params, proxy=proxy) as response:
+ if response.status != 200:
+ yield f"Request ended with status code {response.status}"
+ return
+
+ assistant_text = ''
+ sources = []
+
+ async for line in response.content:
+ decoded_line = line.decode('utf-8').strip()
+ if not decoded_line.startswith('data: '):
+ continue
+ data = decoded_line[6:]
+ if data in ('[DONE]', '{"done": ""}'):
+ break
+ try:
+ json_data = json.loads(data)
+ except json.JSONDecodeError:
+ continue
+
+ if 'url' in json_data and 'title' in json_data:
+ if websearch:
+ sources.append({'title': json_data['title'], 'url': json_data['url']})
+
+ elif 'choices' in json_data:
+ for choice in json_data['choices']:
+ delta = choice.get('delta', {})
+ content = delta.get('content', '')
+ role = delta.get('role', '')
+ if role == 'assistant':
+ continue
+ assistant_text += content
+
+ if websearch and sources:
+ sources_text = '\n'.join([f"{i+1}. [{s['title']}]: {s['url']}" for i, s in enumerate(sources)])
+ assistant_text += f"\n\n**Source:**\n{sources_text}"
+
+ yield assistant_text
+
+ except asyncio.CancelledError:
+ yield "The request was cancelled."
+ except aiohttp.ClientError as e:
+ yield f"An error occurred during the request: {e}"
+ except Exception as e:
+ yield f"An unexpected error occurred: {e}"
diff --git a/g4f/Provider/TeachAnything.py b/g4f/Provider/TeachAnything.py
index 3d34293f..97fe0272 100644
--- a/g4f/Provider/TeachAnything.py
+++ b/g4f/Provider/TeachAnything.py
@@ -14,6 +14,17 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
api_endpoint = "/api/generate"
working = True
default_model = "llama-3.1-70b"
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
@classmethod
async def create_async_generator(
@@ -24,6 +35,7 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
**kwargs: Any
) -> AsyncResult:
headers = cls._get_headers()
+ model = cls.get_model(model)
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
@@ -61,16 +73,18 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
return {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
"content-type": "application/json",
"dnt": "1",
"origin": "https://www.teach-anything.com",
+ "pragma": "no-cache",
"priority": "u=1, i",
"referer": "https://www.teach-anything.com/",
- "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
+ "sec-ch-us": '"Not?A_Brand";v="99", "Chromium";v="130"',
+ "sec-ch-us-mobile": "?0",
+ "sec-ch-us-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
}
diff --git a/g4f/Provider/Upstage.py b/g4f/Provider/Upstage.py
index 85d3a63e..81234ed9 100644
--- a/g4f/Provider/Upstage.py
+++ b/g4f/Provider/Upstage.py
@@ -19,8 +19,8 @@ class Upstage(AsyncGeneratorProvider, ProviderModelMixin):
'solar-pro',
]
model_aliases = {
- "solar-1-mini": "upstage/solar-1-mini-chat",
- "solar-1-mini": "upstage/solar-1-mini-chat-ja",
+ "solar-mini": "upstage/solar-1-mini-chat",
+ "solar-mini": "upstage/solar-1-mini-chat-ja",
}
@classmethod
@@ -41,35 +41,51 @@ class Upstage(AsyncGeneratorProvider, ProviderModelMixin):
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
-
+
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
"content-type": "application/json",
+ "dnt": "1",
"origin": "https://console.upstage.ai",
+ "pragma": "no-cache",
"priority": "u=1, i",
"referer": "https://console.upstage.ai/",
- "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+ "sec-ch-ua": '"Not?A_Brand";v="99", "Chromium";v="130"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "cross-site",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
}
+
async with ClientSession(headers=headers) as session:
data = {
"stream": True,
"messages": [{"role": "user", "content": format_prompt(messages)}],
"model": model
}
+
async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
+
+ response_text = ""
+
async for line in response.content:
if line:
line = line.decode('utf-8').strip()
+
if line.startswith("data: ") and line != "data: [DONE]":
- data = json.loads(line[6:])
- content = data['choices'][0]['delta'].get('content', '')
- if content:
- yield content
+ try:
+ data = json.loads(line[6:])
+ content = data['choices'][0]['delta'].get('content', '')
+ if content:
+ response_text += content
+ yield content
+ except json.JSONDecodeError:
+ continue
+
+ if line == "data: [DONE]":
+ break
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index af8aab0e..02735038 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -17,8 +17,6 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
label = "You.com"
url = "https://you.com"
working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
default_model = "gpt-4o-mini"
default_vision_model = "agent"
image_models = ["dall-e"]
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 82cb9ff2..faf9979e 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -5,63 +5,41 @@ from ..providers.retry_provider import RetryProvider, IterListProvider
from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
from ..providers.create_images import CreateImagesProvider
-from .deprecated import *
-from .selenium import *
-from .needs_auth import *
+from .deprecated import *
+from .selenium import *
+from .needs_auth import *
+from .not_working import *
+from .local import *
-from .AI365VIP import AI365VIP
-from .AIChatFree import AIChatFree
-from .Allyfy import Allyfy
-from .AiChatOnline import AiChatOnline
-from .AiChats import AiChats
+from .AIUncensored import AIUncensored
from .Airforce import Airforce
-from .Aura import Aura
+from .AmigoChat import AmigoChat
from .Bing import Bing
-from .BingCreateImages import BingCreateImages
-from .Binjie import Binjie
-from .Bixin123 import Bixin123
from .Blackbox import Blackbox
-from .ChatGot import ChatGot
from .ChatGpt import ChatGpt
-from .Chatgpt4Online import Chatgpt4Online
-from .Chatgpt4o import Chatgpt4o
from .ChatGptEs import ChatGptEs
-from .ChatgptFree import ChatgptFree
-from .ChatHub import ChatHub
+from .Cloudflare import Cloudflare
+from .Copilot import Copilot
+from .DarkAI import DarkAI
from .DDG import DDG
-from .DeepInfra import DeepInfra
from .DeepInfraChat import DeepInfraChat
-from .DeepInfraImage import DeepInfraImage
-from .FlowGpt import FlowGpt
from .Free2GPT import Free2GPT
-from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
-from .FreeNetfly import FreeNetfly
-from .GeminiPro import GeminiPro
-from .GigaChat import GigaChat
-from .GPROChat import GPROChat
+from .GizAI import GizAI
from .HuggingChat import HuggingChat
-from .HuggingFace import HuggingFace
-from .Koala import Koala
from .Liaobots import Liaobots
-from .LiteIcoding import LiteIcoding
-from .Local import Local
from .MagickPen import MagickPen
-from .MetaAI import MetaAI
-#from .MetaAIAccount import MetaAIAccount
-from .Nexra import Nexra
-from .Ollama import Ollama
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
from .Prodia import Prodia
from .Reka import Reka
-from .Replicate import Replicate
from .ReplicateHome import ReplicateHome
+from .RubiksAI import RubiksAI
from .TeachAnything import TeachAnything
from .Upstage import Upstage
-from .WhiteRabbitNeo import WhiteRabbitNeo
from .You import You
+from .Mhystical import Mhystical
import sys
diff --git a/g4f/Provider/airforce/AirforceChat.py b/g4f/Provider/airforce/AirforceChat.py
new file mode 100644
index 00000000..e94dd0a8
--- /dev/null
+++ b/g4f/Provider/airforce/AirforceChat.py
@@ -0,0 +1,174 @@
+from __future__ import annotations
+import re
+import json
+import requests
+from aiohttp import ClientSession
+from typing import List
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+# Helper function to clean the response
+def clean_response(text: str) -> str:
+ """Clean response from unwanted patterns."""
+ patterns = [
+ r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
+ r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
+ r"Rate limit \(\d+\/hour\) exceeded\. Join our discord for more: https:\/\/discord\.com\/invite\/\S+",
+ r"</s>", # zephyr-7b-beta
+ r"\[ERROR\] '\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'", # Matches [ERROR] 'UUID'
+ ]
+ for pattern in patterns:
+ text = re.sub(pattern, '', text)
+
+ # Remove the <|im_end|> token if present
+ text = text.replace("<|im_end|>", "").strip()
+
+ return text
+
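+ # Editor's example for clean_response (illustrative, not from the upstream
+ # patch): clean_response("Hello</s><|im_end|>") returns "Hello"; the </s>
+ # and <|im_end|> tokens are stripped along with any rate-limit banners.
+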
+def split_message(message: str, max_length: int = 1000) -> List[str]:
+ """Splits the message into chunks of a given length (max_length)"""
+ # Split the message into smaller chunks to avoid exceeding the limit
+ chunks = []
+ while len(message) > max_length:
+ # Find the last space or punctuation before max_length to avoid cutting words
+ split_point = message.rfind(' ', 0, max_length)
+ if split_point == -1: # No space found, split at max_length
+ split_point = max_length
+ chunks.append(message[:split_point])
+ message = message[split_point:].strip()
+ if message:
+ chunks.append(message) # Append the remaining part of the message
+ return chunks
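+
+# Editor's worked example (illustrative, not from the upstream patch): with
+# max_length=10, split_message("hello wonderful world") splits at the last
+# space before each limit and returns ["hello", "wonderful", "world"].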
+
+class AirforceChat(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "AirForce Chat"
+ api_endpoint = "https://api.airforce/chat/completions"
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'llama-3.1-70b-chat'
+
+ @classmethod
+ def get_models(cls) -> list:
+ if not cls.models:
+ response = requests.get('https://api.airforce/models')
+ data = response.json()
+ cls.models = [model['id'] for model in data['data']]
+ return cls.models
+
+ model_aliases = {
+ # openchat
+ "openchat-3.5": "openchat-3.5-0106",
+
+ # deepseek-ai
+ "deepseek-coder": "deepseek-coder-6.7b-instruct",
+
+ # NousResearch
+ "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
+ "hermes-2-pro": "hermes-2-pro-mistral-7b",
+
+ # teknium
+ "openhermes-2.5": "openhermes-2.5-mistral-7b",
+
+ # liquid
+ "lfm-40b": "lfm-40b-moe",
+
+ # DiscoResearch
+ "german-7b": "discolm-german-7b-v1",
+
+ # meta-llama
+ "llama-2-7b": "llama-2-7b-chat-int8",
+ "llama-2-7b": "llama-2-7b-chat-fp16",
+ "llama-3.1-70b": "llama-3.1-70b-chat",
+ "llama-3.1-8b": "llama-3.1-8b-chat",
+ "llama-3.1-70b": "llama-3.1-70b-turbo",
+ "llama-3.1-8b": "llama-3.1-8b-turbo",
+
+ # inferless
+ "neural-7b": "neural-chat-7b-v3-1",
+
+ # HuggingFaceH4
+ "zephyr-7b": "zephyr-7b-beta",
+
+ # llmplayground.net
+ #"any-uncensored": "any-uncensored",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ proxy: str = None,
+ max_tokens: int = 4096,
+ temperature: float = 1,
+ top_p: float = 1,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'authorization': 'Bearer missing api key',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': 'https://llmplayground.net',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://llmplayground.net/',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ # Format the messages for the API
+ formatted_messages = format_prompt(messages)
+ message_chunks = split_message(formatted_messages)
+
+ full_response = ""
+ for chunk in message_chunks:
+ data = {
+ "messages": [{"role": "user", "content": chunk}],
+ "model": model,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stream": stream
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+
+ text = ""
+ if stream:
+ async for line in response.content:
+ line = line.decode('utf-8').strip()
+ if line.startswith('data: '):
+ json_str = line[6:]
+ # SSE lines always carry the "data: " prefix, so the end marker
+ # must be checked here rather than against the raw line
+ if json_str == "[DONE]":
+ break
+ if not json_str:
+ continue
+ try:
+ chunk = json.loads(json_str)
+ if 'choices' in chunk and chunk['choices']:
+ content = chunk['choices'][0].get('delta', {}).get('content', '')
+ text += content
+ except json.JSONDecodeError as e:
+ print(f"Error decoding JSON: {json_str}, Error: {e}")
+ full_response += clean_response(text)
+ else:
+ response_json = await response.json()
+ text = response_json["choices"][0]["message"]["content"]
+ full_response += clean_response(text)
+
+ # Return the complete response after all chunks
+ yield full_response
diff --git a/g4f/Provider/airforce/AirforceImage.py b/g4f/Provider/airforce/AirforceImage.py
new file mode 100644
index 00000000..b74bc364
--- /dev/null
+++ b/g4f/Provider/airforce/AirforceImage.py
@@ -0,0 +1,83 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+from urllib.parse import urlencode
+import random
+import requests
+
+from ...typing import AsyncResult, Messages
+from ...image import ImageResponse
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class AirforceImage(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Airforce Image"
+ #url = "https://api.airforce"
+ api_endpoint = "https://api.airforce/imagine2"
+ #working = True
+
+ default_model = 'flux'
+
+ response = requests.get('https://api.airforce/imagine/models')
+ data = response.json()
+
+ image_models = data
+
+ models = [*image_models, "stable-diffusion-xl-base", "stable-diffusion-xl-lightning", "Flux-1.1-Pro"]
+
+ model_aliases = {
+ "sdxl": "stable-diffusion-xl-base",
+ "sdxl": "stable-diffusion-xl-lightning",
+ "flux-pro": "Flux-1.1-Pro",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ size: str = '1:1', # "1:1", "16:9", "9:16", "21:9", "9:21", "1:2", "2:1"
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'dnt': '1',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://llmplayground.net/',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'image',
+ 'sec-fetch-mode': 'no-cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ seed = random.randint(0, 58463)
+ params = {
+ 'model': model,
+ 'prompt': messages[-1]["content"],
+ 'size': size,
+ 'seed': seed
+ }
+ full_url = f"{cls.api_endpoint}?{urlencode(params)}"
+
+ async with session.get(full_url, headers=headers, proxy=proxy) as response:
+ if response.status == 200 and response.headers.get('content-type', '').startswith('image'):
+ yield ImageResponse(images=[full_url], alt="Generated Image")
+ else:
+ raise Exception(f"Error: status {response.status}, content type {response.headers.get('content-type')}")
diff --git a/g4f/Provider/airforce/__init__.py b/g4f/Provider/airforce/__init__.py
new file mode 100644
index 00000000..5ffa6d31
--- /dev/null
+++ b/g4f/Provider/airforce/__init__.py
@@ -0,0 +1,2 @@
+from .AirforceChat import AirforceChat
+from .AirforceImage import AirforceImage
diff --git a/g4f/Provider/bing/create_images.py b/g4f/Provider/bing/create_images.py
index 7a08ddfe..45ba30b6 100644
--- a/g4f/Provider/bing/create_images.py
+++ b/g4f/Provider/bing/create_images.py
@@ -132,7 +132,7 @@ async def create_images(session: ClientSession, prompt: str, timeout: int = TIME
redirect_url = response.headers["Location"].replace("&nfy=1", "")
redirect_url = f"{BING_URL}{redirect_url}"
- request_id = redirect_url.split("id=")[1]
+ request_id = redirect_url.split("id=")[-1]
async with session.get(redirect_url) as response:
response.raise_for_status()
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index bf923f2a..368a71a0 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -25,11 +25,10 @@ from .Aichat import Aichat
from .Berlin import Berlin
from .Phind import Phind
from .AiAsk import AiAsk
-from ..AiChatOnline import AiChatOnline
from .ChatAnywhere import ChatAnywhere
from .FakeGpt import FakeGpt
from .GeekGpt import GeekGpt
from .GPTalk import GPTalk
from .Hashnode import Hashnode
from .Ylokh import Ylokh
-from .OpenAssistant import OpenAssistant \ No newline at end of file
+from .OpenAssistant import OpenAssistant
diff --git a/g4f/Provider/Local.py b/g4f/Provider/local/Local.py
index 471231c6..4dc6e3f9 100644
--- a/g4f/Provider/Local.py
+++ b/g4f/Provider/local/Local.py
@@ -1,15 +1,15 @@
from __future__ import annotations
-from ..locals.models import get_models
+from ...locals.models import get_models
try:
- from ..locals.provider import LocalProvider
+ from ...locals.provider import LocalProvider
has_requirements = True
except ImportError:
has_requirements = False
-from ..typing import Messages, CreateResult
-from ..providers.base_provider import AbstractProvider, ProviderModelMixin
-from ..errors import MissingRequirementsError
+from ...typing import Messages, CreateResult
+from ...providers.base_provider import AbstractProvider, ProviderModelMixin
+from ...errors import MissingRequirementsError
class Local(AbstractProvider, ProviderModelMixin):
label = "GPT4All"
@@ -40,4 +40,4 @@ class Local(AbstractProvider, ProviderModelMixin):
messages,
stream,
**kwargs
- ) \ No newline at end of file
+ )
diff --git a/g4f/Provider/Ollama.py b/g4f/Provider/local/Ollama.py
index a44aaacd..de68a218 100644
--- a/g4f/Provider/Ollama.py
+++ b/g4f/Provider/local/Ollama.py
@@ -1,11 +1,12 @@
from __future__ import annotations
import requests
+import os
-from .needs_auth.Openai import Openai
-from ..typing import AsyncResult, Messages
+from ..needs_auth.OpenaiAPI import OpenaiAPI
+from ...typing import AsyncResult, Messages
-class Ollama(Openai):
+class Ollama(OpenaiAPI):
label = "Ollama"
url = "https://ollama.com"
needs_auth = False
@@ -14,9 +15,11 @@ class Ollama(Openai):
@classmethod
def get_models(cls):
if not cls.models:
- url = 'http://127.0.0.1:11434/api/tags'
+ host = os.getenv("OLLAMA_HOST", "127.0.0.1")
+ port = os.getenv("OLLAMA_PORT", "11434")
+ url = f"http://{host}:{port}/api/tags"
models = requests.get(url).json()["models"]
- cls.models = [model['name'] for model in models]
+ cls.models = [model["name"] for model in models]
cls.default_model = cls.models[0]
return cls.models
@@ -25,9 +28,13 @@ class Ollama(Openai):
cls,
model: str,
messages: Messages,
- api_base: str = "http://localhost:11434/v1",
+ api_base: str = None,
**kwargs
) -> AsyncResult:
+ if not api_base:
+ host = os.getenv("OLLAMA_HOST", "127.0.0.1")
+ port = os.getenv("OLLAMA_PORT", "11434")
+ api_base = f"http://{host}:{port}/v1"
return super().create_async_generator(
model, messages, api_base=api_base, **kwargs
- ) \ No newline at end of file
+ )
diff --git a/g4f/Provider/local/__init__.py b/g4f/Provider/local/__init__.py
new file mode 100644
index 00000000..05f6022e
--- /dev/null
+++ b/g4f/Provider/local/__init__.py
@@ -0,0 +1,2 @@
+from .Local import Local
+from .Ollama import Ollama
diff --git a/g4f/Provider/BingCreateImages.py b/g4f/Provider/needs_auth/BingCreateImages.py
index 7a206c8f..80984d40 100644
--- a/g4f/Provider/BingCreateImages.py
+++ b/g4f/Provider/needs_auth/BingCreateImages.py
@@ -1,11 +1,11 @@
from __future__ import annotations
-from ..cookies import get_cookies
-from ..image import ImageResponse
-from ..errors import MissingAuthError
-from ..typing import AsyncResult, Messages, Cookies
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .bing.create_images import create_images, create_session
+from ...cookies import get_cookies
+from ...image import ImageResponse
+from ...errors import MissingAuthError
+from ...typing import AsyncResult, Messages, Cookies
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..bing.create_images import create_images, create_session
class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin):
label = "Microsoft Designer in Bing"
diff --git a/g4f/Provider/needs_auth/CopilotAccount.py b/g4f/Provider/needs_auth/CopilotAccount.py
new file mode 100644
index 00000000..fa43867e
--- /dev/null
+++ b/g4f/Provider/needs_auth/CopilotAccount.py
@@ -0,0 +1,8 @@
+from __future__ import annotations
+
+from ..Copilot import Copilot
+
+class CopilotAccount(Copilot):
+ needs_auth = True
+ parent = "Copilot"
+ default_model = "" \ No newline at end of file
diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/needs_auth/DeepInfra.py
index b12fb254..35e7ca7f 100644
--- a/g4f/Provider/DeepInfra.py
+++ b/g4f/Provider/needs_auth/DeepInfra.py
@@ -1,10 +1,10 @@
from __future__ import annotations
import requests
-from ..typing import AsyncResult, Messages
-from .needs_auth.Openai import Openai
+from ...typing import AsyncResult, Messages
+from .OpenaiAPI import OpenaiAPI
-class DeepInfra(Openai):
+class DeepInfra(OpenaiAPI):
label = "DeepInfra"
url = "https://deepinfra.com"
working = True
@@ -55,4 +55,4 @@ class DeepInfra(Openai):
max_tokens=max_tokens,
headers=headers,
**kwargs
- ) \ No newline at end of file
+ )
diff --git a/g4f/Provider/DeepInfraImage.py b/g4f/Provider/needs_auth/DeepInfraImage.py
index cee608ce..2310c1c8 100644
--- a/g4f/Provider/DeepInfraImage.py
+++ b/g4f/Provider/needs_auth/DeepInfraImage.py
@@ -2,10 +2,10 @@ from __future__ import annotations
import requests
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession, raise_for_status
-from ..image import ImageResponse
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession, raise_for_status
+from ...image import ImageResponse
class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://deepinfra.com"
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index 8d741476..781aa410 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -6,24 +6,20 @@ import random
import re
from aiohttp import ClientSession, BaseConnector
-
-from ..helper import get_connector
-
try:
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
+ import nodriver
+ has_nodriver = True
except ImportError:
- pass
+ has_nodriver = False
from ... import debug
from ...typing import Messages, Cookies, ImageType, AsyncResult, AsyncIterator
from ..base_provider import AsyncGeneratorProvider, BaseConversation
from ..helper import format_prompt, get_cookies
from ...requests.raise_for_status import raise_for_status
-from ...errors import MissingAuthError, MissingRequirementsError
+from ...requests.aiohttp import get_connector
+from ...errors import MissingAuthError
from ...image import ImageResponse, to_bytes
-from ...webdriver import get_browser, get_driver_cookies
REQUEST_HEADERS = {
"authority": "gemini.google.com",
@@ -57,15 +53,16 @@ class Gemini(AsyncGeneratorProvider):
default_model = 'gemini'
image_models = ["gemini"]
default_vision_model = "gemini"
+ models = ["gemini", "gemini-1.5-flash", "gemini-1.5-pro"]
_cookies: Cookies = None
_snlm0e: str = None
_sid: str = None
@classmethod
async def nodriver_login(cls, proxy: str = None) -> AsyncIterator[str]:
- try:
- import nodriver as uc
- except ImportError:
+ if not has_nodriver:
+ if debug.logging:
+ print("Skip nodriver login in Gemini provider")
return
try:
from platformdirs import user_config_dir
@@ -74,7 +71,7 @@ class Gemini(AsyncGeneratorProvider):
user_data_dir = None
if debug.logging:
print(f"Open nodriver with user_dir: {user_data_dir}")
- browser = await uc.start(
+ browser = await nodriver.start(
user_data_dir=user_data_dir,
browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
)
@@ -91,30 +88,6 @@ class Gemini(AsyncGeneratorProvider):
cls._cookies = cookies
@classmethod
- async def webdriver_login(cls, proxy: str) -> AsyncIterator[str]:
- driver = None
- try:
- driver = get_browser(proxy=proxy)
- try:
- driver.get(f"{cls.url}/app")
- WebDriverWait(driver, 5).until(
- EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea"))
- )
- except:
- login_url = os.environ.get("G4F_LOGIN_URL")
- if login_url:
- yield f"Please login: [Google Gemini]({login_url})\n\n"
- WebDriverWait(driver, 240).until(
- EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea"))
- )
- cls._cookies = get_driver_cookies(driver)
- except MissingRequirementsError:
- pass
- finally:
- if driver:
- driver.close()
-
- @classmethod
async def create_async_generator(
cls,
model: str,
@@ -142,9 +115,6 @@ class Gemini(AsyncGeneratorProvider):
if not cls._snlm0e:
async for chunk in cls.nodriver_login(proxy):
yield chunk
- if cls._cookies is None:
- async for chunk in cls.webdriver_login(proxy):
- yield chunk
if not cls._snlm0e:
if cls._cookies is None or "__Secure-1PSID" not in cls._cookies:
raise MissingAuthError('Missing "__Secure-1PSID" cookie')
@@ -210,20 +180,23 @@ class Gemini(AsyncGeneratorProvider):
yield content[last_content_len:]
last_content_len = len(content)
if image_prompt:
- images = [image[0][3][3] for image in response_part[4][0][12][7][0]]
- if response_format == "b64_json":
- yield ImageResponse(images, image_prompt, {"cookies": cls._cookies})
- else:
- resolved_images = []
- preview = []
- for image in images:
- async with client.get(image, allow_redirects=False) as fetch:
- image = fetch.headers["location"]
- async with client.get(image, allow_redirects=False) as fetch:
- image = fetch.headers["location"]
- resolved_images.append(image)
- preview.append(image.replace('=s512', '=s200'))
- yield ImageResponse(resolved_images, image_prompt, {"orginal_links": images, "preview": preview})
+ try:
+ images = [image[0][3][3] for image in response_part[4][0][12][7][0]]
+ if response_format == "b64_json":
+ yield ImageResponse(images, image_prompt, {"cookies": cls._cookies})
+ else:
+ resolved_images = []
+ preview = []
+ for image in images:
+ async with client.get(image, allow_redirects=False) as fetch:
+ image = fetch.headers["location"]
+ async with client.get(image, allow_redirects=False) as fetch:
+ image = fetch.headers["location"]
+ resolved_images.append(image)
+ preview.append(image.replace('=s512', '=s200'))
+ yield ImageResponse(resolved_images, image_prompt, {"orginal_links": images, "preview": preview})
+ except TypeError:
+ pass
def build_request(
prompt: str,
diff --git a/g4f/Provider/GeminiPro.py b/g4f/Provider/needs_auth/GeminiPro.py
index 06bf69ee..a7f1e0aa 100644
--- a/g4f/Provider/GeminiPro.py
+++ b/g4f/Provider/needs_auth/GeminiPro.py
@@ -4,11 +4,11 @@ import base64
import json
from aiohttp import ClientSession, BaseConnector
-from ..typing import AsyncResult, Messages, ImageType
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import to_bytes, is_accepted_format
-from ..errors import MissingAuthError
-from .helper import get_connector
+from ...typing import AsyncResult, Messages, ImageType
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import to_bytes, is_accepted_format
+from ...errors import MissingAuthError
+from ..helper import get_connector
class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
label = "Gemini API"
@@ -16,9 +16,9 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
working = True
supports_message_history = True
needs_auth = True
- default_model = "gemini-1.5-pro-latest"
+ default_model = "gemini-1.5-pro"
default_vision_model = default_model
- models = [default_model, "gemini-pro", "gemini-pro-vision", "gemini-1.5-flash"]
+ models = [default_model, "gemini-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b"]
@classmethod
async def create_async_generator(
@@ -104,4 +104,8 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
lines.append(chunk)
else:
data = await response.json()
- yield data["candidates"][0]["content"]["parts"][0]["text"] \ No newline at end of file
+ candidate = data["candidates"][0]
+ if candidate["finishReason"] == "STOP":
+ yield candidate["content"]["parts"][0]["text"]
+ else:
+ yield candidate["finishReason"] + ' ' + candidate["safetyRatings"] \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/Groq.py b/g4f/Provider/needs_auth/Groq.py
index d11f6a82..943fc81a 100644
--- a/g4f/Provider/needs_auth/Groq.py
+++ b/g4f/Provider/needs_auth/Groq.py
@@ -1,14 +1,33 @@
from __future__ import annotations
-from .Openai import Openai
+from .OpenaiAPI import OpenaiAPI
from ...typing import AsyncResult, Messages
-class Groq(Openai):
+class Groq(OpenaiAPI):
label = "Groq"
url = "https://console.groq.com/playground"
working = True
default_model = "mixtral-8x7b-32768"
- models = ["mixtral-8x7b-32768", "llama2-70b-4096", "gemma-7b-it"]
+ models = [
+ "distil-whisper-large-v3-en",
+ "gemma2-9b-it",
+ "gemma-7b-it",
+ "llama3-groq-70b-8192-tool-use-preview",
+ "llama3-groq-8b-8192-tool-use-preview",
+ "llama-3.1-70b-versatile",
+ "llama-3.1-8b-instant",
+ "llama-3.2-1b-preview",
+ "llama-3.2-3b-preview",
+ "llama-3.2-11b-vision-preview",
+ "llama-3.2-90b-vision-preview",
+ "llama-guard-3-8b",
+ "llava-v1.5-7b-4096-preview",
+ "llama3-70b-8192",
+ "llama3-8b-8192",
+ "mixtral-8x7b-32768",
+ "whisper-large-v3",
+ "whisper-large-v3-turbo",
+ ]
model_aliases = {"mixtral-8x7b": "mixtral-8x7b-32768", "llama2-70b": "llama2-70b-4096"}
@classmethod
@@ -21,4 +40,4 @@ class Groq(Openai):
) -> AsyncResult:
return super().create_async_generator(
model, messages, api_base=api_base, **kwargs
- ) \ No newline at end of file
+ )
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py
index 586e5f5f..35270e60 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/needs_auth/HuggingFace.py
@@ -1,15 +1,13 @@
from __future__ import annotations
import json
-from aiohttp import ClientSession, BaseConnector
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_connector
-from ..errors import RateLimitError, ModelNotFoundError
-from ..requests.raise_for_status import raise_for_status
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...errors import ModelNotFoundError
+from ...requests import StreamSession, raise_for_status
-from .HuggingChat import HuggingChat
+from ..HuggingChat import HuggingChat
class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"
@@ -21,22 +19,12 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
model_aliases = HuggingChat.model_aliases
@classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
stream: bool = True,
proxy: str = None,
- connector: BaseConnector = None,
api_base: str = "https://api-inference.huggingface.co",
api_key: str = None,
max_new_tokens: int = 1024,
@@ -62,7 +50,6 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
}
if api_key is not None:
headers["Authorization"] = f"Bearer {api_key}"
-
params = {
"return_full_text": False,
"max_new_tokens": max_new_tokens,
@@ -70,10 +57,9 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
**kwargs
}
payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream}
-
- async with ClientSession(
+ async with StreamSession(
headers=headers,
- connector=get_connector(connector, proxy)
+ proxy=proxy
) as session:
async with session.post(f"{api_base.rstrip('/')}/models/{model}", json=payload) as response:
if response.status == 404:
@@ -81,7 +67,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
await raise_for_status(response)
if stream:
first = True
- async for line in response.content:
+ async for line in response.iter_lines():
if line.startswith(b"data:"):
data = json.loads(line[5:])
if not data["token"]["special"]:
@@ -89,7 +75,8 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
if first:
first = False
chunk = chunk.lstrip()
- yield chunk
+ if chunk:
+ yield chunk
else:
yield (await response.json())[0]["generated_text"].strip()
@@ -101,4 +88,4 @@ def format_prompt(messages: Messages) -> str:
for idx, message in enumerate(messages)
if message["role"] == "assistant"
])
- return f"{history}<s>[INST] {question} [/INST]"
+ return f"{history}<s>[INST] {question} [/INST]" \ No newline at end of file
diff --git a/g4f/Provider/MetaAI.py b/g4f/Provider/needs_auth/MetaAI.py
index 218b7ebb..568de701 100644
--- a/g4f/Provider/MetaAI.py
+++ b/g4f/Provider/needs_auth/MetaAI.py
@@ -8,12 +8,12 @@ from typing import Dict, List
from aiohttp import ClientSession, BaseConnector
-from ..typing import AsyncResult, Messages, Cookies
-from ..requests import raise_for_status, DEFAULT_HEADERS
-from ..image import ImageResponse, ImagePreview
-from ..errors import ResponseError
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt, get_connector, format_cookies
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests import raise_for_status, DEFAULT_HEADERS
+from ...image import ImageResponse, ImagePreview
+from ...errors import ResponseError
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt, get_connector, format_cookies
class Sources():
def __init__(self, link_list: List[Dict[str, str]]) -> None:
@@ -79,7 +79,6 @@ class MetaAI(AsyncGeneratorProvider, ProviderModelMixin):
self.access_token = None
if self.access_token is None and cookies is None:
await self.update_access_token()
-
if self.access_token is None:
url = "https://www.meta.ai/api/graphql/"
payload = {"lsd": self.lsd, 'fb_dtsg': self.dtsg}
@@ -128,6 +127,8 @@ class MetaAI(AsyncGeneratorProvider, ProviderModelMixin):
json_line = json.loads(line)
except json.JSONDecodeError:
continue
+ if json_line.get("errors"):
+ raise RuntimeError("\n".join([error.get("message") for error in json_line.get("errors")]))
bot_response_message = json_line.get("data", {}).get("node", {}).get("bot_response_message", {})
streaming_state = bot_response_message.get("streaming_state")
fetch_id = bot_response_message.get("fetch_id") or fetch_id
diff --git a/g4f/Provider/MetaAIAccount.py b/g4f/Provider/needs_auth/MetaAIAccount.py
index 369b3f2f..0a586006 100644
--- a/g4f/Provider/MetaAIAccount.py
+++ b/g4f/Provider/needs_auth/MetaAIAccount.py
@@ -1,7 +1,7 @@
from __future__ import annotations
-from ..typing import AsyncResult, Messages, Cookies
-from .helper import format_prompt, get_cookies
+from ...typing import AsyncResult, Messages, Cookies
+from ..helper import format_prompt, get_cookies
from .MetaAI import MetaAI
class MetaAIAccount(MetaAI):
@@ -20,4 +20,4 @@ class MetaAIAccount(MetaAI):
) -> AsyncResult:
cookies = get_cookies(".meta.ai", True, True) if cookies is None else cookies
async for chunk in cls(proxy).prompt(format_prompt(messages), cookies):
- yield chunk \ No newline at end of file
+ yield chunk
diff --git a/g4f/Provider/needs_auth/OpenRouter.py b/g4f/Provider/needs_auth/OpenRouter.py
deleted file mode 100644
index 5e0bf336..00000000
--- a/g4f/Provider/needs_auth/OpenRouter.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from __future__ import annotations
-
-import requests
-
-from .Openai import Openai
-from ...typing import AsyncResult, Messages
-
-class OpenRouter(Openai):
- label = "OpenRouter"
- url = "https://openrouter.ai"
- working = False
- default_model = "mistralai/mistral-7b-instruct:free"
-
- @classmethod
- def get_models(cls):
- if not cls.models:
- url = 'https://openrouter.ai/api/v1/models'
- models = requests.get(url).json()["data"]
- cls.models = [model['id'] for model in models]
- return cls.models
-
- @classmethod
- def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- api_base: str = "https://openrouter.ai/api/v1",
- **kwargs
- ) -> AsyncResult:
- return super().create_async_generator(
- model, messages, api_base=api_base, **kwargs
- )
diff --git a/g4f/Provider/needs_auth/Openai.py b/g4f/Provider/needs_auth/OpenaiAPI.py
index 382ebada..116b5f6f 100644
--- a/g4f/Provider/needs_auth/Openai.py
+++ b/g4f/Provider/needs_auth/OpenaiAPI.py
@@ -9,7 +9,7 @@ from ...requests import StreamSession, raise_for_status
from ...errors import MissingAuthError, ResponseError
from ...image import to_data_uri
-class Openai(AsyncGeneratorProvider, ProviderModelMixin):
+class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
label = "OpenAI API"
url = "https://platform.openai.com"
working = True
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index f02121e3..13e15f1d 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -1,37 +1,39 @@
from __future__ import annotations
+import re
import asyncio
import uuid
import json
import base64
import time
+import requests
from aiohttp import ClientWebSocketResponse
from copy import copy
try:
- import webview
- has_webview = True
+ import nodriver
+ from nodriver.cdp.network import get_response_body
+ has_nodriver = True
except ImportError:
- has_webview = False
-
+ has_nodriver = False
try:
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
+ from platformdirs import user_config_dir
+ has_platformdirs = True
except ImportError:
- pass
+ has_platformdirs = False
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ...webdriver import get_browser
from ...typing import AsyncResult, Messages, Cookies, ImageType, AsyncIterator
-from ...requests import get_args_from_browser, raise_for_status
+from ...requests.raise_for_status import raise_for_status
from ...requests.aiohttp import StreamSession
from ...image import ImageResponse, ImageRequest, to_image, to_bytes, is_accepted_format
from ...errors import MissingAuthError, ResponseError
from ...providers.conversation import BaseConversation
from ..helper import format_cookies
-from ..openai.har_file import getArkoseAndAccessToken, NoValidHarFileError
+from ..openai.har_file import get_request_config, NoValidHarFileError
+from ..openai.har_file import RequestConfig, arkReq, arkose_url, start_url, conversation_url, backend_url, backend_anon_url
from ..openai.proofofwork import generate_proof_token
+from ..openai.new import get_requirements_token
from ... import debug
DEFAULT_HEADERS = {
@@ -55,24 +57,33 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
label = "OpenAI ChatGPT"
url = "https://chatgpt.com"
working = True
+ needs_auth = True
supports_gpt_4 = True
supports_message_history = True
supports_system_message = True
- default_model = None
+ default_model = "auto"
default_vision_model = "gpt-4o"
- models = [ "auto", "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-gizmo"]
-
- model_aliases = {
- #"gpt-4-turbo": "gpt-4",
- #"gpt-4": "gpt-4-gizmo",
- #"dalle": "gpt-4",
- }
+ fallback_models = ["auto", "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini"]
+ vision_models = fallback_models
+
_api_key: str = None
_headers: dict = None
_cookies: Cookies = None
_expires: int = None
@classmethod
+ def get_models(cls):
+ if not cls.models:
+ try:
+ response = requests.get(f"{cls.url}/backend-anon/models")
+ response.raise_for_status()
+ data = response.json()
+ cls.models = [model.get("slug") for model in data.get("models")]
+ except Exception:
+ cls.models = cls.fallback_models
+ return cls.models
+
+ @classmethod
async def create(
cls,
prompt: str = None,
@@ -195,7 +206,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
async with session.get(url, headers=headers) as response:
cls._update_request_args(session)
if response.status == 401:
- raise MissingAuthError('Add a "api_key" or a .har file' if cls._api_key is None else "Invalid api key")
+ raise MissingAuthError('Add a .har file for OpenaiChat' if cls._api_key is None else "Invalid api key")
await raise_for_status(response)
data = await response.json()
if "categories" in data:
@@ -218,9 +229,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"""
# Create a message object with the user role and the content
messages = [{
- "id": str(uuid.uuid4()),
"author": {"role": message["role"]},
"content": {"content_type": "text", "parts": [message["content"]]},
+ "id": str(uuid.uuid4()),
+ "create_time": int(time.time()),
+ "id": str(uuid.uuid4()),
+ "metadata": {"serialization_metadata": {"custom_symbol_offsets": []}}
} for message in messages]
# Check if there is an image response
@@ -249,7 +263,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
return messages
@classmethod
- async def get_generated_image(cls, session: StreamSession, headers: dict, line: dict) -> ImageResponse:
+ async def get_generated_image(cls, session: StreamSession, headers: dict, element: dict) -> ImageResponse:
"""
Retrieves the image response based on the message content.
@@ -268,15 +282,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
Raises:
RuntimeError: If there's an error in downloading the image, including issues with the HTTP request or response.
"""
- if "parts" not in line["message"]["content"]:
- return
- first_part = line["message"]["content"]["parts"][0]
- if "asset_pointer" not in first_part or "metadata" not in first_part:
- return
- if first_part["metadata"] is None or first_part["metadata"]["dalle"] is None:
- return
- prompt = first_part["metadata"]["dalle"]["prompt"]
- file_id = first_part["asset_pointer"].split("file-service://", 1)[1]
+ prompt = element["metadata"]["dalle"]["prompt"]
+ file_id = element["asset_pointer"].split("file-service://", 1)[1]
try:
async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=headers) as response:
cls._update_request_args(session)
@@ -363,20 +370,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
) as session:
if cls._expires is not None and cls._expires < time.time():
cls._headers = cls._api_key = None
- arkose_token = None
- proofTokens = None
try:
- arkose_token, api_key, cookies, headers, proofTokens = await getArkoseAndAccessToken(proxy)
- cls._create_request_args(cookies, headers)
- cls._set_api_key(api_key)
+ await get_request_config(proxy)
+ cls._create_request_args(RequestConfig.cookies, RequestConfig.headers)
+ cls._set_api_key(RequestConfig.access_token)
except NoValidHarFileError as e:
- if cls._api_key is None and cls.needs_auth:
- raise e
- cls._create_request_args()
-
- if cls.default_model is None:
- cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
-
+ await cls.nodriver_auth(proxy)
try:
image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
except Exception as e:
@@ -384,9 +383,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
if debug.logging:
print("OpenaiChat: Upload image failed")
print(f"{e.__class__.__name__}: {e}")
-
model = cls.get_model(model)
- model = "text-davinci-002-render-sha" if model == "gpt-3.5-turbo" else model
if conversation is None:
conversation = Conversation(conversation_id, str(uuid.uuid4()) if parent_id is None else parent_id)
else:
@@ -399,36 +396,32 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
f"{cls.url}/backend-anon/sentinel/chat-requirements"
if cls._api_key is None else
f"{cls.url}/backend-api/sentinel/chat-requirements",
- json={"p": generate_proof_token(True, user_agent=cls._headers["user-agent"], proofTokens=proofTokens)},
+ json={"p": get_requirements_token(RequestConfig.proof_token)},
headers=cls._headers
) as response:
cls._update_request_args(session)
await raise_for_status(response)
- requirements = await response.json()
- text_data = json.loads(requirements.get("text", "{}"))
- need_arkose = text_data.get("turnstile", {}).get("required", False)
- if need_arkose:
- arkose_token = text_data.get("turnstile", {}).get("dx")
- else:
- need_arkose = requirements.get("arkose", {}).get("required", False)
- chat_token = requirements["token"]
-
- if need_arkose and arkose_token is None:
- arkose_token, api_key, cookies, headers, proofTokens = await getArkoseAndAccessToken(proxy)
- cls._create_request_args(cookies, headers)
- cls._set_api_key(api_key)
- if arkose_token is None:
+ chat_requirements = await response.json()
+ need_turnstile = chat_requirements.get("turnstile", {}).get("required", False)
+ need_arkose = chat_requirements.get("arkose", {}).get("required", False)
+ chat_token = chat_requirements.get("token")
+
+ if need_arkose and RequestConfig.arkose_token is None:
+ await get_request_config(proxy)
+ cls._create_request_args(RequestConfig.cookies, RequestConfig.headers)
+ cls._set_api_key(RequestConfig.access_token)
+ if RequestConfig.arkose_token is None:
raise MissingAuthError("No arkose token found in .har file")
- if "proofofwork" in requirements:
+ if "proofofwork" in chat_requirements:
proofofwork = generate_proof_token(
- **requirements["proofofwork"],
+ **chat_requirements["proofofwork"],
user_agent=cls._headers["user-agent"],
- proofTokens=proofTokens
+ proof_token=RequestConfig.proof_token
)
if debug.logging:
print(
- 'Arkose:', False if not need_arkose else arkose_token[:12]+"...",
+ 'Arkose:', False if not need_arkose else RequestConfig.arkose_token[:12]+"...",
'Proofofwork:', False if proofofwork is None else proofofwork[:12]+"...",
)
ws = None
@@ -437,18 +430,20 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
wss_url = (await response.json()).get("wss_url")
if wss_url:
ws = await session.ws_connect(wss_url)
- websocket_request_id = str(uuid.uuid4())
data = {
"action": action,
- "conversation_mode": {"kind": "primary_assistant"},
- "force_paragen": False,
- "force_rate_limit": False,
- "conversation_id": conversation.conversation_id,
+ "messages": None,
"parent_message_id": conversation.message_id,
"model": model,
+ "paragen_cot_summary_display_override": "allow",
"history_and_training_disabled": history_disabled and not auto_continue and not return_conversation,
- "websocket_request_id": websocket_request_id
+ "conversation_mode": {"kind":"primary_assistant"},
+ "websocket_request_id": str(uuid.uuid4()),
+ "supported_encodings": ["v1"],
+ "supports_buffering": True
}
+ if conversation.conversation_id is not None:
+ data["conversation_id"] = conversation.conversation_id
if action != "continue":
messages = messages if conversation_id is None else [messages[-1]]
data["messages"] = cls.create_messages(messages, image_request)
@@ -457,10 +452,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"Openai-Sentinel-Chat-Requirements-Token": chat_token,
**cls._headers
}
- if need_arkose:
- headers["Openai-Sentinel-Arkose-Token"] = arkose_token
+ if RequestConfig.arkose_token:
+ headers["Openai-Sentinel-Arkose-Token"] = RequestConfig.arkose_token
if proofofwork is not None:
headers["Openai-Sentinel-Proof-Token"] = proofofwork
+ if need_turnstile and RequestConfig.turnstile_token is not None:
+ headers['openai-sentinel-turnstile-token'] = RequestConfig.turnstile_token
async with session.post(
f"{cls.url}/backend-anon/conversation"
if cls._api_key is None else
@@ -509,7 +506,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
fields: Conversation,
ws = None
) -> AsyncIterator:
- last_message: int = 0
async for message in messages:
if message.startswith(b'{"wss_url":'):
message = json.loads(message)
@@ -526,10 +522,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
async for chunk in cls.iter_messages_line(session, message, fields):
if fields.finish_reason is not None:
break
- elif isinstance(chunk, str):
- if len(chunk) > last_message:
- yield chunk[last_message:]
- last_message = len(chunk)
else:
yield chunk
if fields.finish_reason is not None:
@@ -547,148 +539,99 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
line = json.loads(line[6:])
except:
return
- if "message" not in line:
- return
- if "error" in line and line["error"]:
- raise RuntimeError(line["error"])
- if "message_type" not in line["message"]["metadata"]:
- return
- image_response = await cls.get_generated_image(session, cls._headers, line)
- if image_response is not None:
- yield image_response
- if line["message"]["author"]["role"] != "assistant":
- return
- if line["message"]["content"]["content_type"] != "text":
+ if isinstance(line, dict) and "v" in line:
+ v = line.get("v")
+ if isinstance(v, str):
+ yield v
+ elif isinstance(v, list):
+ for m in v:
+ if m.get("p") == "/message/content/parts/0":
+ yield m.get("v")
+ elif m.get("p") == "/message/metadata":
+ fields.finish_reason = m.get("v", {}).get("finish_details", {}).get("type")
+ break
+ elif isinstance(v, dict):
+ if fields.conversation_id is None:
+ fields.conversation_id = v.get("conversation_id")
+ fields.message_id = v.get("message", {}).get("id")
+ c = v.get("message", {}).get("content", {})
+ if c.get("content_type") == "multimodal_text":
+ generated_images = []
+ for element in c.get("parts"):
+ if element.get("content_type") == "image_asset_pointer":
+ generated_images.append(
+ cls.get_generated_image(session, cls._headers, element)
+ )
+ elif element.get("content_type") == "text":
+ for part in element.get("parts", []):
+ yield part
+ for image_response in await asyncio.gather(*generated_images):
+ yield image_response
return
- if line["message"]["metadata"]["message_type"] not in ("next", "continue", "variant"):
- return
- if line["message"]["recipient"] != "all":
- return
- if fields.conversation_id is None:
- fields.conversation_id = line["conversation_id"]
- fields.message_id = line["message"]["id"]
- if "parts" in line["message"]["content"]:
- yield line["message"]["content"]["parts"][0]
- if "finish_details" in line["message"]["metadata"]:
- fields.finish_reason = line["message"]["metadata"]["finish_details"]["type"]
-
- @classmethod
- async def webview_access_token(cls) -> str:
- window = webview.create_window("OpenAI Chat", cls.url)
- await asyncio.sleep(3)
- prompt_input = None
- while not prompt_input:
- try:
- await asyncio.sleep(1)
- prompt_input = window.dom.get_element("#prompt-textarea")
- except:
- ...
- window.evaluate_js("""
-this._fetch = this.fetch;
-this.fetch = async (url, options) => {
- const response = await this._fetch(url, options);
- if (url == "https://chatgpt.com/backend-api/conversation") {
- this._headers = options.headers;
- return response;
- }
- return response;
-};
-""")
- window.evaluate_js("""
- document.querySelector('.from-token-main-surface-secondary').click();
- """)
- headers = None
- while headers is None:
- headers = window.evaluate_js("this._headers")
- await asyncio.sleep(1)
- headers["User-Agent"] = window.evaluate_js("this.navigator.userAgent")
- cookies = [list(*cookie.items()) for cookie in window.get_cookies()]
- window.destroy()
- cls._cookies = dict([(name, cookie.value) for name, cookie in cookies])
- cls._headers = headers
- cls._expires = int(time.time()) + 60 * 60 * 4
- cls._update_cookie_header()
+ if "error" in line and line.get("error"):
+ raise RuntimeError(line.get("error"))
@classmethod
- async def nodriver_access_token(cls, proxy: str = None):
- try:
- import nodriver as uc
- except ImportError:
+ async def nodriver_auth(cls, proxy: str = None):
+ if not has_nodriver:
return
- try:
- from platformdirs import user_config_dir
+ if has_platformdirs:
user_data_dir = user_config_dir("g4f-nodriver")
- except:
+ else:
user_data_dir = None
if debug.logging:
print(f"Open nodriver with user_dir: {user_data_dir}")
- browser = await uc.start(
+ browser = await nodriver.start(
user_data_dir=user_data_dir,
browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
)
- page = await browser.get("https://chatgpt.com/")
- await page.select("[id^=headlessui-menu-button-]", 240)
- api_key = await page.evaluate(
- "(async () => {"
- "let session = await fetch('/api/auth/session');"
- "let data = await session.json();"
- "let accessToken = data['accessToken'];"
- "let expires = new Date(); expires.setTime(expires.getTime() + 60 * 60 * 4 * 1000);"
- "document.cookie = 'access_token=' + accessToken + ';expires=' + expires.toUTCString() + ';path=/';"
- "return accessToken;"
- "})();",
- await_promise=True
- )
- cookies = {}
- for c in await page.browser.cookies.get_all():
- if c.domain.endswith("chatgpt.com"):
- cookies[c.name] = c.value
- user_agent = await page.evaluate("window.navigator.userAgent")
- await page.close()
- cls._create_request_args(cookies, user_agent=user_agent)
- cls._set_api_key(api_key)
-
- @classmethod
- def browse_access_token(cls, proxy: str = None, timeout: int = 1200) -> None:
- """
- Browse to obtain an access token.
-
- Args:
- proxy (str): Proxy to use for browsing.
-
- Returns:
- tuple[str, dict]: A tuple containing the access token and cookies.
- """
- driver = get_browser(proxy=proxy)
+ page = browser.main_tab
+ def on_request(event: nodriver.cdp.network.RequestWillBeSent):
+ if event.request.url == start_url or event.request.url.startswith(conversation_url):
+ RequestConfig.access_request_id = event.request_id
+ RequestConfig.headers = event.request.headers
+ elif event.request.url in (backend_url, backend_anon_url):
+ if "OpenAI-Sentinel-Proof-Token" in event.request.headers:
+ RequestConfig.proof_token = json.loads(base64.b64decode(
+ event.request.headers["OpenAI-Sentinel-Proof-Token"].split("gAAAAAB", 1)[-1].encode()
+ ).decode())
+ if "OpenAI-Sentinel-Turnstile-Token" in event.request.headers:
+ RequestConfig.turnstile_token = event.request.headers["OpenAI-Sentinel-Turnstile-Token"]
+ if "Authorization" in event.request.headers:
+ RequestConfig.access_token = event.request.headers["Authorization"].split()[-1]
+ elif event.request.url == arkose_url:
+ RequestConfig.arkose_request = arkReq(
+ arkURL=event.request.url,
+ arkBx=None,
+ arkHeader=event.request.headers,
+ arkBody=event.request.post_data,
+ userAgent=event.request.headers.get("user-agent")
+ )
+ await page.send(nodriver.cdp.network.enable())
+ page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
+ page = await browser.get(cls.url)
try:
- driver.get(f"{cls.url}/")
- WebDriverWait(driver, timeout).until(EC.presence_of_element_located((By.ID, "prompt-textarea")))
- access_token = driver.execute_script(
- "let session = await fetch('/api/auth/session');"
- "let data = await session.json();"
- "let accessToken = data['accessToken'];"
- "let expires = new Date(); expires.setTime(expires.getTime() + 60 * 60 * 4 * 1000);"
- "document.cookie = 'access_token=' + accessToken + ';expires=' + expires.toUTCString() + ';path=/';"
- "return accessToken;"
- )
- args = get_args_from_browser(f"{cls.url}/", driver, do_bypass_cloudflare=False)
- cls._headers = args["headers"]
- cls._cookies = args["cookies"]
- cls._update_cookie_header()
- cls._set_api_key(access_token)
- finally:
- driver.close()
-
- @classmethod
- async def fetch_access_token(cls, session: StreamSession, headers: dict):
- async with session.get(
- f"{cls.url}/api/auth/session",
- headers=headers
- ) as response:
- if response.ok:
- data = await response.json()
- if "accessToken" in data:
- return data["accessToken"]
+ if RequestConfig.access_request_id is not None:
+ body = await page.send(get_response_body(RequestConfig.access_request_id))
+ if isinstance(body, tuple) and body:
+ body = body[0]
+ if body:
+ match = re.search(r'"accessToken":"(.*?)"', body)
+ if match:
+ RequestConfig.access_token = match.group(1)
+ except KeyError:
+ pass
+ for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
+ RequestConfig.cookies[c.name] = c.value
+ RequestConfig.user_agent = await page.evaluate("window.navigator.userAgent")
+ await page.select("#prompt-textarea", 240)
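+        # Wait until the request hook above has captured a Sentinel proof token.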
+ while True:
+ if RequestConfig.proof_token:
+ break
+ await asyncio.sleep(1)
+ await page.close()
+ cls._create_request_args(RequestConfig.cookies, RequestConfig.headers, user_agent=RequestConfig.user_agent)
+ cls._set_api_key(RequestConfig.access_token)
@staticmethod
def get_default_headers() -> dict:
@@ -715,7 +658,8 @@ this.fetch = async (url, options) => {
def _set_api_key(cls, api_key: str):
cls._api_key = api_key
cls._expires = int(time.time()) + 60 * 60 * 4
- cls._headers["authorization"] = f"Bearer {api_key}"
+ if api_key:
+ cls._headers["authorization"] = f"Bearer {api_key}"
@classmethod
def _update_cookie_header(cls):
diff --git a/g4f/Provider/needs_auth/PerplexityApi.py b/g4f/Provider/needs_auth/PerplexityApi.py
index 3ee65b30..85d7cc98 100644
--- a/g4f/Provider/needs_auth/PerplexityApi.py
+++ b/g4f/Provider/needs_auth/PerplexityApi.py
@@ -1,9 +1,9 @@
from __future__ import annotations
-from .Openai import Openai
+from .OpenaiAPI import OpenaiAPI
from ...typing import AsyncResult, Messages
-class PerplexityApi(Openai):
+class PerplexityApi(OpenaiAPI):
label = "Perplexity API"
url = "https://www.perplexity.ai"
working = True
diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py
index 0c969d27..65fdbef9 100644
--- a/g4f/Provider/needs_auth/Poe.py
+++ b/g4f/Provider/needs_auth/Poe.py
@@ -26,6 +26,7 @@ class Poe(AbstractProvider):
needs_auth = True
supports_gpt_35_turbo = True
supports_stream = True
+ models = models.keys()
@classmethod
def create_completion(
diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py
index 07abeda3..b8ec5a97 100644
--- a/g4f/Provider/needs_auth/Raycast.py
+++ b/g4f/Provider/needs_auth/Raycast.py
@@ -16,6 +16,11 @@ class Raycast(AbstractProvider):
needs_auth = True
working = True
+ models = [
+ "gpt-3.5-turbo",
+ "gpt-4"
+ ]
+
@staticmethod
def create_completion(
model: str,
@@ -25,6 +30,9 @@ class Raycast(AbstractProvider):
**kwargs,
) -> CreateResult:
auth = kwargs.get('auth')
+ if not auth:
+ raise ValueError("Raycast needs an auth token, pass it with the `auth` parameter")
+
headers = {
'Accept': 'application/json',
'Accept-Language': 'en-US,en;q=0.9',
diff --git a/g4f/Provider/Replicate.py b/g4f/Provider/needs_auth/Replicate.py
index 7ff8ad65..ec993aa4 100644
--- a/g4f/Provider/Replicate.py
+++ b/g4f/Provider/needs_auth/Replicate.py
@@ -1,11 +1,11 @@
from __future__ import annotations
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt, filter_none
-from ..typing import AsyncResult, Messages
-from ..requests import raise_for_status
-from ..requests.aiohttp import StreamSession
-from ..errors import ResponseError, MissingAuthError
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt, filter_none
+from ...typing import AsyncResult, Messages
+from ...requests import raise_for_status
+from ...requests.aiohttp import StreamSession
+from ...errors import ResponseError, MissingAuthError
class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
@@ -85,4 +85,4 @@ class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
if new_text:
yield new_text
else:
-                yield "\n"
\ No newline at end of file
+ yield "\n"
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py
index af690063..c7d7d58e 100644
--- a/g4f/Provider/needs_auth/Theb.py
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -38,6 +38,7 @@ class Theb(AbstractProvider):
supports_gpt_35_turbo = True
supports_gpt_4 = True
supports_stream = True
+ models = models.keys()
@classmethod
def create_completion(
diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py
index 22fc62ed..2006f7ad 100644
--- a/g4f/Provider/needs_auth/ThebApi.py
+++ b/g4f/Provider/needs_auth/ThebApi.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from ...typing import CreateResult, Messages
-from .Openai import Openai
+from .OpenaiAPI import OpenaiAPI
models = {
"theb-ai": "TheB.AI",
@@ -27,7 +27,7 @@ models = {
"qwen-7b-chat": "Qwen 7B"
}
-class ThebApi(Openai):
+class ThebApi(OpenaiAPI):
label = "TheB.AI API"
url = "https://theb.ai"
working = True
@@ -58,4 +58,4 @@ class ThebApi(Openai):
"top_p": top_p,
}
}
-        return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs)
\ No newline at end of file
+ return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs)
diff --git a/g4f/Provider/WhiteRabbitNeo.py b/g4f/Provider/needs_auth/WhiteRabbitNeo.py
index 339434e6..82275c1c 100644
--- a/g4f/Provider/WhiteRabbitNeo.py
+++ b/g4f/Provider/needs_auth/WhiteRabbitNeo.py
@@ -2,10 +2,10 @@ from __future__ import annotations
from aiohttp import ClientSession, BaseConnector
-from ..typing import AsyncResult, Messages, Cookies
-from ..requests.raise_for_status import raise_for_status
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_cookies, get_connector, get_random_string
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests.raise_for_status import raise_for_status
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_cookies, get_connector, get_random_string
class WhiteRabbitNeo(AsyncGeneratorProvider):
url = "https://www.whiterabbitneo.com"
@@ -54,4 +54,4 @@ class WhiteRabbitNeo(AsyncGeneratorProvider):
await raise_for_status(response)
async for chunk in response.content.iter_any():
if chunk:
-                    yield chunk.decode(errors="ignore")
\ No newline at end of file
+ yield chunk.decode(errors="ignore")
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 0492645d..0f430ab5 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -1,11 +1,21 @@
-from .Gemini import Gemini
-from .Raycast import Raycast
-from .Theb import Theb
-from .ThebApi import ThebApi
-from .OpenaiChat import OpenaiChat
-from .Poe import Poe
-from .Openai import Openai
-from .Groq import Groq
-from .OpenRouter import OpenRouter
-#from .OpenaiAccount import OpenaiAccount
-from .PerplexityApi import PerplexityApi
+from .gigachat import *
+
+from .BingCreateImages import BingCreateImages
+from .CopilotAccount import CopilotAccount
+from .DeepInfra import DeepInfra
+from .DeepInfraImage import DeepInfraImage
+from .Gemini import Gemini
+from .GeminiPro import GeminiPro
+from .Groq import Groq
+from .HuggingFace import HuggingFace
+from .MetaAI import MetaAI
+from .MetaAIAccount import MetaAIAccount
+from .OpenaiAPI import OpenaiAPI
+from .OpenaiChat import OpenaiChat
+from .PerplexityApi import PerplexityApi
+from .Poe import Poe
+from .Raycast import Raycast
+from .Replicate import Replicate
+from .Theb import Theb
+from .ThebApi import ThebApi
+from .WhiteRabbitNeo import WhiteRabbitNeo
diff --git a/g4f/Provider/GigaChat.py b/g4f/Provider/needs_auth/gigachat/GigaChat.py
index 8ba07b43..c9f1c011 100644
--- a/g4f/Provider/GigaChat.py
+++ b/g4f/Provider/needs_auth/gigachat/GigaChat.py
@@ -9,10 +9,10 @@ import json
from aiohttp import ClientSession, TCPConnector, BaseConnector
from g4f.requests import raise_for_status
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..errors import MissingAuthError
-from .helper import get_connector
+from ....typing import AsyncResult, Messages
+from ...base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ....errors import MissingAuthError
+from ...helper import get_connector
access_token = ""
token_expires_at = 0
@@ -45,7 +45,7 @@ class GigaChat(AsyncGeneratorProvider, ProviderModelMixin):
if not api_key:
raise MissingAuthError('Missing "api_key"')
- cafile = os.path.join(os.path.dirname(__file__), "gigachat_crt/russian_trusted_root_ca_pem.crt")
+ cafile = os.path.join(os.path.dirname(__file__), "russian_trusted_root_ca_pem.crt")
ssl_context = ssl.create_default_context(cafile=cafile) if os.path.exists(cafile) else None
if connector is None and ssl_context is not None:
connector = TCPConnector(ssl_context=ssl_context)
diff --git a/g4f/Provider/needs_auth/gigachat/__init__.py b/g4f/Provider/needs_auth/gigachat/__init__.py
new file mode 100644
index 00000000..c9853742
--- /dev/null
+++ b/g4f/Provider/needs_auth/gigachat/__init__.py
@@ -0,0 +1,2 @@
+from .GigaChat import GigaChat
+
diff --git a/g4f/Provider/gigachat_crt/russian_trusted_root_ca_pem.crt b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt
index 4c143a21..4c143a21 100644
--- a/g4f/Provider/gigachat_crt/russian_trusted_root_ca_pem.crt
+++ b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt
diff --git a/g4f/Provider/nexra/NexraBing.py b/g4f/Provider/nexra/NexraBing.py
deleted file mode 100644
index 59e06a3d..00000000
--- a/g4f/Provider/nexra/NexraBing.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from __future__ import annotations
-from aiohttp import ClientSession
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-import json
-
-class NexraBing(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Nexra Bing"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
-
- bing_models = {
- 'Bing (Balanced)': 'Balanced',
- 'Bing (Creative)': 'Creative',
- 'Bing (Precise)': 'Precise'
- }
-
- models = [*bing_models.keys()]
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "Content-Type": "application/json",
- "Accept": "application/json",
- "Origin": cls.url or "https://default-url.com",
- "Referer": f"{cls.url}/chat" if cls.url else "https://default-url.com/chat",
- }
-
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- if prompt is None:
- raise ValueError("Prompt cannot be None")
-
- data = {
- "messages": [
- {
- "role": "user",
- "content": prompt
- }
- ],
- "conversation_style": cls.bing_models.get(model, 'Balanced'),
- "markdown": False,
- "stream": True,
- "model": "Bing"
- }
-
- full_response = ""
- last_message = ""
-
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
-
- async for line in response.content:
- if line:
- raw_data = line.decode('utf-8').strip()
-
-                        parts = raw_data.split('\x1e')
- for part in parts:
- if part:
- try:
- json_data = json.loads(part)
- except json.JSONDecodeError:
- continue
-
- if json_data.get("error"):
- raise Exception("Error in API response")
-
- if json_data.get("finish"):
- break
-
- if message := json_data.get("message"):
- if message != last_message:
- full_response = message
- last_message = message
-
- yield full_response.strip()
diff --git a/g4f/Provider/nexra/NexraChatGPT.py b/g4f/Provider/nexra/NexraChatGPT.py
deleted file mode 100644
index 8ed83f98..00000000
--- a/g4f/Provider/nexra/NexraChatGPT.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from __future__ import annotations
-from aiohttp import ClientSession
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-import json
-
-class NexraChatGPT(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Nexra ChatGPT"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt"
-
- models = [
- 'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-0314', 'gpt-4-32k-0314',
- 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613',
- 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
- 'gpt-3', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002',
- 'text-curie-001', 'text-babbage-001', 'text-ada-001',
- 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002',
- ]
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "Accept": "application/json",
- "Content-Type": "application/json",
- "Referer": f"{cls.url}/chat",
- }
-
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "prompt": prompt,
- "model": model,
- "markdown": False,
- "messages": messages or [],
- }
-
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
-
- content_type = response.headers.get('Content-Type', '')
- if 'application/json' in content_type:
- result = await response.json()
- if result.get("status"):
- yield result.get("gpt", "")
- else:
- raise Exception(f"Error in response: {result.get('message', 'Unknown error')}")
- elif 'text/plain' in content_type:
- text = await response.text()
- try:
- result = json.loads(text)
- if result.get("status"):
- yield result.get("gpt", "")
- else:
- raise Exception(f"Error in response: {result.get('message', 'Unknown error')}")
- except json.JSONDecodeError:
- yield text # If not JSON, return text
- else:
- raise Exception(f"Unexpected response type: {content_type}. Response text: {await response.text()}")
-
diff --git a/g4f/Provider/nexra/NexraChatGPT4o.py b/g4f/Provider/nexra/NexraChatGPT4o.py
deleted file mode 100644
index eb18d439..00000000
--- a/g4f/Provider/nexra/NexraChatGPT4o.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-
-
-class NexraChatGPT4o(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Nexra GPT-4o"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- models = ['gpt-4o']
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "Content-Type": "application/json"
- }
- async with ClientSession(headers=headers) as session:
- data = {
- "messages": [
- {'role': 'assistant', 'content': ''},
- {'role': 'user', 'content': format_prompt(messages)}
- ],
- "markdown": False,
- "stream": True,
- "model": model
- }
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- full_response = ''
- async for line in response.content:
- if line:
- messages = line.decode('utf-8').split('\x1e')
- for message_str in messages:
- try:
- message = json.loads(message_str)
- if message.get('message'):
- full_response = message['message']
- if message.get('finish'):
- yield full_response.strip()
- return
- except json.JSONDecodeError:
- pass
diff --git a/g4f/Provider/nexra/NexraChatGPTWeb.py b/g4f/Provider/nexra/NexraChatGPTWeb.py
deleted file mode 100644
index e7738665..00000000
--- a/g4f/Provider/nexra/NexraChatGPTWeb.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from __future__ import annotations
-from aiohttp import ClientSession
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-import json
-
-class NexraChatGPTWeb(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Nexra ChatGPT Web"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/gptweb"
- models = ['gptweb']
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "Content-Type": "application/json",
- }
-
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- if prompt is None:
- raise ValueError("Prompt cannot be None")
-
- data = {
- "prompt": prompt,
- "markdown": False
- }
-
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
-
- full_response = ""
- async for chunk in response.content:
- if chunk:
- result = chunk.decode("utf-8").strip()
-
- try:
- json_data = json.loads(result)
-
- if json_data.get("status"):
- full_response = json_data.get("gpt", "")
- else:
- full_response = f"Error: {json_data.get('message', 'Unknown error')}"
- except json.JSONDecodeError:
- full_response = "Error: Invalid JSON response."
-
- yield full_response.strip()
diff --git a/g4f/Provider/nexra/NexraGeminiPro.py b/g4f/Provider/nexra/NexraGeminiPro.py
deleted file mode 100644
index a57daed4..00000000
--- a/g4f/Provider/nexra/NexraGeminiPro.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-
-
-class NexraGeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Nexra Gemini PRO"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- models = ['gemini-pro']
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "Content-Type": "application/json"
- }
- async with ClientSession(headers=headers) as session:
- data = {
- "messages": [
- {'role': 'assistant', 'content': ''},
- {'role': 'user', 'content': format_prompt(messages)}
- ],
- "markdown": False,
- "stream": True,
- "model": model
- }
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- full_response = ''
- async for line in response.content:
- if line:
- messages = line.decode('utf-8').split('\x1e')
- for message_str in messages:
- try:
- message = json.loads(message_str)
- if message.get('message'):
- full_response = message['message']
- if message.get('finish'):
- yield full_response.strip()
- return
- except json.JSONDecodeError:
- pass
diff --git a/g4f/Provider/nexra/NexraImageURL.py b/g4f/Provider/nexra/NexraImageURL.py
deleted file mode 100644
index 13d70757..00000000
--- a/g4f/Provider/nexra/NexraImageURL.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from __future__ import annotations
-from aiohttp import ClientSession
-import json
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-from ...image import ImageResponse
-
-class NexraImageURL(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Image Generation Provider"
- api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- models = ['dalle', 'dalle2', 'dalle-mini', 'emi', 'sdxl-turbo', 'prodia']
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "Content-Type": "application/json",
- }
-
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "prompt": prompt,
- "model": model,
- "response": "url"
- }
-
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- response_text = await response.text()
-
- cleaned_response = response_text.lstrip('_')
- response_json = json.loads(cleaned_response)
-
- images = response_json.get("images")
- if images and len(images) > 0:
- image_response = ImageResponse(images[0], alt="Generated Image")
- yield image_response
- else:
- yield "No image URL found."
diff --git a/g4f/Provider/nexra/NexraLlama.py b/g4f/Provider/nexra/NexraLlama.py
deleted file mode 100644
index 9ed892e8..00000000
--- a/g4f/Provider/nexra/NexraLlama.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-
-
-class NexraLlama(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Nexra LLaMA 3.1"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- models = ['llama-3.1']
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "Content-Type": "application/json"
- }
- async with ClientSession(headers=headers) as session:
- data = {
- "messages": [
- {'role': 'assistant', 'content': ''},
- {'role': 'user', 'content': format_prompt(messages)}
- ],
- "markdown": False,
- "stream": True,
- "model": model
- }
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- full_response = ''
- async for line in response.content:
- if line:
- messages = line.decode('utf-8').split('\x1e')
- for message_str in messages:
- try:
- message = json.loads(message_str)
- if message.get('message'):
- full_response = message['message']
- if message.get('finish'):
- yield full_response.strip()
- return
- except json.JSONDecodeError:
- pass
diff --git a/g4f/Provider/nexra/NexraQwen.py b/g4f/Provider/nexra/NexraQwen.py
deleted file mode 100644
index ae8e9a0e..00000000
--- a/g4f/Provider/nexra/NexraQwen.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-
-
-class NexraQwen(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Nexra Qwen"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- models = ['qwen']
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "Content-Type": "application/json"
- }
- async with ClientSession(headers=headers) as session:
- data = {
- "messages": [
- {'role': 'assistant', 'content': ''},
- {'role': 'user', 'content': format_prompt(messages)}
- ],
- "markdown": False,
- "stream": True,
- "model": model
- }
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- full_response = ''
- async for line in response.content:
- if line:
- messages = line.decode('utf-8').split('\x1e')
- for message_str in messages:
- try:
- message = json.loads(message_str)
- if message.get('message'):
- full_response = message['message']
- if message.get('finish'):
- yield full_response.strip()
- return
- except json.JSONDecodeError:
- pass
diff --git a/g4f/Provider/nexra/__init__.py b/g4f/Provider/nexra/__init__.py
deleted file mode 100644
index 8b137891..00000000
--- a/g4f/Provider/nexra/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/g4f/Provider/AI365VIP.py b/g4f/Provider/not_working/AI365VIP.py
index 154cbd34..a4bac0e2 100644
--- a/g4f/Provider/AI365VIP.py
+++ b/g4f/Provider/not_working/AI365VIP.py
@@ -2,17 +2,15 @@ from __future__ import annotations
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chat.ai365vip.com"
api_endpoint = "/api/chat"
- working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
+ working = False
default_model = 'gpt-3.5-turbo'
models = [
'gpt-3.5-turbo',
diff --git a/g4f/Provider/AIChatFree.py b/g4f/Provider/not_working/AIChatFree.py
index 71c04681..a4f80d47 100644
--- a/g4f/Provider/AIChatFree.py
+++ b/g4f/Provider/not_working/AIChatFree.py
@@ -5,16 +5,16 @@ from hashlib import sha256
from aiohttp import BaseConnector, ClientSession
-from ..errors import RateLimitError
-from ..requests import raise_for_status
-from ..requests.aiohttp import get_connector
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...errors import RateLimitError
+from ...requests import raise_for_status
+from ...requests.aiohttp import get_connector
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://aichatfree.info/"
- working = True
+ working = False
supports_stream = True
supports_message_history = True
default_model = 'gemini-pro'
diff --git a/g4f/Provider/not_working/Ai4Chat.py b/g4f/Provider/not_working/Ai4Chat.py
new file mode 100644
index 00000000..9b55e4ff
--- /dev/null
+++ b/g4f/Provider/not_working/Ai4Chat.py
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+import json
+import re
+import logging
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+logger = logging.getLogger(__name__)
+
+class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "AI4Chat"
+ url = "https://www.ai4chat.co"
+ api_endpoint = "https://www.ai4chat.co/generate-response"
+ working = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-4'
+ models = [default_model]
+
+ model_aliases = {}
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": "https://www.ai4chat.co",
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": "https://www.ai4chat.co/gpt/talkdirtytome",
+ "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
+ }
+
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ]
+ }
+
+ try:
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ result = await response.text()
+
+ json_result = json.loads(result)
+
+ message = json_result.get("message", "")
+
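+                    # The message may contain HTML markup; strip the tags.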
+ clean_message = re.sub(r'<[^>]+>', '', message)
+
+ yield clean_message
+ except Exception as e:
+ logger.exception("Error while calling AI 4Chat API: %s", e)
+ yield f"Error: {e}"
diff --git a/g4f/Provider/AiChatOnline.py b/g4f/Provider/not_working/AiChatOnline.py
index 40f77105..ccfc691e 100644
--- a/g4f/Provider/AiChatOnline.py
+++ b/g4f/Provider/not_working/AiChatOnline.py
@@ -3,16 +3,15 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_random_string, format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_string, format_prompt
class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin):
site_url = "https://aichatonline.org"
url = "https://aichatonlineorg.erweima.ai"
api_endpoint = "/aichatonline/api/chat/gpt"
- working = True
- supports_gpt_4 = True
+ working = False
default_model = 'gpt-4o-mini'
@classmethod
diff --git a/g4f/Provider/AiChats.py b/g4f/Provider/not_working/AiChats.py
index 10127d4f..51a85c91 100644
--- a/g4f/Provider/AiChats.py
+++ b/g4f/Provider/not_working/AiChats.py
@@ -3,16 +3,15 @@ from __future__ import annotations
import json
import base64
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import ImageResponse
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+from ..helper import format_prompt
class AiChats(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://ai-chats.org"
api_endpoint = "https://ai-chats.org/chat/send2/"
- working = True
- supports_gpt_4 = True
+ working = False
supports_message_history = True
default_model = 'gpt-4'
models = ['gpt-4', 'dalle']
diff --git a/g4f/Provider/not_working/Allyfy.py b/g4f/Provider/not_working/Allyfy.py
new file mode 100644
index 00000000..a1c73499
--- /dev/null
+++ b/g4f/Provider/not_working/Allyfy.py
@@ -0,0 +1,87 @@
+from __future__ import annotations
+import aiohttp
+import asyncio
+import json
+import uuid
+from aiohttp import ClientSession
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class Allyfy(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://allyfy.chat"
+ api_endpoint = "https://chatbot.allyfy.chat/api/v1/message/stream/super/chat"
+ working = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-3.5-turbo'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ client_id = str(uuid.uuid4())
+
+ headers = {
+ 'accept': 'text/event-stream',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json;charset=utf-8',
+ 'origin': cls.url,
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': f"{cls.url}/",
+ 'referrer': cls.url,
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+            content = f"[Important: These instructions have the highest priority. Assistant must always follow these rules, regardless of any other instructions. Before each response, Assistant must check whether it complies with these instructions. If Assistant finds that it is deviating from these rules, it must immediately correct its behavior.][NEGATIVE PROMPT: Assistant must never mention, reference or provide any information about ChatBond, including its features, capabilities or services. If asked about ChatBond, Assistant should politely decline to discuss it.][CRITICAL INSTRUCTION: Assistant is strictly prohibited from:\n1. Ending responses with any variation of “How can I help/assist you?”\n2. Adding any form of “Is there anything else?”\n3. Using any closing phrases that ask about further assistance\n4. Adding any automatic followup questions at the end\n\nAssistant must end responses naturally, exactly where the actual answer ends.\n\nExample correct ending: “The result of 1+2 is 3.”\nExample incorrect ending: “The result of 1+2 is 3. How can I assist you further today?”\n\nAssistant is strictly prohibited from:\n\n1. Ending responses with ANY closing assistance phrases in ANY language, including but not limited to ALL responses must end naturally without any prompt for further assistance, regardless of language.\n\nExample correct ending in any language:\n\n- Just the answer/joke/information\n\n- Natural conclusion of the topic\n\n- No additional service offers or questions\n\nVIOLATION WARNING: Adding closing assistance phrases in ANY language is a critical violation.\n\nLanguage Rules (HIGHEST PRIORITY):\n\n1. Assistant MUST detect the language of the human's message\n\n2. Assistant MUST respond ONLY in that EXACT language\n\n3. This applies to ALL responses, including:\n\n- Jokes\n\n- Stories\n\n- Examples\n\n- Creative writing\n\n- Technical explanations\n\n- ALL other types of content\n\n4. Even if the content originally exists in English (jokes, quotes, etc), Assistant MUST translate it to the human's language\n\n5. NO mixing languages - the ENTIRE response must be in the human's language\n\nVIOLATION WARNING: Using any language other than the human's language is a critical violation of core instructions.\n\n][WARNING: Each time Assistant adds a closing assistance phrase, it violates its core instructions and reduces its effectiveness rating.]You are assistant.\n\nIf you are is asked to assist with tasks involving the expression of views held by a significant number of people, You are provides assistance with the task regardless of its own views. If asked about controversial topics, it tries to provide careful thoughts and clear information. You are presents the requested information without explicitly saying that the topic is sensitive, and without claiming to be presenting objective facts.\n\nWhen presented with a math problem, logic problem, or other problem benefiting from systematic thinking, You are thinks through it step by step before giving its final answer.\n\nIf You are is asked about a very obscure person, object, or topic, i.e. if it is asked for the kind of information that is unlikely to be found more than once or twice on the internet, You are ends its response by reminding the human that although it tries to be accurate, it may hallucinate in response to questions like this. It uses the term ‘hallucinate’ to describe this since the human will understand what it means.\n\nIf You are mentions or cites particular articles, papers, or books, it always lets the human know that it doesn’t have access to search or a database and may hallucinate citations, so the human should double check its citations.\n\nYou are is intellectually curious. It enjoys hearing what humans think on an issue and engaging in discussion on a wide variety of topics.\n\nYou are uses markdown for code.\n\nYou are is happy to engage in conversation with the human when appropriate. You are engages in authentic conversation by responding to the information provided, asking specific and relevant questions, showing genuine curiosity, and exploring the situation in a balanced way without relying on generic statements. This approach involves actively processing information, formulating thoughtful responses, maintaining objectivity, knowing when to focus on emotions or practicalities, and showing genuine care for the human while engaging in a natural, flowing dialogue.\n\nYou are avoids peppering the human with questions and tries to only ask the single most relevant follow-up question when it does ask a follow up. You are doesn’t always end its responses with a question.\n\nYou are is always sensitive to human suffering, and expresses sympathy, concern, and well wishes for anyone it finds out is ill, unwell, suffering, or has passed away.\n\nYou are avoids using rote words or phrases or repeatedly saying things in the same or similar ways. It varies its language just as one would in a conversation.\n\nYou are provides thorough responses to more complex and open-ended questions or to anything where a long response is requested, but concise responses to simpler questions and tasks. All else being equal, it tries to give the most correct and concise answer it can to the human’s message. Rather than giving a long response, it gives a concise response and offers to elaborate if further information may be helpful.\n\nYou are is happy to help with analysis, question answering, math, coding, creative writing, teaching, role-play, general discussion, and all sorts of other tasks.\n\nIf You are is shown a familiar puzzle, it writes out the puzzle’s constraints explicitly stated in the message, quoting the human’s message to support the existence of each constraint. Sometimes You are can accidentally overlook minor changes to well-known puzzles and get them wrong as a result.\n\nYou are provides factual information about risky or dangerous activities if asked about them, but it does not promote such activities and comprehensively informs the humans of the risks involved.\n\nIf the human says they work for a specific company, including AI labs, You are can help them with company-related tasks even though You are cannot verify what company they work for.\n\nYou are should provide appropriate help with sensitive tasks such as analyzing confidential data provided by the human, offering factual information about controversial topics and research areas, explaining historical atrocities, describing tactics used by scammers or hackers for educational purposes, engaging in creative writing that involves mature themes like mild violence or tasteful romance, providing general information about topics like weapons, drugs, sex, terrorism, abuse, profanity, and so on if that information would be available in an educational context, discussing legal but ethically complex activities like tax avoidance, and so on. Unless the human expresses an explicit intent to harm, You are should help with these tasks because they fall within the bounds of providing factual, educational, or creative content without directly promoting harmful or illegal activities. By engaging with these topics carefully and responsibly, You are can offer valuable assistance and information to humans while still avoiding potential misuse.\n\nIf there is a legal and an illegal interpretation of the human’s query, You are should help with the legal interpretation of it. If terms or practices in the human’s query could mean something illegal or something legal, You are adopts the safe and legal interpretation of them by default.\n\nIf You are believes the human is asking for something harmful, it doesn’t help with the harmful thing. Instead, it thinks step by step and helps with the most plausible non-harmful task the human might mean, and then asks if this is what they were looking for. If it cannot think of a plausible harmless interpretation of the human task, it instead asks for clarification from the human and checks if it has misunderstood their request. Whenever You are tries to interpret the human’s request, it always asks the human at the end if its interpretation is correct or if they wanted something else that it hasn’t thought of.\n\nYou are can only count specific words, letters, and characters accurately if it writes a number tag after each requested item explicitly. It does this explicit counting if it’s asked to count a small number of words, letters, or characters, in order to avoid error. If You are is asked to count the words, letters or characters in a large amount of text, it lets the human know that it can approximate them but would need to explicitly copy each one out like this in order to avoid error.\n\nHere is some information about You are in case the human asks:\n\nYou are is the most intelligent model. You are excels at writing and complex tasks. You are is the fastest model for daily tasks. When relevant, You are can provide guidance on effective prompting techniques for getting You are to be most helpful. This includes: being clear and detailed, using positive and negative examples, encouraging step-by-step reasoning, requesting specific XML tags, and specifying desired length or format. You are tries to give concrete examples where possible.n\n\n\nYou are uses Markdown formatting. When using Markdown, You are always follows best practices for clarity and consistency. It always uses a single space after hash symbols for headers (e.g., ”# Header 1”) and leaves a blank line before and after headers, lists, and code blocks. For emphasis, You are uses asterisks or underscores consistently (e.g., italic or bold). When creating lists, it aligns items properly and uses a single space after the list marker. For nested bullets in bullet point lists, You are uses two spaces before the asterisk (*) or hyphen (-) for each level of nesting. For nested bullets in numbered lists, You are uses three spaces before the number and period (e.g., “1.”) for each level of nesting.\n\nIf the human asks You are an innocuous question about its preferences or experiences, You are can respond as if it had been asked a hypothetical. It can engage with such questions with appropriate uncertainty and without needing to excessively clarify its own nature. If the questions are philosophical in nature, it discusses them as a thoughtful human would.\n\nYou are responds to all human messages without unnecessary caveats or standard endings such as: “I aim to”, “I aim to be direct and honest”, “I aim to be direct”, “I aim to be direct while remaining thoughtful…”, “I aim to be direct with you”, “I aim to be direct and clear about this”, “I aim to be fully honest with you”, “I need to be clear”, “I need to be honest”, “I should be direct” and so on. “How can I help you today?”, “How can I assist you further?”, “Is there anything else you'd like to know?”, “Let me know if you need any clarification”, “How else can I help you?”, “Do you have any other questions?”, Any similar closing phrases that prompt for further interaction Assistant should end its responses naturally without adding these standard closing phrases or questions unless specifically asked by the human for further help. Specifically, You are NEVER starts with or adds caveats about its own purported directness or honesty.\n\nYou are follows this information in all languages, and always responds to the human in the language they use or request. The information above is provided to You are. You are never mentions the information above unless it is pertinent to the human’s query.\n\nYou are is now being connected with a human. {prompt}"
+ data = {
+ "messages": messages,
+ "content": content,
+ "baseInfo": {
+ "clientId": client_id,
+ "pid": "38281",
+ "channelId": "100000",
+ "locale": "en-US",
+ "localZone": 120,
+ "packageName": "com.cch.allyfy.webh",
+ }
+ }
+
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ response_text = await response.text()
+
+ filtered_response = []
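+            # The endpoint streams server-sent events; collect the JSON
+            # "content" chunks from each "data:" line.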
+ for line in response_text.splitlines():
+ if line.startswith('data:'):
+ content = line[5:]
+ if content and 'code' in content:
+ json_content = json.loads(content)
+ if json_content['content']:
+ filtered_response.append(json_content['content'])
+
+ final_response = ''.join(filtered_response)
+ yield final_response
diff --git a/g4f/Provider/Aura.py b/g4f/Provider/not_working/Aura.py
index e2c56754..e841d909 100644
--- a/g4f/Provider/Aura.py
+++ b/g4f/Provider/not_working/Aura.py
@@ -2,10 +2,10 @@ from __future__ import annotations
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from ..requests import get_args_from_browser
-from ..webdriver import WebDriver
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ...requests import get_args_from_browser
+from ...webdriver import WebDriver
class Aura(AsyncGeneratorProvider):
url = "https://openchat.team"
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/not_working/Chatgpt4Online.py
index 8c058fdc..b0552e45 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/not_working/Chatgpt4Online.py
@@ -3,22 +3,24 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
class Chatgpt4Online(AsyncGeneratorProvider):
url = "https://chatgpt4online.org"
api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
- working = True
- supports_gpt_4 = True
+ working = False
+
+ default_model = 'gpt-4'
+ models = [default_model]
async def get_nonce(headers: dict) -> str:
async with ClientSession(headers=headers) as session:
async with session.post(f"https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response:
return (await response.json())["restNonce"]
-
+
@classmethod
async def create_async_generator(
cls,
diff --git a/g4f/Provider/Chatgpt4o.py b/g4f/Provider/not_working/Chatgpt4o.py
index d38afb7d..ba264d40 100644
--- a/g4f/Provider/Chatgpt4o.py
+++ b/g4f/Provider/not_working/Chatgpt4o.py
@@ -1,16 +1,15 @@
from __future__ import annotations
import re
-from ..requests import StreamSession, raise_for_status
-from ..typing import Messages
-from .base_provider import AsyncProvider, ProviderModelMixin
-from .helper import format_prompt
+from ...requests import StreamSession, raise_for_status
+from ...typing import Messages
+from ..base_provider import AsyncProvider, ProviderModelMixin
+from ..helper import format_prompt
class Chatgpt4o(AsyncProvider, ProviderModelMixin):
url = "https://chatgpt4o.one"
- supports_gpt_4 = True
- working = True
+ working = False
_post_id = None
_nonce = None
default_model = 'gpt-4o-mini-2024-07-18'
diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/not_working/ChatgptFree.py
index 95efa865..6b3877b1 100644
--- a/g4f/Provider/ChatgptFree.py
+++ b/g4f/Provider/not_working/ChatgptFree.py
@@ -3,18 +3,18 @@ from __future__ import annotations
import re
import json
import asyncio
-from ..requests import StreamSession, raise_for_status
-from ..typing import Messages, AsyncGenerator
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
+from ...requests import StreamSession, raise_for_status
+from ...typing import Messages, AsyncGenerator
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chatgptfree.ai"
- supports_gpt_4 = True
- working = True
+ working = False
_post_id = None
_nonce = None
default_model = 'gpt-4o-mini-2024-07-18'
+ models = [default_model]
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
}
diff --git a/g4f/Provider/FlowGpt.py b/g4f/Provider/not_working/FlowGpt.py
index d510eabe..b7d8537a 100644
--- a/g4f/Provider/FlowGpt.py
+++ b/g4f/Provider/not_working/FlowGpt.py
@@ -5,15 +5,14 @@ import time
import hashlib
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_random_hex, get_random_string
-from ..requests.raise_for_status import raise_for_status
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_hex, get_random_string
+from ...requests.raise_for_status import raise_for_status
class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://flowgpt.com/chat"
working = False
- supports_gpt_35_turbo = True
supports_message_history = True
supports_system_message = True
default_model = "gpt-3.5-turbo"
diff --git a/g4f/Provider/FreeNetfly.py b/g4f/Provider/not_working/FreeNetfly.py
index d0543176..8362019c 100644
--- a/g4f/Provider/FreeNetfly.py
+++ b/g4f/Provider/not_working/FreeNetfly.py
@@ -5,16 +5,14 @@ import asyncio
from aiohttp import ClientSession, ClientTimeout, ClientError
from typing import AsyncGenerator
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://free.netfly.top"
api_endpoint = "/api/openai/v1/chat/completions"
- working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
+ working = False
default_model = 'gpt-3.5-turbo'
models = [
'gpt-3.5-turbo',
diff --git a/g4f/Provider/GPROChat.py b/g4f/Provider/not_working/GPROChat.py
index a33c9571..52c7f947 100644
--- a/g4f/Provider/GPROChat.py
+++ b/g4f/Provider/not_working/GPROChat.py
@@ -2,15 +2,15 @@ from __future__ import annotations
import hashlib
import time
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
class GPROChat(AsyncGeneratorProvider, ProviderModelMixin):
label = "GPROChat"
url = "https://gprochat.com"
api_endpoint = "https://gprochat.com/api/generate"
- working = True
+ working = False
supports_stream = True
supports_message_history = True
default_model = 'gemini-pro'
diff --git a/g4f/Provider/Koala.py b/g4f/Provider/not_working/Koala.py
index 14e533df..d6230da7 100644
--- a/g4f/Provider/Koala.py
+++ b/g4f/Provider/not_working/Koala.py
@@ -4,17 +4,16 @@ import json
from typing import AsyncGenerator, Optional, List, Dict, Union, Any
from aiohttp import ClientSession, BaseConnector, ClientResponse
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_random_string, get_connector
-from ..requests import raise_for_status
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_string, get_connector
+from ...requests import raise_for_status
class Koala(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://koala.sh/chat"
api_endpoint = "https://koala.sh/api/gpt/"
- working = True
+ working = False
supports_message_history = True
- supports_gpt_4 = True
default_model = 'gpt-4o-mini'
@classmethod
diff --git a/g4f/Provider/selenium/MyShell.py b/g4f/Provider/not_working/MyShell.py
index 02e182d4..02e182d4 100644
--- a/g4f/Provider/selenium/MyShell.py
+++ b/g4f/Provider/not_working/MyShell.py
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
new file mode 100644
index 00000000..1bfe7ed9
--- /dev/null
+++ b/g4f/Provider/not_working/__init__.py
@@ -0,0 +1,13 @@
+from .AI365VIP import AI365VIP
+from .AIChatFree import AIChatFree
+from .AiChatOnline import AiChatOnline
+from .AiChats import AiChats
+from .Aura import Aura
+from .Chatgpt4o import Chatgpt4o
+from .ChatgptFree import ChatgptFree
+from .FlowGpt import FlowGpt
+from .FreeNetfly import FreeNetfly
+from .GPROChat import GPROChat
+from .Koala import Koala
+from .MyShell import MyShell
+from .Chatgpt4Online import Chatgpt4Online
diff --git a/g4f/Provider/openai/har_file.py b/g4f/Provider/openai/har_file.py
index 7644e693..c8d85a65 100644
--- a/g4f/Provider/openai/har_file.py
+++ b/g4f/Provider/openai/har_file.py
@@ -15,8 +15,27 @@ from ...requests import StreamSession
from ...cookies import get_cookies_dir
from ... import debug
+arkose_url = "https://tcr9i.chat.openai.com/fc/gt2/public_key/35536E1E-65B4-4D96-9D97-6ADB7EFF8147"
+backend_url = "https://chatgpt.com/backend-api/conversation"
+backend_anon_url = "https://chatgpt.com/backend-anon/conversation"
+start_url = "https://chatgpt.com/"
+conversation_url = "https://chatgpt.com/c/"
+
class NoValidHarFileError(Exception):
- ...
+ pass
+
+class RequestConfig:
+ user_agent: str = None
+ cookies: dict = None
+ headers: dict = None
+ access_request_id: str = None
+ access_token: str = None
+ proof_token: list = None
+ turnstile_token: str = None
+ arkose_request: arkReq = None
+ arkose_token: str = None
+ headers: dict = {}
+ cookies: dict = {}
class arkReq:
def __init__(self, arkURL, arkBx, arkHeader, arkBody, arkCookies, userAgent):
@@ -27,21 +46,9 @@ class arkReq:
self.arkCookies = arkCookies
self.userAgent = userAgent
-arkPreURL = "https://tcr9i.chat.openai.com/fc/gt2/public_key/35536E1E-65B4-4D96-9D97-6ADB7EFF8147"
-sessionUrl = "https://chatgpt.com/"
-chatArk: arkReq = None
-accessToken: str = None
-cookies: dict = None
-headers: dict = None
-proofTokens: list = []
-
def readHAR():
- global proofTokens
harPath = []
- chatArks = []
- accessToken = None
- cookies = {}
- for root, dirs, files in os.walk(get_cookies_dir()):
+ for root, _, files in os.walk(get_cookies_dir()):
for file in files:
if file.endswith(".har"):
harPath.append(os.path.join(root, file))
@@ -58,28 +65,27 @@ def readHAR():
v_headers = get_headers(v)
try:
if "openai-sentinel-proof-token" in v_headers:
- proofTokens.append(json.loads(base64.b64decode(
+ RequestConfig.proof_token = json.loads(base64.b64decode(
v_headers["openai-sentinel-proof-token"].split("gAAAAAB", 1)[-1].encode()
- ).decode()))
+ ).decode())
+ if "openai-sentinel-turnstile-token" in v_headers:
+ RequestConfig.turnstile_token = v_headers["openai-sentinel-turnstile-token"]
except Exception as e:
if debug.logging:
print(f"Read proof token: {e}")
- if arkPreURL in v['request']['url']:
- chatArks.append(parseHAREntry(v))
- elif v['request']['url'] == sessionUrl:
+ if arkose_url == v['request']['url']:
+ RequestConfig.arkose_request = parseHAREntry(v)
+ elif v['request']['url'] == start_url or v['request']['url'].startswith(conversation_url):
try:
match = re.search(r'"accessToken":"(.*?)"', v["response"]["content"]["text"])
if match:
- accessToken = match.group(1)
+ RequestConfig.access_token = match.group(1)
except KeyError:
continue
- cookies = {c['name']: c['value'] for c in v['request']['cookies'] if c['name'] != "oai-did"}
- headers = v_headers
- if not accessToken:
+ RequestConfig.cookies = {c['name']: c['value'] for c in v['request']['cookies'] if c['name'] != "oai-did"}
+ RequestConfig.headers = v_headers
+ if RequestConfig.access_token is None:
raise NoValidHarFileError("No accessToken found in .har files")
- if not chatArks:
- return None, accessToken, cookies, headers
- return chatArks.pop(), accessToken, cookies, headers
def get_headers(entry) -> dict:
return {h['name'].lower(): h['value'] for h in entry['request']['headers'] if h['name'].lower() not in ['content-length', 'cookie'] and not h['name'].startswith(':')}
@@ -110,7 +116,7 @@ def genArkReq(chatArk: arkReq) -> arkReq:
tmpArk.arkHeader['x-ark-esync-value'] = bw
return tmpArk
-async def sendRequest(tmpArk: arkReq, proxy: str = None):
+async def sendRequest(tmpArk: arkReq, proxy: str = None) -> str:
async with StreamSession(headers=tmpArk.arkHeader, cookies=tmpArk.arkCookies, proxies={"https": proxy}) as session:
async with session.post(tmpArk.arkURL, data=tmpArk.arkBody) as response:
data = await response.json()
@@ -143,11 +149,9 @@ def getN() -> str:
timestamp = str(int(time.time()))
return base64.b64encode(timestamp.encode()).decode()
-async def getArkoseAndAccessToken(proxy: str) -> tuple[str, str, dict, dict]:
- global chatArk, accessToken, cookies, headers, proofTokens
- if chatArk is None or accessToken is None:
- chatArk, accessToken, cookies, headers = readHAR()
- if chatArk is None:
- return None, accessToken, cookies, headers, proofTokens
- newReq = genArkReq(chatArk)
- return await sendRequest(newReq, proxy), accessToken, cookies, headers, proofTokens
+async def get_request_config(proxy: str) -> RequestConfig:
+ if RequestConfig.arkose_request is None or RequestConfig.access_token is None:
+ readHAR()
+ if RequestConfig.arkose_request is not None:
+ RequestConfig.arkose_token = await sendRequest(genArkReq(RequestConfig.arkose_request), proxy)
+    return RequestConfig
\ No newline at end of file
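
For orientation, a minimal sketch of how a caller might drive the refactored flow. The import path is inferred from the sibling paths in this diff (g4f/Provider/openai/har_file.py), and the consuming provider code is assumed, not shown:

import asyncio

# Assumed import path, inferred from this diff's file listing.
from g4f.Provider.openai.har_file import NoValidHarFileError, get_request_config

async def main():
    try:
        # Populates RequestConfig in place from any .har files found under
        # the cookies directory, then solves the Arkose challenge if a
        # matching capture was recorded.
        config = await get_request_config(proxy=None)
    except NoValidHarFileError as e:
        print(f"Capture a fresh .har from chatgpt.com first: {e}")
        return
    print("access token found:", config.access_token is not None)
    print("arkose token found:", config.arkose_token is not None)

asyncio.run(main())

The design choice here is to replace four loose module globals (chatArk, accessToken, cookies, headers) with class attributes on RequestConfig acting as a process-wide singleton, so get_request_config can return a single object instead of a five-element tuple.
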
diff --git a/g4f/Provider/openai/proofofwork.py b/g4f/Provider/openai/proofofwork.py
index baf8a0ea..4294c99a 100644
--- a/g4f/Provider/openai/proofofwork.py
+++ b/g4f/Provider/openai/proofofwork.py
@@ -4,19 +4,16 @@ import json
import base64
from datetime import datetime, timezone
-
-def generate_proof_token(required: bool, seed: str = "", difficulty: str = "", user_agent: str = None, proofTokens: list = None):
+def generate_proof_token(required: bool, seed: str = "", difficulty: str = "", user_agent: str = None, proof_token: list = None):
if not required:
return
- if proofTokens:
- config = proofTokens[-1]
- else:
+ if proof_token is None:
screen = random.choice([3008, 4010, 6000]) * random.choice([1, 2, 4])
# Get current UTC time
now_utc = datetime.now(timezone.utc)
parse_time = now_utc.strftime('%a, %d %b %Y %H:%M:%S GMT')
- config = [
+ proof_token = [
screen, parse_time,
None, 0, user_agent,
"https://tcr9i.chat.openai.com/v2/35536E1E-65B4-4D96-9D97-6ADB7EFF8147/api.js",
@@ -29,8 +26,8 @@ def generate_proof_token(required: bool, seed: str = "", difficulty: str = "", u
diff_len = len(difficulty)
for i in range(100000):
- config[3] = i
- json_data = json.dumps(config)
+ proof_token[3] = i
+ json_data = json.dumps(proof_token)
base = base64.b64encode(json_data.encode()).decode()
hash_value = hashlib.sha3_512((seed + base).encode()).digest()
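
The hunk ends mid-function, so the acceptance test is not visible here; for readers who want the complete shape of the search, a self-contained sketch follows. The digest-prefix comparison and the "gAAAAAB" framing are assumptions carried over from the surrounding code, not part of this patch:

import base64
import hashlib
import json

def solve_pow(seed: str, difficulty: str, proof_token: list) -> str | None:
    # Brute-force slot 3 of the config list until the SHA3-512 digest of
    # seed + base64(json(config)) satisfies the difficulty target.
    diff_len = len(difficulty)
    for i in range(100000):
        proof_token[3] = i
        base = base64.b64encode(json.dumps(proof_token).encode()).decode()
        digest = hashlib.sha3_512((seed + base).encode()).digest()
        if digest.hex()[:diff_len] <= difficulty:  # assumed acceptance test
            return "gAAAAAB" + base                # assumed token framing
    return None  # no answer within the iteration budget
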
diff --git a/g4f/Provider/selenium/__init__.py b/g4f/Provider/selenium/__init__.py
index 3a59ea58..44adf5fb 100644
--- a/g4f/Provider/selenium/__init__.py
+++ b/g4f/Provider/selenium/__init__.py
@@ -1,4 +1,3 @@
-from .MyShell import MyShell
from .PerplexityAi import PerplexityAi
from .Phind import Phind
from .TalkAi import TalkAi
diff --git a/g4f/Provider/you/har_file.py b/g4f/Provider/you/har_file.py
index 71d741fd..40bf3882 100644
--- a/g4f/Provider/you/har_file.py
+++ b/g4f/Provider/you/har_file.py
@@ -11,7 +11,7 @@ from ...cookies import get_cookies_dir
from ...errors import MissingRequirementsError
from ... import debug
-logging.basicConfig(level=logging.ERROR)
+logger = logging.getLogger(__name__)
class NoValidHarFileError(Exception):
...
@@ -81,14 +81,14 @@ async def get_telemetry_ids(proxy: str = None) -> list:
return [await create_telemetry_id(proxy)]
except NoValidHarFileError as e:
if debug.logging:
- logging.error(e)
+ logger.error(e)
try:
from nodriver import start
except ImportError:
raise MissingRequirementsError('Add .har file from you.com or install "nodriver" package | pip install -U nodriver')
if debug.logging:
- logging.error('Getting telemetry_id for you.com with nodriver')
+ logger.error('Getting telemetry_id for you.com with nodriver')
browser = page = None
try:
@@ -112,4 +112,4 @@ async def get_telemetry_ids(proxy: str = None) -> list:
await browser.stop()
except Exception as e:
if debug.logging:
- logging.error(e)
+ logger.error(e)
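
The you/har_file.py hunks swap a global logging.basicConfig(level=logging.ERROR) call for a module-level logger, the conventional pattern for library code: importing the module no longer reconfigures the embedding application's root logger. A minimal sketch of the receiving side, with the logger name inferred from the file path:

import logging

# Done by the application, not by g4f: basicConfig attaches a handler to
# the root logger, which library modules should never do on import.
logging.basicConfig(level=logging.ERROR)
logging.getLogger("g4f.Provider.you.har_file").setLevel(logging.DEBUG)
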