Diffstat (limited to '')
-rw-r--r--  g4f/Provider/AI365VIP.py | 8
-rw-r--r--  g4f/Provider/AIChatFree.py | 76
-rw-r--r--  g4f/Provider/AIUncensored.py | 112
-rw-r--r--  g4f/Provider/Ai4Chat.py | 88
-rw-r--r--  g4f/Provider/AiChatOnline.py | 3
-rw-r--r--  g4f/Provider/AiChats.py | 1
-rw-r--r--  g4f/Provider/AiMathGPT.py | 74
-rw-r--r--  g4f/Provider/Airforce.py | 245
-rw-r--r--  g4f/Provider/Allyfy.py | 7
-rw-r--r--  g4f/Provider/AmigoChat.py | 189
-rw-r--r--  g4f/Provider/Aura.py | 4
-rw-r--r--  g4f/Provider/Bing.py | 1
-rw-r--r--  g4f/Provider/Binjie.py | 65
-rw-r--r--  g4f/Provider/Bixin123.py | 89
-rw-r--r--  g4f/Provider/Blackbox.py | 434
-rw-r--r--  g4f/Provider/ChatGpt.py | 225
-rw-r--r--  g4f/Provider/ChatGptEs.py | 84
-rw-r--r--  g4f/Provider/ChatHub.py | 84
-rw-r--r--  g4f/Provider/Chatgpt4Online.py | 6
-rw-r--r--  g4f/Provider/Chatgpt4o.py | 9
-rw-r--r--  g4f/Provider/ChatgptFree.py | 1
-rw-r--r--  g4f/Provider/ChatifyAI.py | 79
-rw-r--r--  g4f/Provider/Cloudflare.py | 212
-rw-r--r--  g4f/Provider/CodeNews.py | 94
-rw-r--r--  g4f/Provider/DDG.py | 162
-rw-r--r--  g4f/Provider/DarkAI.py | 85
-rw-r--r--  g4f/Provider/DeepInfraChat.py | 142
-rw-r--r--  g4f/Provider/DeepInfraImage.py | 5
-rw-r--r--  g4f/Provider/Editee.py | 77
-rw-r--r--  g4f/Provider/FlowGpt.py | 3
-rw-r--r--  g4f/Provider/FluxAirforce.py | 82
-rw-r--r--  g4f/Provider/FreeNetfly.py | 2
-rw-r--r--  g4f/Provider/GPROChat.py | 67
-rw-r--r--  g4f/Provider/GeminiPro.py | 8
-rw-r--r--  g4f/Provider/GizAI.py | 151
-rw-r--r--  g4f/Provider/GptTalkRu.py | 59
-rw-r--r--  g4f/Provider/HuggingChat.py | 65
-rw-r--r--  g4f/Provider/HuggingFace.py | 27
-rw-r--r--  g4f/Provider/Koala.py | 12
-rw-r--r--  g4f/Provider/Liaobots.py | 88
-rw-r--r--  g4f/Provider/LiteIcoding.py | 113
-rw-r--r--  g4f/Provider/Llama.py | 91
-rw-r--r--  g4f/Provider/MagickPen.py | 153
-rw-r--r--  g4f/Provider/Nexra.py | 181
-rw-r--r--  g4f/Provider/Ollama.py | 13
-rw-r--r--  g4f/Provider/PerplexityLabs.py | 11
-rw-r--r--  g4f/Provider/Pi.py | 1
-rw-r--r--  g4f/Provider/Pizzagpt.py | 1
-rw-r--r--  g4f/Provider/Prodia.py | 150
-rw-r--r--  g4f/Provider/ReplicateHome.py | 213
-rw-r--r--  g4f/Provider/Rocks.py | 70
-rw-r--r--  g4f/Provider/RubiksAI.py | 162
-rw-r--r--  g4f/Provider/Snova.py | 133
-rw-r--r--  g4f/Provider/TwitterBio.py | 103
-rw-r--r--  g4f/Provider/Upstage.py | 7
-rw-r--r--  g4f/Provider/Vercel.py | 104
-rw-r--r--  g4f/Provider/You.py | 2
-rw-r--r--  g4f/Provider/__init__.py | 42
-rw-r--r--  g4f/Provider/bing/conversation.py | 6
-rw-r--r--  g4f/Provider/gigachat/GigaChat.py (renamed from g4f/Provider/GigaChat.py) | 10
-rw-r--r--  g4f/Provider/gigachat/__init__.py | 2
-rw-r--r--  g4f/Provider/gigachat/russian_trusted_root_ca_pem.crt (renamed from g4f/Provider/gigachat_crt/russian_trusted_root_ca_pem.crt) | 0
-rw-r--r--  g4f/Provider/needs_auth/Gemini.py | 3
-rw-r--r--  g4f/Provider/needs_auth/Groq.py | 23
-rw-r--r--  g4f/Provider/needs_auth/OpenRouter.py | 4
-rw-r--r--  g4f/Provider/needs_auth/Openai.py | 2
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py | 6
-rw-r--r--  g4f/Provider/needs_auth/PerplexityApi.py | 3
-rw-r--r--  g4f/Provider/needs_auth/__init__.py | 4
-rw-r--r--  g4f/Provider/nexra/NexraBing.py | 93
-rw-r--r--  g4f/Provider/nexra/NexraBlackbox.py | 100
-rw-r--r--  g4f/Provider/nexra/NexraChatGPT.py | 285
-rw-r--r--  g4f/Provider/nexra/NexraDallE.py | 63
-rw-r--r--  g4f/Provider/nexra/NexraDallE2.py | 63
-rw-r--r--  g4f/Provider/nexra/NexraEmi.py | 63
-rw-r--r--  g4f/Provider/nexra/NexraFluxPro.py | 70
-rw-r--r--  g4f/Provider/nexra/NexraGeminiPro.py | 86
-rw-r--r--  g4f/Provider/nexra/NexraMidjourney.py | 63
-rw-r--r--  g4f/Provider/nexra/NexraProdiaAI.py | 151
-rw-r--r--  g4f/Provider/nexra/NexraQwen.py | 86
-rw-r--r--  g4f/Provider/nexra/NexraSD15.py | 72
-rw-r--r--  g4f/Provider/nexra/NexraSDLora.py | 69
-rw-r--r--  g4f/Provider/nexra/NexraSDTurbo.py | 69
-rw-r--r--  g4f/Provider/nexra/__init__.py | 14
-rw-r--r--  g4f/Provider/openai/new.py | 730
-rw-r--r--  g4f/Provider/selenium/AItianhuSpace.py | 116
-rw-r--r--  g4f/Provider/selenium/Bard.py | 80
-rw-r--r--  g4f/Provider/selenium/MyShell.py | 4
-rw-r--r--  g4f/Provider/selenium/PerplexityAi.py | 4
-rw-r--r--  g4f/Provider/selenium/TalkAi.py | 4
-rw-r--r--  g4f/Provider/selenium/__init__.py | 2
-rw-r--r--  g4f/Provider/unfinished/AiChatting.py | 66
-rw-r--r--  g4f/Provider/unfinished/ChatAiGpt.py | 68
-rw-r--r--  g4f/Provider/unfinished/Komo.py | 44
-rw-r--r--  g4f/Provider/unfinished/MikuChat.py | 97
-rw-r--r--  g4f/Provider/unfinished/__init__.py | 4
-rw-r--r--  g4f/__init__.py | 67
-rw-r--r--  g4f/api/__init__.py | 159
-rw-r--r--  g4f/client/__init__.py | 3
-rw-r--r--  g4f/client/async_client.py | 275
-rw-r--r--  g4f/client/client.py | 487
-rw-r--r--  g4f/cookies.py | 3
-rw-r--r--  g4f/gui/client/index.html | 51
-rw-r--r--  g4f/gui/client/static/css/style.css | 5
-rw-r--r--  g4f/gui/client/static/js/chat.v1.js | 52
-rw-r--r--  g4f/gui/client/static/js/highlightjs-copy.min.js | 55
-rw-r--r--  g4f/gui/server/api.py | 195
-rw-r--r--  g4f/gui/server/internet.py | 2
-rw-r--r--  g4f/gui/server/website.py | 6
-rw-r--r--  g4f/models.py | 860
-rw-r--r--  g4f/providers/types.py | 7
-rw-r--r--  g4f/version.py | 2
112 files changed, 6497 insertions(+), 3136 deletions(-)
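
The per-file diffs follow. For orientation, here is a minimal usage sketch for one of the providers added in this changeset (AIChatFree). It assumes the synchronous g4f.ChatCompletion interface is unchanged by this patch and that the provider is exported from g4f.Provider, as the __init__.py diff suggests.

import g4f
from g4f.Provider import AIChatFree  # provider added in this changeset

# Route a single completion through the new provider;
# 'gemini-pro' is its default_model per the diff below.
response = g4f.ChatCompletion.create(
    model="gemini-pro",
    provider=AIChatFree,
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
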
diff --git a/g4f/Provider/AI365VIP.py b/g4f/Provider/AI365VIP.py
index 2dcc8d1c..511ad568 100644
--- a/g4f/Provider/AI365VIP.py
+++ b/g4f/Provider/AI365VIP.py
@@ -10,17 +10,15 @@ from .helper import format_prompt
class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chat.ai365vip.com"
api_endpoint = "/api/chat"
- working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
+ working = False
default_model = 'gpt-3.5-turbo'
models = [
'gpt-3.5-turbo',
+ 'gpt-3.5-turbo-16k',
'gpt-4o',
- 'claude-3-haiku-20240307',
]
model_aliases = {
- "claude-3-haiku": "claude-3-haiku-20240307",
+ "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
}
@classmethod
diff --git a/g4f/Provider/AIChatFree.py b/g4f/Provider/AIChatFree.py
new file mode 100644
index 00000000..71c04681
--- /dev/null
+++ b/g4f/Provider/AIChatFree.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import time
+from hashlib import sha256
+
+from aiohttp import BaseConnector, ClientSession
+
+from ..errors import RateLimitError
+from ..requests import raise_for_status
+from ..requests.aiohttp import get_connector
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://aichatfree.info/"
+ working = True
+ supports_stream = True
+ supports_message_history = True
+ default_model = 'gemini-pro'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ connector: BaseConnector = None,
+ **kwargs,
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.5",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "text/plain;charset=UTF-8",
+ "Referer": f"{cls.url}/",
+ "Origin": cls.url,
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Connection": "keep-alive",
+ "TE": "trailers",
+ }
+ async with ClientSession(
+ connector=get_connector(connector, proxy), headers=headers
+ ) as session:
+ timestamp = int(time.time() * 1e3)
+ data = {
+ "messages": [
+ {
+ "role": "model" if message["role"] == "assistant" else "user",
+ "parts": [{"text": message["content"]}],
+ }
+ for message in messages
+ ],
+ "time": timestamp,
+ "pass": None,
+ "sign": generate_signature(timestamp, messages[-1]["content"]),
+ }
+ async with session.post(
+ f"{cls.url}/api/generate", json=data, proxy=proxy
+ ) as response:
+ if response.status == 500:
+ if "Quota exceeded" in await response.text():
+ raise RateLimitError(
+ f"Response {response.status}: Rate limit reached"
+ )
+ await raise_for_status(response)
+ async for chunk in response.content.iter_any():
+ yield chunk.decode(errors="ignore")
+
+
+def generate_signature(time: int, text: str, secret: str = ""):
+ message = f"{time}:{text}:{secret}"
+ return sha256(message.encode()).hexdigest()
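
For reference, the "sign" field built above is a plain SHA-256 over "{time}:{text}:{secret}" with an empty default secret. A standalone illustration (the prompt text is hypothetical):

import time
from hashlib import sha256

timestamp = int(time.time() * 1e3)            # millisecond timestamp, as in the provider
message = f"{timestamp}:How are you?:"        # last user message content, empty secret
sign = sha256(message.encode()).hexdigest()   # value sent as data["sign"]
print(sign)
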
diff --git a/g4f/Provider/AIUncensored.py b/g4f/Provider/AIUncensored.py
new file mode 100644
index 00000000..d653191c
--- /dev/null
+++ b/g4f/Provider/AIUncensored.py
@@ -0,0 +1,112 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+from ..image import ImageResponse
+
+class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.aiuncensored.info"
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'ai_uncensored'
+ chat_models = [default_model]
+ image_models = ['ImageGenerator']
+ models = [*chat_models, *image_models]
+
+ api_endpoints = {
+ 'ai_uncensored': "https://twitterclone-i0wr.onrender.com/api/chat",
+ 'ImageGenerator': "https://twitterclone-4e8t.onrender.com/api/image"
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ stream: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ if model in cls.chat_models:
+ async with ClientSession(headers={"content-type": "application/json"}) as session:
+ data = {
+ "messages": [
+ {"role": "user", "content": format_prompt(messages)}
+ ],
+ "stream": stream
+ }
+ async with session.post(cls.api_endpoints[model], json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ if stream:
+ async for chunk in cls._handle_streaming_response(response):
+ yield chunk
+ else:
+ yield await cls._handle_non_streaming_response(response)
+ elif model in cls.image_models:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "cross-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = messages[0]['content']
+ data = {"prompt": prompt}
+ async with session.post(cls.api_endpoints[model], json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ result = await response.json()
+ image_url = result.get('image_url', '')
+ if image_url:
+ yield ImageResponse(image_url, alt=prompt)
+ else:
+ yield "Failed to generate image. Please try again."
+
+ @classmethod
+ async def _handle_streaming_response(cls, response):
+ async for line in response.content:
+ line = line.decode('utf-8').strip()
+ if line.startswith("data: "):
+ if line == "data: [DONE]":
+ break
+ try:
+ json_data = json.loads(line[6:])
+ if 'data' in json_data:
+ yield json_data['data']
+ except json.JSONDecodeError:
+ pass
+
+ @classmethod
+ async def _handle_non_streaming_response(cls, response):
+ response_json = await response.json()
+ return response_json.get('content', "Sorry, I couldn't generate a response.")
+
+ @classmethod
+ def validate_response(cls, response: str) -> str:
+ return response
diff --git a/g4f/Provider/Ai4Chat.py b/g4f/Provider/Ai4Chat.py
new file mode 100644
index 00000000..1096279d
--- /dev/null
+++ b/g4f/Provider/Ai4Chat.py
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+import json
+import re
+import logging
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "AI4Chat"
+ url = "https://www.ai4chat.co"
+ api_endpoint = "https://www.ai4chat.co/generate-response"
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-4'
+ models = [default_model]
+
+ model_aliases = {}
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": "https://www.ai4chat.co",
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": "https://www.ai4chat.co/gpt/talkdirtytome",
+ "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
+ }
+
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ]
+ }
+
+ try:
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ result = await response.text()
+
+ json_result = json.loads(result)
+
+ message = json_result.get("message", "")
+
+ clean_message = re.sub(r'<[^>]+>', '', message)
+
+ yield clean_message
+ except Exception as e:
+ logging.exception("Error while calling AI 4Chat API: %s", e)
+ yield f"Error: {e}"
diff --git a/g4f/Provider/AiChatOnline.py b/g4f/Provider/AiChatOnline.py
index 152a7d31..26aacef6 100644
--- a/g4f/Provider/AiChatOnline.py
+++ b/g4f/Provider/AiChatOnline.py
@@ -12,10 +12,7 @@ class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://aichatonlineorg.erweima.ai"
api_endpoint = "/aichatonline/api/chat/gpt"
working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
default_model = 'gpt-4o-mini'
- supports_message_history = False
@classmethod
async def grab_token(
diff --git a/g4f/Provider/AiChats.py b/g4f/Provider/AiChats.py
index 10127d4f..08492e24 100644
--- a/g4f/Provider/AiChats.py
+++ b/g4f/Provider/AiChats.py
@@ -12,7 +12,6 @@ class AiChats(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://ai-chats.org"
api_endpoint = "https://ai-chats.org/chat/send2/"
working = True
- supports_gpt_4 = True
supports_message_history = True
default_model = 'gpt-4'
models = ['gpt-4', 'dalle']
diff --git a/g4f/Provider/AiMathGPT.py b/g4f/Provider/AiMathGPT.py
new file mode 100644
index 00000000..90931691
--- /dev/null
+++ b/g4f/Provider/AiMathGPT.py
@@ -0,0 +1,74 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class AiMathGPT(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://aimathgpt.forit.ai"
+ api_endpoint = "https://aimathgpt.forit.ai/api/ai"
+ working = True
+ supports_stream = False
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'llama3'
+ models = ['llama3']
+
+ model_aliases = {"llama-3.1-70b": "llama3",}
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': cls.url,
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': f'{cls.url}/',
+ 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ],
+ "model": model
+ }
+
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ response_data = await response.json()
+ filtered_response = response_data['result']['response']
+ yield filtered_response
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
new file mode 100644
index 00000000..015766f4
--- /dev/null
+++ b/g4f/Provider/Airforce.py
@@ -0,0 +1,245 @@
+from __future__ import annotations
+import random
+import json
+import re
+from aiohttp import ClientSession
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse
+
+def split_long_message(message: str, max_length: int = 4000) -> list[str]:
+ return [message[i:i+max_length] for i in range(0, len(message), max_length)]
+
+class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://api.airforce"
+ image_api_endpoint = "https://api.airforce/imagine2"
+ text_api_endpoint = "https://api.airforce/chat/completions"
+ working = True
+
+ default_model = 'llama-3-70b-chat'
+
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ text_models = [
+ 'claude-3-haiku-20240307',
+ 'claude-3-sonnet-20240229',
+ 'claude-3-5-sonnet-20240620',
+ 'claude-3-opus-20240229',
+ 'chatgpt-4o-latest',
+ 'gpt-4',
+ 'gpt-4-turbo',
+ 'gpt-4o-mini-2024-07-18',
+ 'gpt-4o-mini',
+ 'gpt-3.5-turbo',
+ 'gpt-3.5-turbo-0125',
+ 'gpt-3.5-turbo-1106',
+ default_model,
+ 'llama-3-70b-chat-turbo',
+ 'llama-3-8b-chat',
+ 'llama-3-8b-chat-turbo',
+ 'llama-3-70b-chat-lite',
+ 'llama-3-8b-chat-lite',
+ 'llama-2-13b-chat',
+ 'llama-3.1-405b-turbo',
+ 'llama-3.1-70b-turbo',
+ 'llama-3.1-8b-turbo',
+ 'LlamaGuard-2-8b',
+ 'Llama-Guard-7b',
+ 'Llama-3.2-90B-Vision-Instruct-Turbo',
+ 'Mixtral-8x7B-Instruct-v0.1',
+ 'Mixtral-8x22B-Instruct-v0.1',
+ 'Mistral-7B-Instruct-v0.1',
+ 'Mistral-7B-Instruct-v0.2',
+ 'Mistral-7B-Instruct-v0.3',
+ 'Qwen1.5-7B-Chat',
+ 'Qwen1.5-14B-Chat',
+ 'Qwen1.5-72B-Chat',
+ 'Qwen1.5-110B-Chat',
+ 'Qwen2-72B-Instruct',
+ 'gemma-2b-it',
+ 'gemma-2-9b-it',
+ 'gemma-2-27b-it',
+ 'gemini-1.5-flash',
+ 'gemini-1.5-pro',
+ 'deepseek-llm-67b-chat',
+ 'Nous-Hermes-2-Mixtral-8x7B-DPO',
+ 'Nous-Hermes-2-Yi-34B',
+ 'WizardLM-2-8x22B',
+ 'SOLAR-10.7B-Instruct-v1.0',
+ 'MythoMax-L2-13b',
+ 'cosmosrp',
+ ]
+
+ image_models = [
+ 'flux',
+ 'flux-realism',
+ 'flux-anime',
+ 'flux-3d',
+ 'flux-disney',
+ 'flux-pixel',
+ 'flux-4o',
+ 'any-dark',
+ ]
+
+ models = [
+ *text_models,
+ *image_models,
+ ]
+
+ model_aliases = {
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ "claude-3-sonnet": "claude-3-sonnet-20240229",
+ "gpt-4o": "chatgpt-4o-latest",
+ "llama-3-70b": "llama-3-70b-chat",
+ "llama-3-8b": "llama-3-8b-chat",
+ "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
+ "qwen-1.5-7b": "Qwen1.5-7B-Chat",
+ "gemma-2b": "gemma-2b-it",
+ "gemini-flash": "gemini-1.5-flash",
+ "mythomax-l2-13b": "MythoMax-L2-13b",
+ "solar-10.7b": "SOLAR-10.7B-Instruct-v1.0",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases.get(model, cls.default_model)
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ seed: int = None,
+ size: str = "1:1",
+ stream: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ if model in cls.image_models:
+ async for result in cls._generate_image(model, messages, proxy, seed, size):
+ yield result
+ elif model in cls.text_models:
+ async for result in cls._generate_text(model, messages, proxy, stream):
+ yield result
+
+ @classmethod
+ async def _generate_image(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ seed: int = None,
+ size: str = "1:1",
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "origin": "https://llmplayground.net",
+ "user-agent": "Mozilla/5.0"
+ }
+
+ if seed is None:
+ seed = random.randint(0, 100000)
+
+ prompt = messages[-1]['content']
+
+ async with ClientSession(headers=headers) as session:
+ params = {
+ "model": model,
+ "prompt": prompt,
+ "size": size,
+ "seed": seed
+ }
+ async with session.get(f"{cls.image_api_endpoint}", params=params, proxy=proxy) as response:
+ response.raise_for_status()
+ content_type = response.headers.get('Content-Type', '').lower()
+
+ if 'application/json' in content_type:
+ async for chunk in response.content.iter_chunked(1024):
+ if chunk:
+ yield chunk.decode('utf-8')
+ elif 'image' in content_type:
+ image_data = b""
+ async for chunk in response.content.iter_chunked(1024):
+ if chunk:
+ image_data += chunk
+ image_url = f"{cls.image_api_endpoint}?model={model}&prompt={prompt}&size={size}&seed={seed}"
+ alt_text = f"Generated image for prompt: {prompt}"
+ yield ImageResponse(images=image_url, alt=alt_text)
+
+ @classmethod
+ async def _generate_text(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ stream: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "authorization": "Bearer missing api key",
+ "content-type": "application/json",
+ "user-agent": "Mozilla/5.0"
+ }
+
+ async with ClientSession(headers=headers) as session:
+ formatted_prompt = cls._format_messages(messages)
+ prompt_parts = split_long_message(formatted_prompt)
+ full_response = ""
+
+ for part in prompt_parts:
+ data = {
+ "messages": [{"role": "user", "content": part}],
+ "model": model,
+ "max_tokens": 4096,
+ "temperature": 1,
+ "top_p": 1,
+ "stream": stream
+ }
+ async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ part_response = ""
+ if stream:
+ async for line in response.content:
+ if line:
+ line = line.decode('utf-8').strip()
+ if line.startswith("data: ") and line != "data: [DONE]":
+ json_data = json.loads(line[6:])
+ content = json_data['choices'][0]['delta'].get('content', '')
+ part_response += content
+ else:
+ json_data = await response.json()
+ content = json_data['choices'][0]['message']['content']
+ part_response = content
+
+ part_response = re.sub(
+ r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
+ '',
+ part_response
+ )
+
+ part_response = re.sub(
+ r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
+ '',
+ part_response
+ )
+
+ full_response += part_response
+ yield full_response
+
+ @classmethod
+ def _format_messages(cls, messages: Messages) -> str:
+ return " ".join([msg['content'] for msg in messages])
diff --git a/g4f/Provider/Allyfy.py b/g4f/Provider/Allyfy.py
index 8733b1ec..bf607df4 100644
--- a/g4f/Provider/Allyfy.py
+++ b/g4f/Provider/Allyfy.py
@@ -9,10 +9,9 @@ from .helper import format_prompt
class Allyfy(AsyncGeneratorProvider):
- url = "https://chatbot.allyfy.chat"
- api_endpoint = "/api/v1/message/stream/super/chat"
+ url = "https://allyfy.chat"
+ api_endpoint = "https://chatbot.allyfy.chat/api/v1/message/stream/super/chat"
working = True
- supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
@@ -53,7 +52,7 @@ class Allyfy(AsyncGeneratorProvider):
"packageName": "com.cch.allyfy.webh",
}
}
- async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
full_response = []
async for line in response.content:
diff --git a/g4f/Provider/AmigoChat.py b/g4f/Provider/AmigoChat.py
new file mode 100644
index 00000000..f5027111
--- /dev/null
+++ b/g4f/Provider/AmigoChat.py
@@ -0,0 +1,189 @@
+from __future__ import annotations
+
+import json
+import uuid
+from aiohttp import ClientSession, ClientTimeout, ClientResponseError
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+from ..image import ImageResponse
+
+class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://amigochat.io/chat/"
+ chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions"
+ image_api_endpoint = "https://api.amigochat.io/v1/images/generations"
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-4o-mini'
+
+ chat_models = [
+ 'gpt-4o',
+ default_model,
+ 'o1-preview',
+ 'o1-mini',
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo',
+ 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
+ 'claude-3-sonnet-20240229',
+ 'gemini-1.5-pro',
+ ]
+
+ image_models = [
+ 'flux-pro/v1.1',
+ 'flux-realism',
+ 'flux-pro',
+ 'dalle-e-3',
+ ]
+
+ models = [*chat_models, *image_models]
+
+ model_aliases = {
+ "o1": "o1-preview",
+ "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+ "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+ "claude-3.5-sonnet": "claude-3-sonnet-20240229",
+ "gemini-pro": "gemini-1.5-pro",
+
+ "flux-pro": "flux-pro/v1.1",
+ "dalle-3": "dalle-e-3",
+ }
+
+ persona_ids = {
+ 'gpt-4o': "gpt",
+ 'gpt-4o-mini': "amigo",
+ 'o1-preview': "openai-o-one",
+ 'o1-mini': "openai-o-one-mini",
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo': "llama-three-point-one",
+ 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo': "llama-3-2",
+ 'claude-3-sonnet-20240229': "claude",
+ 'gemini-1.5-pro': "gemini-1-5-pro",
+ 'flux-pro/v1.1': "flux-1-1-pro",
+ 'flux-realism': "flux-realism",
+ 'flux-pro': "flux-pro",
+ 'dalle-e-3': "dalle-three",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ def get_personaId(cls, model: str) -> str:
+ return cls.persona_ids[model]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ stream: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ device_uuid = str(uuid.uuid4())
+ max_retries = 3
+ retry_count = 0
+
+ while retry_count < max_retries:
+ try:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "authorization": "Bearer",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
+ "x-device-language": "en-US",
+ "x-device-platform": "web",
+ "x-device-uuid": device_uuid,
+ "x-device-version": "1.0.32"
+ }
+
+ async with ClientSession(headers=headers) as session:
+ if model in cls.chat_models:
+ # Chat completion
+ data = {
+ "messages": [{"role": m["role"], "content": m["content"]} for m in messages],
+ "model": model,
+ "personaId": cls.get_personaId(model),
+ "frequency_penalty": 0,
+ "max_tokens": 4000,
+ "presence_penalty": 0,
+ "stream": stream,
+ "temperature": 0.5,
+ "top_p": 0.95
+ }
+
+ timeout = ClientTimeout(total=300) # 5 minutes timeout
+ async with session.post(cls.chat_api_endpoint, json=data, proxy=proxy, timeout=timeout) as response:
+ if response.status not in (200, 201):
+ error_text = await response.text()
+ raise Exception(f"Error {response.status}: {error_text}")
+
+ async for line in response.content:
+ line = line.decode('utf-8').strip()
+ if line.startswith('data: '):
+ if line == 'data: [DONE]':
+ break
+ try:
+ chunk = json.loads(line[6:]) # Remove 'data: ' prefix
+ if 'choices' in chunk and len(chunk['choices']) > 0:
+ choice = chunk['choices'][0]
+ if 'delta' in choice:
+ content = choice['delta'].get('content')
+ elif 'text' in choice:
+ content = choice['text']
+ else:
+ content = None
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ pass
+ else:
+ # Image generation
+ prompt = messages[-1]['content']
+ data = {
+ "prompt": prompt,
+ "model": model,
+ "personaId": cls.get_personaId(model)
+ }
+ async with session.post(cls.image_api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+
+ response_data = await response.json()
+
+ if "data" in response_data:
+ image_urls = []
+ for item in response_data["data"]:
+ if "url" in item:
+ image_url = item["url"]
+ image_urls.append(image_url)
+ if image_urls:
+ yield ImageResponse(image_urls, prompt)
+ else:
+ yield None
+
+ break
+
+ except (ClientResponseError, Exception) as e:
+ retry_count += 1
+ if retry_count >= max_retries:
+ raise e
+ device_uuid = str(uuid.uuid4())
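
AmigoChat resolves model aliases before looking up its persona id. A small check of that mapping, assuming the provider is exported from g4f.Provider like the others in this changeset:

from g4f.Provider import AmigoChat

# Alias resolves to 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo' per model_aliases above.
model = AmigoChat.get_model("llama-3.1-405b")
print(AmigoChat.get_personaId(model))  # 'llama-three-point-one' per persona_ids above
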
diff --git a/g4f/Provider/Aura.py b/g4f/Provider/Aura.py
index 4a8d0a55..e2c56754 100644
--- a/g4f/Provider/Aura.py
+++ b/g4f/Provider/Aura.py
@@ -9,7 +9,7 @@ from ..webdriver import WebDriver
class Aura(AsyncGeneratorProvider):
url = "https://openchat.team"
- working = True
+ working = False
@classmethod
async def create_async_generator(
@@ -46,4 +46,4 @@ class Aura(AsyncGeneratorProvider):
async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
- yield chunk.decode(error="ignore")
\ No newline at end of file
+ yield chunk.decode(error="ignore")
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index 4056f9ff..f04b1a54 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -37,7 +37,6 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://bing.com/chat"
working = True
supports_message_history = True
- supports_gpt_4 = True
default_model = "Balanced"
default_vision_model = "gpt-4-vision"
models = [getattr(Tones, key) for key in Tones.__dict__ if not key.startswith("__")]
diff --git a/g4f/Provider/Binjie.py b/g4f/Provider/Binjie.py
deleted file mode 100644
index 90f9ec3c..00000000
--- a/g4f/Provider/Binjie.py
+++ /dev/null
@@ -1,65 +0,0 @@
-from __future__ import annotations
-
-import random
-from ..requests import StreamSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, format_prompt
-
-
-class Binjie(AsyncGeneratorProvider):
- url = "https://chat18.aichatos8.com"
- working = True
- supports_gpt_4 = True
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- @staticmethod
- async def create_async_generator(
- model: str,
- messages: Messages,
- proxy: str = None,
- timeout: int = 120,
- **kwargs,
- ) -> AsyncResult:
- async with StreamSession(
- headers=_create_header(), proxies={"https": proxy}, timeout=timeout
- ) as session:
- payload = _create_payload(messages, **kwargs)
- async with session.post("https://api.binjie.fun/api/generateStream", json=payload) as response:
- response.raise_for_status()
- async for chunk in response.iter_content():
- if chunk:
- chunk = chunk.decode()
- if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
- raise RuntimeError("IP address is blocked by abuse detection.")
- yield chunk
-
-
-def _create_header():
- return {
- "accept" : "application/json, text/plain, */*",
- "content-type" : "application/json",
- "origin" : "https://chat18.aichatos8.com",
- "referer" : "https://chat18.aichatos8.com/"
- }
-
-
-def _create_payload(
- messages: Messages,
- system_message: str = "",
- user_id: int = None,
- **kwargs
-):
- if not user_id:
- user_id = random.randint(1690000544336, 2093025544336)
- return {
- "prompt": format_prompt(messages),
- "network": True,
- "system": system_message,
- "withoutContext": False,
- "stream": True,
- "userId": f"#/chat/{user_id}"
- }
-
diff --git a/g4f/Provider/Bixin123.py b/g4f/Provider/Bixin123.py
deleted file mode 100644
index 694a2eff..00000000
--- a/g4f/Provider/Bixin123.py
+++ /dev/null
@@ -1,89 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import json
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..typing import AsyncResult, Messages
-from .helper import format_prompt
-
-class Bixin123(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://chat.bixin123.com"
- api_endpoint = "https://chat.bixin123.com/api/chatgpt/chat-process"
- working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
-
- default_model = 'gpt-3.5-turbo-0125'
- models = ['gpt-3.5-turbo-0125', 'gpt-3.5-turbo-16k-0613', 'gpt-4-turbo', 'qwen-turbo']
-
- model_aliases = {
- "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "accept": "application/json, text/plain, */*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "fingerprint": "988148794",
- "origin": cls.url,
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": f"{cls.url}/chat",
- "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
- "x-website-domain": "chat.bixin123.com",
- }
-
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "prompt": prompt,
- "options": {
- "usingNetwork": False,
- "file": ""
- }
- }
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- response_text = await response.text()
-
- lines = response_text.strip().split("\n")
- last_json = None
- for line in reversed(lines):
- try:
- last_json = json.loads(line)
- break
- except json.JSONDecodeError:
- pass
-
- if last_json:
- text = last_json.get("text", "")
- yield text
- else:
- yield ""
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 9fab4a09..4052893a 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -1,43 +1,122 @@
from __future__ import annotations
+import asyncio
+import aiohttp
+import random
+import string
+import json
import uuid
-import secrets
import re
-import base64
-from aiohttp import ClientSession
-from typing import AsyncGenerator, Optional
+from typing import Optional, AsyncGenerator, Union
+
+from aiohttp import ClientSession, ClientResponseError
from ..typing import AsyncResult, Messages, ImageType
-from ..image import to_data_uri, ImageResponse
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse, to_data_uri
+
class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Blackbox AI"
url = "https://www.blackbox.ai"
+ api_endpoint = "https://www.blackbox.ai/api/chat"
working = True
- default_model = 'blackbox'
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'blackboxai'
+ image_models = ['ImageGeneration']
models = [
default_model,
- "gemini-1.5-flash",
+ 'blackboxai-pro',
+ *image_models,
"llama-3.1-8b",
'llama-3.1-70b',
'llama-3.1-405b',
- 'ImageGeneration',
+ 'gpt-4o',
+ 'gemini-pro',
+ 'gemini-1.5-flash',
+ 'claude-sonnet-3.5',
+ 'PythonAgent',
+ 'JavaAgent',
+ 'JavaScriptAgent',
+ 'HTMLAgent',
+ 'GoogleCloudAgent',
+ 'AndroidDeveloper',
+ 'SwiftDeveloper',
+ 'Next.jsAgent',
+ 'MongoDBAgent',
+ 'PyTorchAgent',
+ 'ReactAgent',
+ 'XcodeAgent',
+ 'AngularJSAgent',
]
-
- model_aliases = {
- "gemini-flash": "gemini-1.5-flash",
- }
-
- agent_mode_map = {
- 'ImageGeneration': {"mode": True, "id": "ImageGenerationLV45LJp", "name": "Image Generation"},
+
+ agentMode = {
+ 'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
}
- model_id_map = {
- "blackbox": {},
+ trendingAgentMode = {
+ "blackboxai": {},
"gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
"llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
- 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}
+ 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
+ 'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
+ 'PythonAgent': {'mode': True, 'id': "Python Agent"},
+ 'JavaAgent': {'mode': True, 'id': "Java Agent"},
+ 'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"},
+ 'HTMLAgent': {'mode': True, 'id': "HTML Agent"},
+ 'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"},
+ 'AndroidDeveloper': {'mode': True, 'id': "Android Developer"},
+ 'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"},
+ 'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"},
+ 'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"},
+ 'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"},
+ 'ReactAgent': {'mode': True, 'id': "React Agent"},
+ 'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
+ 'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
+ }
+
+ userSelectedModel = {
+ "gpt-4o": "gpt-4o",
+ "gemini-pro": "gemini-pro",
+ 'claude-sonnet-3.5': "claude-sonnet-3.5",
+ }
+
+ model_prefixes = {
+ 'gpt-4o': '@GPT-4o',
+ 'gemini-pro': '@Gemini-PRO',
+ 'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
+ 'PythonAgent': '@Python Agent',
+ 'JavaAgent': '@Java Agent',
+ 'JavaScriptAgent': '@JavaScript Agent',
+ 'HTMLAgent': '@HTML Agent',
+ 'GoogleCloudAgent': '@Google Cloud Agent',
+ 'AndroidDeveloper': '@Android Developer',
+ 'SwiftDeveloper': '@Swift Developer',
+ 'Next.jsAgent': '@Next.js Agent',
+ 'MongoDBAgent': '@MongoDB Agent',
+ 'PyTorchAgent': '@PyTorch Agent',
+ 'ReactAgent': '@React Agent',
+ 'XcodeAgent': '@Xcode Agent',
+ 'AngularJSAgent': '@AngularJS Agent',
+ 'blackboxai-pro': '@BLACKBOXAI-PRO',
+ 'ImageGeneration': '@Image Generation',
+ }
+
+ model_referers = {
+ "blackboxai": "/?model=blackboxai",
+ "gpt-4o": "/?model=gpt-4o",
+ "gemini-pro": "/?model=gemini-pro",
+ "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
+ }
+
+ model_aliases = {
+ "gemini-flash": "gemini-1.5-flash",
+ "claude-3.5-sonnet": "claude-sonnet-3.5",
+ "flux": "ImageGeneration",
}
@classmethod
@@ -49,14 +128,41 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
else:
return cls.default_model
- @classmethod
- async def download_image_to_base64_url(cls, url: str) -> str:
- async with ClientSession() as session:
- async with session.get(url) as response:
- image_data = await response.read()
- base64_data = base64.b64encode(image_data).decode('utf-8')
- mime_type = response.headers.get('Content-Type', 'image/jpeg')
- return f"data:{mime_type};base64,{base64_data}"
+ @staticmethod
+ def generate_random_string(length: int = 7) -> str:
+ characters = string.ascii_letters + string.digits
+ return ''.join(random.choices(characters, k=length))
+
+ @staticmethod
+ def generate_next_action() -> str:
+ return uuid.uuid4().hex
+
+ @staticmethod
+ def generate_next_router_state_tree() -> str:
+ router_state = [
+ "",
+ {
+ "children": [
+ "(chat)",
+ {
+ "children": [
+ "__PAGE__",
+ {}
+ ]
+ }
+ ]
+ },
+ None,
+ None,
+ True
+ ]
+ return json.dumps(router_state)
+
+ @staticmethod
+ def clean_response(text: str) -> str:
+ pattern = r'^\$\@\$v=undefined-rv1\$\@\$'
+ cleaned_text = re.sub(pattern, '', text)
+ return cleaned_text
@classmethod
async def create_async_generator(
@@ -64,93 +170,203 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: Optional[str] = None,
- image: Optional[ImageType] = None,
- image_name: Optional[str] = None,
+ image: ImageType = None,
+ image_name: str = None,
+ web_search: bool = False,
**kwargs
- ) -> AsyncGenerator[AsyncResult, None]:
+ ) -> AsyncGenerator[Union[str, ImageResponse], None]:
+ """
+ Creates an asynchronous generator for streaming responses from Blackbox AI.
+
+ Parameters:
+ model (str): Model to use for generating responses.
+ messages (Messages): Message history.
+ proxy (Optional[str]): Proxy URL, if needed.
+ image (ImageType): Image data to be processed, if any.
+ image_name (str): Name of the image file, if an image is provided.
+ web_search (bool): Enables or disables web search mode.
+ **kwargs: Additional keyword arguments.
+
+ Yields:
+ Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
+ """
+
if image is not None:
- messages[-1]["data"] = {
- "fileText": image_name,
- "imageBase64": to_data_uri(image),
- "title": str(uuid.uuid4())
+ messages[-1]['data'] = {
+ 'fileText': '',
+ 'imageBase64': to_data_uri(image),
+ 'title': image_name
}
+ messages[-1]['content'] = 'FILE:BB\n$#$\n\n$#$\n' + messages[-1]['content']
+
+ model = cls.get_model(model)
+
+ chat_id = cls.generate_random_string()
+ next_action = cls.generate_next_action()
+ next_router_state_tree = cls.generate_next_router_state_tree()
- headers = {
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
- "Accept": "*/*",
- "Accept-Language": "en-US,en;q=0.5",
- "Accept-Encoding": "gzip, deflate, br",
- "Referer": cls.url,
- "Content-Type": "application/json",
- "Origin": cls.url,
- "DNT": "1",
- "Sec-GPC": "1",
- "Alt-Used": "www.blackbox.ai",
- "Connection": "keep-alive",
+ agent_mode = cls.agentMode.get(model, {})
+ trending_agent_mode = cls.trendingAgentMode.get(model, {})
+
+ prefix = cls.model_prefixes.get(model, "")
+
+ formatted_prompt = ""
+ for message in messages:
+ role = message.get('role', '').capitalize()
+ content = message.get('content', '')
+ if role and content:
+ formatted_prompt += f"{role}: {content}\n"
+
+ if prefix:
+ formatted_prompt = f"{prefix} {formatted_prompt}".strip()
+
+ referer_path = cls.model_referers.get(model, f"/?model={model}")
+ referer_url = f"{cls.url}{referer_path}"
+
+ common_headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'origin': cls.url,
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
+ 'AppleWebKit/537.36 (KHTML, like Gecko) '
+ 'Chrome/129.0.0.0 Safari/537.36'
}
- async with ClientSession(headers=headers) as session:
- random_id = secrets.token_hex(16)
- random_user_id = str(uuid.uuid4())
-
- model = cls.get_model(model) # Resolve the model alias
-
- data = {
- "messages": messages,
- "id": random_id,
- "userId": random_user_id,
- "codeModelMode": True,
- "agentMode": cls.agent_mode_map.get(model, {}),
- "trendingAgentMode": {},
- "isMicMode": False,
- "isChromeExt": False,
- "playgroundMode": False,
- "webSearchMode": False,
- "userSystemPrompt": "",
- "githubToken": None,
- "trendingAgentModel": cls.model_id_map.get(model, {}),
- "maxTokens": None
- }
+ headers_api_chat = {
+ 'Content-Type': 'application/json',
+ 'Referer': referer_url
+ }
+ headers_api_chat_combined = {**common_headers, **headers_api_chat}
+
+ payload_api_chat = {
+ "messages": [
+ {
+ "id": chat_id,
+ "content": formatted_prompt,
+ "role": "user",
+ "data": messages[-1].get('data')
+ }
+ ],
+ "id": chat_id,
+ "previewToken": None,
+ "userId": None,
+ "codeModelMode": True,
+ "agentMode": agent_mode,
+ "trendingAgentMode": trending_agent_mode,
+ "isMicMode": False,
+ "userSystemPrompt": None,
+ "maxTokens": 1024,
+ "playgroundTopP": 0.9,
+ "playgroundTemperature": 0.5,
+ "isChromeExt": False,
+ "githubToken": None,
+ "clickedAnswer2": False,
+ "clickedAnswer3": False,
+ "clickedForceWebSearch": False,
+ "visitFromDelta": False,
+ "mobileClient": False,
+ "webSearchMode": web_search,
+ "userSelectedModel": cls.userSelectedModel.get(model, model)
+ }
+
+ headers_chat = {
+ 'Accept': 'text/x-component',
+ 'Content-Type': 'text/plain;charset=UTF-8',
+ 'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+ 'next-action': next_action,
+ 'next-router-state-tree': next_router_state_tree,
+ 'next-url': '/'
+ }
+ headers_chat_combined = {**common_headers, **headers_chat}
+
+ data_chat = '[]'
+
+ async with ClientSession(headers=common_headers) as session:
+ try:
+ async with session.post(
+ cls.api_endpoint,
+ headers=headers_api_chat_combined,
+ json=payload_api_chat,
+ proxy=proxy
+ ) as response_api_chat:
+ response_api_chat.raise_for_status()
+ text = await response_api_chat.text()
+ cleaned_response = cls.clean_response(text)
+
+ if model in cls.image_models:
+ match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
+ if match:
+ image_url = match.group(1)
+ image_response = ImageResponse(images=image_url, alt="Generated Image")
+ yield image_response
+ else:
+ yield cleaned_response
+ else:
+ if web_search:
+ match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+ if match:
+ source_part = match.group(1).strip()
+ answer_part = cleaned_response[match.end():].strip()
+ try:
+ sources = json.loads(source_part)
+ source_formatted = "**Source:**\n"
+ for item in sources:
+ title = item.get('title', 'No Title')
+ link = item.get('link', '#')
+ position = item.get('position', '')
+ source_formatted += f"{position}. [{title}]({link})\n"
+ final_response = f"{answer_part}\n\n{source_formatted}"
+ except json.JSONDecodeError:
+ final_response = f"{answer_part}\n\nSource information is unavailable."
+ else:
+ final_response = cleaned_response
+ else:
+ if '$~~~$' in cleaned_response:
+ final_response = cleaned_response.split('$~~~$')[0].strip()
+ else:
+ final_response = cleaned_response
+
+ yield final_response
+ except ClientResponseError as e:
+ error_text = f"Error {e.status}: {e.message}"
+ try:
+ error_response = await e.response.text()
+ cleaned_error = cls.clean_response(error_response)
+ error_text += f" - {cleaned_error}"
+ except Exception:
+ pass
+ yield error_text
+ except Exception as e:
+ yield f"Unexpected error during /api/chat request: {str(e)}"
+
+ chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
- async with session.post(
- f"{cls.url}/api/chat", json=data, proxy=proxy
- ) as response:
- response.raise_for_status()
- full_response = ""
- buffer = ""
- image_base64_url = None
- async for chunk in response.content.iter_any():
- if chunk:
- decoded_chunk = chunk.decode()
- cleaned_chunk = re.sub(r'\$@\$.+?\$@\$|\$@\$', '', decoded_chunk)
-
- buffer += cleaned_chunk
-
- # Check if there's a complete image line in the buffer
- image_match = re.search(r'!\[Generated Image\]\((https?://[^\s\)]+)\)', buffer)
- if image_match:
- image_url = image_match.group(1)
- # Download the image and convert to base64 URL
- image_base64_url = await cls.download_image_to_base64_url(image_url)
-
- # Remove the image line from the buffer
- buffer = re.sub(r'!\[Generated Image\]\(https?://[^\s\)]+\)', '', buffer)
-
- # Send text line by line
- lines = buffer.split('\n')
- for line in lines[:-1]:
- if line.strip():
- full_response += line + '\n'
- yield line + '\n'
- buffer = lines[-1] # Keep the last incomplete line in the buffer
-
- # Send the remaining buffer if it's not empty
- if buffer.strip():
- full_response += buffer
- yield buffer
-
- # If an image was found, send it as ImageResponse
- if image_base64_url:
- alt_text = "Generated Image"
- image_response = ImageResponse(image_base64_url, alt=alt_text)
- yield image_response
+ try:
+ async with session.post(
+ chat_url,
+ headers=headers_chat_combined,
+ data=data_chat,
+ proxy=proxy
+ ) as response_chat:
+ response_chat.raise_for_status()
+ pass
+ except ClientResponseError as e:
+ error_text = f"Error {e.status}: {e.message}"
+ try:
+ error_response = await e.response.text()
+ cleaned_error = cls.clean_response(error_response)
+ error_text += f" - {cleaned_error}"
+ except Exception:
+ pass
+ yield error_text
+ except Exception as e:
+ yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
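
Because Blackbox.create_async_generator can now yield either text segments or ImageResponse objects, callers need to branch on the chunk type. A minimal sketch of consuming it directly with asyncio (model and prompt are illustrative):

import asyncio
from g4f.Provider import Blackbox
from g4f.image import ImageResponse

async def main():
    async for chunk in Blackbox.create_async_generator(
        model="blackboxai",
        messages=[{"role": "user", "content": "Summarize asyncio in one sentence."}],
    ):
        if isinstance(chunk, ImageResponse):
            print("image:", chunk)   # image URL wrapped in an ImageResponse
        else:
            print(chunk, end="")

asyncio.run(main())
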
diff --git a/g4f/Provider/ChatGpt.py b/g4f/Provider/ChatGpt.py
new file mode 100644
index 00000000..b5a78b9a
--- /dev/null
+++ b/g4f/Provider/ChatGpt.py
@@ -0,0 +1,225 @@
+from __future__ import annotations
+
+from ..typing import Messages, CreateResult
+from ..providers.base_provider import AbstractProvider, ProviderModelMixin
+
+import time, uuid, random, json
+from requests import Session
+
+from .openai.new import (
+ get_config,
+ get_answer_token,
+ process_turnstile,
+ get_requirements_token
+)
+
+def format_conversation(messages: list):
+ conversation = []
+
+ for message in messages:
+ conversation.append({
+ 'id': str(uuid.uuid4()),
+ 'author': {
+ 'role': message['role'],
+ },
+ 'content': {
+ 'content_type': 'text',
+ 'parts': [
+ message['content'],
+ ],
+ },
+ 'metadata': {
+ 'serialization_metadata': {
+ 'custom_symbol_offsets': [],
+ },
+ },
+ 'create_time': round(time.time(), 3),
+ })
+
+ return conversation
+
+def init_session(user_agent):
+ session = Session()
+
+ cookies = {
+ '_dd_s': '',
+ }
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.8',
+ 'cache-control': 'no-cache',
+ 'pragma': 'no-cache',
+ 'priority': 'u=0, i',
+ 'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+ 'sec-ch-ua-arch': '"arm"',
+ 'sec-ch-ua-bitness': '"64"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-model': '""',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-ch-ua-platform-version': '"14.4.0"',
+ 'sec-fetch-dest': 'document',
+ 'sec-fetch-mode': 'navigate',
+ 'sec-fetch-site': 'none',
+ 'sec-fetch-user': '?1',
+ 'upgrade-insecure-requests': '1',
+ 'user-agent': user_agent,
+ }
+
+ session.get('https://chatgpt.com/', cookies=cookies, headers=headers)
+
+ return session
+
+class ChatGpt(AbstractProvider, ProviderModelMixin):
+ label = "ChatGpt"
+ working = True
+ supports_message_history = True
+ supports_system_message = True
+ supports_stream = True
+ models = [
+ 'gpt-4o',
+ 'gpt-4o-mini',
+ 'gpt-4',
+ 'gpt-4-turbo',
+ 'chatgpt-4o-latest',
+ ]
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ **kwargs
+ ) -> CreateResult:
+
+ if model in [
+ 'gpt-4o',
+ 'gpt-4o-mini',
+ 'gpt-4',
+ 'gpt-4-turbo',
+ 'chatgpt-4o-latest'
+ ]:
+ model = 'auto'
+
+ elif model in [
+ 'gpt-3.5-turbo'
+ ]:
+ model = 'text-davinci-002-render-sha'
+
+ else:
+ raise ValueError(f"Invalid model: {model}")
+
+ user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
+ session: Session = init_session(user_agent)
+
+ config = get_config(user_agent)
+ pow_req = get_requirements_token(config)
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.8',
+ 'content-type': 'application/json',
+ 'oai-device-id': f'{uuid.uuid4()}',
+ 'oai-language': 'en-US',
+ 'origin': 'https://chatgpt.com',
+ 'priority': 'u=1, i',
+ 'referer': 'https://chatgpt.com/',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'sec-gpc': '1',
+ 'user-agent': f'{user_agent}'
+ }
+
+ response = session.post('https://chatgpt.com/backend-anon/sentinel/chat-requirements',
+ headers=headers, json={'p': pow_req})
+
+ if response.status_code != 200:
+ print(f"Request failed with status: {response.status_code}")
+ print(f"Response content: {response.content}")
+ return
+
+ response_data = response.json()
+ if "detail" in response_data and "Unusual activity" in response_data["detail"]:
+ print(f"Blocked due to unusual activity: {response_data['detail']}")
+ return
+
+ turnstile = response_data.get('turnstile', {})
+ turnstile_required = turnstile.get('required')
+ pow_conf = response_data.get('proofofwork', {})
+
+ if turnstile_required:
+ turnstile_dx = turnstile.get('dx')
+ turnstile_token = process_turnstile(turnstile_dx, pow_req)
+
+ headers = headers | {
+ 'openai-sentinel-turnstile-token' : turnstile_token,
+ 'openai-sentinel-chat-requirements-token': response_data.get('token'),
+ 'openai-sentinel-proof-token' : get_answer_token(
+ pow_conf.get('seed'), pow_conf.get('difficulty'), config
+ )
+ }
+
+ json_data = {
+ 'action': 'next',
+ 'messages': format_conversation(messages),
+ 'parent_message_id': str(uuid.uuid4()),
+ 'model': 'auto',
+ 'timezone_offset_min': -120,
+ 'suggestions': [
+ 'Can you help me create a personalized morning routine that would help increase my productivity throughout the day? Start by asking me about my current habits and what activities energize me in the morning.',
+ 'Could you help me plan a relaxing day that focuses on activities for rejuvenation? To start, can you ask me what my favorite forms of relaxation are?',
+ 'I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look good on camera?',
+ 'Make up a 5-sentence story about "Sharky", a tooth-brushing shark superhero. Make each sentence a bullet point.',
+ ],
+ 'history_and_training_disabled': False,
+ 'conversation_mode': {
+ 'kind': 'primary_assistant',
+ },
+ 'force_paragen': False,
+ 'force_paragen_model_slug': '',
+ 'force_nulligen': False,
+ 'force_rate_limit': False,
+ 'reset_rate_limits': False,
+ 'websocket_request_id': str(uuid.uuid4()),
+ 'system_hints': [],
+ 'force_use_sse': True,
+ 'conversation_origin': None,
+ 'client_contextual_info': {
+ 'is_dark_mode': True,
+ 'time_since_loaded': random.randint(22,33),
+ 'page_height': random.randint(600, 900),
+ 'page_width': random.randint(500, 800),
+ 'pixel_ratio': 2,
+ 'screen_height': random.randint(800, 1200),
+ 'screen_width': random.randint(1200, 2000),
+ },
+ }
+
+ time.sleep(2)
+
+ response = session.post('https://chatgpt.com/backend-anon/conversation',
+ headers=headers, json=json_data, stream=True)
+
+ replace = ''
+ for line in response.iter_lines():
+ if line:
+ decoded_line = line.decode()
+ print(f"Received line: {decoded_line}")
+ if decoded_line.startswith('data:'):
+ json_string = decoded_line[6:]
+ if json_string.strip():
+ try:
+ data = json.loads(json_string)
+ except json.JSONDecodeError as e:
+ print(f"Error decoding JSON: {e}, content: {json_string}")
+ continue
+
+ if data.get('message').get('author').get('role') == 'assistant':
+ tokens = (data.get('message').get('content').get('parts')[0])
+
+ yield tokens.replace(replace, '')
+
+ replace = tokens
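
The ChatGpt provider above is a synchronous streaming provider (AbstractProvider.create_completion). A minimal sketch of driving it through the g4f.ChatCompletion interface, assuming ChatGpt is exported from g4f.Provider:

import g4f
from g4f.Provider import ChatGpt

# Stream tokens as they arrive from the anonymous chatgpt.com backend.
for token in g4f.ChatCompletion.create(
    model="gpt-4o",
    provider=ChatGpt,
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
):
    print(token, end="")
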
diff --git a/g4f/Provider/ChatGptEs.py b/g4f/Provider/ChatGptEs.py
new file mode 100644
index 00000000..a060ecb1
--- /dev/null
+++ b/g4f/Provider/ChatGptEs.py
@@ -0,0 +1,84 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import os
+import json
+import re
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://chatgpt.es"
+ api_endpoint = "https://chatgpt.es/wp-admin/admin-ajax.php"
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-4o'
+ models = ['gpt-4o', 'gpt-4o-mini', 'chatgpt-4o-latest']
+
+ model_aliases = {
+ "gpt-4o": "chatgpt-4o-latest",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "authority": "chatgpt.es",
+ "accept": "application/json",
+ "origin": cls.url,
+ "referer": f"{cls.url}/chat",
+ "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
+ }
+
+ async with ClientSession(headers=headers) as session:
+ initial_response = await session.get(cls.url)
+ nonce_ = re.findall(r'data-nonce="(.+?)"', await initial_response.text())[0]
+ post_id = re.findall(r'data-post-id="(.+?)"', await initial_response.text())[0]
+
+ conversation_history = [
+ "Human: strictly respond in the same language as my prompt, preferably English"
+ ]
+
+ for message in messages[:-1]:
+ if message['role'] == "user":
+ conversation_history.append(f"Human: {message['content']}")
+ else:
+ conversation_history.append(f"AI: {message['content']}")
+
+ payload = {
+ '_wpnonce': nonce_,
+ 'post_id': post_id,
+ 'url': cls.url,
+ 'action': 'wpaicg_chat_shortcode_message',
+ 'message': messages[-1]['content'],
+ 'bot_id': '0',
+ 'chatbot_identity': 'shortcode',
+ 'wpaicg_chat_client_id': os.urandom(5).hex(),
+ 'wpaicg_chat_history': json.dumps(conversation_history)
+ }
+
+ async with session.post(cls.api_endpoint, headers=headers, data=payload) as response:
+ response.raise_for_status()
+ result = await response.json()
+ yield result['data']
diff --git a/g4f/Provider/ChatHub.py b/g4f/Provider/ChatHub.py
new file mode 100644
index 00000000..3b762687
--- /dev/null
+++ b/g4f/Provider/ChatHub.py
@@ -0,0 +1,84 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class ChatHub(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "ChatHub"
+ url = "https://app.chathub.gg"
+ api_endpoint = "https://app.chathub.gg/api/v3/chat/completions"
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'meta/llama3.1-8b'
+ models = [
+ 'meta/llama3.1-8b',
+ 'mistral/mixtral-8x7b',
+ 'google/gemma-2',
+ 'perplexity/sonar-online',
+ ]
+
+ model_aliases = {
+ "llama-3.1-8b": "meta/llama3.1-8b",
+ "mixtral-8x7b": "mistral/mixtral-8x7b",
+ "gemma-2": "google/gemma-2",
+ "sonar-online": "perplexity/sonar-online",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'content-type': 'application/json',
+ 'origin': cls.url,
+ 'referer': f"{cls.url}/chat/cloud-llama3.1-8b",
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
+ 'x-app-id': 'web'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "model": model,
+ "messages": [{"role": "user", "content": prompt}],
+ "tools": []
+ }
+
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
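+ # The endpoint answers with SSE lines: 'text-delta' events carry incremental tokens, 'done' closes the stream.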
+ async for line in response.content:
+ if line:
+ decoded_line = line.decode('utf-8')
+ if decoded_line.startswith('data:'):
+ try:
+ data = json.loads(decoded_line[5:])
+ if data['type'] == 'text-delta':
+ yield data['textDelta']
+ elif data['type'] == 'done':
+ break
+ except json.JSONDecodeError:
+ continue
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py
index 8c058fdc..627facf6 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -12,13 +12,15 @@ class Chatgpt4Online(AsyncGeneratorProvider):
url = "https://chatgpt4online.org"
api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
working = True
- supports_gpt_4 = True
+
+ default_model = 'gpt-4'
+ models = [default_model]
async def get_nonce(headers: dict) -> str:
async with ClientSession(headers=headers) as session:
async with session.post(f"https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response:
return (await response.json())["restNonce"]
-
+
@classmethod
async def create_async_generator(
cls,
diff --git a/g4f/Provider/Chatgpt4o.py b/g4f/Provider/Chatgpt4o.py
index f3dc8a15..7730fc84 100644
--- a/g4f/Provider/Chatgpt4o.py
+++ b/g4f/Provider/Chatgpt4o.py
@@ -9,11 +9,16 @@ from .helper import format_prompt
class Chatgpt4o(AsyncProvider, ProviderModelMixin):
url = "https://chatgpt4o.one"
- supports_gpt_4 = True
working = True
_post_id = None
_nonce = None
- default_model = 'gpt-4o'
+ default_model = 'gpt-4o-mini-2024-07-18'
+ models = [
+ 'gpt-4o-mini-2024-07-18',
+ ]
+ model_aliases = {
+ "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+ }
@classmethod
diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py
index 95efa865..d2837594 100644
--- a/g4f/Provider/ChatgptFree.py
+++ b/g4f/Provider/ChatgptFree.py
@@ -10,7 +10,6 @@ from .helper import format_prompt
class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chatgptfree.ai"
- supports_gpt_4 = True
working = True
_post_id = None
_nonce = None
diff --git a/g4f/Provider/ChatifyAI.py b/g4f/Provider/ChatifyAI.py
new file mode 100644
index 00000000..7e43b065
--- /dev/null
+++ b/g4f/Provider/ChatifyAI.py
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class ChatifyAI(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://chatify-ai.vercel.app"
+ api_endpoint = "https://chatify-ai.vercel.app/api/chat"
+ working = True
+ supports_stream = False
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'llama-3.1'
+ models = [default_model]
+ model_aliases = {
+ "llama-3.1-8b": "llama-3.1",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases.get(model, cls.default_model)
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "messages": [{"role": "user", "content": format_prompt(messages)}]
+ }
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ response_text = await response.text()
+
+ filtered_response = cls.filter_response(response_text)
+ yield filtered_response
+
+ @staticmethod
+ def filter_response(response_text: str) -> str:
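+ # The raw response is a sequence of quoted fragments; keep only the text between the quotes.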
+ parts = response_text.split('"')
+
+ text_parts = parts[1::2]
+
+ clean_text = ''.join(text_parts)
+
+ return clean_text
diff --git a/g4f/Provider/Cloudflare.py b/g4f/Provider/Cloudflare.py
new file mode 100644
index 00000000..e78bbcd0
--- /dev/null
+++ b/g4f/Provider/Cloudflare.py
@@ -0,0 +1,212 @@
+from __future__ import annotations
+
+import asyncio
+import json
+import uuid
+import cloudscraper
+from typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://playground.ai.cloudflare.com"
+ api_endpoint = "https://playground.ai.cloudflare.com/api/inference"
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = '@cf/meta/llama-3.1-8b-instruct'
+ models = [
+ '@cf/deepseek-ai/deepseek-math-7b-instruct', # Specific answer
+
+
+ '@cf/thebloke/discolm-german-7b-v1-awq',
+
+
+ '@cf/tiiuae/falcon-7b-instruct', # Specific answer
+
+
+ '@hf/google/gemma-7b-it',
+
+
+ '@cf/meta/llama-2-7b-chat-fp16',
+ '@cf/meta/llama-2-7b-chat-int8',
+
+ '@cf/meta/llama-3-8b-instruct',
+ '@cf/meta/llama-3-8b-instruct-awq',
+ default_model,
+ '@hf/meta-llama/meta-llama-3-8b-instruct',
+
+ '@cf/meta/llama-3.1-8b-instruct-awq',
+ '@cf/meta/llama-3.1-8b-instruct-fp8',
+ '@cf/meta/llama-3.2-11b-vision-instruct',
+ '@cf/meta/llama-3.2-1b-instruct',
+ '@cf/meta/llama-3.2-3b-instruct',
+
+ '@cf/mistral/mistral-7b-instruct-v0.1',
+ '@hf/mistral/mistral-7b-instruct-v0.2',
+
+ '@cf/openchat/openchat-3.5-0106',
+
+ '@cf/microsoft/phi-2',
+
+ '@cf/qwen/qwen1.5-0.5b-chat',
+ '@cf/qwen/qwen1.5-1.8b-chat',
+ '@cf/qwen/qwen1.5-14b-chat-awq',
+ '@cf/qwen/qwen1.5-7b-chat-awq',
+
+ '@cf/defog/sqlcoder-7b-2', # Specific answer
+
+ '@cf/tinyllama/tinyllama-1.1b-chat-v1.0',
+
+ '@cf/fblgit/una-cybertron-7b-v2-bf16',
+ ]
+
+ model_aliases = {
+ "german-7b-v1": "@cf/thebloke/discolm-german-7b-v1-awq",
+ "gemma-7b": "@hf/google/gemma-7b-it",
+
+ "llama-2-7b": "@cf/meta/llama-2-7b-chat-int8",
+ "llama-3-8b": "@hf/meta-llama/meta-llama-3-8b-instruct",
+ "llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-fp8",
+ "llama-3.2-11b": "@cf/meta/llama-3.2-11b-vision-instruct",
+ "llama-3.2-1b": "@cf/meta/llama-3.2-1b-instruct",
+ "llama-3.2-3b": "@cf/meta/llama-3.2-3b-instruct",
+
+ "mistral-7b": "@hf/mistral/mistral-7b-instruct-v0.2",
+ "openchat-3.5": "@cf/openchat/openchat-3.5-0106",
+ "phi-2": "@cf/microsoft/phi-2",
+
+ "qwen-1.5-0.5b": "@cf/qwen/qwen1.5-0.5b-chat",
+ "qwen-1.5-1.8b": "@cf/qwen/qwen1.5-1.8b-chat",
+ "qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
+ "qwen-1.5-7b": "@cf/qwen/qwen1.5-7b-chat-awq",
+
+ "tinyllama-1.1b": "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
+ "cybertron-7b": "@cf/fblgit/una-cybertron-7b-v2-bf16",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ max_tokens: int = 2048,
+ stream: bool = True,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Accept': 'text/event-stream',
+ 'Accept-Language': 'en-US,en;q=0.9',
+ 'Cache-Control': 'no-cache',
+ 'Content-Type': 'application/json',
+ 'Origin': cls.url,
+ 'Pragma': 'no-cache',
+ 'Referer': f'{cls.url}/',
+ 'Sec-Ch-Ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+ 'Sec-Ch-Ua-Mobile': '?0',
+ 'Sec-Ch-Ua-Platform': '"Linux"',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
+ }
+
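+ # A random hex value stands in for Cloudflare's __cf_bm bot-management cookie.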
+ cookies = {
+ '__cf_bm': uuid.uuid4().hex,
+ }
+
+ scraper = cloudscraper.create_scraper()
+
+ prompt = format_prompt(messages)
+ data = {
+ "messages": [
+ {"role": "system", "content": "You are a helpful assistant"},
+ {"role": "user", "content": prompt}
+ ],
+ "lora": None,
+ "model": model,
+ "max_tokens": max_tokens,
+ "stream": stream
+ }
+
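+ # Retry on 403 (Cloudflare challenge) with exponential backoff before giving up.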
+ max_retries = 3
+ for attempt in range(max_retries):
+ try:
+ response = scraper.post(
+ cls.api_endpoint,
+ headers=headers,
+ cookies=cookies,
+ json=data,
+ stream=True,
+ proxies={'http': proxy, 'https': proxy} if proxy else None
+ )
+
+ if response.status_code == 403:
+ await asyncio.sleep(2 ** attempt)
+ continue
+
+ response.raise_for_status()
+
+ for line in response.iter_lines():
+ if line.startswith(b'data: '):
+ if line == b'data: [DONE]':
+ break
+ try:
+ content = json.loads(line[6:].decode('utf-8'))['response']
+ yield content
+ except Exception:
+ continue
+ break
+ except Exception as e:
+ if attempt == max_retries - 1:
+ raise
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> str:
+ full_response = ""
+ async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
+ full_response += response
+ return full_response
diff --git a/g4f/Provider/CodeNews.py b/g4f/Provider/CodeNews.py
deleted file mode 100644
index 05ec7a45..00000000
--- a/g4f/Provider/CodeNews.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-from asyncio import sleep
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class CodeNews(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://codenews.cc"
- api_endpoint = "https://codenews.cc/chatxyz13"
- working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = False
- supports_stream = True
- supports_system_message = False
- supports_message_history = False
-
- default_model = 'free_gpt'
- models = ['free_gpt', 'gpt-4o-mini', 'deepseek-coder', 'chatpdf']
-
- model_aliases = {
- "glm-4": "free_gpt",
- "gpt-3.5-turbo": "chatpdf",
- "deepseek": "deepseek-coder",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "accept": "application/json, text/javascript, */*; q=0.01",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
- "origin": cls.url,
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": f"{cls.url}/chatgpt",
- "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
- "x-requested-with": "XMLHttpRequest",
- }
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "chatgpt_input": prompt,
- "qa_type2": model,
- "chatgpt_version_value": "20240804",
- "enable_web_search": "0",
- "enable_agent": "0",
- "dy_video_text_extract": "0",
- "enable_summary": "0",
- }
- async with session.post(cls.api_endpoint, data=data, proxy=proxy) as response:
- response.raise_for_status()
- json_data = await response.json()
- chat_id = json_data["data"]["id"]
-
- headers["content-type"] = "application/x-www-form-urlencoded; charset=UTF-8"
- data = {"current_req_count": "2"}
-
- while True:
- async with session.post(f"{cls.url}/chat_stream", headers=headers, data=data, proxy=proxy) as response:
- response.raise_for_status()
- json_data = await response.json()
- if json_data["data"]:
- yield json_data["data"]
- break
- else:
- await sleep(1) # Delay before the next request
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
index c8c36fc9..43cc39c0 100644
--- a/g4f/Provider/DDG.py
+++ b/g4f/Provider/DDG.py
@@ -2,115 +2,107 @@ from __future__ import annotations
import json
import aiohttp
-import asyncio
-from typing import Optional
-import base64
+from aiohttp import ClientSession
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_connector
from ..typing import AsyncResult, Messages
-from ..requests.raise_for_status import raise_for_status
-from ..providers.conversation import BaseConversation
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
class DDG(AsyncGeneratorProvider, ProviderModelMixin):
- url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9haWNoYXQ=").decode("utf-8")
+ url = "https://duckduckgo.com"
+ api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
working = True
- supports_gpt_35_turbo = True
+ supports_stream = True
+ supports_system_message = True
supports_message_history = True
default_model = "gpt-4o-mini"
- models = ["gpt-4o-mini", "claude-3-haiku-20240307", "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
+ models = [
+ "gpt-4o-mini",
+ "claude-3-haiku-20240307",
+ "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+ "mistralai/Mixtral-8x7B-Instruct-v0.1"
+ ]
model_aliases = {
"claude-3-haiku": "claude-3-haiku-20240307",
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
"mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1"
}
- # Obfuscated URLs and headers
- status_url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9zdGF0dXM=").decode("utf-8")
- chat_url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9jaGF0").decode("utf-8")
- referer = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS8=").decode("utf-8")
- origin = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbQ==").decode("utf-8")
-
- user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
- headers = {
- 'User-Agent': user_agent,
- 'Accept': 'text/event-stream',
- 'Accept-Language': 'en-US,en;q=0.5',
- 'Accept-Encoding': 'gzip, deflate, br, zstd',
- 'Referer': referer,
- 'Content-Type': 'application/json',
- 'Origin': origin,
- 'Connection': 'keep-alive',
- 'Cookie': 'dcm=3',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-origin',
- 'Pragma': 'no-cache',
- 'TE': 'trailers'
- }
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ return cls.model_aliases.get(model, cls.default_model)
@classmethod
- async def get_vqd(cls, session: aiohttp.ClientSession) -> Optional[str]:
- try:
- async with session.get(cls.status_url, headers={"x-vqd-accept": "1"}) as response:
- await raise_for_status(response)
- return response.headers.get("x-vqd-4")
- except Exception as e:
- print(f"Error getting VQD: {e}")
- return None
+ async def get_vqd(cls):
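+ # DuckDuckGo chat requires a per-session VQD token: requesting the status endpoint
+ # with 'x-vqd-accept: 1' returns it in the 'x-vqd-4' response header.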
+ status_url = "https://duckduckgo.com/duckchat/v1/status"
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
+ 'Accept': 'text/event-stream',
+ 'x-vqd-accept': '1'
+ }
+
+ async with aiohttp.ClientSession() as session:
+ try:
+ async with session.get(status_url, headers=headers) as response:
+ if response.status == 200:
+ return response.headers.get("x-vqd-4")
+ else:
+ print(f"Error: Status code {response.status}")
+ return None
+ except Exception as e:
+ print(f"Error getting VQD: {e}")
+ return None
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
+ conversation: dict = None,
proxy: str = None,
- connector: aiohttp.BaseConnector = None,
- conversation: Conversation = None,
- return_conversation: bool = False,
**kwargs
) -> AsyncResult:
- async with aiohttp.ClientSession(headers=cls.headers, connector=get_connector(connector, proxy)) as session:
- vqd_4 = None
- if conversation is not None and len(messages) > 1:
- vqd_4 = conversation.vqd_4
- messages = [*conversation.messages, messages[-2], messages[-1]]
- else:
- for _ in range(3): # Try up to 3 times to get a valid VQD
- vqd_4 = await cls.get_vqd(session)
- if vqd_4:
- break
- await asyncio.sleep(1) # Wait a bit before retrying
-
- if not vqd_4:
- raise Exception("Failed to obtain a valid VQD token")
-
- messages = [messages[-1]] # Only use the last message for new conversations
-
- payload = {
- 'model': cls.get_model(model),
- 'messages': [{'role': m['role'], 'content': m['content']} for m in messages]
+ model = cls.get_model(model)
+
+ headers = {
+ 'accept': 'text/event-stream',
+ 'content-type': 'application/json',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
+ }
+
+ vqd = conversation.get('vqd') if conversation else await cls.get_vqd()
+ if not vqd:
+ raise Exception("Failed to obtain VQD token")
+
+ headers['x-vqd-4'] = vqd
+
+ if conversation:
+ message_history = conversation.get('messages', [])
+ message_history.append({"role": "user", "content": format_prompt(messages)})
+ else:
+ message_history = [{"role": "user", "content": format_prompt(messages)}]
+
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "model": model,
+ "messages": message_history
}
-
- async with session.post(cls.chat_url, json=payload, headers={"x-vqd-4": vqd_4}) as response:
- await raise_for_status(response)
- if return_conversation:
- yield Conversation(vqd_4, messages)
-
- async for line in response.content:
- if line.startswith(b"data: "):
- chunk = line[6:]
- if chunk.startswith(b"[DONE]"):
- break
- try:
- data = json.loads(chunk)
- if "message" in data and data["message"]:
- yield data["message"]
- except json.JSONDecodeError:
- print(f"Failed to decode JSON: {chunk}")
-class Conversation(BaseConversation):
- def __init__(self, vqd_4: str, messages: Messages) -> None:
- self.vqd_4 = vqd_4
- self.messages = messages
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line:
+ decoded_line = line.decode('utf-8')
+ if decoded_line.startswith('data: '):
+ json_str = decoded_line[6:]
+ if json_str == '[DONE]':
+ break
+ try:
+ json_data = json.loads(json_str)
+ if 'message' in json_data:
+ yield json_data['message']
+ except json.JSONDecodeError:
+ pass
diff --git a/g4f/Provider/DarkAI.py b/g4f/Provider/DarkAI.py
new file mode 100644
index 00000000..6ffb615e
--- /dev/null
+++ b/g4f/Provider/DarkAI.py
@@ -0,0 +1,85 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.aiuncensored.info"
+ api_endpoint = "https://darkai.foundation/chat"
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-4o'
+ models = [
+ default_model, # Uncensored
+ 'gpt-3.5-turbo', # Uncensored
+ 'llama-3-70b', # Uncensored
+ 'llama-3-405b',
+ ]
+
+ model_aliases = {
+ "llama-3.1-70b": "llama-3-70b",
+ "llama-3.1-405b": "llama-3-405b",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "accept": "text/event-stream",
+ "content-type": "application/json",
+ "origin": "https://www.aiuncensored.info",
+ "referer": "https://www.aiuncensored.info/",
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "query": prompt,
+ "model": model,
+ }
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
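+ # Accumulate 'text-chunk' SSE events and emit the full text once 'stream-end' arrives.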
+ full_text = ""
+ async for chunk in response.content:
+ if chunk:
+ try:
+ chunk_str = chunk.decode().strip()
+ if chunk_str.startswith('data: '):
+ chunk_data = json.loads(chunk_str[6:])
+ if chunk_data['event'] == 'text-chunk':
+ full_text += chunk_data['data']['text']
+ elif chunk_data['event'] == 'stream-end':
+ if full_text:
+ yield full_text.strip()
+ return
+ except json.JSONDecodeError:
+ print(f"Failed to decode JSON: {chunk_str}")
+ except Exception as e:
+ print(f"Error processing chunk: {e}")
+
+ if full_text:
+ yield full_text.strip()
diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py
new file mode 100644
index 00000000..b8cc6ab8
--- /dev/null
+++ b/g4f/Provider/DeepInfraChat.py
@@ -0,0 +1,142 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ..typing import AsyncResult, Messages, ImageType
+from ..image import to_data_uri
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://deepinfra.com/chat"
+ api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct'
+ models = [
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct',
+ 'meta-llama/Meta-Llama-3.1-70B-Instruct',
+ 'meta-llama/Meta-Llama-3.1-8B-Instruct',
+ 'mistralai/Mixtral-8x22B-Instruct-v0.1',
+ 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+ 'microsoft/WizardLM-2-8x22B',
+ 'microsoft/WizardLM-2-7B',
+ 'Qwen/Qwen2-72B-Instruct',
+ 'microsoft/Phi-3-medium-4k-instruct',
+ 'google/gemma-2-27b-it',
+ 'openbmb/MiniCPM-Llama3-V-2_5', # Image upload is available
+ 'mistralai/Mistral-7B-Instruct-v0.3',
+ 'lizpreciatior/lzlv_70b_fp16_hf',
+ 'openchat/openchat-3.6-8b',
+ 'Phind/Phind-CodeLlama-34B-v2',
+ 'cognitivecomputations/dolphin-2.9.1-llama-3-70b',
+ ]
+ model_aliases = {
+ "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct",
+ "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+ "llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+ "mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1",
+ "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
+ "wizardlm-2-7b": "microsoft/WizardLM-2-7B",
+ "qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
+ "phi-3-medium-4k": "microsoft/Phi-3-medium-4k-instruct",
+ "gemma-2b-27b": "google/gemma-2-27b-it",
+ "minicpm-llama-3-v2.5": "openbmb/MiniCPM-Llama3-V-2_5", # Image upload is available
+ "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
+ "lzlv-70b": "lizpreciatior/lzlv_70b_fp16_hf",
+ "openchat-3.6-8b": "openchat/openchat-3.6-8b",
+ "phind-codellama-34b-v2": "Phind/Phind-CodeLlama-34B-v2",
+ "dolphin-2.9.1-llama-3-70b": "cognitivecomputations/dolphin-2.9.1-llama-3-70b",
+ }
+
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ image: ImageType = None,
+ image_name: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Accept-Language': 'en-US,en;q=0.9',
+ 'Cache-Control': 'no-cache',
+ 'Connection': 'keep-alive',
+ 'Content-Type': 'application/json',
+ 'Origin': 'https://deepinfra.com',
+ 'Pragma': 'no-cache',
+ 'Referer': 'https://deepinfra.com/',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-site',
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
+ 'X-Deepinfra-Source': 'web-embed',
+ 'accept': 'text/event-stream',
+ 'sec-ch-ua': '"Not;A=Brand";v="24", "Chromium";v="128"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ 'model': model,
+ 'messages': [
+ {'role': 'system', 'content': 'Be a helpful assistant'},
+ {'role': 'user', 'content': prompt}
+ ],
+ 'stream': True
+ }
+
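+ # Only MiniCPM-Llama3-V-2_5 accepts images here; attach the image as a data URI next to the last user text.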
+ if model == 'openbmb/MiniCPM-Llama3-V-2_5' and image is not None:
+ data['messages'][-1]['content'] = [
+ {
+ 'type': 'image_url',
+ 'image_url': {
+ 'url': to_data_uri(image)
+ }
+ },
+ {
+ 'type': 'text',
+ 'text': messages[-1]['content']
+ }
+ ]
+
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line:
+ decoded_line = line.decode('utf-8').strip()
+ if decoded_line.startswith('data:'):
+ json_part = decoded_line[5:].strip()
+ if json_part == '[DONE]':
+ break
+ try:
+ data = json.loads(json_part)
+ choices = data.get('choices', [])
+ if choices:
+ delta = choices[0].get('delta', {})
+ content = delta.get('content', '')
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ print(f"JSON decode error: {json_part}")
diff --git a/g4f/Provider/DeepInfraImage.py b/g4f/Provider/DeepInfraImage.py
index 46a5c2e2..cee608ce 100644
--- a/g4f/Provider/DeepInfraImage.py
+++ b/g4f/Provider/DeepInfraImage.py
@@ -11,7 +11,8 @@ class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://deepinfra.com"
parent = "DeepInfra"
working = True
- default_model = 'stability-ai/sdxl'
+ needs_auth = True
+ default_model = ''
image_models = [default_model]
@classmethod
@@ -76,4 +77,4 @@ class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin):
if not images:
raise RuntimeError(f"Response: {data}")
images = images[0] if len(images) == 1 else images
- return ImageResponse(images, prompt) \ No newline at end of file
+ return ImageResponse(images, prompt)
diff --git a/g4f/Provider/Editee.py b/g4f/Provider/Editee.py
new file mode 100644
index 00000000..8ac2324a
--- /dev/null
+++ b/g4f/Provider/Editee.py
@@ -0,0 +1,77 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Editee(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Editee"
+ url = "https://editee.com"
+ api_endpoint = "https://editee.com/submit/chatgptfree"
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'claude'
+ models = ['claude', 'gpt4', 'gemini', 'mistrallarge']
+
+ model_aliases = {
+ "claude-3.5-sonnet": "claude",
+ "gpt-4o": "gpt4",
+ "gemini-pro": "gemini",
+ "mistral-large": "mistrallarge",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Accept": "application/json, text/plain, */*",
+ "Accept-Language": "en-US,en;q=0.9",
+ "Cache-Control": "no-cache",
+ "Content-Type": "application/json",
+ "Origin": cls.url,
+ "Pragma": "no-cache",
+ "Priority": "u=1, i",
+ "Referer": f"{cls.url}/chat-gpt",
+ "Sec-CH-UA": '"Chromium";v="129", "Not=A?Brand";v="8"',
+ "Sec-CH-UA-Mobile": '?0',
+ "Sec-CH-UA-Platform": '"Linux"',
+ "Sec-Fetch-Dest": 'empty',
+ "Sec-Fetch-Mode": 'cors',
+ "Sec-Fetch-Site": 'same-origin',
+ "User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
+ "X-Requested-With": 'XMLHttpRequest',
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "user_input": prompt,
+ "context": " ",
+ "template_id": "",
+ "selected_model": model
+ }
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ response_data = await response.json()
+ yield response_data['text']
diff --git a/g4f/Provider/FlowGpt.py b/g4f/Provider/FlowGpt.py
index d823a7ab..1a45997b 100644
--- a/g4f/Provider/FlowGpt.py
+++ b/g4f/Provider/FlowGpt.py
@@ -12,8 +12,7 @@ from ..requests.raise_for_status import raise_for_status
class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://flowgpt.com/chat"
- working = True
- supports_gpt_35_turbo = True
+ working = False
supports_message_history = True
supports_system_message = True
default_model = "gpt-3.5-turbo"
diff --git a/g4f/Provider/FluxAirforce.py b/g4f/Provider/FluxAirforce.py
deleted file mode 100644
index fe003a61..00000000
--- a/g4f/Provider/FluxAirforce.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession, ClientResponseError
-from urllib.parse import urlencode
-import io
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import ImageResponse, is_accepted_format
-
-class FluxAirforce(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://flux.api.airforce/"
- api_endpoint = "https://api.airforce/v1/imagine2"
- working = True
- default_model = 'flux-realism'
- models = [
- 'flux',
- 'flux-realism',
- 'flux-anime',
- 'flux-3d',
- 'flux-disney'
- ]
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "origin": "https://flux.api.airforce",
- "priority": "u=1, i",
- "referer": "https://flux.api.airforce/",
- "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-site",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
- }
-
- prompt = messages[-1]['content'] if messages else ""
-
- params = {
- "prompt": prompt,
- "size": kwargs.get("size", "1:1"),
- "seed": kwargs.get("seed"),
- "model": model
- }
-
- params = {k: v for k, v in params.items() if v is not None}
-
- try:
- async with ClientSession(headers=headers) as session:
- async with session.get(f"{cls.api_endpoint}", params=params, proxy=proxy) as response:
- response.raise_for_status()
-
- content = await response.read()
-
- if response.content_type.startswith('image/'):
- image_url = str(response.url)
- yield ImageResponse(image_url, prompt)
- else:
- try:
- text = content.decode('utf-8', errors='ignore')
- yield f"Error: {text}"
- except Exception as decode_error:
- yield f"Error: Unable to decode response - {str(decode_error)}"
-
- except ClientResponseError as e:
- yield f"Error: HTTP {e.status}: {e.message}"
- except Exception as e:
- yield f"Unexpected error: {str(e)}"
-
- finally:
- if not session.closed:
- await session.close()
diff --git a/g4f/Provider/FreeNetfly.py b/g4f/Provider/FreeNetfly.py
index d0543176..ada5d51a 100644
--- a/g4f/Provider/FreeNetfly.py
+++ b/g4f/Provider/FreeNetfly.py
@@ -13,8 +13,6 @@ class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://free.netfly.top"
api_endpoint = "/api/openai/v1/chat/completions"
working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
default_model = 'gpt-3.5-turbo'
models = [
'gpt-3.5-turbo',
diff --git a/g4f/Provider/GPROChat.py b/g4f/Provider/GPROChat.py
new file mode 100644
index 00000000..a33c9571
--- /dev/null
+++ b/g4f/Provider/GPROChat.py
@@ -0,0 +1,67 @@
+from __future__ import annotations
+import hashlib
+import time
+from aiohttp import ClientSession
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class GPROChat(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "GPROChat"
+ url = "https://gprochat.com"
+ api_endpoint = "https://gprochat.com/api/generate"
+ working = True
+ supports_stream = True
+ supports_message_history = True
+ default_model = 'gemini-pro'
+
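+ # Requests carry a sha256 signature over "<timestamp>:<prompt>:<secret_key>" (see generate_signature below).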
+ @staticmethod
+ def generate_signature(timestamp: int, message: str) -> str:
+ secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8"
+ hash_input = f"{timestamp}:{message}:{secret_key}"
+ signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest()
+ return signature
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ timestamp = int(time.time() * 1000)
+ prompt = format_prompt(messages)
+ sign = cls.generate_signature(timestamp, prompt)
+
+ headers = {
+ "accept": "*/*",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
+ "content-type": "text/plain;charset=UTF-8"
+ }
+
+ data = {
+ "messages": [{"role": "user", "parts": [{"text": prompt}]}],
+ "time": timestamp,
+ "pass": None,
+ "sign": sign
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ if chunk:
+ yield chunk.decode()
diff --git a/g4f/Provider/GeminiPro.py b/g4f/Provider/GeminiPro.py
index b225c26c..06bf69ee 100644
--- a/g4f/Provider/GeminiPro.py
+++ b/g4f/Provider/GeminiPro.py
@@ -54,6 +54,7 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
"parts": [{"text": message["content"]}]
}
for message in messages
+ if message["role"] != "system"
]
if image is not None:
image = to_bytes(image)
@@ -73,6 +74,13 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
"topK": kwargs.get("top_k"),
}
}
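+ # Gemini rejects a "system" role inside "contents"; pass system messages via "system_instruction" instead.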
+ system_prompt = "\n".join(
+ message["content"]
+ for message in messages
+ if message["role"] == "system"
+ )
+ if system_prompt:
+ data["system_instruction"] = {"parts": {"text": system_prompt}}
async with session.post(url, params=params, json=data) as response:
if not response.ok:
data = await response.json()
diff --git a/g4f/Provider/GizAI.py b/g4f/Provider/GizAI.py
new file mode 100644
index 00000000..127edc9e
--- /dev/null
+++ b/g4f/Provider/GizAI.py
@@ -0,0 +1,151 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from ..image import ImageResponse
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://app.giz.ai/assistant/"
+ api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
+ working = True
+
+ supports_system_message = True
+ supports_message_history = True
+
+ # Chat models
+ default_model = 'chat-gemini-flash'
+ chat_models = [
+ default_model,
+ 'chat-gemini-pro',
+ 'chat-gpt4m',
+ 'chat-gpt4',
+ 'claude-sonnet',
+ 'claude-haiku',
+ 'llama-3-70b',
+ 'llama-3-8b',
+ 'mistral-large',
+ 'chat-o1-mini'
+ ]
+
+ # Image models
+ image_models = [
+ 'flux1',
+ 'sdxl',
+ 'sd',
+ 'sd35',
+ ]
+
+ models = [*chat_models, *image_models]
+
+ model_aliases = {
+ # Chat model aliases
+ "gemini-flash": "chat-gemini-flash",
+ "gemini-pro": "chat-gemini-pro",
+ "gpt-4o-mini": "chat-gpt4m",
+ "gpt-4o": "chat-gpt4",
+ "claude-3.5-sonnet": "claude-sonnet",
+ "claude-3-haiku": "claude-haiku",
+ "llama-3.1-70b": "llama-3-70b",
+ "llama-3.1-8b": "llama-3-8b",
+ "o1-mini": "chat-o1-mini",
+ # Image model aliases
+ "sd-1.5": "sd",
+ "sd-3.5": "sd35",
+ "flux-schnell": "flux1",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ def is_image_model(cls, model: str) -> bool:
+ return model in cls.image_models
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Accept': 'application/json, text/plain, */*',
+ 'Accept-Language': 'en-US,en;q=0.9',
+ 'Cache-Control': 'no-cache',
+ 'Connection': 'keep-alive',
+ 'Content-Type': 'application/json',
+ 'Origin': 'https://app.giz.ai',
+ 'Pragma': 'no-cache',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"'
+ }
+
+ async with ClientSession() as session:
+ if cls.is_image_model(model):
+ # Image generation
+ prompt = messages[-1]["content"]
+ data = {
+ "model": model,
+ "input": {
+ "width": "1024",
+ "height": "1024",
+ "steps": 4,
+ "output_format": "webp",
+ "batch_size": 1,
+ "mode": "plan",
+ "prompt": prompt
+ }
+ }
+ async with session.post(
+ cls.api_endpoint,
+ headers=headers,
+ data=json.dumps(data),
+ proxy=proxy
+ ) as response:
+ response.raise_for_status()
+ response_data = await response.json()
+ if response_data.get('status') == 'completed' and response_data.get('output'):
+ for url in response_data['output']:
+ yield ImageResponse(images=url, alt="Generated Image")
+ else:
+ # Chat completion
+ data = {
+ "model": model,
+ "input": {
+ "messages": [
+ {
+ "type": "human",
+ "content": format_prompt(messages)
+ }
+ ],
+ "mode": "plan"
+ },
+ "noStream": True
+ }
+ async with session.post(
+ cls.api_endpoint,
+ headers=headers,
+ data=json.dumps(data),
+ proxy=proxy
+ ) as response:
+ response.raise_for_status()
+ result = await response.json()
+ yield result.get('output', '')
diff --git a/g4f/Provider/GptTalkRu.py b/g4f/Provider/GptTalkRu.py
deleted file mode 100644
index 6a59484f..00000000
--- a/g4f/Provider/GptTalkRu.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession, BaseConnector
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string, get_connector
-from ..requests import raise_for_status, get_args_from_browser, WebDriver
-from ..webdriver import has_seleniumwire
-from ..errors import MissingRequirementsError
-
-class GptTalkRu(AsyncGeneratorProvider):
- url = "https://gpttalk.ru"
- working = True
- supports_gpt_35_turbo = True
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- connector: BaseConnector = None,
- webdriver: WebDriver = None,
- **kwargs
- ) -> AsyncResult:
- if not model:
- model = "gpt-3.5-turbo"
- if not has_seleniumwire:
- raise MissingRequirementsError('Install "selenium-wire" package')
- args = get_args_from_browser(f"{cls.url}", webdriver)
- args["headers"]["accept"] = "application/json, text/plain, */*"
- async with ClientSession(connector=get_connector(connector, proxy), **args) as session:
- async with session.get("https://gpttalk.ru/getToken") as response:
- await raise_for_status(response)
- public_key = (await response.json())["response"]["key"]["publicKey"]
- random_string = get_random_string(8)
- data = {
- "model": model,
- "modelType": 1,
- "prompt": messages,
- "responseType": "stream",
- "security": {
- "randomMessage": random_string,
- "shifrText": encrypt(public_key, random_string)
- }
- }
- async with session.post(f"{cls.url}/gpt2", json=data, proxy=proxy) as response:
- await raise_for_status(response)
- async for chunk in response.content.iter_any():
- yield chunk.decode(errors="ignore")
-
-def encrypt(public_key: str, value: str) -> str:
- from Crypto.Cipher import PKCS1_v1_5
- from Crypto.PublicKey import RSA
- import base64
- rsa_key = RSA.importKey(public_key)
- cipher = PKCS1_v1_5.new(rsa_key)
- return base64.b64encode(cipher.encrypt(value.encode())).decode() \ No newline at end of file
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 76c76a35..7ebbf570 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -1,6 +1,7 @@
from __future__ import annotations
-import json, requests, re
+import json
+import requests
from curl_cffi import requests as cf_reqs
from ..typing import CreateResult, Messages
@@ -12,26 +13,27 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
working = True
supports_stream = True
default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
+
models = [
'meta-llama/Meta-Llama-3.1-70B-Instruct',
- 'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
- 'CohereForAI/c4ai-command-r-plus',
- 'mistralai/Mixtral-8x7B-Instruct-v0.1',
- 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
- '01-ai/Yi-1.5-34B-Chat',
- 'mistralai/Mistral-7B-Instruct-v0.3',
- 'microsoft/Phi-3-mini-4k-instruct',
+ 'CohereForAI/c4ai-command-r-plus-08-2024',
+ 'Qwen/Qwen2.5-72B-Instruct',
+ 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
+ 'meta-llama/Llama-3.2-11B-Vision-Instruct',
+ 'NousResearch/Hermes-3-Llama-3.1-8B',
+ 'mistralai/Mistral-Nemo-Instruct-2407',
+ 'microsoft/Phi-3.5-mini-instruct',
]
model_aliases = {
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
- "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
- "command-r-plus": "CohereForAI/c4ai-command-r-plus",
- "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
- "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
- "yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat",
- "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
- "phi-3-mini-4k": "microsoft/Phi-3-mini-4k-instruct",
+ "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
+ "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
+ "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+ "llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct",
+ "hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B",
+ "mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407",
+ "phi-3.5-mini": "microsoft/Phi-3.5-mini-instruct",
}
@classmethod
@@ -72,17 +74,18 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
}
- print(model)
json_data = {
'model': model,
}
response = session.post('https://huggingface.co/chat/conversation', json=json_data)
- conversationId = response.json()['conversationId']
+ if response.status_code != 200:
+ raise RuntimeError(f"Request failed with status code: {response.status_code}, response: {response.text}")
- response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=01',)
+ conversationId = response.json().get('conversationId')
+ response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11')
- data: list = (response.json())["nodes"][1]["data"]
+ data: list = response.json()["nodes"][1]["data"]
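+ # __data.json stores values in a flat list referenced by index (SvelteKit's serialization), hence the indirection below.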
keys: list[int] = data[data[0]["messages"]]
message_keys: dict = data[keys[0]]
messageId: str = data[message_keys["id"]]
@@ -123,22 +126,26 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
files=files,
)
- first_token = True
+ full_response = ""
for line in response.iter_lines():
- line = json.loads(line)
+ if not line:
+ continue
+ try:
+ line = json.loads(line)
+ except json.JSONDecodeError as e:
+ print(f"Failed to decode JSON: {line}, error: {e}")
+ continue
if "type" not in line:
raise RuntimeError(f"Response: {line}")
elif line["type"] == "stream":
- token = line["token"]
- if first_token:
- token = token.lstrip().replace('\u0000', '')
- first_token = False
- else:
- token = token.replace('\u0000', '')
-
- yield token
+ token = line["token"].replace('\u0000', '')
+ full_response += token
elif line["type"] == "finalAnswer":
break
+
+ full_response = full_response.replace('<|im_end|>', '').replace('\u0000', '').strip()
+
+ yield full_response
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
index 74957862..586e5f5f 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/HuggingFace.py
@@ -9,33 +9,16 @@ from .helper import get_connector
from ..errors import RateLimitError, ModelNotFoundError
from ..requests.raise_for_status import raise_for_status
+from .HuggingChat import HuggingChat
+
class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"
working = True
needs_auth = True
supports_message_history = True
- default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
- models = [
- 'meta-llama/Meta-Llama-3.1-70B-Instruct',
- 'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
- 'CohereForAI/c4ai-command-r-plus',
- 'mistralai/Mixtral-8x7B-Instruct-v0.1',
- 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
- '01-ai/Yi-1.5-34B-Chat',
- 'mistralai/Mistral-7B-Instruct-v0.3',
- 'microsoft/Phi-3-mini-4k-instruct',
- ]
-
- model_aliases = {
- "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
- "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
- "command-r-plus": "CohereForAI/c4ai-command-r-plus",
- "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
- "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
- "yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat",
- "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
- "phi-3-mini-4k": "microsoft/Phi-3-mini-4k-instruct",
- }
+ default_model = HuggingChat.default_model
+ models = HuggingChat.models
+ model_aliases = HuggingChat.model_aliases
@classmethod
def get_model(cls, model: str) -> str:
diff --git a/g4f/Provider/Koala.py b/g4f/Provider/Koala.py
index 0e810083..0dd76b71 100644
--- a/g4f/Provider/Koala.py
+++ b/g4f/Provider/Koala.py
@@ -10,10 +10,10 @@ from .helper import get_random_string, get_connector
from ..requests import raise_for_status
class Koala(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://koala.sh"
+ url = "https://koala.sh/chat"
+ api_endpoint = "https://koala.sh/api/gpt/"
working = True
supports_message_history = True
- supports_gpt_4 = True
default_model = 'gpt-4o-mini'
@classmethod
@@ -26,17 +26,17 @@ class Koala(AsyncGeneratorProvider, ProviderModelMixin):
**kwargs: Any
) -> AsyncGenerator[Dict[str, Union[str, int, float, List[Dict[str, Any]], None]], None]:
if not model:
- model = "gpt-3.5-turbo"
+ model = "gpt-4o-mini"
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "text/event-stream",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
- "Referer": f"{cls.url}/chat",
+ "Referer": f"{cls.url}",
"Flag-Real-Time-Data": "false",
"Visitor-ID": get_random_string(20),
- "Origin": cls.url,
+ "Origin": "https://koala.sh",
"Alt-Used": "koala.sh",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
@@ -67,7 +67,7 @@ class Koala(AsyncGeneratorProvider, ProviderModelMixin):
"model": model,
}
- async with session.post(f"{cls.url}/api/gpt/", json=data, proxy=proxy) as response:
+ async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
await raise_for_status(response)
async for chunk in cls._parse_event_stream(response):
yield chunk
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 8a9f46b1..56f765de 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -9,6 +9,15 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
+ "gpt-3.5-turbo": {
+ "id": "gpt-3.5-turbo",
+ "name": "GPT-3.5-Turbo",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 48000,
+ "tokenLimit": 14000,
+ "context": "16K",
+ },
"gpt-4o-mini-free": {
"id": "gpt-4o-mini-free",
"name": "GPT-4o-Mini-Free",
@@ -36,32 +45,41 @@ models = {
"tokenLimit": 7800,
"context": "8K",
},
- "gpt-4-turbo-2024-04-09": {
- "id": "gpt-4-turbo-2024-04-09",
- "name": "GPT-4-Turbo",
+ "gpt-4o-2024-08-06": {
+ "id": "gpt-4o-2024-08-06",
+ "name": "GPT-4o",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 260000,
"tokenLimit": 126000,
"context": "128K",
},
- "gpt-4o-2024-08-06": {
- "id": "gpt-4o-2024-08-06",
- "name": "GPT-4o",
+ "gpt-4-turbo-2024-04-09": {
+ "id": "gpt-4-turbo-2024-04-09",
+ "name": "GPT-4-Turbo",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 260000,
"tokenLimit": 126000,
"context": "128K",
},
- "gpt-4-0613": {
- "id": "gpt-4-0613",
- "name": "GPT-4-0613",
- "model": "ChatGPT",
- "provider": "OpenAI",
- "maxLength": 32000,
- "tokenLimit": 7600,
- "context": "8K",
+ "grok-2": {
+ "id": "grok-2",
+ "name": "Grok-2",
+ "model": "Grok",
+ "provider": "x.ai",
+ "maxLength": 400000,
+ "tokenLimit": 100000,
+ "context": "100K",
+ },
+ "grok-2-mini": {
+ "id": "grok-2-mini",
+ "name": "Grok-2-mini",
+ "model": "Grok",
+ "provider": "x.ai",
+ "maxLength": 400000,
+ "tokenLimit": 100000,
+ "context": "100K",
},
"claude-3-opus-20240229": {
"id": "claude-3-opus-20240229",
@@ -90,18 +108,18 @@ models = {
"tokenLimit": 200000,
"context": "200K",
},
- "claude-3-sonnet-20240229": {
- "id": "claude-3-sonnet-20240229",
- "name": "Claude-3-Sonnet",
+ "claude-3-5-sonnet-20240620": {
+ "id": "claude-3-5-sonnet-20240620",
+ "name": "Claude-3.5-Sonnet",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
- "claude-3-5-sonnet-20240620": {
- "id": "claude-3-5-sonnet-20240620",
- "name": "Claude-3.5-Sonnet",
+ "claude-3-sonnet-20240229": {
+ "id": "claude-3-sonnet-20240229",
+ "name": "Claude-3-Sonnet",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
@@ -126,17 +144,8 @@ models = {
"tokenLimit": 200000,
"context": "200K",
},
- "gemini-1.0-pro-latest": {
- "id": "gemini-1.0-pro-latest",
- "name": "Gemini-Pro",
- "model": "Gemini",
- "provider": "Google",
- "maxLength": 120000,
- "tokenLimit": 30000,
- "context": "32K",
- },
- "gemini-1.5-flash-latest": {
- "id": "gemini-1.5-flash-latest",
+ "gemini-1.5-flash-002": {
+ "id": "gemini-1.5-flash-002",
"name": "Gemini-1.5-Flash-1M",
"model": "Gemini",
"provider": "Google",
@@ -144,8 +153,8 @@ models = {
"tokenLimit": 1000000,
"context": "1024K",
},
- "gemini-1.5-pro-latest": {
- "id": "gemini-1.5-pro-latest",
+ "gemini-1.5-pro-002": {
+ "id": "gemini-1.5-pro-002",
"name": "Gemini-1.5-Pro-1M",
"model": "Gemini",
"provider": "Google",
@@ -161,28 +170,27 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
working = True
supports_message_history = True
supports_system_message = True
- supports_gpt_4 = True
- default_model = "gpt-4o"
+ default_model = "gpt-3.5-turbo"
models = list(models.keys())
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-free",
"gpt-4o": "gpt-4o-free",
- "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
"gpt-4o": "gpt-4o-2024-08-06",
+
+ "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
"gpt-4": "gpt-4-0613",
"claude-3-opus": "claude-3-opus-20240229",
"claude-3-opus": "claude-3-opus-20240229-aws",
"claude-3-opus": "claude-3-opus-20240229-gcp",
"claude-3-sonnet": "claude-3-sonnet-20240229",
- "claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
+ "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
"claude-3-haiku": "claude-3-haiku-20240307",
"claude-2.1": "claude-2.1",
- "gemini-pro": "gemini-1.0-pro-latest",
- "gemini-flash": "gemini-1.5-flash-latest",
- "gemini-pro": "gemini-1.5-pro-latest",
+ "gemini-flash": "gemini-1.5-flash-002",
+ "gemini-pro": "gemini-1.5-pro-002",
}
_auth_code = ""
diff --git a/g4f/Provider/LiteIcoding.py b/g4f/Provider/LiteIcoding.py
deleted file mode 100644
index 69294a57..00000000
--- a/g4f/Provider/LiteIcoding.py
+++ /dev/null
@@ -1,113 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession, ClientResponseError
-import re
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://lite.icoding.ink"
- api_endpoint = "/api/v1/gpt/message"
- working = True
- supports_gpt_4 = True
- default_model = "gpt-4o"
- models = [
- 'gpt-4o',
- 'gpt-4-turbo',
- 'claude-3',
- 'claude-3.5',
- 'gemini-1.5',
- ]
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "Accept": "*/*",
- "Accept-Language": "en-US,en;q=0.9",
- "Authorization": "Bearer aa3020ee873e40cb8b3f515a0708ebc4",
- "Connection": "keep-alive",
- "Content-Type": "application/json;charset=utf-8",
- "DNT": "1",
- "Origin": cls.url,
- "Referer": f"{cls.url}/",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "User-Agent": (
- "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
- "Chrome/126.0.0.0 Safari/537.36"
- ),
- "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- }
-
- data = {
- "model": model,
- "chatId": "-1",
- "messages": [
- {
- "role": msg["role"],
- "content": msg["content"],
- "time": msg.get("time", ""),
- "attachments": msg.get("attachments", []),
- }
- for msg in messages
- ],
- "plugins": [],
- "systemPrompt": "",
- "temperature": 0.5,
- }
-
- async with ClientSession(headers=headers) as session:
- try:
- async with session.post(
- f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy
- ) as response:
- response.raise_for_status()
- buffer = ""
- full_response = ""
- def decode_content(data):
- bytes_array = bytes([int(b, 16) ^ 255 for b in data.split()])
- return bytes_array.decode('utf-8')
- async for chunk in response.content.iter_any():
- if chunk:
- buffer += chunk.decode()
- while "\n\n" in buffer:
- part, buffer = buffer.split("\n\n", 1)
- if part.startswith("data: "):
- content = part[6:].strip()
- if content and content != "[DONE]":
- content = content.strip('"')
- # Decoding each content block
- decoded_content = decode_content(content)
- full_response += decoded_content
- full_response = (
- full_response.replace('""', '') # Handle double quotes
- .replace('" "', ' ') # Handle space within quotes
- .replace("\\n\\n", "\n\n")
- .replace("\\n", "\n")
- .replace('\\"', '"')
- .strip()
- )
- # Add filter to remove unwanted text
- filtered_response = re.sub(r'\n---\n.*', '', full_response, flags=re.DOTALL)
- # Remove extra quotes at the beginning and end
- cleaned_response = filtered_response.strip().strip('"')
- yield cleaned_response
-
- except ClientResponseError as e:
- raise RuntimeError(
- f"ClientResponseError {e.status}: {e.message}, url={e.request_info.url}, data={data}"
- ) from e
-
- except Exception as e:
- raise RuntimeError(f"Unexpected error: {str(e)}") from e
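
The deleted LiteIcoding provider decoded each SSE "data:" payload by splitting it into space-separated hex bytes and XORing each with 255. A self-contained round trip of that decoding step (the payload format is assumed from the removed code, not re-verified against the live service):

    def decode_content(data: str) -> str:
        # Each space-separated hex value is XORed with 255, then the bytes are UTF-8 decoded.
        return bytes(int(b, 16) ^ 255 for b in data.split()).decode("utf-8")

    # Locally encode a sample string the same way to demonstrate the round trip.
    encoded = " ".join(format(b ^ 255, "x") for b in "Hello, world".encode("utf-8"))
    print(decode_content(encoded))  # Hello, world
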
diff --git a/g4f/Provider/Llama.py b/g4f/Provider/Llama.py
deleted file mode 100644
index 235c0994..00000000
--- a/g4f/Provider/Llama.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from ..requests.raise_for_status import raise_for_status
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-
-
-class Llama(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://www.llama2.ai"
- working = False
- supports_message_history = True
- default_model = "meta/meta-llama-3-70b-instruct"
- models = [
- "meta/llama-2-7b-chat",
- "meta/llama-2-13b-chat",
- "meta/llama-2-70b-chat",
- "meta/meta-llama-3-8b-instruct",
- "meta/meta-llama-3-70b-instruct",
- ]
- model_aliases = {
- "meta-llama/Meta-Llama-3-8B-Instruct": "meta/meta-llama-3-8b-instruct",
- "meta-llama/Meta-Llama-3-70B-Instruct": "meta/meta-llama-3-70b-instruct",
- "meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat",
- "meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat",
- "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat",
- }
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- system_message: str = "You are a helpful assistant.",
- temperature: float = 0.75,
- top_p: float = 0.9,
- max_tokens: int = 8000,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
- "Accept": "*/*",
- "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
- "Accept-Encoding": "gzip, deflate, br",
- "Referer": f"{cls.url}/",
- "Content-Type": "text/plain;charset=UTF-8",
- "Origin": cls.url,
- "Connection": "keep-alive",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "Pragma": "no-cache",
- "Cache-Control": "no-cache",
- "TE": "trailers"
- }
- async with ClientSession(headers=headers) as session:
- system_messages = [message["content"] for message in messages if message["role"] == "system"]
- if system_messages:
- system_message = "\n".join(system_messages)
- messages = [message for message in messages if message["role"] != "system"]
- prompt = format_prompt(messages)
- data = {
- "prompt": prompt,
- "model": cls.get_model(model),
- "systemPrompt": system_message,
- "temperature": temperature,
- "topP": top_p,
- "maxTokens": max_tokens,
- "image": None
- }
- started = False
- async with session.post(f"{cls.url}/api", json=data, proxy=proxy) as response:
- await raise_for_status(response)
- async for chunk in response.content.iter_any():
- if not chunk:
- continue
- if not started:
- chunk = chunk.lstrip()
- started = True
- yield chunk.decode(errors="ignore")
-
-def format_prompt(messages: Messages):
- messages = [
- f"[INST] {message['content']} [/INST]"
- if message["role"] == "user"
- else message["content"]
- for message in messages
- ]
- return "\n".join(messages) + "\n"
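
The deleted Llama provider built its prompt by wrapping user turns in [INST] tags and passing other turns through unchanged, while system messages were pulled out separately and sent as systemPrompt. A standalone version of that formatting:

    def format_prompt(messages: list) -> str:
        # Wrap user turns in Llama-2 style [INST] tags; leave other roles as-is.
        parts = [
            f"[INST] {m['content']} [/INST]" if m["role"] == "user" else m["content"]
            for m in messages
        ]
        return "\n".join(parts) + "\n"

    print(format_prompt([
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello!"},
        {"role": "user", "content": "Summarize our chat."},
    ]))
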
diff --git a/g4f/Provider/MagickPen.py b/g4f/Provider/MagickPen.py
index eab70536..7f1751dd 100644
--- a/g4f/Provider/MagickPen.py
+++ b/g4f/Provider/MagickPen.py
@@ -1,72 +1,53 @@
from __future__ import annotations
+from aiohttp import ClientSession
+import hashlib
import time
import random
-import hashlib
import re
-from aiohttp import ClientSession
-
+import json
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://magickpen.com"
- api_endpoint_free = "https://api.magickpen.com/chat/free"
- api_endpoint_ask = "https://api.magickpen.com/ask"
+ api_endpoint = "https://api.magickpen.com/ask"
working = True
- supports_gpt_4 = True
- supports_stream = False
-
- default_model = 'free'
- models = ['free', 'ask']
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
- model_aliases = {
- "gpt-4o-mini": "free",
- "gpt-4o-mini": "ask",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
+ default_model = 'gpt-4o-mini'
+ models = ['gpt-4o-mini']
@classmethod
- async def get_secrets(cls):
- url = 'https://magickpen.com/_nuxt/02c76dc.js'
+ async def fetch_api_credentials(cls) -> tuple:
+ url = "https://magickpen.com/_nuxt/bf709a9ce19f14e18116.js"
async with ClientSession() as session:
async with session.get(url) as response:
- if response.status == 200:
- text = await response.text()
- x_api_secret_match = re.search(r'"X-API-Secret":"([^"]+)"', text)
- secret_match = re.search(r'secret:\s*"([^"]+)"', text)
-
- x_api_secret = x_api_secret_match.group(1) if x_api_secret_match else None
- secret = secret_match.group(1) if secret_match else None
-
- # Generate timestamp and nonce dynamically
- timestamp = str(int(time.time() * 1000))
- nonce = str(random.random())
-
- # Generate signature
- signature_parts = ["TGDBU9zCgM", timestamp, nonce]
- signature_string = "".join(sorted(signature_parts))
- signature = hashlib.md5(signature_string.encode()).hexdigest()
-
- return {
- 'X-API-Secret': x_api_secret,
- 'signature': signature,
- 'timestamp': timestamp,
- 'nonce': nonce,
- 'secret': secret
- }
- else:
- print(f"Error while fetching the file: {response.status}")
- return None
+ text = await response.text()
+
+ pattern = r'"X-API-Secret":"(\w+)"'
+ match = re.search(pattern, text)
+ X_API_SECRET = match.group(1) if match else None
+
+ timestamp = str(int(time.time() * 1000))
+ nonce = str(random.random())
+
+ s = ["TGDBU9zCgM", timestamp, nonce]
+ s.sort()
+ signature_string = ''.join(s)
+ signature = hashlib.md5(signature_string.encode()).hexdigest()
+
+ pattern = r'secret:"(\w+)"'
+ match = re.search(pattern, text)
+ secret = match.group(1) if match else None
+
+ if X_API_SECRET and timestamp and nonce and secret:
+ return X_API_SECRET, signature, timestamp, nonce, secret
+ else:
+ raise Exception("Unable to extract all the necessary data from the JavaScript file.")
@classmethod
async def create_async_generator(
@@ -77,54 +58,30 @@ class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
+ X_API_SECRET, signature, timestamp, nonce, secret = await cls.fetch_api_credentials()
- secrets = await cls.get_secrets()
- if not secrets:
- raise Exception("Failed to obtain necessary secrets")
-
headers = {
- "accept": "application/json, text/plain, */*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "nonce": secrets['nonce'],
- "origin": "https://magickpen.com",
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": "https://magickpen.com/",
- "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-site",
- "secret": secrets['secret'],
- "signature": secrets['signature'],
- "timestamp": secrets['timestamp'],
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
- "x-api-secret": secrets['X-API-Secret']
+ 'accept': 'application/json, text/plain, */*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'content-type': 'application/json',
+ 'nonce': nonce,
+ 'origin': cls.url,
+ 'referer': f"{cls.url}/",
+ 'secret': secret,
+ 'signature': signature,
+ 'timestamp': timestamp,
+ 'x-api-secret': X_API_SECRET,
}
async with ClientSession(headers=headers) as session:
- if model == 'free':
- data = {
- "history": [{"role": "user", "content": format_prompt(messages)}]
- }
- async with session.post(cls.api_endpoint_free, json=data, proxy=proxy) as response:
- response.raise_for_status()
- result = await response.text()
- yield result
-
- elif model == 'ask':
- data = {
- "query": format_prompt(messages),
- "plan": "Pay as you go"
- }
- async with session.post(cls.api_endpoint_ask, json=data, proxy=proxy) as response:
- response.raise_for_status()
- async for chunk in response.content:
- if chunk:
- yield chunk.decode()
-
- else:
- raise ValueError(f"Unknown model: {model}")
+ prompt = format_prompt(messages)
+ payload = {
+ 'query': prompt,
+ 'turnstileResponse': '',
+ 'action': 'verify'
+ }
+ async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
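
The rewritten MagickPen provider scrapes X-API-Secret and secret from the site's JavaScript bundle and signs each request with an MD5 over the lexicographically sorted concatenation of a constant, a millisecond timestamp, and a random nonce. A minimal sketch of the signing step (the constant is copied from the code above and may change whenever the bundle does):

    import hashlib
    import random
    import time

    def sign_request() -> dict:
        timestamp = str(int(time.time() * 1000))
        nonce = str(random.random())
        parts = sorted(["TGDBU9zCgM", timestamp, nonce])  # constant taken from the scraped bundle
        signature = hashlib.md5("".join(parts).encode()).hexdigest()
        return {"timestamp": timestamp, "nonce": nonce, "signature": signature}

    print(sign_request())
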
diff --git a/g4f/Provider/Nexra.py b/g4f/Provider/Nexra.py
deleted file mode 100644
index e2c3e197..00000000
--- a/g4f/Provider/Nexra.py
+++ /dev/null
@@ -1,181 +0,0 @@
-from __future__ import annotations
-
-import json
-import base64
-from aiohttp import ClientSession
-from typing import AsyncGenerator
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import ImageResponse
-from .helper import format_prompt
-
-class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://nexra.aryahcr.cc"
- api_endpoint_text = "https://nexra.aryahcr.cc/api/chat/gpt"
- api_endpoint_image = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- default_model = 'gpt-3.5-turbo'
- models = [
- # Text models
- 'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-0314', 'gpt-4-32k-0314',
- 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
- 'gpt-3', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002',
- 'text-curie-001', 'text-babbage-001', 'text-ada-001',
- 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002',
- # Image models
- 'dalle', 'dalle-mini', 'emi'
- ]
-
- image_models = {"dalle", "dalle-mini", "emi"}
- text_models = set(models) - image_models
-
- model_aliases = {
- "gpt-4": "gpt-4-0613",
- "gpt-4": "gpt-4-32k",
- "gpt-4": "gpt-4-0314",
- "gpt-4": "gpt-4-32k-0314",
-
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
- "gpt-3.5-turbo": "gpt-3.5-turbo-0301",
-
- "gpt-3": "text-davinci-003",
- "gpt-3": "text-davinci-002",
- "gpt-3": "code-davinci-002",
- "gpt-3": "text-curie-001",
- "gpt-3": "text-babbage-001",
- "gpt-3": "text-ada-001",
- "gpt-3": "text-ada-001",
- "gpt-3": "davinci",
- "gpt-3": "curie",
- "gpt-3": "babbage",
- "gpt-3": "ada",
- "gpt-3": "babbage-002",
- "gpt-3": "davinci-002",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncGenerator[str | ImageResponse, None]:
- model = cls.get_model(model)
-
- if model in cls.image_models:
- async for result in cls.create_image_async_generator(model, messages, proxy, **kwargs):
- yield result
- else:
- async for result in cls.create_text_async_generator(model, messages, proxy, **kwargs):
- yield result
-
- @classmethod
- async def create_text_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncGenerator[str, None]:
- headers = {
- "Content-Type": "application/json",
- }
- async with ClientSession(headers=headers) as session:
- data = {
- "messages": messages,
- "prompt": format_prompt(messages),
- "model": model,
- "markdown": False,
- "stream": False,
- }
- async with session.post(cls.api_endpoint_text, json=data, proxy=proxy) as response:
- response.raise_for_status()
- result = await response.text()
- json_result = json.loads(result)
- yield json_result["gpt"]
-
- @classmethod
- async def create_image_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncGenerator[ImageResponse | str, None]:
- headers = {
- "Content-Type": "application/json"
- }
-
- prompt = messages[-1]['content'] if messages else ""
-
- data = {
- "prompt": prompt,
- "model": model
- }
-
- async def process_response(response_text: str) -> ImageResponse | None:
- json_start = response_text.find('{')
- if json_start != -1:
- json_data = response_text[json_start:]
- try:
- response_data = json.loads(json_data)
- image_data = response_data.get('images', [])[0]
-
- if image_data.startswith('data:image/'):
- return ImageResponse([image_data], "Generated image")
-
- try:
- base64.b64decode(image_data)
- data_uri = f"data:image/jpeg;base64,{image_data}"
- return ImageResponse([data_uri], "Generated image")
- except:
- print("Invalid base64 data")
- return None
- except json.JSONDecodeError:
- print("Failed to parse JSON.")
- else:
- print("No JSON data found in the response.")
- return None
-
- async with ClientSession(headers=headers) as session:
- async with session.post(cls.api_endpoint_image, json=data, proxy=proxy) as response:
- response.raise_for_status()
- response_text = await response.text()
-
- image_response = await process_response(response_text)
- if image_response:
- yield image_response
- else:
- yield "Failed to process image data."
-
- @classmethod
- async def create_async(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> str:
- async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
- if isinstance(response, ImageResponse):
- return response.images[0]
- return response
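
The deleted Nexra provider normalized image results by passing through payloads that were already data URIs and wrapping bare base64 payloads into a JPEG data URI after checking that they decode. A standalone version of that normalization:

    import base64
    from typing import Optional

    def to_data_uri(image_data: str) -> Optional[str]:
        # Already a data URI: pass through unchanged.
        if image_data.startswith("data:image/"):
            return image_data
        # Otherwise require valid base64 and wrap it (JPEG assumed, as in the removed code).
        try:
            base64.b64decode(image_data, validate=True)
        except Exception:
            return None
        return f"data:image/jpeg;base64,{image_data}"

    print(to_data_uri(base64.b64encode(b"\xff\xd8\xff").decode()))
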
diff --git a/g4f/Provider/Ollama.py b/g4f/Provider/Ollama.py
index a44aaacd..f9116541 100644
--- a/g4f/Provider/Ollama.py
+++ b/g4f/Provider/Ollama.py
@@ -1,6 +1,7 @@
from __future__ import annotations
import requests
+import os
from .needs_auth.Openai import Openai
from ..typing import AsyncResult, Messages
@@ -14,9 +15,11 @@ class Ollama(Openai):
@classmethod
def get_models(cls):
if not cls.models:
- url = 'http://127.0.0.1:11434/api/tags'
+ host = os.getenv("OLLAMA_HOST", "127.0.0.1")
+ port = os.getenv("OLLAMA_PORT", "11434")
+ url = f"http://{host}:{port}/api/tags"
models = requests.get(url).json()["models"]
- cls.models = [model['name'] for model in models]
+ cls.models = [model["name"] for model in models]
cls.default_model = cls.models[0]
return cls.models
@@ -25,9 +28,13 @@ class Ollama(Openai):
cls,
model: str,
messages: Messages,
- api_base: str = "http://localhost:11434/v1",
+ api_base: str = None,
**kwargs
) -> AsyncResult:
+ if not api_base:
+ host = os.getenv("OLLAMA_HOST", "localhost")
+ port = os.getenv("OLLAMA_PORT", "11434")
+ api_base: str = f"http://{host}:{port}/v1"
return super().create_async_generator(
model, messages, api_base=api_base, **kwargs
) \ No newline at end of file
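
With this change the Ollama provider no longer hard-codes 127.0.0.1:11434; both the tag listing and the OpenAI-compatible base URL are built from the OLLAMA_HOST and OLLAMA_PORT environment variables. A short sketch of how the endpoints are derived, for example to point g4f at a remote instance:

    import os

    # export OLLAMA_HOST=ollama.internal and OLLAMA_PORT=11434 before starting g4f
    host = os.getenv("OLLAMA_HOST", "127.0.0.1")
    port = os.getenv("OLLAMA_PORT", "11434")
    tags_url = f"http://{host}:{port}/api/tags"   # used by get_models()
    api_base = f"http://{host}:{port}/v1"         # passed to the OpenAI-compatible client
    print(tags_url)
    print(api_base)
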
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index 3656a39b..b776e96a 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -13,7 +13,7 @@ WS_URL = "wss://www.perplexity.ai/socket.io/"
class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://labs.perplexity.ai"
working = True
- default_model = "llama-3.1-8b-instruct"
+ default_model = "llama-3.1-70b-instruct"
models = [
"llama-3.1-sonar-large-128k-online",
"llama-3.1-sonar-small-128k-online",
@@ -22,6 +22,15 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
"llama-3.1-8b-instruct",
"llama-3.1-70b-instruct",
]
+
+ model_aliases = {
+ "sonar-online": "llama-3.1-sonar-large-128k-online",
+ "sonar-online": "sonar-small-128k-online",
+ "sonar-chat": "llama-3.1-sonar-large-128k-chat",
+ "sonar-chat": "llama-3.1-sonar-small-128k-chat",
+ "llama-3.1-8b": "llama-3.1-8b-instruct",
+ "llama-3.1-70b": "llama-3.1-70b-instruct",
+ }
@classmethod
async def create_async_generator(
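
One caveat with the alias map added to PerplexityLabs above: Python dict literals keep only the last value for a repeated key, and the second "sonar-online" entry also drops the "llama-3.1-" prefix used in the models list, so the effective mapping is smaller than it looks:

    # The literal above collapses to two entries; "sonar-online" ends up pointing
    # at "sonar-small-128k-online", which does not match any id in the models list.
    aliases = {
        "sonar-online": "llama-3.1-sonar-large-128k-online",
        "sonar-online": "sonar-small-128k-online",
        "sonar-chat": "llama-3.1-sonar-large-128k-chat",
        "sonar-chat": "llama-3.1-sonar-small-128k-chat",
    }
    print(aliases)
    # {'sonar-online': 'sonar-small-128k-online', 'sonar-chat': 'llama-3.1-sonar-small-128k-chat'}
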
diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py
index e03830f4..266647ba 100644
--- a/g4f/Provider/Pi.py
+++ b/g4f/Provider/Pi.py
@@ -22,6 +22,7 @@ class Pi(AbstractProvider):
proxy: str = None,
timeout: int = 180,
conversation_id: str = None,
+ webdriver: WebDriver = None,
**kwargs
) -> CreateResult:
if cls._session is None:
diff --git a/g4f/Provider/Pizzagpt.py b/g4f/Provider/Pizzagpt.py
index 47cb135c..6513bd34 100644
--- a/g4f/Provider/Pizzagpt.py
+++ b/g4f/Provider/Pizzagpt.py
@@ -12,7 +12,6 @@ class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.pizzagpt.it"
api_endpoint = "/api/chatx-completion"
working = True
- supports_gpt_4 = True
default_model = 'gpt-4o-mini'
@classmethod
diff --git a/g4f/Provider/Prodia.py b/g4f/Provider/Prodia.py
new file mode 100644
index 00000000..543a8b19
--- /dev/null
+++ b/g4f/Provider/Prodia.py
@@ -0,0 +1,150 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import time
+import asyncio
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse
+
+class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://app.prodia.com"
+ api_endpoint = "https://api.prodia.com/generate"
+ working = True
+
+ default_model = 'absolutereality_v181.safetensors [3d9d4d2b]'
+ image_models = [
+ '3Guofeng3_v34.safetensors [50f420de]',
+ 'absolutereality_V16.safetensors [37db0fc3]',
+ default_model,
+ 'amIReal_V41.safetensors [0a8a2e61]',
+ 'analog-diffusion-1.0.ckpt [9ca13f02]',
+ 'aniverse_v30.safetensors [579e6f85]',
+ 'anythingv3_0-pruned.ckpt [2700c435]',
+ 'anything-v4.5-pruned.ckpt [65745d25]',
+ 'anythingV5_PrtRE.safetensors [893e49b9]',
+ 'AOM3A3_orangemixs.safetensors [9600da17]',
+ 'blazing_drive_v10g.safetensors [ca1c1eab]',
+ 'breakdomain_I2428.safetensors [43cc7d2f]',
+ 'breakdomain_M2150.safetensors [15f7afca]',
+ 'cetusMix_Version35.safetensors [de2f2560]',
+ 'childrensStories_v13D.safetensors [9dfaabcb]',
+ 'childrensStories_v1SemiReal.safetensors [a1c56dbb]',
+ 'childrensStories_v1ToonAnime.safetensors [2ec7b88b]',
+ 'Counterfeit_v30.safetensors [9e2a8f19]',
+ 'cuteyukimixAdorable_midchapter3.safetensors [04bdffe6]',
+ 'cyberrealistic_v33.safetensors [82b0d085]',
+ 'dalcefo_v4.safetensors [425952fe]',
+ 'deliberate_v2.safetensors [10ec4b29]',
+ 'deliberate_v3.safetensors [afd9d2d4]',
+ 'dreamlike-anime-1.0.safetensors [4520e090]',
+ 'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]',
+ 'dreamlike-photoreal-2.0.safetensors [fdcf65e7]',
+ 'dreamshaper_6BakedVae.safetensors [114c8abb]',
+ 'dreamshaper_7.safetensors [5cf5ae06]',
+ 'dreamshaper_8.safetensors [9d40847d]',
+ 'edgeOfRealism_eorV20.safetensors [3ed5de15]',
+ 'EimisAnimeDiffusion_V1.ckpt [4f828a15]',
+ 'elldreths-vivid-mix.safetensors [342d9d26]',
+ 'epicphotogasm_xPlusPlus.safetensors [1a8f6d35]',
+ 'epicrealism_naturalSinRC1VAE.safetensors [90a4c676]',
+ 'epicrealism_pureEvolutionV3.safetensors [42c8440c]',
+ 'ICantBelieveItsNotPhotography_seco.safetensors [4e7a3dfd]',
+ 'indigoFurryMix_v75Hybrid.safetensors [91208cbb]',
+ 'juggernaut_aftermath.safetensors [5e20c455]',
+ 'lofi_v4.safetensors [ccc204d6]',
+ 'lyriel_v16.safetensors [68fceea2]',
+ 'majicmixRealistic_v4.safetensors [29d0de58]',
+ 'mechamix_v10.safetensors [ee685731]',
+ 'meinamix_meinaV9.safetensors [2ec66ab0]',
+ 'meinamix_meinaV11.safetensors [b56ce717]',
+ 'neverendingDream_v122.safetensors [f964ceeb]',
+ 'openjourney_V4.ckpt [ca2f377f]',
+ 'pastelMixStylizedAnime_pruned_fp16.safetensors [793a26e8]',
+ 'portraitplus_V1.0.safetensors [1400e684]',
+ 'protogenx34.safetensors [5896f8d5]',
+ 'Realistic_Vision_V1.4-pruned-fp16.safetensors [8d21810b]',
+ 'Realistic_Vision_V2.0.safetensors [79587710]',
+ 'Realistic_Vision_V4.0.safetensors [29a7afaa]',
+ 'Realistic_Vision_V5.0.safetensors [614d1063]',
+ 'Realistic_Vision_V5.1.safetensors [a0f13c83]',
+ 'redshift_diffusion-V10.safetensors [1400e684]',
+ 'revAnimated_v122.safetensors [3f4fefd9]',
+ 'rundiffusionFX25D_v10.safetensors [cd12b0ee]',
+ 'rundiffusionFX_v10.safetensors [cd4e694d]',
+ 'sdv1_4.ckpt [7460a6fa]',
+ 'v1-5-pruned-emaonly.safetensors [d7049739]',
+ 'v1-5-inpainting.safetensors [21c7ab71]',
+ 'shoninsBeautiful_v10.safetensors [25d8c546]',
+ 'theallys-mix-ii-churned.safetensors [5d9225a4]',
+ 'timeless-1.0.ckpt [7c4971d4]',
+ 'toonyou_beta6.safetensors [980f6b15]',
+ ]
+ models = [*image_models]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = messages[-1]['content'] if messages else ""
+
+ params = {
+ "new": "true",
+ "prompt": prompt,
+ "model": model,
+ "negative_prompt": kwargs.get("negative_prompt", ""),
+ "steps": kwargs.get("steps", 20),
+ "cfg": kwargs.get("cfg", 7),
+ "seed": kwargs.get("seed", int(time.time())),
+ "sampler": kwargs.get("sampler", "DPM++ 2M Karras"),
+ "aspect_ratio": kwargs.get("aspect_ratio", "square")
+ }
+
+ async with session.get(cls.api_endpoint, params=params, proxy=proxy) as response:
+ response.raise_for_status()
+ job_data = await response.json()
+ job_id = job_data["job"]
+
+ image_url = await cls._poll_job(session, job_id, proxy)
+ yield ImageResponse(image_url, alt=prompt)
+
+ @classmethod
+ async def _poll_job(cls, session: ClientSession, job_id: str, proxy: str, max_attempts: int = 30, delay: int = 2) -> str:
+ for _ in range(max_attempts):
+ async with session.get(f"https://api.prodia.com/job/{job_id}", proxy=proxy) as response:
+ response.raise_for_status()
+ job_status = await response.json()
+
+ if job_status["status"] == "succeeded":
+ return f"https://images.prodia.xyz/{job_id}.png"
+ elif job_status["status"] == "failed":
+ raise Exception("Image generation failed")
+
+ await asyncio.sleep(delay)
+
+ raise Exception("Timeout waiting for image generation")
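
The new Prodia provider submits a generation job with a GET to api.prodia.com/generate and then polls api.prodia.com/job/<id> until the status becomes "succeeded", yielding the final image URL. A trimmed, standalone sketch of that flow (endpoints and fields copied from the code above; not re-verified against the live API):

    import asyncio
    import aiohttp

    async def generate(prompt: str, model: str = "absolutereality_v181.safetensors [3d9d4d2b]") -> str:
        async with aiohttp.ClientSession() as session:
            params = {"new": "true", "prompt": prompt, "model": model,
                      "steps": "20", "cfg": "7", "sampler": "DPM++ 2M Karras",
                      "aspect_ratio": "square"}
            async with session.get("https://api.prodia.com/generate", params=params) as resp:
                resp.raise_for_status()
                job_id = (await resp.json())["job"]
            for _ in range(30):  # poll for up to ~60 seconds
                async with session.get(f"https://api.prodia.com/job/{job_id}") as resp:
                    resp.raise_for_status()
                    status = (await resp.json())["status"]
                if status == "succeeded":
                    return f"https://images.prodia.xyz/{job_id}.png"
                if status == "failed":
                    raise RuntimeError("Image generation failed")
                await asyncio.sleep(2)
        raise TimeoutError("Timed out waiting for the image")

    # asyncio.run(generate("a lighthouse at dawn"))
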
diff --git a/g4f/Provider/ReplicateHome.py b/g4f/Provider/ReplicateHome.py
index c4e52ad6..7f443a7d 100644
--- a/g4f/Provider/ReplicateHome.py
+++ b/g4f/Provider/ReplicateHome.py
@@ -1,66 +1,60 @@
from __future__ import annotations
-from typing import Generator, Optional, Dict, Any, Union, List
-import random
+
+import json
import asyncio
-import base64
+from aiohttp import ClientSession, ContentTypeError
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages
-from ..requests import StreamSession, raise_for_status
-from ..errors import ResponseError
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
from ..image import ImageResponse
class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
- parent = "Replicate"
+ api_endpoint = "https://homepage.replicate.com/api/prediction"
working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
default_model = 'meta/meta-llama-3-70b-instruct'
- models = [
- # Models for image generation
- 'stability-ai/stable-diffusion-3',
- 'bytedance/sdxl-lightning-4step',
- 'playgroundai/playground-v2.5-1024px-aesthetic',
-
- # Models for image generation
+
+ text_models = [
'meta/meta-llama-3-70b-instruct',
'mistralai/mixtral-8x7b-instruct-v0.1',
'google-deepmind/gemma-2b-it',
+ 'yorickvp/llava-13b',
]
- versions = {
- # Model versions for generating images
- 'stability-ai/stable-diffusion-3': [
- "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f"
- ],
- 'bytedance/sdxl-lightning-4step': [
- "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f"
- ],
- 'playgroundai/playground-v2.5-1024px-aesthetic': [
- "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24"
- ],
-
- # Model versions for text generation
- 'meta/meta-llama-3-70b-instruct': [
- "dp-cf04fe09351e25db628e8b6181276547"
- ],
- 'mistralai/mixtral-8x7b-instruct-v0.1': [
- "dp-89e00f489d498885048e94f9809fbc76"
- ],
- 'google-deepmind/gemma-2b-it': [
- "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626"
- ]
- }
-
- image_models = {"stability-ai/stable-diffusion-3", "bytedance/sdxl-lightning-4step", "playgroundai/playground-v2.5-1024px-aesthetic"}
- text_models = {"meta/meta-llama-3-70b-instruct", "mistralai/mixtral-8x7b-instruct-v0.1", "google-deepmind/gemma-2b-it"}
+ image_models = [
+ 'black-forest-labs/flux-schnell',
+ 'stability-ai/stable-diffusion-3',
+ 'bytedance/sdxl-lightning-4step',
+ 'playgroundai/playground-v2.5-1024px-aesthetic',
+ ]
+ models = text_models + image_models
+
model_aliases = {
+ "flux-schnell": "black-forest-labs/flux-schnell",
"sd-3": "stability-ai/stable-diffusion-3",
"sdxl": "bytedance/sdxl-lightning-4step",
"playground-v2.5": "playgroundai/playground-v2.5-1024px-aesthetic",
"llama-3-70b": "meta/meta-llama-3-70b-instruct",
"mixtral-8x7b": "mistralai/mixtral-8x7b-instruct-v0.1",
"gemma-2b": "google-deepmind/gemma-2b-it",
+ "llava-13b": "yorickvp/llava-13b",
+ }
+
+ model_versions = {
+ "meta/meta-llama-3-70b-instruct": "fbfb20b472b2f3bdd101412a9f70a0ed4fc0ced78a77ff00970ee7a2383c575d",
+ "mistralai/mixtral-8x7b-instruct-v0.1": "5d78bcd7a992c4b793465bcdcf551dc2ab9668d12bb7aa714557a21c1e77041c",
+ "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626",
+ "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb",
+ 'black-forest-labs/flux-schnell': "f2ab8a5bfe79f02f0789a146cf5e73d2a4ff2684a98c2b303d1e1ff3814271db",
+ 'stability-ai/stable-diffusion-3': "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f",
+ 'bytedance/sdxl-lightning-4step': "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f",
+ 'playgroundai/playground-v2.5-1024px-aesthetic': "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24",
}
@classmethod
@@ -77,84 +71,73 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
cls,
model: str,
messages: Messages,
- **kwargs: Any
- ) -> Generator[Union[str, ImageResponse], None, None]:
- yield await cls.create_async(messages[-1]["content"], model, **kwargs)
-
- @classmethod
- async def create_async(
- cls,
- prompt: str,
- model: str,
- api_key: Optional[str] = None,
- proxy: Optional[str] = None,
- timeout: int = 180,
- version: Optional[str] = None,
- extra_data: Dict[str, Any] = {},
- **kwargs: Any
- ) -> Union[str, ImageResponse]:
- model = cls.get_model(model) # Use the get_model method to resolve model name
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
headers = {
- 'Accept-Encoding': 'gzip, deflate, br',
- 'Accept-Language': 'en-US',
- 'Connection': 'keep-alive',
- 'Origin': cls.url,
- 'Referer': f'{cls.url}/',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-site',
- 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
- 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": "https://replicate.com",
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": "https://replicate.com/",
+ "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
}
-
- if version is None:
- version = random.choice(cls.versions.get(model, []))
- if api_key is not None:
- headers["Authorization"] = f"Bearer {api_key}"
-
- async with StreamSession(
- proxies={"all": proxy},
- headers=headers,
- timeout=timeout
- ) as session:
+
+ async with ClientSession(headers=headers) as session:
+ if model in cls.image_models:
+ prompt = messages[-1]['content'] if messages else ""
+ else:
+ prompt = format_prompt(messages)
+
data = {
- "input": {
- "prompt": prompt,
- **extra_data
- },
- "version": version
+ "model": model,
+ "version": cls.model_versions[model],
+ "input": {"prompt": prompt},
}
- if api_key is None:
- data["model"] = model
- url = "https://homepage.replicate.com/api/prediction"
- else:
- url = "https://api.replicate.com/v1/predictions"
- async with session.post(url, json=data) as response:
- await raise_for_status(response)
+
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
result = await response.json()
- if "id" not in result:
- raise ResponseError(f"Invalid response: {result}")
+ prediction_id = result['id']
+
+ poll_url = f"https://homepage.replicate.com/api/poll?id={prediction_id}"
+ max_attempts = 30
+ delay = 5
+ for _ in range(max_attempts):
+ async with session.get(poll_url, proxy=proxy) as response:
+ response.raise_for_status()
+ try:
+ result = await response.json()
+ except ContentTypeError:
+ text = await response.text()
+ try:
+ result = json.loads(text)
+ except json.JSONDecodeError:
+ raise ValueError(f"Unexpected response format: {text}")
- while True:
- if api_key is None:
- url = f"https://homepage.replicate.com/api/poll?id={result['id']}"
- else:
- url = f"https://api.replicate.com/v1/predictions/{result['id']}"
- async with session.get(url) as response:
- await raise_for_status(response)
- result = await response.json()
- if "status" not in result:
- raise ResponseError(f"Invalid response: {result}")
- if result["status"] == "succeeded":
- output = result['output']
- if model in cls.text_models:
- return ''.join(output) if isinstance(output, list) else output
- elif model in cls.image_models:
- images: List[Any] = output
- images = images[0] if len(images) == 1 else images
- return ImageResponse(images, prompt)
- elif result["status"] == "failed":
- raise ResponseError(f"Prediction failed: {result}")
- await asyncio.sleep(0.5)
+ if result['status'] == 'succeeded':
+ if model in cls.image_models:
+ image_url = result['output'][0]
+ yield ImageResponse(image_url, "Generated image")
+ return
+ else:
+ for chunk in result['output']:
+ yield chunk
+ break
+ elif result['status'] == 'failed':
+ raise Exception(f"Prediction failed: {result.get('error')}")
+ await asyncio.sleep(delay)
+
+ if result['status'] != 'succeeded':
+ raise Exception("Prediction timed out")
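
After this rewrite, ReplicateHome posts the prompt together with a pinned model version to the homepage prediction endpoint and polls api/poll?id=..., yielding either text chunks or an ImageResponse. A usage example through g4f's public interface (the call shape is assumed from g4f's documented ChatCompletion API, not taken from this diff):

    import g4f
    from g4f.Provider import ReplicateHome

    # "gemma-2b" is resolved via model_aliases to "google-deepmind/gemma-2b-it".
    response = g4f.ChatCompletion.create(
        model="gemma-2b",
        messages=[{"role": "user", "content": "Name three prime numbers."}],
        provider=ReplicateHome,
    )
    print(response)
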
diff --git a/g4f/Provider/Rocks.py b/g4f/Provider/Rocks.py
deleted file mode 100644
index f44e0060..00000000
--- a/g4f/Provider/Rocks.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import asyncio
-import json
-from aiohttp import ClientSession
-from ..typing import Messages, AsyncResult
-from .base_provider import AsyncGeneratorProvider
-
-class Rocks(AsyncGeneratorProvider):
- url = "https://api.airforce"
- api_endpoint = "/chat/completions"
- supports_message_history = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
- supports_stream = True
- supports_system_message = True
- working = True
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- payload = {"messages":messages,"model":model,"max_tokens":4096,"temperature":1,"top_p":1,"stream":True}
-
- headers = {
- "Accept": "application/json",
- "Accept-Encoding": "gzip, deflate, br, zstd",
- "Accept-Language": "en-US,en;q=0.9",
- "Authorization": "Bearer missing api key",
- "Origin": "https://llmplayground.net",
- "Referer": "https://llmplayground.net/",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
- }
-
- async with ClientSession() as session:
- async with session.post(
- f"{cls.url}{cls.api_endpoint}",
- json=payload,
- proxy=proxy,
- headers=headers
- ) as response:
- response.raise_for_status()
- last_chunk_time = asyncio.get_event_loop().time()
-
- async for line in response.content:
- current_time = asyncio.get_event_loop().time()
- if current_time - last_chunk_time > 5:
- return
-
- if line.startswith(b"\n"):
- pass
- elif "discord.com/invite/" in line.decode() or "discord.gg/" in line.decode():
- pass # trolled
- elif line.startswith(b"data: "):
- try:
- line = json.loads(line[6:])
- except json.JSONDecodeError:
- continue
- chunk = line["choices"][0]["delta"].get("content")
- if chunk:
- yield chunk
- last_chunk_time = current_time
- else:
- raise Exception(f"Unexpected line: {line}")
- return \ No newline at end of file
diff --git a/g4f/Provider/RubiksAI.py b/g4f/Provider/RubiksAI.py
new file mode 100644
index 00000000..7e76d558
--- /dev/null
+++ b/g4f/Provider/RubiksAI.py
@@ -0,0 +1,162 @@
+from __future__ import annotations
+
+import asyncio
+import aiohttp
+import random
+import string
+import json
+from urllib.parse import urlencode
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Rubiks AI"
+ url = "https://rubiks.ai"
+ api_endpoint = "https://rubiks.ai/search/api.php"
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'llama-3.1-70b-versatile'
+ models = [default_model, 'gpt-4o-mini']
+
+ model_aliases = {
+ "llama-3.1-70b": "llama-3.1-70b-versatile",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @staticmethod
+ def generate_mid() -> str:
+ """
+ Generates a 'mid' string following the pattern:
+ 6 characters - 4 characters - 4 characters - 4 characters - 12 characters
+ Example: 0r7v7b-quw4-kdy3-rvdu-ekief6xbuuq4
+ """
+ parts = [
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)),
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=12))
+ ]
+ return '-'.join(parts)
+
+ @staticmethod
+ def create_referer(q: str, mid: str, model: str = '') -> str:
+ """
+ Creates a Referer URL with dynamic q and mid values, using urlencode for safe parameter encoding.
+ """
+ params = {'q': q, 'model': model, 'mid': mid}
+ encoded_params = urlencode(params)
+ return f'https://rubiks.ai/search/?{encoded_params}'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ websearch: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ """
+ Creates an asynchronous generator that sends requests to the Rubiks AI API and yields the response.
+
+ Parameters:
+ - model (str): The model to use in the request.
+ - messages (Messages): The messages to send as a prompt.
+ - proxy (str, optional): Proxy URL, if needed.
+ - websearch (bool, optional): Indicates whether to include search sources in the response. Defaults to False.
+ """
+ model = cls.get_model(model)
+ prompt = format_prompt(messages)
+ q_value = prompt
+ mid_value = cls.generate_mid()
+ referer = cls.create_referer(q=q_value, mid=mid_value, model=model)
+
+ url = cls.api_endpoint
+ params = {
+ 'q': q_value,
+ 'model': model,
+ 'id': '',
+ 'mid': mid_value
+ }
+
+ headers = {
+ 'Accept': 'text/event-stream',
+ 'Accept-Language': 'en-US,en;q=0.9',
+ 'Cache-Control': 'no-cache',
+ 'Connection': 'keep-alive',
+ 'Pragma': 'no-cache',
+ 'Referer': referer,
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
+ 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"'
+ }
+
+ try:
+ timeout = aiohttp.ClientTimeout(total=None)
+ async with ClientSession(timeout=timeout) as session:
+ async with session.get(url, headers=headers, params=params, proxy=proxy) as response:
+ if response.status != 200:
+ yield f"Request ended with status code {response.status}"
+ return
+
+ assistant_text = ''
+ sources = []
+
+ async for line in response.content:
+ decoded_line = line.decode('utf-8').strip()
+ if not decoded_line.startswith('data: '):
+ continue
+ data = decoded_line[6:]
+ if data in ('[DONE]', '{"done": ""}'):
+ break
+ try:
+ json_data = json.loads(data)
+ except json.JSONDecodeError:
+ continue
+
+ if 'url' in json_data and 'title' in json_data:
+ if websearch:
+ sources.append({'title': json_data['title'], 'url': json_data['url']})
+
+ elif 'choices' in json_data:
+ for choice in json_data['choices']:
+ delta = choice.get('delta', {})
+ content = delta.get('content', '')
+ role = delta.get('role', '')
+ if role == 'assistant':
+ continue
+ assistant_text += content
+
+ if websearch and sources:
+ sources_text = '\n'.join([f"{i+1}. [{s['title']}]: {s['url']}" for i, s in enumerate(sources)])
+ assistant_text += f"\n\n**Source:**\n{sources_text}"
+
+ yield assistant_text
+
+ except asyncio.CancelledError:
+ yield "The request was cancelled."
+ except aiohttp.ClientError as e:
+ yield f"An error occurred during the request: {e}"
+ except Exception as e:
+ yield f"An unexpected error occurred: {e}"
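
The new RubiksAI provider identifies each request with a randomly generated "mid" token and a matching Referer URL before reading the SSE answer. A standalone sketch of those two helpers (pattern copied from the code above):

    import random
    import string
    from urllib.parse import urlencode

    def generate_mid() -> str:
        # 6-4-4-4-12 groups of lowercase letters and digits, e.g. 0r7v7b-quw4-kdy3-rvdu-ekief6xbuuq4
        alphabet = string.ascii_lowercase + string.digits
        return "-".join("".join(random.choices(alphabet, k=k)) for k in (6, 4, 4, 4, 12))

    def create_referer(q: str, mid: str, model: str = "") -> str:
        return f"https://rubiks.ai/search/?{urlencode({'q': q, 'model': model, 'mid': mid})}"

    mid = generate_mid()
    print(mid)
    print(create_referer("what is rust?", mid, "gpt-4o-mini"))
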
diff --git a/g4f/Provider/Snova.py b/g4f/Provider/Snova.py
deleted file mode 100644
index 76dfac40..00000000
--- a/g4f/Provider/Snova.py
+++ /dev/null
@@ -1,133 +0,0 @@
-from __future__ import annotations
-
-import json
-from typing import AsyncGenerator
-
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class Snova(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://fast.snova.ai"
- api_endpoint = "https://fast.snova.ai/api/completion"
- working = True
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- default_model = 'Meta-Llama-3.1-8B-Instruct'
- models = [
- 'Meta-Llama-3.1-8B-Instruct',
- 'Meta-Llama-3.1-70B-Instruct',
- 'Meta-Llama-3.1-405B-Instruct',
- 'Samba-CoE',
- 'ignos/Mistral-T5-7B-v1',
- 'v1olet/v1olet_merged_dpo_7B',
- 'macadeliccc/WestLake-7B-v2-laser-truthy-dpo',
- 'cookinai/DonutLM-v1',
- ]
-
- model_aliases = {
- "llama-3.1-8b": "Meta-Llama-3.1-8B-Instruct",
- "llama-3.1-70b": "Meta-Llama-3.1-70B-Instruct",
- "llama-3.1-405b": "Meta-Llama-3.1-405B-Instruct",
-
- "mistral-7b": "ignos/Mistral-T5-7B-v1",
-
- "samba-coe-v0.1": "Samba-CoE",
- "v1olet-merged-7b": "v1olet/v1olet_merged_dpo_7B",
- "westlake-7b-v2": "macadeliccc/WestLake-7B-v2-laser-truthy-dpo",
- "donutlm-v1": "cookinai/DonutLM-v1",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncGenerator[str, None]:
- model = cls.get_model(model)
-
- headers = {
- "accept": "text/event-stream",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "origin": cls.url,
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": f"{cls.url}/",
- "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
- }
- async with ClientSession(headers=headers) as session:
- data = {
- "body": {
- "messages": [
- {
- "role": "system",
- "content": "You are a helpful assistant."
- },
- {
- "role": "user",
- "content": format_prompt(messages),
- "id": "1-id",
- "ref": "1-ref",
- "revision": 1,
- "draft": False,
- "status": "done",
- "enableRealTimeChat": False,
- "meta": None
- }
- ],
- "max_tokens": 1000,
- "stop": ["<|eot_id|>"],
- "stream": True,
- "stream_options": {"include_usage": True},
- "model": model
- },
- "env_type": "tp16"
- }
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- full_response = ""
- async for line in response.content:
- line = line.decode().strip()
- if line.startswith("data: "):
- data = line[6:]
- if data == "[DONE]":
- break
- try:
- json_data = json.loads(data)
- choices = json_data.get("choices", [])
- if choices:
- delta = choices[0].get("delta", {})
- content = delta.get("content", "")
- full_response += content
- except json.JSONDecodeError:
- continue
- except Exception as e:
- print(f"Error processing chunk: {e}")
- print(f"Problematic data: {data}")
- continue
-
- yield full_response.strip()
diff --git a/g4f/Provider/TwitterBio.py b/g4f/Provider/TwitterBio.py
deleted file mode 100644
index c143e4ff..00000000
--- a/g4f/Provider/TwitterBio.py
+++ /dev/null
@@ -1,103 +0,0 @@
-from __future__ import annotations
-
-import json
-import re
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-class TwitterBio(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://www.twitterbio.io"
- api_endpoint_mistral = "https://www.twitterbio.io/api/mistral"
- api_endpoint_openai = "https://www.twitterbio.io/api/openai"
- working = True
- supports_gpt_35_turbo = True
-
- default_model = 'gpt-3.5-turbo'
- models = [
- 'mistralai/Mixtral-8x7B-Instruct-v0.1',
- 'gpt-3.5-turbo',
- ]
-
- model_aliases = {
- "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- return cls.default_model
-
- @staticmethod
- def format_text(text: str) -> str:
- text = re.sub(r'\s+', ' ', text.strip())
- text = re.sub(r'\s+([,.!?])', r'\1', text)
- return text
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "origin": cls.url,
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": f"{cls.url}/",
- "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
- }
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "prompt": f'{prompt}.'
- }
-
- if model == 'mistralai/Mixtral-8x7B-Instruct-v0.1':
- api_endpoint = cls.api_endpoint_mistral
- elif model == 'gpt-3.5-turbo':
- api_endpoint = cls.api_endpoint_openai
- else:
- raise ValueError(f"Unsupported model: {model}")
-
- async with session.post(api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- buffer = ""
- async for line in response.content:
- line = line.decode('utf-8').strip()
- if line.startswith('data: '):
- try:
- json_data = json.loads(line[6:])
- if model == 'mistralai/Mixtral-8x7B-Instruct-v0.1':
- if 'choices' in json_data and len(json_data['choices']) > 0:
- text = json_data['choices'][0].get('text', '')
- if text:
- buffer += text
- elif model == 'gpt-3.5-turbo':
- text = json_data.get('text', '')
- if text:
- buffer += text
- except json.JSONDecodeError:
- continue
- elif line == 'data: [DONE]':
- break
-
- if buffer:
- yield cls.format_text(buffer)
diff --git a/g4f/Provider/Upstage.py b/g4f/Provider/Upstage.py
index e61a5af2..65409159 100644
--- a/g4f/Provider/Upstage.py
+++ b/g4f/Provider/Upstage.py
@@ -12,14 +12,15 @@ class Upstage(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://console.upstage.ai/playground/chat"
api_endpoint = "https://ap-northeast-2.apistage.ai/v1/web/demo/chat/completions"
working = True
- default_model = 'upstage/solar-1-mini-chat'
+ default_model = 'solar-pro'
models = [
'upstage/solar-1-mini-chat',
'upstage/solar-1-mini-chat-ja',
+ 'solar-pro',
]
model_aliases = {
- "solar-1-mini": "upstage/solar-1-mini-chat",
- "solar-1-mini": "upstage/solar-1-mini-chat-ja",
+ "solar-mini": "upstage/solar-1-mini-chat",
+ "solar-mini": "upstage/solar-1-mini-chat-ja",
}
@classmethod
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
deleted file mode 100644
index bd918396..00000000
--- a/g4f/Provider/Vercel.py
+++ /dev/null
@@ -1,104 +0,0 @@
-from __future__ import annotations
-
-import json, base64, requests, random, os
-
-try:
- import execjs
- has_requirements = True
-except ImportError:
- has_requirements = False
-
-from ..typing import Messages, CreateResult
-from .base_provider import AbstractProvider
-from ..requests import raise_for_status
-from ..errors import MissingRequirementsError
-
-class Vercel(AbstractProvider):
- url = 'https://chat.vercel.ai'
- working = True
- supports_message_history = True
- supports_system_message = True
- supports_gpt_35_turbo = True
- supports_stream = True
-
- @staticmethod
- def create_completion(
- model: str,
- messages: Messages,
- stream: bool,
- proxy: str = None,
- max_retries: int = 6,
- **kwargs
- ) -> CreateResult:
- if not has_requirements:
- raise MissingRequirementsError('Install "PyExecJS" package')
-
- headers = {
- 'authority': 'chat.vercel.ai',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'cache-control': 'no-cache',
- 'content-type': 'application/json',
- 'custom-encoding': get_anti_bot_token(),
- 'origin': 'https://chat.vercel.ai',
- 'pragma': 'no-cache',
- 'referer': 'https://chat.vercel.ai/',
- 'sec-ch-ua': '"Chromium";v="122", "Not(A:Brand";v="24", "Google Chrome";v="122"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36',
- }
-
- json_data = {
- 'messages': messages,
- 'id' : f'{os.urandom(3).hex()}a',
- }
- response = None
- for _ in range(max_retries):
- response = requests.post('https://chat.vercel.ai/api/chat',
- headers=headers, json=json_data, stream=True, proxies={"https": proxy})
- if not response.ok:
- continue
- for token in response.iter_content(chunk_size=None):
- try:
- yield token.decode(errors="ignore")
- except UnicodeDecodeError:
- pass
- break
- raise_for_status(response)
-
-def get_anti_bot_token() -> str:
- headers = {
- 'authority': 'sdk.vercel.ai',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'cache-control': 'no-cache',
- 'pragma': 'no-cache',
- 'referer': 'https://sdk.vercel.ai/',
- 'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
- }
-
- response = requests.get('https://chat.vercel.ai/openai.jpeg',
- headers=headers).text
-
- raw_data = json.loads(base64.b64decode(response,
- validate=True))
-
- js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
- return (%s)(%s)''' % (raw_data['c'], raw_data['a'])
-
- sec_list = [execjs.compile(js_script).call('')[0], [], "sentinel"]
-
- raw_token = json.dumps({'r': sec_list, 't': raw_data['t']},
- separators = (",", ":"))
-
- return base64.b64encode(raw_token.encode('utf-8')).decode() \ No newline at end of file
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index af8aab0e..02735038 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -17,8 +17,6 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
label = "You.com"
url = "https://you.com"
working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
default_model = "gpt-4o-mini"
default_vision_model = "agent"
image_models = ["dall-e"]
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index a9a815ea..1caf8aaf 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -5,61 +5,69 @@ from ..providers.retry_provider import RetryProvider, IterListProvider
from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
from ..providers.create_images import CreateImagesProvider
-from .deprecated import *
-from .selenium import *
-from .needs_auth import *
+from .deprecated import *
+from .selenium import *
+from .needs_auth import *
+from .gigachat import *
+from .nexra import *
+
+from .Ai4Chat import Ai4Chat
from .AI365VIP import AI365VIP
+from .AIChatFree import AIChatFree
+from .AIUncensored import AIUncensored
from .Allyfy import Allyfy
+from .AmigoChat import AmigoChat
from .AiChatOnline import AiChatOnline
from .AiChats import AiChats
+from .AiMathGPT import AiMathGPT
+from .Airforce import Airforce
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
-from .Binjie import Binjie
-from .Bixin123 import Bixin123
from .Blackbox import Blackbox
from .ChatGot import ChatGot
+from .ChatGpt import ChatGpt
from .Chatgpt4Online import Chatgpt4Online
from .Chatgpt4o import Chatgpt4o
+from .ChatGptEs import ChatGptEs
from .ChatgptFree import ChatgptFree
-from .CodeNews import CodeNews
+from .ChatHub import ChatHub
+from .ChatifyAI import ChatifyAI
+from .Cloudflare import Cloudflare
+from .DarkAI import DarkAI
from .DDG import DDG
from .DeepInfra import DeepInfra
+from .DeepInfraChat import DeepInfraChat
from .DeepInfraImage import DeepInfraImage
+from .Editee import Editee
from .FlowGpt import FlowGpt
-from .FluxAirforce import FluxAirforce
from .Free2GPT import Free2GPT
from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
from .FreeNetfly import FreeNetfly
from .GeminiPro import GeminiPro
-from .GigaChat import GigaChat
-from .GptTalkRu import GptTalkRu
+from .GizAI import GizAI
+from .GPROChat import GPROChat
from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
from .Koala import Koala
from .Liaobots import Liaobots
-from .LiteIcoding import LiteIcoding
-from .Llama import Llama
from .Local import Local
from .MagickPen import MagickPen
from .MetaAI import MetaAI
-from .MetaAIAccount import MetaAIAccount
-from .Nexra import Nexra
+#from .MetaAIAccount import MetaAIAccount
from .Ollama import Ollama
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
+from .Prodia import Prodia
from .Reka import Reka
-from .Snova import Snova
from .Replicate import Replicate
from .ReplicateHome import ReplicateHome
-from .Rocks import Rocks
+from .RubiksAI import RubiksAI
from .TeachAnything import TeachAnything
-from .TwitterBio import TwitterBio
from .Upstage import Upstage
-from .Vercel import Vercel
from .WhiteRabbitNeo import WhiteRabbitNeo
from .You import You
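
The registry changes above (new subpackage re-exports for gigachat and nexra plus the added and removed provider imports) determine which classes are reachable as attributes of g4f.Provider. A quick way to list what is currently flagged as working (a sketch; attribute names follow the classes shown in this diff):

    import g4f.Provider as Provider

    # Provider classes expose a boolean `working` flag; getattr guards anything that does not.
    available = sorted(
        name for name, obj in vars(Provider).items()
        if isinstance(obj, type) and getattr(obj, "working", False)
    )
    print(available)
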
diff --git a/g4f/Provider/bing/conversation.py b/g4f/Provider/bing/conversation.py
index a4195fa4..b5c237f9 100644
--- a/g4f/Provider/bing/conversation.py
+++ b/g4f/Provider/bing/conversation.py
@@ -33,9 +33,9 @@ async def create_conversation(session: StreamSession, headers: dict, tone: str)
Conversation: An instance representing the created conversation.
"""
if tone == "Copilot":
- url = "https://copilot.microsoft.com/turing/conversation/create?bundleVersion=1.1690.0"
+ url = "https://copilot.microsoft.com/turing/conversation/create?bundleVersion=1.1809.0"
else:
- url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1690.0"
+ url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1809.0"
async with session.get(url, headers=headers) as response:
if response.status == 404:
raise RateLimitError("Response 404: Do less requests and reuse conversations")
@@ -90,4 +90,4 @@ async def delete_conversation(session: StreamSession, conversation: Conversation
response = await response.json()
return response["result"]["value"] == "Success"
except:
- return False \ No newline at end of file
+ return False
diff --git a/g4f/Provider/GigaChat.py b/g4f/Provider/gigachat/GigaChat.py
index 8ba07b43..b1b293e3 100644
--- a/g4f/Provider/GigaChat.py
+++ b/g4f/Provider/gigachat/GigaChat.py
@@ -9,10 +9,10 @@ import json
from aiohttp import ClientSession, TCPConnector, BaseConnector
from g4f.requests import raise_for_status
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..errors import MissingAuthError
-from .helper import get_connector
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...errors import MissingAuthError
+from ..helper import get_connector
access_token = ""
token_expires_at = 0
@@ -45,7 +45,7 @@ class GigaChat(AsyncGeneratorProvider, ProviderModelMixin):
if not api_key:
raise MissingAuthError('Missing "api_key"')
- cafile = os.path.join(os.path.dirname(__file__), "gigachat_crt/russian_trusted_root_ca_pem.crt")
+ cafile = os.path.join(os.path.dirname(__file__), "russian_trusted_root_ca_pem.crt")
ssl_context = ssl.create_default_context(cafile=cafile) if os.path.exists(cafile) else None
if connector is None and ssl_context is not None:
connector = TCPConnector(ssl_context=ssl_context)
diff --git a/g4f/Provider/gigachat/__init__.py b/g4f/Provider/gigachat/__init__.py
new file mode 100644
index 00000000..c9853742
--- /dev/null
+++ b/g4f/Provider/gigachat/__init__.py
@@ -0,0 +1,2 @@
+from .GigaChat import GigaChat
+
diff --git a/g4f/Provider/gigachat_crt/russian_trusted_root_ca_pem.crt b/g4f/Provider/gigachat/russian_trusted_root_ca_pem.crt
index 4c143a21..4c143a21 100644
--- a/g4f/Provider/gigachat_crt/russian_trusted_root_ca_pem.crt
+++ b/g4f/Provider/gigachat/russian_trusted_root_ca_pem.crt
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index eddd25fa..8d741476 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -54,6 +54,7 @@ class Gemini(AsyncGeneratorProvider):
url = "https://gemini.google.com"
needs_auth = True
working = True
+ default_model = 'gemini'
image_models = ["gemini"]
default_vision_model = "gemini"
_cookies: Cookies = None
@@ -305,4 +306,4 @@ class Conversation(BaseConversation):
) -> None:
self.conversation_id = conversation_id
self.response_id = response_id
- self.choice_id = choice_id \ No newline at end of file
+ self.choice_id = choice_id
diff --git a/g4f/Provider/needs_auth/Groq.py b/g4f/Provider/needs_auth/Groq.py
index d11f6a82..027d98bf 100644
--- a/g4f/Provider/needs_auth/Groq.py
+++ b/g4f/Provider/needs_auth/Groq.py
@@ -8,7 +8,26 @@ class Groq(Openai):
url = "https://console.groq.com/playground"
working = True
default_model = "mixtral-8x7b-32768"
- models = ["mixtral-8x7b-32768", "llama2-70b-4096", "gemma-7b-it"]
+ models = [
+ "distil-whisper-large-v3-en",
+ "gemma2-9b-it",
+ "gemma-7b-it",
+ "llama3-groq-70b-8192-tool-use-preview",
+ "llama3-groq-8b-8192-tool-use-preview",
+ "llama-3.1-70b-versatile",
+ "llama-3.1-8b-instant",
+ "llama-3.2-1b-preview",
+ "llama-3.2-3b-preview",
+ "llama-3.2-11b-vision-preview",
+ "llama-3.2-90b-vision-preview",
+ "llama-guard-3-8b",
+ "llava-v1.5-7b-4096-preview",
+ "llama3-70b-8192",
+ "llama3-8b-8192",
+ "mixtral-8x7b-32768",
+ "whisper-large-v3",
+ "whisper-large-v3-turbo",
+ ]
model_aliases = {"mixtral-8x7b": "mixtral-8x7b-32768", "llama2-70b": "llama2-70b-4096"}
@classmethod
@@ -21,4 +40,4 @@ class Groq(Openai):
) -> AsyncResult:
return super().create_async_generator(
model, messages, api_base=api_base, **kwargs
- ) \ No newline at end of file
+ )
diff --git a/g4f/Provider/needs_auth/OpenRouter.py b/g4f/Provider/needs_auth/OpenRouter.py
index 7945784a..5e0bf336 100644
--- a/g4f/Provider/needs_auth/OpenRouter.py
+++ b/g4f/Provider/needs_auth/OpenRouter.py
@@ -8,7 +8,7 @@ from ...typing import AsyncResult, Messages
class OpenRouter(Openai):
label = "OpenRouter"
url = "https://openrouter.ai"
- working = True
+ working = False
default_model = "mistralai/mistral-7b-instruct:free"
@classmethod
@@ -29,4 +29,4 @@ class OpenRouter(Openai):
) -> AsyncResult:
return super().create_async_generator(
model, messages, api_base=api_base, **kwargs
- ) \ No newline at end of file
+ )
diff --git a/g4f/Provider/needs_auth/Openai.py b/g4f/Provider/needs_auth/Openai.py
index a0740c47..382ebada 100644
--- a/g4f/Provider/needs_auth/Openai.py
+++ b/g4f/Provider/needs_auth/Openai.py
@@ -11,7 +11,7 @@ from ...image import to_data_uri
class Openai(AsyncGeneratorProvider, ProviderModelMixin):
label = "OpenAI API"
- url = "https://openai.com"
+ url = "https://platform.openai.com"
working = True
needs_auth = True
supports_message_history = True
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 82462040..f02121e3 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -61,9 +61,11 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
default_model = None
default_vision_model = "gpt-4o"
models = [ "auto", "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-gizmo"]
+
model_aliases = {
- "gpt-4-turbo-preview": "gpt-4",
- "dall-e": "gpt-4",
+ #"gpt-4-turbo": "gpt-4",
+ #"gpt-4": "gpt-4-gizmo",
+ #"dalle": "gpt-4",
}
_api_key: str = None
_headers: dict = None
diff --git a/g4f/Provider/needs_auth/PerplexityApi.py b/g4f/Provider/needs_auth/PerplexityApi.py
index 35d8d9d6..3ee65b30 100644
--- a/g4f/Provider/needs_auth/PerplexityApi.py
+++ b/g4f/Provider/needs_auth/PerplexityApi.py
@@ -15,7 +15,6 @@ class PerplexityApi(Openai):
"llama-3-sonar-large-32k-online",
"llama-3-8b-instruct",
"llama-3-70b-instruct",
- "mixtral-8x7b-instruct"
]
@classmethod
@@ -28,4 +27,4 @@ class PerplexityApi(Openai):
) -> AsyncResult:
return super().create_async_generator(
model, messages, api_base=api_base, **kwargs
- ) \ No newline at end of file
+ )
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index b5463b71..0492645d 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -7,5 +7,5 @@ from .Poe import Poe
from .Openai import Openai
from .Groq import Groq
from .OpenRouter import OpenRouter
-from .OpenaiAccount import OpenaiAccount
-from .PerplexityApi import PerplexityApi \ No newline at end of file
+#from .OpenaiAccount import OpenaiAccount
+from .PerplexityApi import PerplexityApi
diff --git a/g4f/Provider/nexra/NexraBing.py b/g4f/Provider/nexra/NexraBing.py
new file mode 100644
index 00000000..28f0b117
--- /dev/null
+++ b/g4f/Provider/nexra/NexraBing.py
@@ -0,0 +1,93 @@
+from __future__ import annotations
+
+import json
+import requests
+
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ..helper import format_prompt
+
+class NexraBing(AbstractProvider, ProviderModelMixin):
+ label = "Nexra Bing"
+ url = "https://nexra.aryahcr.cc/documentation/bing/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
+ working = True
+ supports_stream = True
+
+ default_model = 'Balanced'
+ models = [default_model, 'Creative', 'Precise']
+
+    model_aliases = {
+        "gpt-4": "Balanced",
+    }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ proxy: str = None,
+ markdown: bool = False,
+ **kwargs
+ ) -> CreateResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ],
+ "conversation_style": model,
+ "markdown": markdown,
+ "stream": stream,
+ "model": "Bing"
+ }
+
+ response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=True)
+
+ return cls.process_response(response)
+
+ @classmethod
+ def process_response(cls, response):
+ if response.status_code != 200:
+ yield f"Error: {response.status_code}"
+ return
+
+ full_message = ""
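+        # Chunks arrive as '\x1e'-separated JSON records; only the text beyond the previously seen message is yielded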
+ for chunk in response.iter_content(chunk_size=None):
+ if chunk:
+ messages = chunk.decode('utf-8').split('\x1e')
+ for message in messages:
+ try:
+ json_data = json.loads(message)
+ if json_data.get('finish', False):
+ return
+ current_message = json_data.get('message', '')
+ if current_message:
+ new_content = current_message[len(full_message):]
+ if new_content:
+ yield new_content
+ full_message = current_message
+ except json.JSONDecodeError:
+ continue
+
+ if not full_message:
+ yield "No message received"
diff --git a/g4f/Provider/nexra/NexraBlackbox.py b/g4f/Provider/nexra/NexraBlackbox.py
new file mode 100644
index 00000000..be048fdd
--- /dev/null
+++ b/g4f/Provider/nexra/NexraBlackbox.py
@@ -0,0 +1,100 @@
+from __future__ import annotations
+
+import json
+import requests
+
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ..helper import format_prompt
+
+class NexraBlackbox(AbstractProvider, ProviderModelMixin):
+ label = "Nexra Blackbox"
+ url = "https://nexra.aryahcr.cc/documentation/blackbox/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
+ working = True
+ supports_stream = True
+
+ default_model = "blackbox"
+ models = [default_model]
+ model_aliases = {"blackboxai": "blackbox",}
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ markdown: bool = False,
+ websearch: bool = False,
+ **kwargs
+ ) -> CreateResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ],
+ "websearch": websearch,
+ "stream": stream,
+ "markdown": markdown,
+ "model": model
+ }
+
+ response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=stream)
+
+ if stream:
+ return cls.process_streaming_response(response)
+ else:
+ return cls.process_non_streaming_response(response)
+
+ @classmethod
+ def process_non_streaming_response(cls, response):
+ if response.status_code == 200:
+ try:
+ full_response = ""
+ for line in response.iter_lines(decode_unicode=True):
+ if line:
+ data = json.loads(line)
+ if data.get('finish'):
+ break
+ message = data.get('message', '')
+ if message:
+ full_response = message
+ return full_response
+ except json.JSONDecodeError:
+ return "Error: Unable to decode JSON response"
+ else:
+ return f"Error: {response.status_code}"
+
+ @classmethod
+ def process_streaming_response(cls, response):
+ previous_message = ""
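+        # Yield only the delta between the latest streamed message and what has already been emitted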
+ for line in response.iter_lines(decode_unicode=True):
+ if line:
+ try:
+ data = json.loads(line)
+ if data.get('finish'):
+ break
+ message = data.get('message', '')
+ if message and message != previous_message:
+ yield message[len(previous_message):]
+ previous_message = message
+ except json.JSONDecodeError:
+ pass
diff --git a/g4f/Provider/nexra/NexraChatGPT.py b/g4f/Provider/nexra/NexraChatGPT.py
new file mode 100644
index 00000000..074a0363
--- /dev/null
+++ b/g4f/Provider/nexra/NexraChatGPT.py
@@ -0,0 +1,285 @@
+from __future__ import annotations
+
+import asyncio
+import json
+import requests
+from typing import Any, Dict
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class NexraChatGPT(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra ChatGPT"
+ url = "https://nexra.aryahcr.cc/documentation/chatgpt/en"
+ api_endpoint_nexra_chatgpt = "https://nexra.aryahcr.cc/api/chat/gpt"
+ api_endpoint_nexra_chatgpt4o = "https://nexra.aryahcr.cc/api/chat/complements"
+ api_endpoint_nexra_chatgpt_v2 = "https://nexra.aryahcr.cc/api/chat/complements"
+ api_endpoint_nexra_gptweb = "https://nexra.aryahcr.cc/api/chat/gptweb"
+ working = True
+ supports_system_message = True
+ supports_message_history = True
+ supports_stream = True
+
+ default_model = 'gpt-3.5-turbo'
+ nexra_chatgpt = [
+ 'gpt-4', 'gpt-4-0613', 'gpt-4-0314', 'gpt-4-32k-0314',
+ default_model, 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
+ 'text-davinci-003', 'text-davinci-002', 'code-davinci-002', 'gpt-3', 'text-curie-001', 'text-babbage-001', 'text-ada-001', 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002'
+ ]
+ nexra_chatgpt4o = ['gpt-4o']
+ nexra_chatgptv2 = ['chatgpt']
+ nexra_gptweb = ['gptweb']
+ models = nexra_chatgpt + nexra_chatgpt4o + nexra_chatgptv2 + nexra_gptweb
+
+ model_aliases = {
+ "gpt-4": "gpt-4-0613",
+ "gpt-4-32k": "gpt-4-32k-0314",
+ "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0613": "gpt-3.5-turbo-16k-0613",
+ "gpt-3": "text-davinci-003",
+ "text-davinci-002": "code-davinci-002",
+ "text-curie-001": "text-babbage-001",
+ "text-ada-001": "davinci",
+ "curie": "babbage",
+ "ada": "babbage-002",
+ "davinci-002": "davinci-002",
+ "chatgpt": "chatgpt",
+ "gptweb": "gptweb"
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ proxy: str = None,
+ markdown: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ if model in cls.nexra_chatgpt:
+ async for chunk in cls._create_async_generator_nexra_chatgpt(model, messages, proxy, **kwargs):
+ yield chunk
+ elif model in cls.nexra_chatgpt4o:
+ async for chunk in cls._create_async_generator_nexra_chatgpt4o(model, messages, stream, proxy, markdown, **kwargs):
+ yield chunk
+ elif model in cls.nexra_chatgptv2:
+ async for chunk in cls._create_async_generator_nexra_chatgpt_v2(model, messages, stream, proxy, markdown, **kwargs):
+ yield chunk
+ elif model in cls.nexra_gptweb:
+ async for chunk in cls._create_async_generator_nexra_gptweb(model, messages, proxy, **kwargs):
+ yield chunk
+
+ @classmethod
+ async def _create_async_generator_nexra_chatgpt(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ markdown: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+
+ prompt = format_prompt(messages)
+ data = {
+ "messages": messages,
+ "prompt": prompt,
+ "model": model,
+ "markdown": markdown
+ }
+
+ loop = asyncio.get_event_loop()
+ try:
+ response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt, data, headers, proxy)
+ filtered_response = cls._filter_response(response)
+
+ for chunk in filtered_response:
+ yield chunk
+ except Exception as e:
+ print(f"Error during API request (nexra_chatgpt): {e}")
+
+ @classmethod
+ async def _create_async_generator_nexra_chatgpt4o(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ proxy: str = None,
+ markdown: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+
+ prompt = format_prompt(messages)
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": prompt
+ }
+ ],
+ "stream": stream,
+ "markdown": markdown,
+ "model": model
+ }
+
+ loop = asyncio.get_event_loop()
+ try:
+ response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt4o, data, headers, proxy, stream)
+
+ if stream:
+ async for chunk in cls._process_streaming_response(response):
+ yield chunk
+ else:
+ for chunk in cls._process_non_streaming_response(response):
+ yield chunk
+ except Exception as e:
+ print(f"Error during API request (nexra_chatgpt4o): {e}")
+
+ @classmethod
+ async def _create_async_generator_nexra_chatgpt_v2(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ proxy: str = None,
+ markdown: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+
+ prompt = format_prompt(messages)
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": prompt
+ }
+ ],
+ "stream": stream,
+ "markdown": markdown,
+ "model": model
+ }
+
+ loop = asyncio.get_event_loop()
+ try:
+ response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt_v2, data, headers, proxy, stream)
+
+ if stream:
+ async for chunk in cls._process_streaming_response(response):
+ yield chunk
+ else:
+ for chunk in cls._process_non_streaming_response(response):
+ yield chunk
+ except Exception as e:
+ print(f"Error during API request (nexra_chatgpt_v2): {e}")
+
+ @classmethod
+ async def _create_async_generator_nexra_gptweb(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ markdown: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+
+ prompt = format_prompt(messages)
+ data = {
+ "prompt": prompt,
+ "markdown": markdown,
+ }
+
+ loop = asyncio.get_event_loop()
+ try:
+ response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_gptweb, data, headers, proxy)
+
+ for chunk in response.iter_content(1024):
+ if chunk:
+ decoded_chunk = chunk.decode().lstrip('_')
+ try:
+ response_json = json.loads(decoded_chunk)
+ if response_json.get("status"):
+ yield response_json.get("gpt", "")
+ except json.JSONDecodeError:
+ continue
+ except Exception as e:
+ print(f"Error during API request (nexra_gptweb): {e}")
+
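+    # The async wrappers above offload this blocking requests call to the default executor via run_in_executor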
+ @staticmethod
+ def _sync_post_request(url: str, data: Dict[str, Any], headers: Dict[str, str], proxy: str = None, stream: bool = False) -> requests.Response:
+ proxies = {
+ "http": proxy,
+ "https": proxy,
+ } if proxy else None
+
+ try:
+ response = requests.post(url, json=data, headers=headers, proxies=proxies, stream=stream)
+ response.raise_for_status()
+ return response
+ except requests.RequestException as e:
+ print(f"Request failed: {e}")
+ raise
+
+ @staticmethod
+ def _process_non_streaming_response(response: requests.Response) -> str:
+ if response.status_code == 200:
+ try:
+ content = response.text.lstrip('')
+ data = json.loads(content)
+ return data.get('message', '')
+ except json.JSONDecodeError:
+ return "Error: Unable to decode JSON response"
+ else:
+ return f"Error: {response.status_code}"
+
+ @staticmethod
+ async def _process_streaming_response(response: requests.Response):
+ full_message = ""
+ for line in response.iter_lines(decode_unicode=True):
+ if line:
+ try:
+ line = line.lstrip('')
+ data = json.loads(line)
+ if data.get('finish'):
+ break
+ message = data.get('message', '')
+ if message:
+ yield message[len(full_message):]
+ full_message = message
+ except json.JSONDecodeError:
+ pass
+
+ @staticmethod
+ def _filter_response(response: requests.Response) -> str:
+ response_json = response.json()
+ return response_json.get("gpt", "")
diff --git a/g4f/Provider/nexra/NexraDallE.py b/g4f/Provider/nexra/NexraDallE.py
new file mode 100644
index 00000000..f605c6d0
--- /dev/null
+++ b/g4f/Provider/nexra/NexraDallE.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+import json
+import requests
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ...image import ImageResponse
+
+class NexraDallE(AbstractProvider, ProviderModelMixin):
+ label = "Nexra DALL-E"
+ url = "https://nexra.aryahcr.cc/documentation/dall-e/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = "dalle"
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> CreateResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ data = {
+ "prompt": messages[-1]["content"],
+ "model": model,
+ "response": response
+ }
+
+ response = requests.post(cls.api_endpoint, headers=headers, json=data)
+
+ result = cls.process_response(response)
+ yield result
+
+ @classmethod
+ def process_response(cls, response):
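+        # The image endpoint returns an underscore-prefixed JSON payload whose 'images' list holds the generated image (URL or base64, depending on 'response')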
+ if response.status_code == 200:
+ try:
+ content = response.text.strip()
+ content = content.lstrip('_')
+ data = json.loads(content)
+ if data.get('status') and data.get('images'):
+ image_url = data['images'][0]
+ return ImageResponse(images=[image_url], alt="Generated Image")
+ else:
+ return "Error: No image URL found in the response"
+ except json.JSONDecodeError as e:
+ return f"Error: Unable to decode JSON response. Details: {str(e)}"
+ else:
+ return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraDallE2.py b/g4f/Provider/nexra/NexraDallE2.py
new file mode 100644
index 00000000..2a36b6e6
--- /dev/null
+++ b/g4f/Provider/nexra/NexraDallE2.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+import json
+import requests
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ...image import ImageResponse
+
+class NexraDallE2(AbstractProvider, ProviderModelMixin):
+ label = "Nexra DALL-E 2"
+ url = "https://nexra.aryahcr.cc/documentation/dall-e/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = "dalle2"
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> CreateResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ data = {
+ "prompt": messages[-1]["content"],
+ "model": model,
+ "response": response
+ }
+
+ response = requests.post(cls.api_endpoint, headers=headers, json=data)
+
+ result = cls.process_response(response)
+ yield result
+
+ @classmethod
+ def process_response(cls, response):
+ if response.status_code == 200:
+ try:
+ content = response.text.strip()
+ content = content.lstrip('_')
+ data = json.loads(content)
+ if data.get('status') and data.get('images'):
+ image_url = data['images'][0]
+ return ImageResponse(images=[image_url], alt="Generated Image")
+ else:
+ return "Error: No image URL found in the response"
+ except json.JSONDecodeError as e:
+ return f"Error: Unable to decode JSON response. Details: {str(e)}"
+ else:
+ return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraEmi.py b/g4f/Provider/nexra/NexraEmi.py
new file mode 100644
index 00000000..c26becec
--- /dev/null
+++ b/g4f/Provider/nexra/NexraEmi.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+import json
+import requests
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ...image import ImageResponse
+
+class NexraEmi(AbstractProvider, ProviderModelMixin):
+ label = "Nexra Emi"
+ url = "https://nexra.aryahcr.cc/documentation/emi/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = "emi"
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> CreateResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ data = {
+ "prompt": messages[-1]["content"],
+ "model": model,
+ "response": response
+ }
+
+ response = requests.post(cls.api_endpoint, headers=headers, json=data)
+
+ result = cls.process_response(response)
+ yield result
+
+ @classmethod
+ def process_response(cls, response):
+ if response.status_code == 200:
+ try:
+ content = response.text.strip()
+ content = content.lstrip('_')
+ data = json.loads(content)
+ if data.get('status') and data.get('images'):
+ image_url = data['images'][0]
+ return ImageResponse(images=[image_url], alt="Generated Image")
+ else:
+ return "Error: No image URL found in the response"
+ except json.JSONDecodeError as e:
+ return f"Error: Unable to decode JSON response. Details: {str(e)}"
+ else:
+ return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraFluxPro.py b/g4f/Provider/nexra/NexraFluxPro.py
new file mode 100644
index 00000000..cfb26385
--- /dev/null
+++ b/g4f/Provider/nexra/NexraFluxPro.py
@@ -0,0 +1,70 @@
+from __future__ import annotations
+
+import json
+import requests
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ...image import ImageResponse
+
+class NexraFluxPro(AbstractProvider, ProviderModelMixin):
+ url = "https://nexra.aryahcr.cc/documentation/flux-pro/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'flux'
+ models = [default_model]
+ model_aliases = {
+ "flux-pro": "flux",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> CreateResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ data = {
+ "prompt": messages[-1]["content"],
+ "model": model,
+ "response": response
+ }
+
+ response = requests.post(cls.api_endpoint, headers=headers, json=data)
+
+ result = cls.process_response(response)
+ yield result
+
+ @classmethod
+ def process_response(cls, response):
+ if response.status_code == 200:
+ try:
+ content = response.text.strip()
+ content = content.lstrip('_')
+ data = json.loads(content)
+ if data.get('status') and data.get('images'):
+ image_url = data['images'][0]
+ return ImageResponse(images=[image_url], alt="Generated Image")
+ else:
+ return "Error: No image URL found in the response"
+ except json.JSONDecodeError as e:
+ return f"Error: Unable to decode JSON response. Details: {str(e)}"
+ else:
+ return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraGeminiPro.py b/g4f/Provider/nexra/NexraGeminiPro.py
new file mode 100644
index 00000000..e4e6a8ec
--- /dev/null
+++ b/g4f/Provider/nexra/NexraGeminiPro.py
@@ -0,0 +1,86 @@
+from __future__ import annotations
+
+import json
+import requests
+
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ..helper import format_prompt
+
+class NexraGeminiPro(AbstractProvider, ProviderModelMixin):
+ label = "Nexra Gemini PRO"
+ url = "https://nexra.aryahcr.cc/documentation/gemini-pro/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
+ working = True
+ supports_stream = True
+
+ default_model = 'gemini-pro'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ markdown: bool = False,
+ **kwargs
+ ) -> CreateResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ],
+ "stream": stream,
+ "markdown": markdown,
+ "model": model
+ }
+
+ response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=stream)
+
+ if stream:
+ return cls.process_streaming_response(response)
+ else:
+ return cls.process_non_streaming_response(response)
+
+ @classmethod
+ def process_non_streaming_response(cls, response):
+ if response.status_code == 200:
+ try:
+ content = response.text.lstrip('')
+ data = json.loads(content)
+ return data.get('message', '')
+ except json.JSONDecodeError:
+ return "Error: Unable to decode JSON response"
+ else:
+ return f"Error: {response.status_code}"
+
+ @classmethod
+ def process_streaming_response(cls, response):
+ full_message = ""
+ for line in response.iter_lines(decode_unicode=True):
+ if line:
+ try:
+ line = line.lstrip('')
+ data = json.loads(line)
+ if data.get('finish'):
+ break
+ message = data.get('message', '')
+ if message:
+ yield message[len(full_message):]
+ full_message = message
+ except json.JSONDecodeError:
+ pass
diff --git a/g4f/Provider/nexra/NexraMidjourney.py b/g4f/Provider/nexra/NexraMidjourney.py
new file mode 100644
index 00000000..c427f8a0
--- /dev/null
+++ b/g4f/Provider/nexra/NexraMidjourney.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+import json
+import requests
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ...image import ImageResponse
+
+class NexraMidjourney(AbstractProvider, ProviderModelMixin):
+ label = "Nexra Midjourney"
+ url = "https://nexra.aryahcr.cc/documentation/midjourney/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = "midjourney"
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> CreateResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ data = {
+ "prompt": messages[-1]["content"],
+ "model": model,
+ "response": response
+ }
+
+ response = requests.post(cls.api_endpoint, headers=headers, json=data)
+
+ result = cls.process_response(response)
+ yield result
+
+ @classmethod
+ def process_response(cls, response):
+ if response.status_code == 200:
+ try:
+ content = response.text.strip()
+ content = content.lstrip('_')
+ data = json.loads(content)
+ if data.get('status') and data.get('images'):
+ image_url = data['images'][0]
+ return ImageResponse(images=[image_url], alt="Generated Image")
+ else:
+ return "Error: No image URL found in the response"
+ except json.JSONDecodeError as e:
+ return f"Error: Unable to decode JSON response. Details: {str(e)}"
+ else:
+ return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraProdiaAI.py b/g4f/Provider/nexra/NexraProdiaAI.py
new file mode 100644
index 00000000..de997fce
--- /dev/null
+++ b/g4f/Provider/nexra/NexraProdiaAI.py
@@ -0,0 +1,151 @@
+from __future__ import annotations
+
+import json
+import requests
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ...image import ImageResponse
+
+class NexraProdiaAI(AbstractProvider, ProviderModelMixin):
+ label = "Nexra Prodia AI"
+ url = "https://nexra.aryahcr.cc/documentation/prodia/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'absolutereality_v181.safetensors [3d9d4d2b]'
+ models = [
+ '3Guofeng3_v34.safetensors [50f420de]',
+ 'absolutereality_V16.safetensors [37db0fc3]',
+ default_model,
+ 'amIReal_V41.safetensors [0a8a2e61]',
+ 'analog-diffusion-1.0.ckpt [9ca13f02]',
+ 'aniverse_v30.safetensors [579e6f85]',
+ 'anythingv3_0-pruned.ckpt [2700c435]',
+ 'anything-v4.5-pruned.ckpt [65745d25]',
+ 'anythingV5_PrtRE.safetensors [893e49b9]',
+ 'AOM3A3_orangemixs.safetensors [9600da17]',
+ 'blazing_drive_v10g.safetensors [ca1c1eab]',
+ 'breakdomain_I2428.safetensors [43cc7d2f]',
+ 'breakdomain_M2150.safetensors [15f7afca]',
+ 'cetusMix_Version35.safetensors [de2f2560]',
+ 'childrensStories_v13D.safetensors [9dfaabcb]',
+ 'childrensStories_v1SemiReal.safetensors [a1c56dbb]',
+ 'childrensStories_v1ToonAnime.safetensors [2ec7b88b]',
+ 'Counterfeit_v30.safetensors [9e2a8f19]',
+ 'cuteyukimixAdorable_midchapter3.safetensors [04bdffe6]',
+ 'cyberrealistic_v33.safetensors [82b0d085]',
+ 'dalcefo_v4.safetensors [425952fe]',
+ 'deliberate_v2.safetensors [10ec4b29]',
+ 'deliberate_v3.safetensors [afd9d2d4]',
+ 'dreamlike-anime-1.0.safetensors [4520e090]',
+ 'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]',
+ 'dreamlike-photoreal-2.0.safetensors [fdcf65e7]',
+ 'dreamshaper_6BakedVae.safetensors [114c8abb]',
+ 'dreamshaper_7.safetensors [5cf5ae06]',
+ 'dreamshaper_8.safetensors [9d40847d]',
+ 'edgeOfRealism_eorV20.safetensors [3ed5de15]',
+ 'EimisAnimeDiffusion_V1.ckpt [4f828a15]',
+ 'elldreths-vivid-mix.safetensors [342d9d26]',
+ 'epicphotogasm_xPlusPlus.safetensors [1a8f6d35]',
+ 'epicrealism_naturalSinRC1VAE.safetensors [90a4c676]',
+ 'epicrealism_pureEvolutionV3.safetensors [42c8440c]',
+ 'ICantBelieveItsNotPhotography_seco.safetensors [4e7a3dfd]',
+ 'indigoFurryMix_v75Hybrid.safetensors [91208cbb]',
+ 'juggernaut_aftermath.safetensors [5e20c455]',
+ 'lofi_v4.safetensors [ccc204d6]',
+ 'lyriel_v16.safetensors [68fceea2]',
+ 'majicmixRealistic_v4.safetensors [29d0de58]',
+ 'mechamix_v10.safetensors [ee685731]',
+ 'meinamix_meinaV9.safetensors [2ec66ab0]',
+ 'meinamix_meinaV11.safetensors [b56ce717]',
+ 'neverendingDream_v122.safetensors [f964ceeb]',
+ 'openjourney_V4.ckpt [ca2f377f]',
+ 'pastelMixStylizedAnime_pruned_fp16.safetensors [793a26e8]',
+ 'portraitplus_V1.0.safetensors [1400e684]',
+ 'protogenx34.safetensors [5896f8d5]',
+ 'Realistic_Vision_V1.4-pruned-fp16.safetensors [8d21810b]',
+ 'Realistic_Vision_V2.0.safetensors [79587710]',
+ 'Realistic_Vision_V4.0.safetensors [29a7afaa]',
+ 'Realistic_Vision_V5.0.safetensors [614d1063]',
+ 'Realistic_Vision_V5.1.safetensors [a0f13c83]',
+ 'redshift_diffusion-V10.safetensors [1400e684]',
+ 'revAnimated_v122.safetensors [3f4fefd9]',
+ 'rundiffusionFX25D_v10.safetensors [cd12b0ee]',
+ 'rundiffusionFX_v10.safetensors [cd4e694d]',
+ 'sdv1_4.ckpt [7460a6fa]',
+ 'v1-5-pruned-emaonly.safetensors [d7049739]',
+ 'v1-5-inpainting.safetensors [21c7ab71]',
+ 'shoninsBeautiful_v10.safetensors [25d8c546]',
+ 'theallys-mix-ii-churned.safetensors [5d9225a4]',
+ 'timeless-1.0.ckpt [7c4971d4]',
+ 'toonyou_beta6.safetensors [980f6b15]',
+ ]
+
+ model_aliases = {}
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+        steps: int = 25, # Min: 1, Max: 30
+        cfg_scale: float = 7, # Min: 0, Max: 20
+ sampler: str = "DPM++ 2M Karras", # Select from these: "Euler","Euler a","Heun","DPM++ 2M Karras","DPM++ SDE Karras","DDIM"
+ negative_prompt: str = "", # Indicates what the AI should not do
+ **kwargs
+ ) -> CreateResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ data = {
+ "prompt": messages[-1]["content"],
+ "model": "prodia",
+ "response": response,
+ "data": {
+ "model": model,
+ "steps": steps,
+ "cfg_scale": cfg_scale,
+ "sampler": sampler,
+ "negative_prompt": negative_prompt
+ }
+ }
+
+ response = requests.post(cls.api_endpoint, headers=headers, json=data)
+
+ result = cls.process_response(response)
+ yield result
+
+ @classmethod
+ def process_response(cls, response):
+ if response.status_code == 200:
+ try:
+ content = response.text.strip()
+ content = content.lstrip('_') # Remove leading underscores
+ data = json.loads(content)
+ if data.get('status') and data.get('images'):
+ image_url = data['images'][0]
+ return ImageResponse(images=[image_url], alt="Generated Image")
+ else:
+ return "Error: No image URL found in the response"
+ except json.JSONDecodeError as e:
+ return f"Error: Unable to decode JSON response. Details: {str(e)}"
+ else:
+ return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraQwen.py b/g4f/Provider/nexra/NexraQwen.py
new file mode 100644
index 00000000..7f944e44
--- /dev/null
+++ b/g4f/Provider/nexra/NexraQwen.py
@@ -0,0 +1,86 @@
+from __future__ import annotations
+
+import json
+import requests
+
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ..helper import format_prompt
+
+class NexraQwen(AbstractProvider, ProviderModelMixin):
+ label = "Nexra Qwen"
+ url = "https://nexra.aryahcr.cc/documentation/qwen/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
+ working = True
+ supports_stream = True
+
+ default_model = 'qwen'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ markdown: bool = False,
+ **kwargs
+ ) -> CreateResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ],
+ "stream": stream,
+ "markdown": markdown,
+ "model": model
+ }
+
+ response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=stream)
+
+ if stream:
+ return cls.process_streaming_response(response)
+ else:
+ return cls.process_non_streaming_response(response)
+
+ @classmethod
+ def process_non_streaming_response(cls, response):
+ if response.status_code == 200:
+ try:
+ content = response.text.lstrip('')
+ data = json.loads(content)
+ return data.get('message', '')
+ except json.JSONDecodeError:
+ return "Error: Unable to decode JSON response"
+ else:
+ return f"Error: {response.status_code}"
+
+ @classmethod
+ def process_streaming_response(cls, response):
+ full_message = ""
+ for line in response.iter_lines(decode_unicode=True):
+ if line:
+ try:
+ line = line.lstrip('')
+ data = json.loads(line)
+ if data.get('finish'):
+ break
+ message = data.get('message', '')
+ if message is not None and message != full_message:
+ yield message[len(full_message):]
+ full_message = message
+ except json.JSONDecodeError:
+ pass
diff --git a/g4f/Provider/nexra/NexraSD15.py b/g4f/Provider/nexra/NexraSD15.py
new file mode 100644
index 00000000..860a132f
--- /dev/null
+++ b/g4f/Provider/nexra/NexraSD15.py
@@ -0,0 +1,72 @@
+from __future__ import annotations
+
+import json
+import requests
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ...image import ImageResponse
+
+class NexraSD15(AbstractProvider, ProviderModelMixin):
+ label = "Nexra Stable Diffusion 1.5"
+ url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'stablediffusion-1.5'
+ models = [default_model]
+
+ model_aliases = {
+ "sd-1.5": "stablediffusion-1.5",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> CreateResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ data = {
+ "prompt": messages[-1]["content"],
+ "model": model,
+ "response": response
+ }
+
+ response = requests.post(cls.api_endpoint, headers=headers, json=data)
+
+ result = cls.process_response(response)
+ yield result
+
+ @classmethod
+ def process_response(cls, response):
+ if response.status_code == 200:
+ try:
+ content = response.text.strip()
+ content = content.lstrip('_')
+ data = json.loads(content)
+ if data.get('status') and data.get('images'):
+ image_url = data['images'][0]
+ return ImageResponse(images=[image_url], alt="Generated Image")
+ else:
+ return "Error: No image URL found in the response"
+ except json.JSONDecodeError as e:
+ return f"Error: Unable to decode JSON response. Details: {str(e)}"
+ else:
+ return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraSDLora.py b/g4f/Provider/nexra/NexraSDLora.py
new file mode 100644
index 00000000..a12bff1a
--- /dev/null
+++ b/g4f/Provider/nexra/NexraSDLora.py
@@ -0,0 +1,69 @@
+from __future__ import annotations
+
+import json
+import requests
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ...image import ImageResponse
+
+class NexraSDLora(AbstractProvider, ProviderModelMixin):
+ label = "Nexra Stable Diffusion Lora"
+ url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = "sdxl-lora"
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+        guidance: float = 0.3, # Min: 0, Max: 5
+        steps: int = 2, # Min: 2, Max: 10
+ **kwargs
+ ) -> CreateResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ data = {
+ "prompt": messages[-1]["content"],
+ "model": model,
+ "response": response,
+ "data": {
+ "guidance": guidance,
+ "steps": steps
+ }
+ }
+
+ response = requests.post(cls.api_endpoint, headers=headers, json=data)
+
+ result = cls.process_response(response)
+ yield result
+
+ @classmethod
+ def process_response(cls, response):
+ if response.status_code == 200:
+ try:
+ content = response.text.strip()
+ content = content.lstrip('_')
+ data = json.loads(content)
+ if data.get('status') and data.get('images'):
+ image_url = data['images'][0]
+ return ImageResponse(images=[image_url], alt="Generated Image")
+ else:
+ return "Error: No image URL found in the response"
+ except json.JSONDecodeError as e:
+ return f"Error: Unable to decode JSON response. Details: {str(e)}"
+ else:
+ return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraSDTurbo.py b/g4f/Provider/nexra/NexraSDTurbo.py
new file mode 100644
index 00000000..865b4522
--- /dev/null
+++ b/g4f/Provider/nexra/NexraSDTurbo.py
@@ -0,0 +1,69 @@
+from __future__ import annotations
+
+import json
+import requests
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ...image import ImageResponse
+
+class NexraSDTurbo(AbstractProvider, ProviderModelMixin):
+ label = "Nexra Stable Diffusion Turbo"
+ url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = "sdxl-turbo"
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+        strength: float = 0.7, # Min: 0, Max: 1
+        steps: int = 2, # Min: 1, Max: 10
+ **kwargs
+ ) -> CreateResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ data = {
+ "prompt": messages[-1]["content"],
+ "model": model,
+ "response": response,
+ "data": {
+ "strength": strength,
+ "steps": steps
+ }
+ }
+
+ response = requests.post(cls.api_endpoint, headers=headers, json=data)
+
+ result = cls.process_response(response)
+ yield result
+
+ @classmethod
+ def process_response(cls, response):
+ if response.status_code == 200:
+ try:
+ content = response.text.strip()
+ content = content.lstrip('_') # Remove the leading underscore
+ data = json.loads(content)
+ if data.get('status') and data.get('images'):
+ image_url = data['images'][0]
+ return ImageResponse(images=[image_url], alt="Generated Image")
+ else:
+ return "Error: No image URL found in the response"
+ except json.JSONDecodeError as e:
+ return f"Error: Unable to decode JSON response. Details: {str(e)}"
+ else:
+ return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/__init__.py b/g4f/Provider/nexra/__init__.py
new file mode 100644
index 00000000..bebc1fb6
--- /dev/null
+++ b/g4f/Provider/nexra/__init__.py
@@ -0,0 +1,14 @@
+from .NexraBing import NexraBing
+from .NexraBlackbox import NexraBlackbox
+from .NexraChatGPT import NexraChatGPT
+from .NexraDallE import NexraDallE
+from .NexraDallE2 import NexraDallE2
+from .NexraEmi import NexraEmi
+from .NexraFluxPro import NexraFluxPro
+from .NexraGeminiPro import NexraGeminiPro
+from .NexraMidjourney import NexraMidjourney
+from .NexraProdiaAI import NexraProdiaAI
+from .NexraQwen import NexraQwen
+from .NexraSD15 import NexraSD15
+from .NexraSDLora import NexraSDLora
+from .NexraSDTurbo import NexraSDTurbo
diff --git a/g4f/Provider/openai/new.py b/g4f/Provider/openai/new.py
new file mode 100644
index 00000000..f4d8e13d
--- /dev/null
+++ b/g4f/Provider/openai/new.py
@@ -0,0 +1,730 @@
+import hashlib
+import base64
+import random
+import json
+import time
+import uuid
+
+from collections import OrderedDict, defaultdict
+from typing import Any, Callable, Dict, List
+
+from datetime import (
+ datetime,
+ timedelta,
+ timezone
+)
+
+cores = [16, 24, 32]
+screens = [3000, 4000, 6000]
+maxAttempts = 500000
+
+navigator_keys = [
+ "registerProtocolHandler−function registerProtocolHandler() { [native code] }",
+ "storage−[object StorageManager]",
+ "locks−[object LockManager]",
+ "appCodeName−Mozilla",
+ "permissions−[object Permissions]",
+ "appVersion−5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
+ "share−function share() { [native code] }",
+ "webdriver−false",
+ "managed−[object NavigatorManagedData]",
+ "canShare−function canShare() { [native code] }",
+ "vendor−Google Inc.",
+ "vendor−Google Inc.",
+ "mediaDevices−[object MediaDevices]",
+ "vibrate−function vibrate() { [native code] }",
+ "storageBuckets−[object StorageBucketManager]",
+ "mediaCapabilities−[object MediaCapabilities]",
+ "getGamepads−function getGamepads() { [native code] }",
+ "bluetooth−[object Bluetooth]",
+ "share−function share() { [native code] }",
+ "cookieEnabled−true",
+ "virtualKeyboard−[object VirtualKeyboard]",
+ "product−Gecko",
+ "mediaDevices−[object MediaDevices]",
+ "canShare−function canShare() { [native code] }",
+ "getGamepads−function getGamepads() { [native code] }",
+ "product−Gecko",
+ "xr−[object XRSystem]",
+ "clipboard−[object Clipboard]",
+ "storageBuckets−[object StorageBucketManager]",
+ "unregisterProtocolHandler−function unregisterProtocolHandler() { [native code] }",
+ "productSub−20030107",
+ "login−[object NavigatorLogin]",
+ "vendorSub−",
+ "login−[object NavigatorLogin]",
+ "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
+ "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
+ "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
+ "mediaDevices−[object MediaDevices]",
+ "locks−[object LockManager]",
+ "webkitGetUserMedia−function webkitGetUserMedia() { [native code] }",
+ "vendor−Google Inc.",
+ "xr−[object XRSystem]",
+ "mediaDevices−[object MediaDevices]",
+ "virtualKeyboard−[object VirtualKeyboard]",
+ "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
+ "virtualKeyboard−[object VirtualKeyboard]",
+ "appName−Netscape",
+ "storageBuckets−[object StorageBucketManager]",
+ "presentation−[object Presentation]",
+ "onLine−true",
+ "mimeTypes−[object MimeTypeArray]",
+ "credentials−[object CredentialsContainer]",
+ "presentation−[object Presentation]",
+ "getGamepads−function getGamepads() { [native code] }",
+ "vendorSub−",
+ "virtualKeyboard−[object VirtualKeyboard]",
+ "serviceWorker−[object ServiceWorkerContainer]",
+ "xr−[object XRSystem]",
+ "product−Gecko",
+ "keyboard−[object Keyboard]",
+ "gpu−[object GPU]",
+ "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
+ "webkitPersistentStorage−[object DeprecatedStorageQuota]",
+ "doNotTrack",
+ "clearAppBadge−function clearAppBadge() { [native code] }",
+ "presentation−[object Presentation]",
+ "serial−[object Serial]",
+ "locks−[object LockManager]",
+ "requestMIDIAccess−function requestMIDIAccess() { [native code] }",
+ "locks−[object LockManager]",
+ "requestMediaKeySystemAccess−function requestMediaKeySystemAccess() { [native code] }",
+ "vendor−Google Inc.",
+ "pdfViewerEnabled−true",
+ "language−zh-CN",
+ "setAppBadge−function setAppBadge() { [native code] }",
+ "geolocation−[object Geolocation]",
+ "userAgentData−[object NavigatorUAData]",
+ "mediaCapabilities−[object MediaCapabilities]",
+ "requestMIDIAccess−function requestMIDIAccess() { [native code] }",
+ "getUserMedia−function getUserMedia() { [native code] }",
+ "mediaDevices−[object MediaDevices]",
+ "webkitPersistentStorage−[object DeprecatedStorageQuota]",
+ "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
+ "sendBeacon−function sendBeacon() { [native code] }",
+ "hardwareConcurrency−32",
+ "appVersion−5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
+ "credentials−[object CredentialsContainer]",
+ "storage−[object StorageManager]",
+ "cookieEnabled−true",
+ "pdfViewerEnabled−true",
+ "windowControlsOverlay−[object WindowControlsOverlay]",
+ "scheduling−[object Scheduling]",
+ "pdfViewerEnabled−true",
+ "hardwareConcurrency−32",
+ "xr−[object XRSystem]",
+ "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
+ "webdriver−false",
+ "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
+ "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
+ "bluetooth−[object Bluetooth]"
+]
+
+window_keys = [
+ "0",
+ "window",
+ "self",
+ "document",
+ "name",
+ "location",
+ "customElements",
+ "history",
+ "navigation",
+ "locationbar",
+ "menubar",
+ "personalbar",
+ "scrollbars",
+ "statusbar",
+ "toolbar",
+ "status",
+ "closed",
+ "frames",
+ "length",
+ "top",
+ "opener",
+ "parent",
+ "frameElement",
+ "navigator",
+ "origin",
+ "external",
+ "screen",
+ "innerWidth",
+ "innerHeight",
+ "scrollX",
+ "pageXOffset",
+ "scrollY",
+ "pageYOffset",
+ "visualViewport",
+ "screenX",
+ "screenY",
+ "outerWidth",
+ "outerHeight",
+ "devicePixelRatio",
+ "clientInformation",
+ "screenLeft",
+ "screenTop",
+ "styleMedia",
+ "onsearch",
+ "isSecureContext",
+ "trustedTypes",
+ "performance",
+ "onappinstalled",
+ "onbeforeinstallprompt",
+ "crypto",
+ "indexedDB",
+ "sessionStorage",
+ "localStorage",
+ "onbeforexrselect",
+ "onabort",
+ "onbeforeinput",
+ "onbeforematch",
+ "onbeforetoggle",
+ "onblur",
+ "oncancel",
+ "oncanplay",
+ "oncanplaythrough",
+ "onchange",
+ "onclick",
+ "onclose",
+ "oncontentvisibilityautostatechange",
+ "oncontextlost",
+ "oncontextmenu",
+ "oncontextrestored",
+ "oncuechange",
+ "ondblclick",
+ "ondrag",
+ "ondragend",
+ "ondragenter",
+ "ondragleave",
+ "ondragover",
+ "ondragstart",
+ "ondrop",
+ "ondurationchange",
+ "onemptied",
+ "onended",
+ "onerror",
+ "onfocus",
+ "onformdata",
+ "oninput",
+ "oninvalid",
+ "onkeydown",
+ "onkeypress",
+ "onkeyup",
+ "onload",
+ "onloadeddata",
+ "onloadedmetadata",
+ "onloadstart",
+ "onmousedown",
+ "onmouseenter",
+ "onmouseleave",
+ "onmousemove",
+ "onmouseout",
+ "onmouseover",
+ "onmouseup",
+ "onmousewheel",
+ "onpause",
+ "onplay",
+ "onplaying",
+ "onprogress",
+ "onratechange",
+ "onreset",
+ "onresize",
+ "onscroll",
+ "onsecuritypolicyviolation",
+ "onseeked",
+ "onseeking",
+ "onselect",
+ "onslotchange",
+ "onstalled",
+ "onsubmit",
+ "onsuspend",
+ "ontimeupdate",
+ "ontoggle",
+ "onvolumechange",
+ "onwaiting",
+ "onwebkitanimationend",
+ "onwebkitanimationiteration",
+ "onwebkitanimationstart",
+ "onwebkittransitionend",
+ "onwheel",
+ "onauxclick",
+ "ongotpointercapture",
+ "onlostpointercapture",
+ "onpointerdown",
+ "onpointermove",
+ "onpointerrawupdate",
+ "onpointerup",
+ "onpointercancel",
+ "onpointerover",
+ "onpointerout",
+ "onpointerenter",
+ "onpointerleave",
+ "onselectstart",
+ "onselectionchange",
+ "onanimationend",
+ "onanimationiteration",
+ "onanimationstart",
+ "ontransitionrun",
+ "ontransitionstart",
+ "ontransitionend",
+ "ontransitioncancel",
+ "onafterprint",
+ "onbeforeprint",
+ "onbeforeunload",
+ "onhashchange",
+ "onlanguagechange",
+ "onmessage",
+ "onmessageerror",
+ "onoffline",
+ "ononline",
+ "onpagehide",
+ "onpageshow",
+ "onpopstate",
+ "onrejectionhandled",
+ "onstorage",
+ "onunhandledrejection",
+ "onunload",
+ "crossOriginIsolated",
+ "scheduler",
+ "alert",
+ "atob",
+ "blur",
+ "btoa",
+ "cancelAnimationFrame",
+ "cancelIdleCallback",
+ "captureEvents",
+ "clearInterval",
+ "clearTimeout",
+ "close",
+ "confirm",
+ "createImageBitmap",
+ "fetch",
+ "find",
+ "focus",
+ "getComputedStyle",
+ "getSelection",
+ "matchMedia",
+ "moveBy",
+ "moveTo",
+ "open",
+ "postMessage",
+ "print",
+ "prompt",
+ "queueMicrotask",
+ "releaseEvents",
+ "reportError",
+ "requestAnimationFrame",
+ "requestIdleCallback",
+ "resizeBy",
+ "resizeTo",
+ "scroll",
+ "scrollBy",
+ "scrollTo",
+ "setInterval",
+ "setTimeout",
+ "stop",
+ "structuredClone",
+ "webkitCancelAnimationFrame",
+ "webkitRequestAnimationFrame",
+ "chrome",
+ "g_opr",
+ "opr",
+ "ethereum",
+ "caches",
+ "cookieStore",
+ "ondevicemotion",
+ "ondeviceorientation",
+ "ondeviceorientationabsolute",
+ "launchQueue",
+ "documentPictureInPicture",
+ "getScreenDetails",
+ "queryLocalFonts",
+ "showDirectoryPicker",
+ "showOpenFilePicker",
+ "showSaveFilePicker",
+ "originAgentCluster",
+ "credentialless",
+ "speechSynthesis",
+ "onscrollend",
+ "webkitRequestFileSystem",
+ "webkitResolveLocalFileSystemURL",
+ "__remixContext",
+ "__oai_SSR_TTI",
+ "__remixManifest",
+ "__reactRouterVersion",
+ "DD_RUM",
+ "__REACT_INTL_CONTEXT__",
+ "filterCSS",
+ "filterXSS",
+ "__SEGMENT_INSPECTOR__",
+ "DD_LOGS",
+ "regeneratorRuntime",
+ "_g",
+ "__remixRouteModules",
+ "__remixRouter",
+ "__STATSIG_SDK__",
+ "__STATSIG_JS_SDK__",
+ "__STATSIG_RERENDER_OVERRIDE__",
+ "_oaiHandleSessionExpired"
+]
+
+def get_parse_time():
+ now = datetime.now(timezone(timedelta(hours=-5)))
+ return now.strftime("%a %b %d %Y %H:%M:%S") + " GMT+0200 (Central European Summer Time)"
+
+def get_config(user_agent):
+
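+    # Assemble the pseudo browser fingerprint consumed by the proof-of-work token generators below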
+ core = random.choice(cores)
+ screen = random.choice(screens)
+
+ # partially hardcoded config
+ config = [
+ core + screen,
+ get_parse_time(),
+ 4294705152,
+ random.random(),
+ user_agent,
+ None,
+ "remix-prod-15f1ec0f78ad898b9606a88d384ef76345b82b82", #document.documentElement.getAttribute("data-build"),
+ "en-US",
+ "en-US,es-US,en,es",
+ 0,
+ random.choice(navigator_keys),
+ 'location',
+ random.choice(window_keys),
+ time.perf_counter(),
+ str(uuid.uuid4()),
+ ]
+
+ return config
+
+
+def get_answer_token(seed, diff, config):
+ answer, solved = generate_answer(seed, diff, config)
+
+ if solved:
+ return "gAAAAAB" + answer
+ else:
+ raise Exception("Failed to solve 'gAAAAAB' challenge")
+
+def generate_answer(seed, diff, config):
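+    # Splice two counters into the serialized config and hash with SHA3-512 until the digest prefix is not greater than the target difficulty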
+ diff_len = len(diff)
+ seed_encoded = seed.encode()
+ p1 = (json.dumps(config[:3], separators=(',', ':'), ensure_ascii=False)[:-1] + ',').encode()
+ p2 = (',' + json.dumps(config[4:9], separators=(',', ':'), ensure_ascii=False)[1:-1] + ',').encode()
+ p3 = (',' + json.dumps(config[10:], separators=(',', ':'), ensure_ascii=False)[1:]).encode()
+
+ target_diff = bytes.fromhex(diff)
+
+ for i in range(maxAttempts):
+ d1 = str(i).encode()
+ d2 = str(i >> 1).encode()
+
+ string = (
+ p1
+ + d1
+ + p2
+ + d2
+ + p3
+ )
+
+ base_encode = base64.b64encode(string)
+ hash_value = hashlib.new("sha3_512", seed_encoded + base_encode).digest()
+
+ if hash_value[:diff_len] <= target_diff:
+ return base_encode.decode(), True
+
+ return 'wQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D' + base64.b64encode(f'"{seed}"'.encode()).decode(), False
+
+def get_requirements_token(config):
+ require, solved = generate_answer(format(random.random()), "0fffff", config)
+
+ if solved:
+ return 'gAAAAAC' + require
+ else:
+ raise Exception("Failed to solve 'gAAAAAC' challenge")
+
+
+### processing turnstile token
+
+class OrderedMap:
+ def __init__(self):
+ self.map = OrderedDict()
+
+ def add(self, key: str, value: Any):
+ self.map[key] = value
+
+ def to_json(self):
+ return json.dumps(self.map)
+
+ def __str__(self):
+ return self.to_json()
+
+
+TurnTokenList = List[List[Any]]
+FloatMap = Dict[float, Any]
+StringMap = Dict[str, Any]
+FuncType = Callable[..., Any]
+
+start_time = time.time()
+
+def get_turnstile_token(dx: str, p: str) -> str:
+ decoded_bytes = base64.b64decode(dx)
+ # print(decoded_bytes.decode())
+ return process_turnstile_token(decoded_bytes.decode(), p)
+
+
+def process_turnstile_token(dx: str, p: str) -> str:
+ result = []
+ p_length = len(p)
+ if p_length != 0:
+ for i, r in enumerate(dx):
+ result.append(chr(ord(r) ^ ord(p[i % p_length])))
+ else:
+ result = list(dx)
+ return "".join(result)
+
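Because the transform is a plain XOR against a repeating key, applying it twice with the same key restores the original text; a quick sanity check (values are arbitrary):

key = "p-value"
masked = process_turnstile_token("hello world", key)
assert process_turnstile_token(masked, key) == "hello world"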
+
+def is_slice(input_val: Any) -> bool:
+ return isinstance(input_val, (list, tuple))
+
+
+def is_float(input_val: Any) -> bool:
+ return isinstance(input_val, float)
+
+
+def is_string(input_val: Any) -> bool:
+ return isinstance(input_val, str)
+
+
+def to_str(input_val: Any) -> str:
+ if input_val is None:
+ return "undefined"
+ elif is_float(input_val):
+ return f"{input_val:.16g}"
+ elif is_string(input_val):
+ special_cases = {
+ "window.Math": "[object Math]",
+ "window.Reflect": "[object Reflect]",
+ "window.performance": "[object Performance]",
+ "window.localStorage": "[object Storage]",
+ "window.Object": "function Object() { [native code] }",
+ "window.Reflect.set": "function set() { [native code] }",
+ "window.performance.now": "function () { [native code] }",
+ "window.Object.create": "function create() { [native code] }",
+ "window.Object.keys": "function keys() { [native code] }",
+ "window.Math.random": "function random() { [native code] }",
+ }
+ return special_cases.get(input_val, input_val)
+ elif isinstance(input_val, list) and all(
+ isinstance(item, str) for item in input_val
+ ):
+ return ",".join(input_val)
+ else:
+ # print(f"Type of input is: {type(input_val)}")
+ return str(input_val)
+
+
+def get_func_map() -> FloatMap:
+ process_map: FloatMap = defaultdict(lambda: None)
+
+ def func_1(e: float, t: float):
+ e_str = to_str(process_map[e])
+ t_str = to_str(process_map[t])
+ if e_str is not None and t_str is not None:
+ res = process_turnstile_token(e_str, t_str)
+ process_map[e] = res
+ else:
+ pass
+ # print(f"Warning: Unable to process func_1 for e={e}, t={t}")
+
+ def func_2(e: float, t: Any):
+ process_map[e] = t
+
+ def func_5(e: float, t: float):
+ n = process_map[e]
+ tres = process_map[t]
+ if n is None:
+ process_map[e] = tres
+ elif is_slice(n):
+ nt = n + [tres] if tres is not None else n
+ process_map[e] = nt
+ else:
+ if is_string(n) or is_string(tres):
+ res = to_str(n) + to_str(tres)
+ elif is_float(n) and is_float(tres):
+ res = n + tres
+ else:
+ res = "NaN"
+ process_map[e] = res
+
+ def func_6(e: float, t: float, n: float):
+ tv = process_map[t]
+ nv = process_map[n]
+ if is_string(tv) and is_string(nv):
+ res = f"{tv}.{nv}"
+ if res == "window.document.location":
+ process_map[e] = "https://chatgpt.com/"
+ else:
+ process_map[e] = res
+ else:
+ pass
+ # print("func type 6 error")
+
+ def func_24(e: float, t: float, n: float):
+ tv = process_map[t]
+ nv = process_map[n]
+ if is_string(tv) and is_string(nv):
+ process_map[e] = f"{tv}.{nv}"
+ else:
+ pass
+ # print("func type 24 error")
+
+ def func_7(e: float, *args):
+ n = [process_map[arg] for arg in args]
+ ev = process_map[e]
+ if isinstance(ev, str):
+ if ev == "window.Reflect.set":
+ obj = n[0]
+ key_str = str(n[1])
+ val = n[2]
+ obj.add(key_str, val)
+ elif callable(ev):
+ ev(*n)
+
+ def func_17(e: float, t: float, *args):
+ i = [process_map[arg] for arg in args]
+ tv = process_map[t]
+ res = None
+ if isinstance(tv, str):
+ if tv == "window.performance.now":
+ current_time = time.time_ns()
+ elapsed_ns = current_time - int(start_time * 1e9)
+ res = (elapsed_ns + random.random()) / 1e6
+ elif tv == "window.Object.create":
+ res = OrderedMap()
+ elif tv == "window.Object.keys":
+ if isinstance(i[0], str) and i[0] == "window.localStorage":
+ res = [
+ "STATSIG_LOCAL_STORAGE_INTERNAL_STORE_V4",
+ "STATSIG_LOCAL_STORAGE_STABLE_ID",
+ "client-correlated-secret",
+ "oai/apps/capExpiresAt",
+ "oai-did",
+ "STATSIG_LOCAL_STORAGE_LOGGING_REQUEST",
+ "UiState.isNavigationCollapsed.1",
+ ]
+ elif tv == "window.Math.random":
+ res = random.random()
+ elif callable(tv):
+ res = tv(*i)
+ process_map[e] = res
+
+ def func_8(e: float, t: float):
+ process_map[e] = process_map[t]
+
+ def func_14(e: float, t: float):
+ tv = process_map[t]
+ if is_string(tv):
+ try:
+ token_list = json.loads(tv)
+ process_map[e] = token_list
+ except json.JSONDecodeError:
+ # print(f"Warning: Unable to parse JSON for key {t}")
+ process_map[e] = None
+ else:
+ # print(f"Warning: Value for key {t} is not a string")
+ process_map[e] = None
+
+ def func_15(e: float, t: float):
+ tv = process_map[t]
+ process_map[e] = json.dumps(tv)
+
+ def func_18(e: float):
+ ev = process_map[e]
+ e_str = to_str(ev)
+ decoded = base64.b64decode(e_str).decode()
+ process_map[e] = decoded
+
+ def func_19(e: float):
+ ev = process_map[e]
+ e_str = to_str(ev)
+ encoded = base64.b64encode(e_str.encode()).decode()
+ process_map[e] = encoded
+
+ def func_20(e: float, t: float, n: float, *args):
+ o = [process_map[arg] for arg in args]
+ ev = process_map[e]
+ tv = process_map[t]
+ if ev == tv:
+ nv = process_map[n]
+ if callable(nv):
+ nv(*o)
+ else:
+ pass
+ # print("func type 20 error")
+
+ def func_21(*args):
+ pass
+
+ def func_23(e: float, t: float, *args):
+ i = list(args)
+ ev = process_map[e]
+ tv = process_map[t]
+ if ev is not None and callable(tv):
+ tv(*i)
+
+ process_map.update(
+ {
+ 1: func_1,
+ 2: func_2,
+ 5: func_5,
+ 6: func_6,
+ 7: func_7,
+ 8: func_8,
+ 10: "window",
+ 14: func_14,
+ 15: func_15,
+ 17: func_17,
+ 18: func_18,
+ 19: func_19,
+ 20: func_20,
+ 21: func_21,
+ 23: func_23,
+ 24: func_24,
+ }
+ )
+
+ return process_map
+
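The integer keys installed above act as opcodes: a token such as [2, 42, "x"] stores a value at slot 42 via func_2, and [5, 42, 43] folds slot 43 into slot 42 via func_5. A small self-contained check of that convention (slot numbers are arbitrary):

pm = get_func_map()
pm[2](42.0, "hello ")   # store "hello " at slot 42
pm[2](43.0, "world")    # store "world" at slot 43
pm[5](42.0, 43.0)       # concatenate slot 43 onto slot 42
assert pm[42.0] == "hello world"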
+
+def process_turnstile(dx: str, p: str) -> str:
+ tokens = get_turnstile_token(dx, p)
+ res = ""
+ token_list = json.loads(tokens)
+ process_map = get_func_map()
+
+ def func_3(e: str):
+ nonlocal res
+ res = base64.b64encode(e.encode()).decode()
+
+ process_map[3] = func_3
+ process_map[9] = token_list
+ process_map[16] = p
+
+ for token in token_list:
+ try:
+ e = token[0]
+ t = token[1:]
+ f = process_map.get(e)
+ if callable(f):
+ f(*t)
+ else:
+ pass
+ # print(f"Warning: No function found for key {e}")
+ except Exception as exc:
+ raise Exception(f"Error processing token {token}: {exc}")
+ # print(f"Error processing token {token}: {exc}")
+
+ return res \ No newline at end of file
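Taken together, a hedged usage sketch: dx is the base64 turnstile payload issued by the backend and p is the proof-of-work token used as the XOR key; the values and the header name below are assumptions for illustration, not data from the patch.

# turnstile_token = process_turnstile(dx, p)
# headers["openai-sentinel-turnstile-token"] = turnstile_token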
diff --git a/g4f/Provider/selenium/AItianhuSpace.py b/g4f/Provider/selenium/AItianhuSpace.py
deleted file mode 100644
index 4c438e3b..00000000
--- a/g4f/Provider/selenium/AItianhuSpace.py
+++ /dev/null
@@ -1,116 +0,0 @@
-from __future__ import annotations
-
-import time
-import random
-
-from ...typing import CreateResult, Messages
-from ..base_provider import AbstractProvider
-from ..helper import format_prompt, get_random_string
-from ...webdriver import WebDriver, WebDriverSession, element_send_text
-from ... import debug
-
-class AItianhuSpace(AbstractProvider):
- url = "https://chat3.aiyunos.top/"
- working = True
- supports_stream = True
- supports_gpt_35_turbo = True
- _domains = ["aitianhu.com", "aitianhu1.top"]
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool,
- domain: str = None,
- proxy: str = None,
- timeout: int = 120,
- webdriver: WebDriver = None,
- headless: bool = True,
- **kwargs
- ) -> CreateResult:
- if not model:
- model = "gpt-3.5-turbo"
- if not domain:
- rand = get_random_string(6)
- domain = random.choice(cls._domains)
- domain = f"{rand}.{domain}"
- if debug.logging:
- print(f"AItianhuSpace | using domain: {domain}")
- url = f"https://{domain}"
- prompt = format_prompt(messages)
-
- with WebDriverSession(webdriver, "", headless=headless, proxy=proxy) as driver:
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
-
- wait = WebDriverWait(driver, timeout)
-
- # Bypass devtools detection
- driver.get("https://blank.page/")
- wait.until(EC.visibility_of_element_located((By.ID, "sheet")))
- driver.execute_script(f"""
- document.getElementById('sheet').addEventListener('click', () => {{
- window.open(arguments[0]);
- }});
- """, url)
- driver.find_element(By.ID, "sheet").click()
- time.sleep(10)
-
- original_window = driver.current_window_handle
- for window_handle in driver.window_handles:
- if window_handle != original_window:
- driver.close()
- driver.switch_to.window(window_handle)
- break
-
- # Wait for page load
- wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea.n-input__textarea-el")))
-
- # Register hook in XMLHttpRequest
- script = """
-const _http_request_open = XMLHttpRequest.prototype.open;
-window._last_message = window._message = "";
-window._loadend = false;
-XMLHttpRequest.prototype.open = function(method, url) {
- if (url == "/api/chat-process") {
- this.addEventListener("progress", (event) => {
- const lines = this.responseText.split("\\n");
- try {
- window._message = JSON.parse(lines[lines.length-1])["text"];
- } catch(e) { }
- });
- this.addEventListener("loadend", (event) => {
- window._loadend = true;
- });
- }
- return _http_request_open.call(this, method, url);
-}
-"""
- driver.execute_script(script)
-
- # Submit prompt
- element_send_text(driver.find_element(By.CSS_SELECTOR, "textarea.n-input__textarea-el"), prompt)
-
- # Read response
- while True:
- chunk = driver.execute_script("""
-if (window._message && window._message != window._last_message) {
- try {
- return window._message.substring(window._last_message.length);
- } finally {
- window._last_message = window._message;
- }
-}
-if (window._loadend) {
- return null;
-}
-return "";
-""")
- if chunk:
- yield chunk
- elif chunk != "":
- break
- else:
- time.sleep(0.1) \ No newline at end of file
diff --git a/g4f/Provider/selenium/Bard.py b/g4f/Provider/selenium/Bard.py
deleted file mode 100644
index 9c809128..00000000
--- a/g4f/Provider/selenium/Bard.py
+++ /dev/null
@@ -1,80 +0,0 @@
-from __future__ import annotations
-
-import time
-import os
-
-try:
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
-except ImportError:
- pass
-
-from ...typing import CreateResult, Messages
-from ..base_provider import AbstractProvider
-from ..helper import format_prompt
-from ...webdriver import WebDriver, WebDriverSession, element_send_text
-
-
-class Bard(AbstractProvider):
- url = "https://bard.google.com"
- working = False
- needs_auth = True
- webdriver = True
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool,
- proxy: str = None,
- webdriver: WebDriver = None,
- user_data_dir: str = None,
- headless: bool = True,
- **kwargs
- ) -> CreateResult:
- prompt = format_prompt(messages)
- session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
- with session as driver:
- try:
- driver.get(f"{cls.url}/chat")
- wait = WebDriverWait(driver, 10 if headless else 240)
- wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
- except:
- # Reopen browser for login
- if not webdriver:
- driver = session.reopen()
- driver.get(f"{cls.url}/chat")
- login_url = os.environ.get("G4F_LOGIN_URL")
- if login_url:
- yield f"Please login: [Google Bard]({login_url})\n\n"
- wait = WebDriverWait(driver, 240)
- wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
- else:
- raise RuntimeError("Prompt textarea not found. You may not be logged in.")
-
- # Add hook in XMLHttpRequest
- script = """
-const _http_request_open = XMLHttpRequest.prototype.open;
-window._message = "";
-XMLHttpRequest.prototype.open = function(method, url) {
- if (url.includes("/assistant.lamda.BardFrontendService/StreamGenerate")) {
- this.addEventListener("load", (event) => {
- window._message = JSON.parse(JSON.parse(this.responseText.split("\\n")[3])[0][2])[4][0][1][0];
- });
- }
- return _http_request_open.call(this, method, url);
-}
-"""
- driver.execute_script(script)
-
- element_send_text(driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea"), prompt)
-
- while True:
- chunk = driver.execute_script("return window._message;")
- if chunk:
- yield chunk
- return
- else:
- time.sleep(0.1) \ No newline at end of file
diff --git a/g4f/Provider/selenium/MyShell.py b/g4f/Provider/selenium/MyShell.py
index a3f246ff..02e182d4 100644
--- a/g4f/Provider/selenium/MyShell.py
+++ b/g4f/Provider/selenium/MyShell.py
@@ -9,7 +9,7 @@ from ...webdriver import WebDriver, WebDriverSession, bypass_cloudflare
class MyShell(AbstractProvider):
url = "https://app.myshell.ai/chat"
- working = True
+ working = False
supports_gpt_35_turbo = True
supports_stream = True
@@ -73,4 +73,4 @@ return content;
elif chunk != "":
break
else:
- time.sleep(0.1) \ No newline at end of file
+ time.sleep(0.1)
diff --git a/g4f/Provider/selenium/PerplexityAi.py b/g4f/Provider/selenium/PerplexityAi.py
index 6b529d5b..d965dbf7 100644
--- a/g4f/Provider/selenium/PerplexityAi.py
+++ b/g4f/Provider/selenium/PerplexityAi.py
@@ -16,7 +16,7 @@ from ...webdriver import WebDriver, WebDriverSession, element_send_text
class PerplexityAi(AbstractProvider):
url = "https://www.perplexity.ai"
- working = True
+ working = False
supports_gpt_35_turbo = True
supports_stream = True
@@ -105,4 +105,4 @@ if(window._message && window._message != window._last_message) {
elif chunk != "":
break
else:
- time.sleep(0.1) \ No newline at end of file
+ time.sleep(0.1)
diff --git a/g4f/Provider/selenium/TalkAi.py b/g4f/Provider/selenium/TalkAi.py
index 89280598..a7b63375 100644
--- a/g4f/Provider/selenium/TalkAi.py
+++ b/g4f/Provider/selenium/TalkAi.py
@@ -8,7 +8,7 @@ from ...webdriver import WebDriver, WebDriverSession
class TalkAi(AbstractProvider):
url = "https://talkai.info"
- working = True
+ working = False
supports_gpt_35_turbo = True
supports_stream = True
@@ -83,4 +83,4 @@ return content;
elif chunk != "":
break
else:
- time.sleep(0.1) \ No newline at end of file
+ time.sleep(0.1)
diff --git a/g4f/Provider/selenium/__init__.py b/g4f/Provider/selenium/__init__.py
index 9a020460..3a59ea58 100644
--- a/g4f/Provider/selenium/__init__.py
+++ b/g4f/Provider/selenium/__init__.py
@@ -1,6 +1,4 @@
-from .AItianhuSpace import AItianhuSpace
from .MyShell import MyShell
from .PerplexityAi import PerplexityAi
from .Phind import Phind
from .TalkAi import TalkAi
-from .Bard import Bard \ No newline at end of file
diff --git a/g4f/Provider/unfinished/AiChatting.py b/g4f/Provider/unfinished/AiChatting.py
deleted file mode 100644
index f062fa98..00000000
--- a/g4f/Provider/unfinished/AiChatting.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from __future__ import annotations
-
-from urllib.parse import unquote
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AbstractProvider
-from ...webdriver import WebDriver
-from ...requests import Session, get_session_from_browser
-
-class AiChatting(AbstractProvider):
- url = "https://www.aichatting.net"
- supports_gpt_35_turbo = True
- _session: Session = None
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool,
- proxy: str = None,
- timeout: int = 120,
- webdriver: WebDriver = None,
- **kwargs
- ) -> AsyncResult:
- if not cls._session:
- cls._session = get_session_from_browser(cls.url, webdriver, proxy, timeout)
- visitorId = unquote(cls._session.cookies.get("aichatting.website.visitorId"))
-
- headers = {
- "accept": "application/json, text/plain, */*",
- "lang": "en",
- "source": "web"
- }
- data = {
- "roleId": 0,
- }
- try:
- response = cls._session.post("https://aga-api.aichatting.net/aigc/chat/record/conversation/create", json=data, headers=headers)
- response.raise_for_status()
- conversation_id = response.json()["data"]["conversationId"]
- except Exception as e:
- cls.reset()
- raise e
- headers = {
- "authority": "aga-api.aichatting.net",
- "accept": "text/event-stream,application/json, text/event-stream",
- "lang": "en",
- "source": "web",
- "vtoken": visitorId,
- }
- data = {
- "spaceHandle": True,
- "roleId": 0,
- "messages": messages,
- "conversationId": conversation_id,
- }
- response = cls._session.post("https://aga-api.aichatting.net/aigc/chat/v2/stream", json=data, headers=headers, stream=True)
- response.raise_for_status()
- for chunk in response.iter_lines():
- if chunk.startswith(b"data:"):
- yield chunk[5:].decode().replace("-=- --", " ").replace("-=-n--", "\n").replace("--@DONE@--", "")
-
- @classmethod
- def reset(cls):
- cls._session = None \ No newline at end of file
diff --git a/g4f/Provider/unfinished/ChatAiGpt.py b/g4f/Provider/unfinished/ChatAiGpt.py
deleted file mode 100644
index bc962623..00000000
--- a/g4f/Provider/unfinished/ChatAiGpt.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from __future__ import annotations
-
-import re
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-from ..helper import format_prompt
-
-
-class ChatAiGpt(AsyncGeneratorProvider):
- url = "https://chataigpt.org"
- supports_gpt_35_turbo = True
- _nonce = None
- _post_id = None
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
- "Accept": "*/*",
- "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
- "Accept-Encoding": "gzip, deflate, br",
- "Origin": cls.url,
- "Alt-Used": cls.url,
- "Connection": "keep-alive",
- "Referer": cls.url,
- "Pragma": "no-cache",
- "Cache-Control": "no-cache",
- "TE": "trailers",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- }
- async with ClientSession(headers=headers) as session:
- if not cls._nonce:
- async with session.get(f"{cls.url}/", proxy=proxy) as response:
- response.raise_for_status()
- response = await response.text()
-
- result = re.search(
- r'data-nonce=(.*?) data-post-id=([0-9]+)', response
- )
-
- if result:
- cls._nonce, cls._post_id = result.group(1), result.group(2)
- else:
- raise RuntimeError("No nonce found")
- prompt = format_prompt(messages)
- data = {
- "_wpnonce": cls._nonce,
- "post_id": cls._post_id,
- "url": cls.url,
- "action": "wpaicg_chat_shortcode_message",
- "message": prompt,
- "bot_id": 0
- }
- async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
- response.raise_for_status()
- async for chunk in response.content:
- if chunk:
- yield chunk.decode() \ No newline at end of file
diff --git a/g4f/Provider/unfinished/Komo.py b/g4f/Provider/unfinished/Komo.py
deleted file mode 100644
index 84d8d634..00000000
--- a/g4f/Provider/unfinished/Komo.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from __future__ import annotations
-
-import json
-
-from ...requests import StreamSession
-from ...typing import AsyncGenerator
-from ..base_provider import AsyncGeneratorProvider, format_prompt
-
-class Komo(AsyncGeneratorProvider):
- url = "https://komo.ai/api/ask"
- supports_gpt_35_turbo = True
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: list[dict[str, str]],
- **kwargs
- ) -> AsyncGenerator:
- async with StreamSession(impersonate="chrome107") as session:
- prompt = format_prompt(messages)
- data = {
- "query": prompt,
- "FLAG_URLEXTRACT": "false",
- "token": "",
- "FLAG_MODELA": "1",
- }
- headers = {
- 'authority': 'komo.ai',
- 'accept': 'text/event-stream',
- 'cache-control': 'no-cache',
- 'referer': 'https://komo.ai/',
- }
-
- async with session.get(cls.url, params=data, headers=headers) as response:
- response.raise_for_status()
- next = False
- async for line in response.iter_lines():
- if line == b"event: line":
- next = True
- elif next and line.startswith(b"data: "):
- yield json.loads(line[6:])
- next = False
-
diff --git a/g4f/Provider/unfinished/MikuChat.py b/g4f/Provider/unfinished/MikuChat.py
deleted file mode 100644
index bf19631f..00000000
--- a/g4f/Provider/unfinished/MikuChat.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from __future__ import annotations
-
-import random, json
-from datetime import datetime
-from ...requests import StreamSession
-
-from ...typing import AsyncGenerator
-from ..base_provider import AsyncGeneratorProvider
-
-
-class MikuChat(AsyncGeneratorProvider):
- url = "https://ai.okmiku.com"
- supports_gpt_35_turbo = True
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: list[dict[str, str]],
- **kwargs
- ) -> AsyncGenerator:
- if not model:
- model = "gpt-3.5-turbo"
- headers = {
- "authority": "api.catgpt.cc",
- "accept": "application/json",
- "origin": cls.url,
- "referer": f"{cls.url}/chat/",
- 'x-app-version': 'undefined',
- 'x-date': get_datetime(),
- 'x-fingerprint': get_fingerprint(),
- 'x-platform': 'web'
- }
- async with StreamSession(headers=headers, impersonate="chrome107") as session:
- data = {
- "model": model,
- "top_p": 0.8,
- "temperature": 0.5,
- "presence_penalty": 1,
- "frequency_penalty": 0,
- "max_tokens": 2000,
- "stream": True,
- "messages": messages,
- }
- async with session.post("https://api.catgpt.cc/ai/v1/chat/completions", json=data) as response:
- print(await response.text())
- response.raise_for_status()
- async for line in response.iter_lines():
- if line.startswith(b"data: "):
- line = json.loads(line[6:])
- chunk = line["choices"][0]["delta"].get("content")
- if chunk:
- yield chunk
-
-def k(e: str, t: int):
- a = len(e) & 3
- s = len(e) - a
- i = t
- c = 3432918353
- o = 461845907
- n = 0
- r = 0
- while n < s:
- r = (ord(e[n]) & 255) | ((ord(e[n + 1]) & 255) << 8) | ((ord(e[n + 2]) & 255) << 16) | ((ord(e[n + 3]) & 255) << 24)
- n += 4
- r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295
- r = (r << 15) | (r >> 17)
- r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295
- i ^= r
- i = (i << 13) | (i >> 19)
- l = (i & 65535) * 5 + (((i >> 16) * 5 & 65535) << 16) & 4294967295
- i = (l & 65535) + 27492 + (((l >> 16) + 58964 & 65535) << 16)
-
- if a == 3:
- r ^= (ord(e[n + 2]) & 255) << 16
- elif a == 2:
- r ^= (ord(e[n + 1]) & 255) << 8
- elif a == 1:
- r ^= ord(e[n]) & 255
- r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295
- r = (r << 15) | (r >> 17)
- r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295
- i ^= r
-
- i ^= len(e)
- i ^= i >> 16
- i = (i & 65535) * 2246822507 + (((i >> 16) * 2246822507 & 65535) << 16) & 4294967295
- i ^= i >> 13
- i = (i & 65535) * 3266489909 + (((i >> 16) * 3266489909 & 65535) << 16) & 4294967295
- i ^= i >> 16
- return i & 0xFFFFFFFF
-
-def get_fingerprint() -> str:
- return str(k(str(int(random.random() * 100000)), 256))
-
-def get_datetime() -> str:
- return datetime.now().strftime("%Y-%m-%d %H:%M:%S") \ No newline at end of file
diff --git a/g4f/Provider/unfinished/__init__.py b/g4f/Provider/unfinished/__init__.py
deleted file mode 100644
index eb5e8825..00000000
--- a/g4f/Provider/unfinished/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .MikuChat import MikuChat
-from .Komo import Komo
-from .ChatAiGpt import ChatAiGpt
-from .AiChatting import AiChatting \ No newline at end of file
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 017eb2e6..d77fe760 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -4,6 +4,7 @@ import os
from . import debug, version
from .models import Model
+from .client import Client, AsyncClient
from .typing import Messages, CreateResult, AsyncResult, Union
from .errors import StreamNotSupportedError, ModelNotAllowedError
from .cookies import get_cookies, set_cookies
@@ -23,30 +24,6 @@ class ChatCompletion:
ignore_stream: bool = False,
patch_provider: callable = None,
**kwargs) -> Union[CreateResult, str]:
- """
- Creates a chat completion using the specified model, provider, and messages.
-
- Args:
- model (Union[Model, str]): The model to use, either as an object or a string identifier.
- messages (Messages): The messages for which the completion is to be created.
- provider (Union[ProviderType, str, None], optional): The provider to use, either as an object, a string identifier, or None.
- stream (bool, optional): Indicates if the operation should be performed as a stream.
- auth (Union[str, None], optional): Authentication token or credentials, if required.
- ignored (list[str], optional): List of provider names to be ignored.
- ignore_working (bool, optional): If True, ignores the working status of the provider.
- ignore_stream (bool, optional): If True, ignores the stream and authentication requirement checks.
- patch_provider (callable, optional): Function to modify the provider.
- **kwargs: Additional keyword arguments.
-
- Returns:
- Union[CreateResult, str]: The result of the chat completion operation.
-
- Raises:
- AuthenticationRequiredError: If authentication is required but not provided.
- ProviderNotFoundError, ModelNotFoundError: If the specified provider or model is not found.
- ProviderNotWorkingError: If the provider is not operational.
- StreamNotSupportedError: If streaming is requested but not supported by the provider.
- """
model, provider = get_model_and_provider(
model, provider, stream,
ignored, ignore_working,
@@ -64,7 +41,8 @@ class ChatCompletion:
if patch_provider:
provider = patch_provider(provider)
- result = provider.create_completion(model, messages, stream, **kwargs)
+ result = provider.create_completion(model, messages, stream=stream, **kwargs)
+
return result if stream else ''.join([str(chunk) for chunk in result])
@staticmethod
@@ -76,24 +54,6 @@ class ChatCompletion:
ignore_working: bool = False,
patch_provider: callable = None,
**kwargs) -> Union[AsyncResult, str]:
- """
- Asynchronously creates a completion using the specified model and provider.
-
- Args:
- model (Union[Model, str]): The model to use, either as an object or a string identifier.
- messages (Messages): Messages to be processed.
- provider (Union[ProviderType, str, None]): The provider to use, either as an object, a string identifier, or None.
- stream (bool): Indicates if the operation should be performed as a stream.
- ignored (list[str], optional): List of provider names to be ignored.
- patch_provider (callable, optional): Function to modify the provider.
- **kwargs: Additional keyword arguments.
-
- Returns:
- Union[AsyncResult, str]: The result of the asynchronous chat completion operation.
-
- Raises:
- StreamNotSupportedError: If streaming is requested but not supported by the provider.
- """
model, provider = get_model_and_provider(model, provider, False, ignored, ignore_working)
if stream:
@@ -113,23 +73,6 @@ class Completion:
provider : Union[ProviderType, None] = None,
stream : bool = False,
ignored : list[str] = None, **kwargs) -> Union[CreateResult, str]:
- """
- Creates a completion based on the provided model, prompt, and provider.
-
- Args:
- model (Union[Model, str]): The model to use, either as an object or a string identifier.
- prompt (str): The prompt text for which the completion is to be created.
- provider (Union[ProviderType, None], optional): The provider to use, either as an object or None.
- stream (bool, optional): Indicates if the operation should be performed as a stream.
- ignored (list[str], optional): List of provider names to be ignored.
- **kwargs: Additional keyword arguments.
-
- Returns:
- Union[CreateResult, str]: The result of the completion operation.
-
- Raises:
- ModelNotAllowedError: If the specified model is not allowed for use with this method.
- """
allowed_models = [
'code-davinci-002',
'text-ada-001',
@@ -143,6 +86,6 @@ class Completion:
model, provider = get_model_and_provider(model, provider, stream, ignored)
- result = provider.create_completion(model, [{"role": "user", "content": prompt}], stream, **kwargs)
+ result = provider.create_completion(model, [{"role": "user", "content": prompt}], stream=stream, **kwargs)
- return result if stream else ''.join(result) \ No newline at end of file
+ return result if stream else ''.join(result)
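Passing stream as a keyword does not change the public call; a short usage check against the high-level API (the model name is only an example):

import g4f

for chunk in g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
):
    print(chunk, end="")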
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index acb27e9c..754a48f1 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -12,30 +12,40 @@ from fastapi.security import APIKeyHeader
from starlette.exceptions import HTTPException
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY, HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN
from fastapi.encoders import jsonable_encoder
+from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
-from typing import Union, Optional
+from typing import Union, Optional, Iterator
import g4f
import g4f.debug
-from g4f.client import AsyncClient
+from g4f.client import Client, ChatCompletion, ChatCompletionChunk, ImagesResponse
from g4f.typing import Messages
from g4f.cookies import read_cookie_files
-def create_app():
+def create_app(g4f_api_key: str = None):
app = FastAPI()
- api = Api(app)
+
+ # Add CORS middleware
+ app.add_middleware(
+ CORSMiddleware,
+ allow_origin_regex=".*",
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+ )
+
+ api = Api(app, g4f_api_key=g4f_api_key)
api.register_routes()
api.register_authorization()
api.register_validation_exception_handler()
+
+ # Read cookie files if not ignored
if not AppConfig.ignore_cookie_files:
read_cookie_files()
- return app
-def create_app_debug():
- g4f.debug.logging = True
- return create_app()
+ return app
-class ChatCompletionsForm(BaseModel):
+class ChatCompletionsConfig(BaseModel):
messages: Messages
model: str
provider: Optional[str] = None
@@ -47,16 +57,13 @@ class ChatCompletionsForm(BaseModel):
web_search: Optional[bool] = None
proxy: Optional[str] = None
-class ImagesGenerateForm(BaseModel):
- model: Optional[str] = None
- provider: Optional[str] = None
+class ImageGenerationConfig(BaseModel):
prompt: str
- response_format: Optional[str] = None
- api_key: Optional[str] = None
- proxy: Optional[str] = None
+ model: Optional[str] = None
+ response_format: str = "url"
-class AppConfig():
- list_ignored_providers: Optional[list[str]] = None
+class AppConfig:
+ ignored_providers: Optional[list[str]] = None
g4f_api_key: Optional[str] = None
ignore_cookie_files: bool = False
defaults: dict = {}
@@ -66,16 +73,23 @@ class AppConfig():
for key, value in data.items():
setattr(cls, key, value)
+list_ignored_providers: list[str] = None
+
+def set_list_ignored_providers(ignored: list[str]):
+ global list_ignored_providers
+ list_ignored_providers = ignored
+
class Api:
- def __init__(self, app: FastAPI) -> None:
+ def __init__(self, app: FastAPI, g4f_api_key=None) -> None:
self.app = app
- self.client = AsyncClient()
+ self.client = Client()
+ self.g4f_api_key = g4f_api_key
self.get_g4f_api_key = APIKeyHeader(name="g4f-api-key")
def register_authorization(self):
@self.app.middleware("http")
async def authorization(request: Request, call_next):
- if AppConfig.g4f_api_key and request.url.path in ["/v1/chat/completions", "/v1/completions"]:
+ if self.g4f_api_key and request.url.path in ["/v1/chat/completions", "/v1/completions", "/v1/images/generate"]:
try:
user_g4f_api_key = await self.get_g4f_api_key(request)
except HTTPException as e:
@@ -84,22 +98,26 @@ class Api:
status_code=HTTP_401_UNAUTHORIZED,
content=jsonable_encoder({"detail": "G4F API key required"}),
)
- if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key):
+ if not secrets.compare_digest(self.g4f_api_key, user_g4f_api_key):
return JSONResponse(
status_code=HTTP_403_FORBIDDEN,
content=jsonable_encoder({"detail": "Invalid G4F API key"}),
)
- return await call_next(request)
+
+ response = await call_next(request)
+ return response
def register_validation_exception_handler(self):
@self.app.exception_handler(RequestValidationError)
async def validation_exception_handler(request: Request, exc: RequestValidationError):
details = exc.errors()
- modified_details = [{
- "loc": error["loc"],
- "message": error["msg"],
- "type": error["type"],
- } for error in details]
+ modified_details = []
+ for error in details:
+ modified_details.append({
+ "loc": error["loc"],
+ "message": error["msg"],
+ "type": error["type"],
+ })
return JSONResponse(
status_code=HTTP_422_UNPROCESSABLE_ENTITY,
content=jsonable_encoder({"detail": modified_details}),
@@ -113,25 +131,23 @@ class Api:
@self.app.get("/v1")
async def read_root_v1():
return HTMLResponse('g4f API: Go to '
- '<a href="/v1/chat/completions">chat/completions</a> '
- 'or <a href="/v1/models">models</a>.')
+ '<a href="/v1/chat/completions">chat/completions</a>, '
+ '<a href="/v1/models">models</a>, or '
+ '<a href="/v1/images/generate">images/generate</a>.')
@self.app.get("/v1/models")
async def models():
- model_list = {
- model: g4f.models.ModelUtils.convert[model]
+ model_list = dict(
+ (model, g4f.models.ModelUtils.convert[model])
for model in g4f.Model.__all__()
- }
+ )
model_list = [{
'id': model_id,
'object': 'model',
'created': 0,
'owned_by': model.base_provider
} for model_id, model in model_list.items()]
- return JSONResponse({
- "object": "list",
- "data": model_list,
- })
+ return JSONResponse(model_list)
@self.app.get("/v1/models/{model_name}")
async def model_info(model_name: str):
@@ -147,7 +163,7 @@ class Api:
return JSONResponse({"error": "The model does not exist."})
@self.app.post("/v1/chat/completions")
- async def chat_completions(config: ChatCompletionsForm, request: Request = None, provider: str = None):
+ async def chat_completions(config: ChatCompletionsConfig, request: Request = None, provider: str = None):
try:
config.provider = provider if config.provider is None else config.provider
if config.api_key is None and request is not None:
@@ -156,16 +172,27 @@ class Api:
auth_header = auth_header.split(None, 1)[-1]
if auth_header and auth_header != "Bearer":
config.api_key = auth_header
+
+ # Create the completion response
response = self.client.chat.completions.create(
**{
**AppConfig.defaults,
**config.dict(exclude_none=True),
},
- ignored=AppConfig.list_ignored_providers
+ ignored=AppConfig.ignored_providers
)
+
+ # Check if the response is synchronous or asynchronous
+ if isinstance(response, ChatCompletion):
+ # Synchronous response
+ return JSONResponse(response.to_json())
+
if not config.stream:
- return JSONResponse((await response).to_json())
+ # If the response is an iterator but not streaming, collect the result
+ response_list = list(response) if isinstance(response, Iterator) else [response]
+ return JSONResponse(response_list[0].to_json())
+ # Streaming response
async def streaming():
try:
async for chunk in response:
@@ -176,40 +203,38 @@ class Api:
logging.exception(e)
yield f'data: {format_exception(e, config)}\n\n'
yield "data: [DONE]\n\n"
+
return StreamingResponse(streaming(), media_type="text/event-stream")
except Exception as e:
logging.exception(e)
return Response(content=format_exception(e, config), status_code=500, media_type="application/json")
- @self.app.post("/v1/completions")
- async def completions():
- return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")
-
- @self.app.post("/v1/images/generations")
- async def images_generate(config: ImagesGenerateForm, request: Request = None, provider: str = None):
+ @self.app.post("/v1/images/generate")
+ async def generate_image(config: ImageGenerationConfig):
try:
- config.provider = provider if config.provider is None else config.provider
- if config.api_key is None and request is not None:
- auth_header = request.headers.get("Authorization")
- if auth_header is not None:
- auth_header = auth_header.split(None, 1)[-1]
- if auth_header and auth_header != "Bearer":
- config.api_key = auth_header
- response = self.client.images.generate(
- **config.dict(exclude_none=True),
+ response: ImagesResponse = await self.client.images.async_generate(
+ prompt=config.prompt,
+ model=config.model,
+ response_format=config.response_format
)
- return JSONResponse((await response).to_json())
+ # Convert Image objects to dictionaries
+ response_data = [image.to_dict() for image in response.data]
+ return JSONResponse({"data": response_data})
except Exception as e:
logging.exception(e)
return Response(content=format_exception(e, config), status_code=500, media_type="application/json")
-def format_exception(e: Exception, config: ChatCompletionsForm) -> str:
+ @self.app.post("/v1/completions")
+ async def completions():
+ return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")
+
+def format_exception(e: Exception, config: Union[ChatCompletionsConfig, ImageGenerationConfig]) -> str:
last_provider = g4f.get_last_provider(True)
return json.dumps({
"error": {"message": f"{e.__class__.__name__}: {e}"},
- "model": last_provider.get("model") if last_provider else config.model,
- "provider": last_provider.get("name") if last_provider else config.provider
+ "model": last_provider.get("model") if last_provider else getattr(config, 'model', None),
+ "provider": last_provider.get("name") if last_provider else getattr(config, 'provider', None)
})
def run_api(
@@ -218,18 +243,22 @@ def run_api(
bind: str = None,
debug: bool = False,
workers: int = None,
- use_colors: bool = None
+ use_colors: bool = None,
+ g4f_api_key: str = None
) -> None:
print(f'Starting server... [g4f v-{g4f.version.utils.current_version}]' + (" (debug)" if debug else ""))
if use_colors is None:
use_colors = debug
if bind is not None:
host, port = bind.split(":")
+ if debug:
+ g4f.debug.logging = True
uvicorn.run(
- f"g4f.api:create_app{'_debug' if debug else ''}",
- host=host, port=int(port),
- workers=workers,
- use_colors=use_colors,
- factory=True,
+ "g4f.api:create_app",
+ host=host,
+ port=int(port),
+ workers=workers,
+ use_colors=use_colors,
+ factory=True,
reload=debug
- ) \ No newline at end of file
+ )
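With the new g4f_api_key parameter, the protected routes (/v1/chat/completions, /v1/completions, /v1/images/generate) expect a matching g4f-api-key header. A sketch of launching the server and calling the new image route; host, port, key and prompt are example values, and the requests dependency is assumed:

from g4f.api import run_api

run_api(bind="0.0.0.0:1337", debug=True, g4f_api_key="my-secret-key")

Then, from another process:

import requests

resp = requests.post(
    "http://localhost:1337/v1/images/generate",
    headers={"g4f-api-key": "my-secret-key"},
    json={"prompt": "a lighthouse at dusk", "response_format": "url"},
)
print(resp.json())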
diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py
index 5bb4ba35..d1e7e298 100644
--- a/g4f/client/__init__.py
+++ b/g4f/client/__init__.py
@@ -1,3 +1,2 @@
from .stubs import ChatCompletion, ChatCompletionChunk, ImagesResponse
-from .client import Client
-from .async_client import AsyncClient \ No newline at end of file
+from .client import Client, AsyncClient
diff --git a/g4f/client/async_client.py b/g4f/client/async_client.py
deleted file mode 100644
index 2fe4640b..00000000
--- a/g4f/client/async_client.py
+++ /dev/null
@@ -1,275 +0,0 @@
-from __future__ import annotations
-
-import time
-import random
-import string
-import asyncio
-import base64
-from aiohttp import ClientSession, BaseConnector
-
-from .types import Client as BaseClient
-from .types import ProviderType, FinishReason
-from .stubs import ChatCompletion, ChatCompletionChunk, ImagesResponse, Image
-from .types import AsyncIterResponse, ImageProvider
-from .image_models import ImageModels
-from .helper import filter_json, find_stop, filter_none, cast_iter_async
-from .service import get_last_provider, get_model_and_provider
-from ..Provider import ProviderUtils
-from ..typing import Union, Messages, AsyncIterator, ImageType
-from ..errors import NoImageResponseError, ProviderNotFoundError
-from ..requests.aiohttp import get_connector
-from ..providers.conversation import BaseConversation
-from ..image import ImageResponse as ImageProviderResponse, ImageDataResponse
-
-try:
- anext
-except NameError:
- async def anext(iter):
- async for chunk in iter:
- return chunk
-
-async def iter_response(
- response: AsyncIterator[str],
- stream: bool,
- response_format: dict = None,
- max_tokens: int = None,
- stop: list = None
-) -> AsyncIterResponse:
- content = ""
- finish_reason = None
- completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
- count: int = 0
- async for chunk in response:
- if isinstance(chunk, FinishReason):
- finish_reason = chunk.reason
- break
- elif isinstance(chunk, BaseConversation):
- yield chunk
- continue
- content += str(chunk)
- count += 1
- if max_tokens is not None and count >= max_tokens:
- finish_reason = "length"
- first, content, chunk = find_stop(stop, content, chunk)
- if first != -1:
- finish_reason = "stop"
- if stream:
- yield ChatCompletionChunk(chunk, None, completion_id, int(time.time()))
- if finish_reason is not None:
- break
- finish_reason = "stop" if finish_reason is None else finish_reason
- if stream:
- yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time()))
- else:
- if response_format is not None and "type" in response_format:
- if response_format["type"] == "json_object":
- content = filter_json(content)
- yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))
-
-async def iter_append_model_and_provider(response: AsyncIterResponse) -> AsyncIterResponse:
- last_provider = None
- async for chunk in response:
- last_provider = get_last_provider(True) if last_provider is None else last_provider
- chunk.model = last_provider.get("model")
- chunk.provider = last_provider.get("name")
- yield chunk
-
-class AsyncClient(BaseClient):
- def __init__(
- self,
- provider: ProviderType = None,
- image_provider: ImageProvider = None,
- **kwargs
- ):
- super().__init__(**kwargs)
- self.chat: Chat = Chat(self, provider)
- self.images: Images = Images(self, image_provider)
-
-def create_response(
- messages: Messages,
- model: str,
- provider: ProviderType = None,
- stream: bool = False,
- proxy: str = None,
- max_tokens: int = None,
- stop: list[str] = None,
- api_key: str = None,
- **kwargs
-):
- has_asnyc = hasattr(provider, "create_async_generator")
- if has_asnyc:
- create = provider.create_async_generator
- else:
- create = provider.create_completion
- response = create(
- model, messages,
- stream=stream,
- **filter_none(
- proxy=proxy,
- max_tokens=max_tokens,
- stop=stop,
- api_key=api_key
- ),
- **kwargs
- )
- if not has_asnyc:
- response = cast_iter_async(response)
- return response
-
-class Completions():
- def __init__(self, client: AsyncClient, provider: ProviderType = None):
- self.client: AsyncClient = client
- self.provider: ProviderType = provider
-
- def create(
- self,
- messages: Messages,
- model: str,
- provider: ProviderType = None,
- stream: bool = False,
- proxy: str = None,
- max_tokens: int = None,
- stop: Union[list[str], str] = None,
- api_key: str = None,
- response_format: dict = None,
- ignored : list[str] = None,
- ignore_working: bool = False,
- ignore_stream: bool = False,
- **kwargs
- ) -> Union[ChatCompletion, AsyncIterator[ChatCompletionChunk]]:
- model, provider = get_model_and_provider(
- model,
- self.provider if provider is None else provider,
- stream,
- ignored,
- ignore_working,
- ignore_stream
- )
- stop = [stop] if isinstance(stop, str) else stop
- response = create_response(
- messages, model,
- provider, stream,
- proxy=self.client.get_proxy() if proxy is None else proxy,
- max_tokens=max_tokens,
- stop=stop,
- api_key=self.client.api_key if api_key is None else api_key,
- **kwargs
- )
- response = iter_response(response, stream, response_format, max_tokens, stop)
- response = iter_append_model_and_provider(response)
- return response if stream else anext(response)
-
-class Chat():
- completions: Completions
-
- def __init__(self, client: AsyncClient, provider: ProviderType = None):
- self.completions = Completions(client, provider)
-
-async def iter_image_response(
- response: AsyncIterator,
- response_format: str = None,
- connector: BaseConnector = None,
- proxy: str = None
-) -> Union[ImagesResponse, None]:
- async for chunk in response:
- if isinstance(chunk, ImageProviderResponse):
- if response_format == "b64_json":
- async with ClientSession(
- connector=get_connector(connector, proxy),
- cookies=chunk.options.get("cookies")
- ) as session:
- async def fetch_image(image):
- async with session.get(image) as response:
- return base64.b64encode(await response.content.read()).decode()
- images = await asyncio.gather(*[fetch_image(image) for image in chunk.get_list()])
- return ImagesResponse([Image(None, image, chunk.alt) for image in images], int(time.time()))
- return ImagesResponse([Image(image, None, chunk.alt) for image in chunk.get_list()], int(time.time()))
- elif isinstance(chunk, ImageDataResponse):
- return ImagesResponse([Image(None, image, chunk.alt) for image in chunk.get_list()], int(time.time()))
-
-def create_image(provider: ProviderType, prompt: str, model: str = "", **kwargs) -> AsyncIterator:
- if isinstance(provider, type) and provider.__name__ == "You":
- kwargs["chat_mode"] = "create"
- else:
- prompt = f"create a image with: {prompt}"
- return provider.create_async_generator(
- model,
- [{"role": "user", "content": prompt}],
- stream=True,
- **kwargs
- )
-
-class Images():
- def __init__(self, client: AsyncClient, provider: ImageProvider = None):
- self.client: AsyncClient = client
- self.provider: ImageProvider = provider
- self.models: ImageModels = ImageModels(client)
-
- def get_provider(self, model: str, provider: ProviderType = None):
- if isinstance(provider, str):
- if provider in ProviderUtils.convert:
- provider = ProviderUtils.convert[provider]
- else:
- raise ProviderNotFoundError(f'Provider not found: {provider}')
- else:
- provider = self.models.get(model, self.provider)
- return provider
-
- async def generate(
- self,
- prompt,
- model: str = "",
- provider: ProviderType = None,
- response_format: str = None,
- connector: BaseConnector = None,
- proxy: str = None,
- **kwargs
- ) -> ImagesResponse:
- provider = self.get_provider(model, provider)
- if hasattr(provider, "create_async_generator"):
- response = create_image(
- provider,
- prompt,
- **filter_none(
- response_format=response_format,
- connector=connector,
- proxy=self.client.get_proxy() if proxy is None else proxy,
- ),
- **kwargs
- )
- else:
- response = await provider.create_async(prompt)
- return ImagesResponse([Image(image) for image in response.get_list()])
- image = await iter_image_response(response, response_format, connector, proxy)
- if image is None:
- raise NoImageResponseError()
- return image
-
- async def create_variation(
- self,
- image: ImageType,
- model: str = None,
- response_format: str = None,
- connector: BaseConnector = None,
- proxy: str = None,
- **kwargs
- ):
- provider = self.get_provider(model, provider)
- result = None
- if hasattr(provider, "create_async_generator"):
- response = provider.create_async_generator(
- "",
- [{"role": "user", "content": "create a image like this"}],
- stream=True,
- image=image,
- **filter_none(
- response_format=response_format,
- connector=connector,
- proxy=self.client.get_proxy() if proxy is None else proxy,
- ),
- **kwargs
- )
- result = iter_image_response(response, response_format, connector, proxy)
- if result is None:
- raise NoImageResponseError()
- return result
diff --git a/g4f/client/client.py b/g4f/client/client.py
index 56644913..8e195213 100644
--- a/g4f/client/client.py
+++ b/g4f/client/client.py
@@ -4,12 +4,16 @@ import os
import time
import random
import string
-import logging
+import threading
import asyncio
-from typing import Union
+import base64
+import aiohttp
+import queue
+from typing import Union, AsyncIterator, Iterator
+
from ..providers.base_provider import AsyncGeneratorProvider
from ..image import ImageResponse, to_image, to_data_uri
-from ..typing import Union, Iterator, Messages, ImageType
+from ..typing import Messages, ImageType
from ..providers.types import BaseProvider, ProviderType, FinishReason
from ..providers.conversation import BaseConversation
from ..image import ImageResponse as ImageProviderResponse
@@ -23,44 +27,83 @@ from .helper import find_stop, filter_json, filter_none
from ..models import ModelUtils
from ..Provider import IterListProvider
+# Helper function to convert an async generator to a synchronous iterator
+def to_sync_iter(async_gen: AsyncIterator) -> Iterator:
+ q = queue.Queue()
+ loop = asyncio.new_event_loop()
+ done = object()
+
+ def _run():
+ asyncio.set_event_loop(loop)
+
+ async def iterate():
+ try:
+ async for item in async_gen:
+ q.put(item)
+ finally:
+ q.put(done)
+
+ loop.run_until_complete(iterate())
+ loop.close()
+
+ threading.Thread(target=_run).start()
+ while True:
+ item = q.get()
+ if item is done:
+ break
+ yield item
+
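A quick illustration of the bridge: items from an async generator come out of an ordinary for loop while the event loop runs on a background thread (the generator below is hypothetical):

async def _numbers():
    for n in range(3):
        yield n

for item in to_sync_iter(_numbers()):
    print(item)  # 0, 1, 2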
+# Helper function to convert a synchronous iterator to an async iterator
+async def to_async_iterator(iterator):
+ for item in iterator:
+ yield item
+
+# Synchronous iter_response function
def iter_response(
- response: Iterator[str],
+ response: Union[Iterator[str], AsyncIterator[str]],
stream: bool,
response_format: dict = None,
max_tokens: int = None,
stop: list = None
-) -> IterResponse:
+) -> Iterator[Union[ChatCompletion, ChatCompletionChunk]]:
content = ""
finish_reason = None
completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
-
- for idx, chunk in enumerate(response):
+ idx = 0
+
+ if hasattr(response, '__aiter__'):
+ # It's an async iterator, wrap it into a sync iterator
+ response = to_sync_iter(response)
+
+ for chunk in response:
if isinstance(chunk, FinishReason):
finish_reason = chunk.reason
break
elif isinstance(chunk, BaseConversation):
yield chunk
continue
-
+
content += str(chunk)
-
+
if max_tokens is not None and idx + 1 >= max_tokens:
finish_reason = "length"
-
+
first, content, chunk = find_stop(stop, content, chunk if stream else None)
-
+
if first != -1:
finish_reason = "stop"
-
+
if stream:
yield ChatCompletionChunk(chunk, None, completion_id, int(time.time()))
-
+
if finish_reason is not None:
break
-
+
+ idx += 1
+
finish_reason = "stop" if finish_reason is None else finish_reason
-
+
if stream:
yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time()))
else:
@@ -69,16 +112,16 @@ def iter_response(
content = filter_json(content)
yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))
-
-def iter_append_model_and_provider(response: IterResponse) -> IterResponse:
+# Synchronous iter_append_model_and_provider function
+def iter_append_model_and_provider(response: Iterator) -> Iterator:
last_provider = None
+
for chunk in response:
last_provider = get_last_provider(True) if last_provider is None else last_provider
chunk.model = last_provider.get("model")
chunk.provider = last_provider.get("name")
yield chunk
-
class Client(BaseClient):
def __init__(
self,
@@ -97,6 +140,28 @@ class Client(BaseClient):
async def async_images(self) -> Images:
return self._images
+# For backwards compatibility and legacy purposes, use Client instead
+class AsyncClient(Client):
+ """Legacy AsyncClient that redirects to the main Client class.
+ This class exists for backwards compatibility."""
+
+ def __init__(self, *args, **kwargs):
+ import warnings
+ warnings.warn(
+ "AsyncClient is deprecated and will be removed in a future version. "
+ "Use Client instead, which now supports both sync and async operations.",
+ DeprecationWarning,
+ stacklevel=2
+ )
+ super().__init__(*args, **kwargs)
+
+ async def chat_complete(self, *args, **kwargs):
+ """Legacy method that redirects to async_create"""
+ return await self.chat.completions.async_create(*args, **kwargs)
+
+ async def create_image(self, *args, **kwargs):
+ """Legacy method that redirects to async_generate"""
+ return await self.images.async_generate(*args, **kwargs)
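Since AsyncClient now only warns and forwards to Client, new code can use the unified client directly; a minimal sketch assuming the OpenAI-style response stubs exposed by g4f.client:

from g4f.client import Client

client = Client()
completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
print(completion.choices[0].message.content)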
class Completions:
def __init__(self, client: Client, provider: ProviderType = None):
@@ -129,25 +194,115 @@ class Completions:
)
stop = [stop] if isinstance(stop, str) else stop
-
- response = provider.create_completion(
+
+ if asyncio.iscoroutinefunction(provider.create_completion):
+ # Run the asynchronous function in an event loop
+ response = asyncio.run(provider.create_completion(
+ model,
+ messages,
+ stream=stream,
+ **filter_none(
+ proxy=self.client.get_proxy() if proxy is None else proxy,
+ max_tokens=max_tokens,
+ stop=stop,
+ api_key=self.client.api_key if api_key is None else api_key
+ ),
+ **kwargs
+ ))
+ else:
+ response = provider.create_completion(
+ model,
+ messages,
+ stream=stream,
+ **filter_none(
+ proxy=self.client.get_proxy() if proxy is None else proxy,
+ max_tokens=max_tokens,
+ stop=stop,
+ api_key=self.client.api_key if api_key is None else api_key
+ ),
+ **kwargs
+ )
+
+ if stream:
+ if hasattr(response, '__aiter__'):
+ # It's an async generator, wrap it into a sync iterator
+ response = to_sync_iter(response)
+
+ # Now 'response' is an iterator
+ response = iter_response(response, stream, response_format, max_tokens, stop)
+ response = iter_append_model_and_provider(response)
+ return response
+ else:
+ if hasattr(response, '__aiter__'):
+ # If response is an async generator, collect it into a list
+ response = list(to_sync_iter(response))
+ response = iter_response(response, stream, response_format, max_tokens, stop)
+ response = iter_append_model_and_provider(response)
+ return next(response)
+
+ async def async_create(
+ self,
+ messages: Messages,
+ model: str,
+ provider: ProviderType = None,
+ stream: bool = False,
+ proxy: str = None,
+ response_format: dict = None,
+ max_tokens: int = None,
+ stop: Union[list[str], str] = None,
+ api_key: str = None,
+ ignored: list[str] = None,
+ ignore_working: bool = False,
+ ignore_stream: bool = False,
+ **kwargs
+ ) -> Union[ChatCompletion, AsyncIterator[ChatCompletionChunk]]:
+ model, provider = get_model_and_provider(
model,
- messages,
- stream=stream,
- **filter_none(
- proxy=self.client.get_proxy() if proxy is None else proxy,
- max_tokens=max_tokens,
- stop=stop,
- api_key=self.client.api_key if api_key is None else api_key
- ),
- **kwargs
+ self.provider if provider is None else provider,
+ stream,
+ ignored,
+ ignore_working,
+ ignore_stream,
)
-
- response = iter_response(response, stream, response_format, max_tokens, stop)
- response = iter_append_model_and_provider(response)
-
- return response if stream else next(response)
+ stop = [stop] if isinstance(stop, str) else stop
+
+ if asyncio.iscoroutinefunction(provider.create_completion):
+ response = await provider.create_completion(
+ model,
+ messages,
+ stream=stream,
+ **filter_none(
+ proxy=self.client.get_proxy() if proxy is None else proxy,
+ max_tokens=max_tokens,
+ stop=stop,
+ api_key=self.client.api_key if api_key is None else api_key
+ ),
+ **kwargs
+ )
+ else:
+ response = provider.create_completion(
+ model,
+ messages,
+ stream=stream,
+ **filter_none(
+ proxy=self.client.get_proxy() if proxy is None else proxy,
+ max_tokens=max_tokens,
+ stop=stop,
+ api_key=self.client.api_key if api_key is None else api_key
+ ),
+ **kwargs
+ )
+
+        # async_iter_response returns an async generator, so it is not awaited here
+ response = async_iter_response(response, stream, response_format, max_tokens, stop)
+ response = async_iter_append_model_and_provider(response)
+
+ if stream:
+ return response
+ else:
+ async for result in response:
+ return result
class Chat:
completions: Completions
@@ -155,153 +310,225 @@ class Chat:
def __init__(self, client: Client, provider: ProviderType = None):
self.completions = Completions(client, provider)
+# Asynchronous versions of the helper functions
+async def async_iter_response(
+ response: Union[AsyncIterator[str], Iterator[str]],
+ stream: bool,
+ response_format: dict = None,
+ max_tokens: int = None,
+ stop: list = None
+) -> AsyncIterator[Union[ChatCompletion, ChatCompletionChunk]]:
+ content = ""
+ finish_reason = None
+ completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
+ idx = 0
-def iter_image_response(response: Iterator) -> Union[ImagesResponse, None]:
- logging.info("Starting iter_image_response")
- response_list = list(response)
- logging.info(f"Response list: {response_list}")
-
- for chunk in response_list:
- logging.info(f"Processing chunk: {chunk}")
+ if not hasattr(response, '__aiter__'):
+ response = to_async_iterator(response)
+
+ async for chunk in response:
+ if isinstance(chunk, FinishReason):
+ finish_reason = chunk.reason
+ break
+ elif isinstance(chunk, BaseConversation):
+ yield chunk
+ continue
+
+ content += str(chunk)
+
+ if max_tokens is not None and idx + 1 >= max_tokens:
+ finish_reason = "length"
+
+ first, content, chunk = find_stop(stop, content, chunk if stream else None)
+
+ if first != -1:
+ finish_reason = "stop"
+
+ if stream:
+ yield ChatCompletionChunk(chunk, None, completion_id, int(time.time()))
+
+ if finish_reason is not None:
+ break
+
+ idx += 1
+
+ finish_reason = "stop" if finish_reason is None else finish_reason
+
+ if stream:
+ yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time()))
+ else:
+ if response_format is not None and "type" in response_format:
+ if response_format["type"] == "json_object":
+ content = filter_json(content)
+ yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))
+
+async def async_iter_append_model_and_provider(response: AsyncIterator) -> AsyncIterator:
+ last_provider = None
+
+ if not hasattr(response, '__aiter__'):
+ response = to_async_iterator(response)
+
+ async for chunk in response:
+ last_provider = get_last_provider(True) if last_provider is None else last_provider
+ chunk.model = last_provider.get("model")
+ chunk.provider = last_provider.get("name")
+ yield chunk
+
+async def iter_image_response(response: AsyncIterator) -> Union[ImagesResponse, None]:
+ response_list = []
+ async for chunk in response:
if isinstance(chunk, ImageProviderResponse):
- logging.info("Found ImageProviderResponse")
- return ImagesResponse([Image(image) for image in chunk.get_list()])
-
- logging.warning("No ImageProviderResponse found in the response")
- return None
+ response_list.extend(chunk.get_list())
+ elif isinstance(chunk, str):
+ response_list.append(chunk)
+ if response_list:
+ return ImagesResponse([Image(image) for image in response_list])
-def create_image(client: Client, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> Iterator:
- logging.info(f"Creating image with provider: {provider}, model: {model}, prompt: {prompt}")
-
+ return None
+
+async def create_image(client: Client, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> AsyncIterator:
if isinstance(provider, type) and provider.__name__ == "You":
kwargs["chat_mode"] = "create"
else:
prompt = f"create an image with: {prompt}"
-
- response = provider.create_completion(
- model,
- [{"role": "user", "content": prompt}],
- stream=True,
- proxy=client.get_proxy(),
- **kwargs
- )
-
- logging.info(f"Response from create_completion: {response}")
+
+ if asyncio.iscoroutinefunction(provider.create_completion):
+ response = await provider.create_completion(
+ model,
+ [{"role": "user", "content": prompt}],
+ stream=True,
+ proxy=client.get_proxy(),
+ **kwargs
+ )
+ else:
+ response = provider.create_completion(
+ model,
+ [{"role": "user", "content": prompt}],
+ stream=True,
+ proxy=client.get_proxy(),
+ **kwargs
+ )
+
+ # Wrap synchronous iterator into async iterator if necessary
+ if not hasattr(response, '__aiter__'):
+ response = to_async_iterator(response)
+
return response
+class Image:
+ def __init__(self, url: str = None, b64_json: str = None):
+ self.url = url
+ self.b64_json = b64_json
+
+ def __repr__(self):
+ return f"Image(url={self.url}, b64_json={'<base64 data>' if self.b64_json else None})"
+
+class ImagesResponse:
+ def __init__(self, data: list[Image]):
+ self.data = data
+
+ def __repr__(self):
+ return f"ImagesResponse(data={self.data})"
class Images:
- def __init__(self, client: 'Client', provider: ImageProvider = None):
+ def __init__(self, client: 'Client', provider: 'ImageProvider' = None):
self.client: 'Client' = client
- self.provider: ImageProvider = provider
+ self.provider: 'ImageProvider' = provider
self.models: ImageModels = ImageModels(client)
- def generate(self, prompt: str, model: str = None, **kwargs) -> ImagesResponse:
- logging.info(f"Starting synchronous image generation for model: {model}, prompt: {prompt}")
- try:
- loop = asyncio.get_event_loop()
- except RuntimeError:
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
-
- try:
- result = loop.run_until_complete(self.async_generate(prompt, model, **kwargs))
- logging.info(f"Synchronous image generation completed. Result: {result}")
- return result
- except Exception as e:
- logging.error(f"Error in synchronous image generation: {str(e)}")
- raise
- finally:
- if loop.is_running():
- loop.close()
-
- async def async_generate(self, prompt: str, model: str = None, **kwargs) -> ImagesResponse:
- logging.info(f"Generating image for model: {model}, prompt: {prompt}")
+ def generate(self, prompt: str, model: str = None, response_format: str = "url", **kwargs) -> ImagesResponse:
+ """
+ Synchronous generate method that runs the async_generate method in an event loop.
+ """
+ return asyncio.run(self.async_generate(prompt, model, response_format=response_format, **kwargs))
+
+ async def async_generate(self, prompt: str, model: str = None, response_format: str = "url", **kwargs) -> ImagesResponse:
provider = self.models.get(model, self.provider)
if provider is None:
raise ValueError(f"Unknown model: {model}")
-
- logging.info(f"Provider: {provider}")
-
+
if isinstance(provider, IterListProvider):
if provider.providers:
provider = provider.providers[0]
- logging.info(f"Using first provider from IterListProvider: {provider}")
else:
raise ValueError(f"IterListProvider for model {model} has no providers")
if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
- logging.info("Using AsyncGeneratorProvider")
messages = [{"role": "user", "content": prompt}]
async for response in provider.create_async_generator(model, messages, **kwargs):
if isinstance(response, ImageResponse):
- return self._process_image_response(response)
+ return await self._process_image_response(response, response_format)
elif isinstance(response, str):
image_response = ImageResponse([response], prompt)
- return self._process_image_response(image_response)
+ return await self._process_image_response(image_response, response_format)
elif hasattr(provider, 'create'):
- logging.info("Using provider's create method")
if asyncio.iscoroutinefunction(provider.create):
response = await provider.create(prompt)
else:
response = provider.create(prompt)
-
+
if isinstance(response, ImageResponse):
- return self._process_image_response(response)
+ return await self._process_image_response(response, response_format)
elif isinstance(response, str):
image_response = ImageResponse([response], prompt)
- return self._process_image_response(image_response)
+ return await self._process_image_response(image_response, response_format)
else:
raise ValueError(f"Provider {provider} does not support image generation")
-
- logging.error(f"Unexpected response type: {type(response)}")
+
raise NoImageResponseError(f"Unexpected response type: {type(response)}")
- def _process_image_response(self, response: ImageResponse) -> ImagesResponse:
+ async def _process_image_response(self, response: ImageResponse, response_format: str) -> ImagesResponse:
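+        # response_format selects the output shape: "url" keeps remote URLs (or
+        # saves local files and returns their paths), "b64_json" returns base64 data.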
processed_images = []
+
for image_data in response.get_list():
if image_data.startswith('http://') or image_data.startswith('https://'):
- processed_images.append(Image(url=image_data))
+ if response_format == "url":
+ processed_images.append(Image(url=image_data))
+ elif response_format == "b64_json":
+ # Fetch the image data and convert it to base64
+ image_content = await self._fetch_image(image_data)
+ b64_json = base64.b64encode(image_content).decode('utf-8')
+ processed_images.append(Image(b64_json=b64_json))
else:
- image = to_image(image_data)
- file_name = self._save_image(image)
- processed_images.append(Image(url=file_name))
+ # Assume image_data is base64 data or binary
+ if response_format == "url":
+ if image_data.startswith('data:image'):
+ # Remove the data URL scheme and get the base64 data
+ header, base64_data = image_data.split(',', 1)
+ else:
+ base64_data = image_data
+ # Decode the base64 data
+ image_data_bytes = base64.b64decode(base64_data)
+ # Convert bytes to an image
+ image = to_image(image_data_bytes)
+ file_name = self._save_image(image)
+ processed_images.append(Image(url=file_name))
+ elif response_format == "b64_json":
+ if isinstance(image_data, bytes):
+ b64_json = base64.b64encode(image_data).decode('utf-8')
+ else:
+ b64_json = image_data # If already base64-encoded string
+ processed_images.append(Image(b64_json=b64_json))
+
return ImagesResponse(processed_images)
+ async def _fetch_image(self, url: str) -> bytes:
+ # Asynchronously fetch image data from the URL
+ async with aiohttp.ClientSession() as session:
+ async with session.get(url) as resp:
+ if resp.status == 200:
+ return await resp.read()
+ else:
+ raise Exception(f"Failed to fetch image from {url}, status code {resp.status}")
+
def _save_image(self, image: 'PILImage') -> str:
os.makedirs('generated_images', exist_ok=True)
- file_name = f"generated_images/image_{int(time.time())}.png"
+ file_name = f"generated_images/image_{int(time.time())}_{random.randint(0, 10000)}.png"
image.save(file_name)
return file_name
- async def create_variation(self, image: Union[str, bytes], model: str = None, **kwargs):
- provider = self.models.get(model, self.provider)
- if provider is None:
- raise ValueError(f"Unknown model: {model}")
-
- if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
- messages = [{"role": "user", "content": "create a variation of this image"}]
- image_data = to_data_uri(image)
- async for response in provider.create_async_generator(model, messages, image=image_data, **kwargs):
- if isinstance(response, ImageResponse):
- return self._process_image_response(response)
- elif isinstance(response, str):
- image_response = ImageResponse([response], "Image variation")
- return self._process_image_response(image_response)
- elif hasattr(provider, 'create_variation'):
- if asyncio.iscoroutinefunction(provider.create_variation):
- response = await provider.create_variation(image, **kwargs)
- else:
- response = provider.create_variation(image, **kwargs)
-
- if isinstance(response, ImageResponse):
- return self._process_image_response(response)
- elif isinstance(response, str):
- image_response = ImageResponse([response], "Image variation")
- return self._process_image_response(image_response)
- else:
- raise ValueError(f"Provider {provider} does not support image variation")
-
- raise NoImageResponseError("Failed to create image variation")
+ async def create_variation(self, image: Union[str, bytes], model: str = None, response_format: str = "url", **kwargs):
+        # Image variation is not implemented in this asynchronous client yet.
+        raise NotImplementedError("create_variation is not currently supported")
diff --git a/g4f/cookies.py b/g4f/cookies.py
index 0a25c41e..8d535ce7 100644
--- a/g4f/cookies.py
+++ b/g4f/cookies.py
@@ -34,6 +34,7 @@ DOMAINS = [
"www.whiterabbitneo.com",
"huggingface.co",
"chat.reka.ai",
+ "chatgpt.com"
]
if has_browser_cookie3 and os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
@@ -180,4 +181,4 @@ def _g4f(domain_name: str) -> list:
return []
user_data_dir = user_config_dir("g4f")
cookie_file = os.path.join(user_data_dir, "Default", "Cookies")
- return [] if not os.path.exists(cookie_file) else chrome(cookie_file, domain_name) \ No newline at end of file
+ return [] if not os.path.exists(cookie_file) else chrome(cookie_file, domain_name)
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index 1a660062..7e8ef09c 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -224,28 +224,35 @@
</div>
</div>
<div class="buttons">
- <div class="field">
- <select name="model" id="model">
- <option value="">Model: Default</option>
- <option value="gpt-4">gpt-4</option>
- <option value="gpt-3.5-turbo">gpt-3.5-turbo</option>
- <option value="llama-3-70b-chat">llama-3-70b-chat</option>
- <option value="llama-3.1-70b">llama-3.1-70b</option>
- <option value="gemini-pro">gemini-pro</option>
- <option value="">----</option>
- </select>
- <select name="model2" id="model2" class="hidden"></select>
- </div>
- <div class="field">
- <select name="provider" id="provider">
- <option value="">Provider: Auto</option>
- <option value="Bing">Bing</option>
- <option value="OpenaiChat">OpenAI ChatGPT</option>
- <option value="Gemini">Gemini</option>
- <option value="Liaobots">Liaobots</option>
- <option value="MetaAI">Meta AI</option>
- <option value="You">You</option>
- <option value="">----</option>
+ <div class="field">
+ <select name="model" id="model">
+ <option value="">Model: Default</option>
+ <option value="gpt-4">gpt-4</option>
+ <option value="gpt-4o">gpt-4o</option>
+ <option value="gpt-4o-mini">gpt-4o-mini</option>
+ <option value="llama-3.1-70b">llama-3.1-70b</option>
+                        <option value="llama-3.1-405b">llama-3.1-405b</option>
+                        <option value="mixtral-8x7b">mixtral-8x7b</option>
+ <option value="gemini-pro">gemini-pro</option>
+ <option value="gemini-flash">gemini-flash</option>
+ <option value="claude-3-haiku">claude-3-haiku</option>
+ <option value="claude-3.5-sonnet">claude-3.5-sonnet</option>
+ <option value="">----</option>
+ </select>
+ <select name="model2" id="model2" class="hidden"></select>
+ </div>
+ <div class="field">
+ <select name="provider" id="provider">
+ <option value="">Provider: Auto</option>
+ <option value="OpenaiChat">OpenAI ChatGPT</option>
+ <option value="Gemini">Gemini</option>
+ <option value="MetaAI">Meta AI</option>
+ <option value="DeepInfraChat">DeepInfraChat</option>
+ <option value="Blackbox">Blackbox</option>
+ <option value="HuggingChat">HuggingChat</option>
+ <option value="DDG">DDG</option>
+ <option value="Pizzagpt">Pizzagpt</option>
+ <option value="">----</option>
</select>
</div>
</div>
diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css
index f3a4708d..441e2042 100644
--- a/g4f/gui/client/static/css/style.css
+++ b/g4f/gui/client/static/css/style.css
@@ -87,12 +87,9 @@ body {
}
body {
- padding: 10px;
background: var(--colour-1);
color: var(--colour-3);
height: 100vh;
- max-width: 1600px;
- margin: auto;
}
.row {
@@ -1146,4 +1143,4 @@ a:-webkit-any-link {
.message.regenerate {
opacity: 1;
}
-} \ No newline at end of file
+}
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 9790b261..42ddb129 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -57,6 +57,25 @@ function filter_message(text) {
)
}
+function fallback_clipboard (text) {
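+    // Fallback path: document.execCommand('copy') is deprecated but still widely
+    // supported, and works where the asynchronous Clipboard API is unavailable.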
+ var textBox = document.createElement("textarea");
+ textBox.value = text;
+ textBox.style.top = "0";
+ textBox.style.left = "0";
+ textBox.style.position = "fixed";
+ document.body.appendChild(textBox);
+ textBox.focus();
+ textBox.select();
+ try {
+ var success = document.execCommand('copy');
+ var msg = success ? 'succeeded' : 'failed';
+ console.log('Clipboard Fallback: Copying text command ' + msg);
+ } catch (e) {
+ console.error('Clipboard Fallback: Unable to copy', e);
+ }
+ document.body.removeChild(textBox);
+}
+
hljs.addPlugin(new CopyButtonPlugin());
let typesetPromise = Promise.resolve();
const highlight = (container) => {
@@ -88,18 +107,31 @@ const register_message_buttons = async () => {
})
}
});
+
document.querySelectorAll(".message .fa-clipboard").forEach(async (el) => {
if (!("click" in el.dataset)) {
el.dataset.click = "true";
el.addEventListener("click", async () => {
const message_el = el.parentElement.parentElement.parentElement;
const copyText = await get_message(window.conversation_id, message_el.dataset.index);
- navigator.clipboard.writeText(copyText);
+
+ try {
+ if (!navigator.clipboard) {
+ throw new Error("navigator.clipboard: Clipboard API unavailable.");
+ }
+ await navigator.clipboard.writeText(copyText);
+ } catch (e) {
+ console.error(e);
+ console.error("Clipboard API writeText() failed! Fallback to document.exec(\"copy\")...");
+ fallback_clipboard(copyText);
+ }
+
el.classList.add("clicked");
setTimeout(() => el.classList.remove("clicked"), 1000);
})
}
});
+
document.querySelectorAll(".message .fa-volume-high").forEach(async (el) => {
if (!("click" in el.dataset)) {
el.dataset.click = "true";
@@ -306,6 +338,14 @@ const prepare_messages = (messages, message_index = -1) => {
messages = messages.filter((_, index) => message_index >= index);
}
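+    // Always start the outgoing message list with the system prompt, if one is set.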
+ let new_messages = [];
+ if (systemPrompt?.value) {
+ new_messages.push({
+ "role": "system",
+ "content": systemPrompt.value
+ });
+ }
+
// Remove history, if it's selected
if (document.getElementById('history')?.checked) {
if (message_index == null) {
@@ -315,13 +355,6 @@ const prepare_messages = (messages, message_index = -1) => {
}
}
- let new_messages = [];
- if (systemPrompt?.value) {
- new_messages.push({
- "role": "system",
- "content": systemPrompt.value
- });
- }
messages.forEach((new_message) => {
// Include only not regenerated messages
if (new_message && !new_message.regenerate) {
@@ -334,6 +367,7 @@ const prepare_messages = (messages, message_index = -1) => {
return new_messages;
}
+
async function add_message_chunk(message) {
if (message.type == "conversation") {
console.info("Conversation used:", message.conversation)
@@ -1424,4 +1458,4 @@ if (SpeechRecognition) {
recognition.start();
}
});
-} \ No newline at end of file
+}
diff --git a/g4f/gui/client/static/js/highlightjs-copy.min.js b/g4f/gui/client/static/js/highlightjs-copy.min.js
index ac11d33e..cd8ae957 100644
--- a/g4f/gui/client/static/js/highlightjs-copy.min.js
+++ b/g4f/gui/client/static/js/highlightjs-copy.min.js
@@ -1 +1,54 @@
-class CopyButtonPlugin{constructor(options={}){self.hook=options.hook;self.callback=options.callback}"after:highlightElement"({el,text}){let button=Object.assign(document.createElement("button"),{innerHTML:"Copy",className:"hljs-copy-button"});button.dataset.copied=false;el.parentElement.classList.add("hljs-copy-wrapper");el.parentElement.appendChild(button);el.parentElement.style.setProperty("--hljs-theme-background",window.getComputedStyle(el).backgroundColor);button.onclick=function(){if(!navigator.clipboard)return;let newText=text;if(hook&&typeof hook==="function"){newText=hook(text,el)||text}navigator.clipboard.writeText(newText).then(function(){button.innerHTML="Copied!";button.dataset.copied=true;let alert=Object.assign(document.createElement("div"),{role:"status",className:"hljs-copy-alert",innerHTML:"Copied to clipboard"});el.parentElement.appendChild(alert);setTimeout(()=>{button.innerHTML="Copy";button.dataset.copied=false;el.parentElement.removeChild(alert);alert=null},2e3)}).then(function(){if(typeof callback==="function")return callback(newText,el)})}}} \ No newline at end of file
+class CopyButtonPlugin {
+ constructor(options = {}) {
+ self.hook = options.hook;
+ self.callback = options.callback
+ }
+ "after:highlightElement"({
+ el,
+ text
+ }) {
+ let button = Object.assign(document.createElement("button"), {
+ innerHTML: "Copy",
+ className: "hljs-copy-button"
+ });
+ button.dataset.copied = false;
+ el.parentElement.classList.add("hljs-copy-wrapper");
+ el.parentElement.appendChild(button);
+ el.parentElement.style.setProperty("--hljs-theme-background", window.getComputedStyle(el).backgroundColor);
+ button.onclick = async () => {
+ let newText = text;
+ if (hook && typeof hook === "function") {
+ newText = hook(text, el) || text
+ }
+ try {
+ if (!navigator.clipboard) {
+ throw new Error("navigator.clipboard: Clipboard API unavailable.");
+ }
+ await navigator.clipboard.writeText(newText);
+ } catch (e) {
+ console.error(e);
+ console.error("Clipboard API writeText() failed! Fallback to document.exec(\"copy\")...");
+ fallback_clipboard(newText);
+ }
+ button.innerHTML = "Copied!";
+ button.dataset.copied = true;
+ let alert = Object.assign(document.createElement("div"), {
+ role: "status",
+ className: "hljs-copy-alert",
+ innerHTML: "Copied to clipboard"
+ });
+ el.parentElement.appendChild(alert);
+ setTimeout(() => {
+ button.innerHTML = "Copy";
+ button.dataset.copied = false;
+ el.parentElement.removeChild(alert);
+ alert = null
+ }, 2e3)
+            if (typeof callback === "function") {
+                return callback(newText, el);
+            }
+        }
+
+    }
+
+}
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index c984abec..7aac650a 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -2,13 +2,11 @@ from __future__ import annotations
import logging
import os
-import os.path
import uuid
import asyncio
import time
-import base64
from aiohttp import ClientSession
-from typing import Iterator, Optional
+from typing import Iterator, Optional, AsyncIterator, Union
from flask import send_from_directory
from g4f import version, models
@@ -21,21 +19,20 @@ from g4f.Provider import ProviderType, __providers__, __map__
from g4f.providers.base_provider import ProviderModelMixin, FinishReason
from g4f.providers.conversation import BaseConversation
-conversations: dict[dict[str, BaseConversation]] = {}
+# Define the directory for generated images
images_dir = "./generated_images"
-class Api():
+# Function to ensure the images directory exists
+def ensure_images_dir():
+ if not os.path.exists(images_dir):
+ os.makedirs(images_dir)
- @staticmethod
- def get_models() -> list[str]:
- """
- Return a list of all models.
+conversations: dict[str, dict[str, BaseConversation]] = {}
- Fetches and returns a list of all available models in the system.
- Returns:
- List[str]: A list of model names.
- """
+class Api:
+ @staticmethod
+ def get_models() -> list[str]:
return models._all_models
@staticmethod
@@ -43,14 +40,11 @@ class Api():
if provider in __map__:
provider: ProviderType = __map__[provider]
if issubclass(provider, ProviderModelMixin):
- return [{"model": model, "default": model == provider.default_model} for model in provider.get_models()]
- elif provider.supports_gpt_35_turbo or provider.supports_gpt_4:
return [
- *([{"model": "gpt-4", "default": not provider.supports_gpt_4}] if provider.supports_gpt_4 else []),
- *([{"model": "gpt-3.5-turbo", "default": not provider.supports_gpt_4}] if provider.supports_gpt_35_turbo else [])
+ {"model": model, "default": model == provider.default_model}
+ for model in provider.get_models()
]
- else:
- return [];
+ return []
@staticmethod
def get_image_models() -> list[dict]:
@@ -72,7 +66,7 @@ class Api():
"image_model": model,
"vision_model": parent.default_vision_model if hasattr(parent, "default_vision_model") else None
})
- index.append(parent.__name__)
+ index.append(parent.__name__)
elif hasattr(provider, "default_vision_model") and provider.__name__ not in index:
image_models.append({
"provider": provider.__name__,
@@ -86,31 +80,20 @@ class Api():
@staticmethod
def get_providers() -> list[str]:
- """
- Return a list of all working providers.
- """
return {
- provider.__name__: (provider.label
- if hasattr(provider, "label")
- else provider.__name__) +
- (" (WebDriver)"
- if "webdriver" in provider.get_parameters()
- else "") +
- (" (Auth)"
- if provider.needs_auth
- else "")
+ provider.__name__: (
+ provider.label if hasattr(provider, "label") else provider.__name__
+ ) + (
+ " (WebDriver)" if "webdriver" in provider.get_parameters() else ""
+ ) + (
+ " (Auth)" if provider.needs_auth else ""
+ )
for provider in __providers__
if provider.working
}
@staticmethod
def get_version():
- """
- Returns the current and latest version of the application.
-
- Returns:
- dict: A dictionary containing the current and latest version.
- """
try:
current_version = version.utils.current_version
except VersionNotFoundError:
@@ -121,18 +104,10 @@ class Api():
}
def serve_images(self, name):
+ ensure_images_dir()
return send_from_directory(os.path.abspath(images_dir), name)
def _prepare_conversation_kwargs(self, json_data: dict, kwargs: dict):
- """
- Prepares arguments for chat completion based on the request data.
-
- Reads the request and prepares the necessary arguments for handling
- a chat completion request.
-
- Returns:
- dict: Arguments prepared for chat completion.
- """
model = json_data.get('model') or models.default
provider = json_data.get('provider')
messages = json_data['messages']
@@ -140,7 +115,7 @@ class Api():
if api_key is not None:
kwargs["api_key"] = api_key
if json_data.get('web_search'):
- if provider in ("Bing", "HuggingChat"):
+ if provider:
kwargs['web_search'] = True
else:
from .internet import get_search_message
@@ -161,101 +136,67 @@ class Api():
}
def _create_response_stream(self, kwargs: dict, conversation_id: str, provider: str) -> Iterator:
- """
- Creates and returns a streaming response for the conversation.
-
- Args:
- kwargs (dict): Arguments for creating the chat completion.
-
- Yields:
- str: JSON formatted response chunks for the stream.
-
- Raises:
- Exception: If an error occurs during the streaming process.
- """
try:
+ result = ChatCompletion.create(**kwargs)
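+            # create() may return an ImageResponse directly (image models) or an
+            # iterator of chunks (streamed text completions); handle both cases.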
first = True
- for chunk in ChatCompletion.create(**kwargs):
+ if isinstance(result, ImageResponse):
if first:
first = False
yield self._format_json("provider", get_last_provider(True))
- if isinstance(chunk, BaseConversation):
- if provider not in conversations:
- conversations[provider] = {}
- conversations[provider][conversation_id] = chunk
- yield self._format_json("conversation", conversation_id)
- elif isinstance(chunk, Exception):
- logging.exception(chunk)
- yield self._format_json("message", get_error_message(chunk))
- elif isinstance(chunk, ImagePreview):
- yield self._format_json("preview", chunk.to_string())
- elif isinstance(chunk, ImageResponse):
- async def copy_images(images: list[str], cookies: Optional[Cookies] = None):
- async with ClientSession(
- connector=get_connector(None, os.environ.get("G4F_PROXY")),
- cookies=cookies
- ) as session:
- async def copy_image(image):
- if image.startswith("data:"):
- # Processing the data URL
- data_uri_parts = image.split(",")
- if len(data_uri_parts) == 2:
- content_type, base64_data = data_uri_parts
- extension = content_type.split("/")[-1].split(";")[0]
- target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}.{extension}")
- with open(target, "wb") as f:
- f.write(base64.b64decode(base64_data))
- return f"/images/{os.path.basename(target)}"
- else:
- return None
- else:
-                                # Handle a regular URL
- async with session.get(image) as response:
- target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
- with open(target, "wb") as f:
- async for chunk in response.content.iter_any():
- f.write(chunk)
- with open(target, "rb") as f:
- extension = is_accepted_format(f.read(12)).split("/")[-1]
- extension = "jpg" if extension == "jpeg" else extension
- new_target = f"{target}.{extension}"
- os.rename(target, new_target)
- return f"/images/{os.path.basename(new_target)}"
- return await asyncio.gather(*[copy_image(image) for image in images])
- images = asyncio.run(copy_images(chunk.get_list(), chunk.options.get("cookies")))
- yield self._format_json("content", str(ImageResponse(images, chunk.alt)))
- elif not isinstance(chunk, FinishReason):
- yield self._format_json("content", str(chunk))
+ yield self._format_json("content", str(result))
+ else:
+ for chunk in result:
+ if first:
+ first = False
+ yield self._format_json("provider", get_last_provider(True))
+ if isinstance(chunk, BaseConversation):
+ if provider not in conversations:
+ conversations[provider] = {}
+ conversations[provider][conversation_id] = chunk
+ yield self._format_json("conversation", conversation_id)
+ elif isinstance(chunk, Exception):
+ logging.exception(chunk)
+ yield self._format_json("message", get_error_message(chunk))
+ elif isinstance(chunk, ImagePreview):
+ yield self._format_json("preview", chunk.to_string())
+ elif isinstance(chunk, ImageResponse):
+ images = asyncio.run(self._copy_images(chunk.get_list(), chunk.options.get("cookies")))
+ yield self._format_json("content", str(ImageResponse(images, chunk.alt)))
+ elif not isinstance(chunk, FinishReason):
+ yield self._format_json("content", str(chunk))
except Exception as e:
logging.exception(e)
yield self._format_json('error', get_error_message(e))
- def _format_json(self, response_type: str, content):
- """
- Formats and returns a JSON response.
-
- Args:
- response_type (str): The type of the response.
- content: The content to be included in the response.
+ async def _copy_images(self, images: list[str], cookies: Optional[Cookies] = None):
+ ensure_images_dir()
+ async with ClientSession(
+ connector=get_connector(None, os.environ.get("G4F_PROXY")),
+ cookies=cookies
+ ) as session:
+ async def copy_image(image):
+ async with session.get(image) as response:
+ target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
+ with open(target, "wb") as f:
+ async for chunk in response.content.iter_any():
+ f.write(chunk)
+ with open(target, "rb") as f:
+ extension = is_accepted_format(f.read(12)).split("/")[-1]
+ extension = "jpg" if extension == "jpeg" else extension
+ new_target = f"{target}.{extension}"
+ os.rename(target, new_target)
+ return f"/images/{os.path.basename(new_target)}"
+
+ return await asyncio.gather(*[copy_image(image) for image in images])
- Returns:
- str: A JSON formatted string.
- """
+ def _format_json(self, response_type: str, content):
return {
'type': response_type,
response_type: content
}
-def get_error_message(exception: Exception) -> str:
- """
- Generates a formatted error message from an exception.
- Args:
- exception (Exception): The exception to format.
-
- Returns:
- str: A formatted error message string.
- """
+def get_error_message(exception: Exception) -> str:
message = f"{type(exception).__name__}: {exception}"
provider = get_last_provider()
if provider is None:
diff --git a/g4f/gui/server/internet.py b/g4f/gui/server/internet.py
index a1fafa7d..78bea0ca 100644
--- a/g4f/gui/server/internet.py
+++ b/g4f/gui/server/internet.py
@@ -101,7 +101,7 @@ async def search(query: str, n_results: int = 5, max_words: int = 2500, add_text
raise MissingRequirementsError('Install "duckduckgo-search" and "beautifulsoup4" package')
async with AsyncDDGS() as ddgs:
results = []
- for result in await ddgs.text(
+ for result in await ddgs.atext(
query,
region="wt-wt",
safesearch="moderate",
diff --git a/g4f/gui/server/website.py b/g4f/gui/server/website.py
index 5e633674..3cabcdf3 100644
--- a/g4f/gui/server/website.py
+++ b/g4f/gui/server/website.py
@@ -27,6 +27,10 @@ class Website:
'function': redirect_home,
'methods': ['GET', 'POST']
},
+ '/images/': {
+ 'function': redirect_home,
+ 'methods': ['GET', 'POST']
+ },
}
def _chat(self, conversation_id):
@@ -35,4 +39,4 @@ class Website:
return render_template('index.html', chat_id=conversation_id)
def _index(self):
- return render_template('index.html', chat_id=str(uuid.uuid4())) \ No newline at end of file
+ return render_template('index.html', chat_id=str(uuid.uuid4()))
diff --git a/g4f/models.py b/g4f/models.py
index ddbeeddf..32a12d10 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -4,35 +4,54 @@ from dataclasses import dataclass
from .Provider import IterListProvider, ProviderType
from .Provider import (
- AiChatOnline,
+ Ai4Chat,
+ AIChatFree,
+ AiMathGPT,
+ Airforce,
Allyfy,
+ AmigoChat,
Bing,
- Binjie,
- Bixin123,
Blackbox,
- ChatGot,
+ ChatGpt,
Chatgpt4Online,
- Chatgpt4o,
+ ChatGptEs,
ChatgptFree,
- CodeNews,
+ ChatHub,
+ ChatifyAI,
+ Cloudflare,
+ DarkAI,
DDG,
DeepInfra,
- DeepInfraImage,
- FluxAirforce,
+ DeepInfraChat,
+ Editee,
Free2GPT,
FreeChatgpt,
FreeGpt,
FreeNetfly,
Gemini,
GeminiPro,
+ GizAI,
GigaChat,
+ GPROChat,
HuggingChat,
HuggingFace,
Koala,
Liaobots,
MagickPen,
MetaAI,
- Nexra,
+ NexraBing,
+ NexraBlackbox,
+ NexraChatGPT,
+ NexraDallE,
+ NexraDallE2,
+ NexraEmi,
+ NexraFluxPro,
+ NexraGeminiPro,
+ NexraMidjourney,
+ NexraQwen,
+ NexraSD15,
+ NexraSDLora,
+ NexraSDTurbo,
OpenaiChat,
PerplexityLabs,
Pi,
@@ -40,11 +59,9 @@ from .Provider import (
Reka,
Replicate,
ReplicateHome,
- Snova,
+ RubiksAI,
TeachAnything,
- TwitterBio,
Upstage,
- You,
)
@@ -67,6 +84,8 @@ class Model:
"""Returns a list of all model names."""
return _all_models
+
+### Default ###
default = Model(
name = "",
base_provider = "",
@@ -75,17 +94,26 @@ default = Model(
FreeChatgpt,
HuggingChat,
Pizzagpt,
- ChatgptFree,
ReplicateHome,
Upstage,
Blackbox,
- Bixin123,
- Binjie,
Free2GPT,
MagickPen,
+ DeepInfraChat,
+ Airforce,
+ ChatHub,
+ ChatGptEs,
+ AmigoChat,
+ ChatifyAI,
+ Cloudflare,
+ Editee,
+ AiMathGPT,
])
)
+
+
############
### Text ###
############
@@ -95,55 +123,55 @@ default = Model(
gpt_3 = Model(
name = 'gpt-3',
base_provider = 'OpenAI',
- best_provider = IterListProvider([
- Nexra,
- ])
+ best_provider = NexraChatGPT
)
# gpt-3.5
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'OpenAI',
- best_provider = IterListProvider([
- Allyfy, TwitterBio, Nexra, Bixin123, CodeNews,
- ])
+ best_provider = IterListProvider([Allyfy, NexraChatGPT, Airforce, DarkAI, Liaobots])
)
# gpt-4
gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'OpenAI',
- best_provider = IterListProvider([
- Liaobots, Chatgpt4o, OpenaiChat,
- ])
+ best_provider = IterListProvider([NexraChatGPT, Blackbox, ChatGptEs, AmigoChat, DarkAI, Editee, GizAI, Airforce, Liaobots, OpenaiChat])
)
gpt_4o_mini = Model(
name = 'gpt-4o-mini',
base_provider = 'OpenAI',
- best_provider = IterListProvider([
- DDG, Liaobots, You, FreeNetfly, Pizzagpt, ChatgptFree, AiChatOnline, CodeNews,
- MagickPen, OpenaiChat, Koala,
- ])
+ best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, MagickPen, AmigoChat, RubiksAI, Liaobots, Airforce, GizAI, ChatgptFree, Koala, OpenaiChat, ChatGpt])
)
gpt_4_turbo = Model(
name = 'gpt-4-turbo',
base_provider = 'OpenAI',
- best_provider = IterListProvider([
- Nexra, Bixin123, Liaobots, Bing
- ])
+ best_provider = IterListProvider([Liaobots, Airforce, Bing])
)
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'OpenAI',
- best_provider = IterListProvider([
- Chatgpt4Online, Nexra, Binjie, Bing,
- gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider
- ])
+ best_provider = IterListProvider([Chatgpt4Online, Ai4Chat, NexraBing, NexraChatGPT, Airforce, Bing, OpenaiChat, gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider])
+)
+
+# o1
+o1 = Model(
+ name = 'o1',
+ base_provider = 'OpenAI',
+ best_provider = AmigoChat
)
+o1_mini = Model(
+ name = 'o1-mini',
+ base_provider = 'OpenAI',
+ best_provider = IterListProvider([AmigoChat, GizAI])
+)
+
+
### GigaChat ###
gigachat = Model(
name = 'GigaChat:latest',
@@ -159,133 +187,252 @@ meta = Model(
best_provider = MetaAI
)
+# llama 2
+llama_2_7b = Model(
+ name = "llama-2-7b",
+ base_provider = "Meta Llama",
+ best_provider = Cloudflare
+)
+
+llama_2_13b = Model(
+ name = "llama-2-13b",
+ base_provider = "Meta Llama",
+ best_provider = Airforce
+)
+
+# llama 3
llama_3_8b = Model(
name = "llama-3-8b",
- base_provider = "Meta",
- best_provider = IterListProvider([DeepInfra, Replicate])
+ base_provider = "Meta Llama",
+ best_provider = IterListProvider([Cloudflare, Airforce, DeepInfra, Replicate])
)
llama_3_70b = Model(
name = "llama-3-70b",
- base_provider = "Meta",
- best_provider = IterListProvider([ReplicateHome, DeepInfra, PerplexityLabs, Replicate])
+ base_provider = "Meta Llama",
+ best_provider = IterListProvider([ReplicateHome, Airforce, DeepInfra, Replicate])
)
+# llama 3.1
llama_3_1_8b = Model(
name = "llama-3.1-8b",
- base_provider = "Meta",
- best_provider = IterListProvider([Blackbox])
+ base_provider = "Meta Llama",
+ best_provider = IterListProvider([Blackbox, DeepInfraChat, ChatHub, Cloudflare, Airforce, GizAI, PerplexityLabs])
)
llama_3_1_70b = Model(
name = "llama-3.1-70b",
- base_provider = "Meta",
- best_provider = IterListProvider([DDG, HuggingChat, FreeGpt, Blackbox, TeachAnything, Free2GPT, HuggingFace])
+ base_provider = "Meta Llama",
+ best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, Airforce, AiMathGPT, RubiksAI, GizAI, HuggingFace, PerplexityLabs])
)
llama_3_1_405b = Model(
name = "llama-3.1-405b",
- base_provider = "Meta",
- best_provider = IterListProvider([HuggingChat, Blackbox, HuggingFace])
+ base_provider = "Meta Llama",
+ best_provider = IterListProvider([DeepInfraChat, Blackbox, AmigoChat, DarkAI, Airforce])
+)
+
+# llama 3.2
+llama_3_2_1b = Model(
+ name = "llama-3.2-1b",
+ base_provider = "Meta Llama",
+ best_provider = Cloudflare
+)
+
+llama_3_2_3b = Model(
+ name = "llama-3.2-3b",
+ base_provider = "Meta Llama",
+ best_provider = Cloudflare
)
+llama_3_2_11b = Model(
+ name = "llama-3.2-11b",
+ base_provider = "Meta Llama",
+ best_provider = IterListProvider([Cloudflare, HuggingChat, HuggingFace])
+)
+
+llama_3_2_90b = Model(
+ name = "llama-3.2-90b",
+ base_provider = "Meta Llama",
+ best_provider = IterListProvider([AmigoChat, Airforce])
+)
+
+
+# llamaguard
+llamaguard_7b = Model(
+ name = "llamaguard-7b",
+ base_provider = "Meta Llama",
+ best_provider = Airforce
+)
+
+llamaguard_2_8b = Model(
+ name = "llamaguard-2-8b",
+ base_provider = "Meta Llama",
+ best_provider = Airforce
+)
+
+
### Mistral ###
+mistral_7b = Model(
+ name = "mistral-7b",
+ base_provider = "Mistral",
+ best_provider = IterListProvider([DeepInfraChat, Cloudflare, Airforce, DeepInfra])
+)
+
mixtral_8x7b = Model(
name = "mixtral-8x7b",
base_provider = "Mistral",
- best_provider = IterListProvider([HuggingChat, DDG, ReplicateHome, TwitterBio, DeepInfra, HuggingFace,])
+ best_provider = IterListProvider([DDG, ReplicateHome, DeepInfraChat, ChatHub, Airforce, DeepInfra])
)
-mistral_7b = Model(
- name = "mistral-7b",
+mixtral_8x22b = Model(
+ name = "mixtral-8x22b",
base_provider = "Mistral",
- best_provider = IterListProvider([HuggingChat, HuggingFace, DeepInfra])
+ best_provider = IterListProvider([DeepInfraChat, Airforce])
)
-### 01-ai ###
-yi_1_5_34b = Model(
- name = "yi-1.5-34b",
- base_provider = "01-ai",
+mistral_nemo = Model(
+ name = "mistral-nemo",
+ base_provider = "Mistral",
+ best_provider = IterListProvider([HuggingChat, HuggingFace])
+)
+
+mistral_large = Model(
+ name = "mistral-large",
+ base_provider = "Mistral",
+ best_provider = IterListProvider([Editee, GizAI])
+)
+
+
+### NousResearch ###
+mixtral_8x7b_dpo = Model(
+ name = "mixtral-8x7b-dpo",
+ base_provider = "NousResearch",
+ best_provider = Airforce
+)
+
+yi_34b = Model(
+ name = "yi-34b",
+ base_provider = "NousResearch",
+ best_provider = Airforce
+)
+
+hermes_3 = Model(
+ name = "hermes-3",
+ base_provider = "NousResearch",
best_provider = IterListProvider([HuggingChat, HuggingFace])
)
### Microsoft ###
-phi_3_mini_4k = Model(
- name = "phi-3-mini-4k",
+phi_2 = Model(
+ name = "phi-2",
base_provider = "Microsoft",
- best_provider = IterListProvider([HuggingFace, HuggingChat])
+ best_provider = Cloudflare
)
+phi_3_medium_4k = Model(
+ name = "phi-3-medium-4k",
+ base_provider = "Microsoft",
+ best_provider = DeepInfraChat
+)
-### Google ###
+phi_3_5_mini = Model(
+ name = "phi-3.5-mini",
+ base_provider = "Microsoft",
+ best_provider = IterListProvider([HuggingChat, HuggingFace])
+)
+
+### Google DeepMind ###
# gemini
+gemini_pro = Model(
+ name = 'gemini-pro',
+ base_provider = 'Google DeepMind',
+ best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, GPROChat, NexraGeminiPro, AmigoChat, Editee, GizAI, Airforce, Liaobots])
+)
+
+gemini_flash = Model(
+ name = 'gemini-flash',
+ base_provider = 'Google DeepMind',
+ best_provider = IterListProvider([Blackbox, GizAI, Airforce, Liaobots])
+)
+
gemini = Model(
name = 'gemini',
- base_provider = 'Google',
+ base_provider = 'Google DeepMind',
best_provider = Gemini
)
-gemini_pro = Model(
- name = 'gemini-pro',
+# gemma
+gemma_2b_9b = Model(
+ name = 'gemma-2b-9b',
base_provider = 'Google',
- best_provider = IterListProvider([GeminiPro, ChatGot, Liaobots])
+ best_provider = Airforce
)
-gemini_flash = Model(
- name = 'gemini-flash',
+gemma_2b_27b = Model(
+ name = 'gemma-2b-27b',
base_provider = 'Google',
- best_provider = IterListProvider([Liaobots, Blackbox])
+ best_provider = IterListProvider([DeepInfraChat, Airforce])
)
-# gemma
gemma_2b = Model(
name = 'gemma-2b',
base_provider = 'Google',
- best_provider = IterListProvider([ReplicateHome])
+ best_provider = IterListProvider([ReplicateHome, Airforce])
)
-### Anthropic ###
-claude_2 = Model(
- name = 'claude-2',
- base_provider = 'Anthropic',
- best_provider = IterListProvider([You])
+gemma_7b = Model(
+ name = 'gemma-7b',
+ base_provider = 'Google',
+ best_provider = Cloudflare
)
-claude_2_0 = Model(
- name = 'claude-2.0',
- base_provider = 'Anthropic',
- best_provider = IterListProvider([Liaobots])
+# gemma 2
+gemma_2_27b = Model(
+ name = 'gemma-2-27b',
+ base_provider = 'Google',
+ best_provider = Airforce
)
+gemma_2 = Model(
+ name = 'gemma-2',
+ base_provider = 'Google',
+ best_provider = ChatHub
+)
+
+
+### Anthropic ###
claude_2_1 = Model(
name = 'claude-2.1',
base_provider = 'Anthropic',
- best_provider = IterListProvider([Liaobots])
+ best_provider = Liaobots
)
+# claude 3
claude_3_opus = Model(
name = 'claude-3-opus',
base_provider = 'Anthropic',
- best_provider = IterListProvider([Liaobots])
+ best_provider = IterListProvider([Airforce, Liaobots])
)
claude_3_sonnet = Model(
name = 'claude-3-sonnet',
base_provider = 'Anthropic',
- best_provider = IterListProvider([Liaobots])
+ best_provider = IterListProvider([Airforce, Liaobots])
)
-claude_3_5_sonnet = Model(
- name = 'claude-3-5-sonnet',
+claude_3_haiku = Model(
+ name = 'claude-3-haiku',
base_provider = 'Anthropic',
- best_provider = IterListProvider([Liaobots])
+ best_provider = IterListProvider([DDG, Airforce, GizAI, Liaobots])
)
-claude_3_haiku = Model(
- name = 'claude-3-haiku',
+# claude 3.5
+claude_3_5_sonnet = Model(
+ name = 'claude-3.5-sonnet',
base_provider = 'Anthropic',
- best_provider = IterListProvider([DDG, Liaobots])
+ best_provider = IterListProvider([Blackbox, Editee, AmigoChat, Airforce, GizAI, Liaobots])
)
@@ -297,10 +444,16 @@ reka_core = Model(
)
-### Blackbox ###
-blackbox = Model(
- name = 'blackbox',
- base_provider = 'Blackbox',
+### Blackbox AI ###
+blackboxai = Model(
+ name = 'blackboxai',
+ base_provider = 'Blackbox AI',
+ best_provider = IterListProvider([Blackbox, NexraBlackbox])
+)
+
+blackboxai_pro = Model(
+ name = 'blackboxai-pro',
+ base_provider = 'Blackbox AI',
best_provider = Blackbox
)
@@ -309,7 +462,7 @@ blackbox = Model(
dbrx_instruct = Model(
name = 'dbrx-instruct',
base_provider = 'Databricks',
- best_provider = IterListProvider([DeepInfra])
+ best_provider = IterListProvider([Airforce, DeepInfra])
)
@@ -317,7 +470,7 @@ dbrx_instruct = Model(
command_r_plus = Model(
name = 'command-r-plus',
base_provider = 'CohereForAI',
- best_provider = IterListProvider([HuggingChat])
+ best_provider = HuggingChat
)
@@ -325,20 +478,59 @@ command_r_plus = Model(
sparkdesk_v1_1 = Model(
name = 'sparkdesk-v1.1',
base_provider = 'iFlytek',
- best_provider = IterListProvider([FreeChatgpt])
+ best_provider = FreeChatgpt
)
+
### Qwen ###
+# qwen 1
+qwen_1_5_0_5b = Model(
+ name = 'qwen-1.5-0.5b',
+ base_provider = 'Qwen',
+ best_provider = Cloudflare
+)
+
+qwen_1_5_7b = Model(
+ name = 'qwen-1.5-7b',
+ base_provider = 'Qwen',
+ best_provider = IterListProvider([Cloudflare, Airforce])
+)
+
qwen_1_5_14b = Model(
name = 'qwen-1.5-14b',
base_provider = 'Qwen',
- best_provider = IterListProvider([FreeChatgpt])
+ best_provider = IterListProvider([FreeChatgpt, Cloudflare, Airforce])
+)
+
+qwen_1_5_72b = Model(
+ name = 'qwen-1.5-72b',
+ base_provider = 'Qwen',
+ best_provider = Airforce
)
-qwen_turbo = Model(
- name = 'qwen-turbo',
+qwen_1_5_110b = Model(
+ name = 'qwen-1.5-110b',
base_provider = 'Qwen',
- best_provider = IterListProvider([Bixin123])
+ best_provider = Airforce
+)
+
+qwen_1_5_1_8b = Model(
+ name = 'qwen-1.5-1.8b',
+ base_provider = 'Qwen',
+ best_provider = Airforce
+)
+
+# qwen 2
+qwen_2_72b = Model(
+ name = 'qwen-2-72b',
+ base_provider = 'Qwen',
+ best_provider = IterListProvider([DeepInfraChat, HuggingChat, Airforce, HuggingFace])
+)
+
+qwen = Model(
+ name = 'qwen',
+ base_provider = 'Qwen',
+ best_provider = NexraQwen
)
@@ -346,76 +538,196 @@ qwen_turbo = Model(
glm_3_6b = Model(
name = 'glm-3-6b',
base_provider = 'Zhipu AI',
- best_provider = IterListProvider([FreeChatgpt])
+ best_provider = FreeChatgpt
)
glm_4_9b = Model(
name = 'glm-4-9B',
base_provider = 'Zhipu AI',
- best_provider = IterListProvider([FreeChatgpt])
+ best_provider = FreeChatgpt
)
-glm_4 = Model(
- name = 'glm-4',
- base_provider = 'Zhipu AI',
- best_provider = IterListProvider([CodeNews, glm_4_9b.best_provider,])
-)
### 01-ai ###
yi_1_5_9b = Model(
name = 'yi-1.5-9b',
base_provider = '01-ai',
- best_provider = IterListProvider([FreeChatgpt])
+ best_provider = FreeChatgpt
)
-
-### Pi ###
+### Upstage ###
solar_1_mini = Model(
name = 'solar-1-mini',
base_provider = 'Upstage',
- best_provider = IterListProvider([Upstage])
+ best_provider = Upstage
+)
+
+solar_10_7b = Model(
+ name = 'solar-10-7b',
+ base_provider = 'Upstage',
+ best_provider = Airforce
)
-### Pi ###
+solar_pro = Model(
+ name = 'solar-pro',
+ base_provider = 'Upstage',
+ best_provider = Upstage
+)
+
+
+### Inflection ###
pi = Model(
name = 'pi',
- base_provider = 'inflection',
+ base_provider = 'Inflection',
best_provider = Pi
)
-### SambaNova ###
-samba_coe_v0_1 = Model(
- name = 'samba-coe-v0.1',
- base_provider = 'SambaNova',
- best_provider = Snova
+### DeepSeek ###
+deepseek = Model(
+ name = 'deepseek',
+ base_provider = 'DeepSeek',
+ best_provider = Airforce
+)
+
+### WizardLM ###
+wizardlm_2_7b = Model(
+ name = 'wizardlm-2-7b',
+ base_provider = 'WizardLM',
+ best_provider = DeepInfraChat
)
-### Trong-Hieu Nguyen-Mau ###
-v1olet_merged_7b = Model(
- name = 'v1olet-merged-7b',
- base_provider = 'Trong-Hieu Nguyen-Mau',
- best_provider = Snova
+wizardlm_2_8x22b = Model(
+ name = 'wizardlm-2-8x22b',
+ base_provider = 'WizardLM',
+ best_provider = IterListProvider([DeepInfraChat, Airforce])
)
-### Macadeliccc ###
-westlake_7b_v2 = Model(
- name = 'westlake-7b-v2',
- base_provider = 'Macadeliccc',
- best_provider = Snova
+### Yorickvp ###
+llava_13b = Model(
+ name = 'llava-13b',
+ base_provider = 'Yorickvp',
+ best_provider = ReplicateHome
)
-### CookinAI ###
-donutlm_v1 = Model(
- name = 'donutlm-v1',
- base_provider = 'CookinAI',
- best_provider = Snova
+
+### OpenBMB ###
+minicpm_llama_3_v2_5 = Model(
+ name = 'minicpm-llama-3-v2.5',
+ base_provider = 'OpenBMB',
+ best_provider = DeepInfraChat
)
-### DeepSeek ###
-deepseek = Model(
- name = 'deepseek',
- base_provider = 'DeepSeek',
- best_provider = CodeNews
+
+### Lzlv ###
+lzlv_70b = Model(
+ name = 'lzlv-70b',
+ base_provider = 'Lzlv',
+ best_provider = DeepInfraChat
+)
+
+
+### OpenChat ###
+openchat_3_5 = Model(
+ name = 'openchat-3.5',
+ base_provider = 'OpenChat',
+ best_provider = Cloudflare
+)
+
+openchat_3_6_8b = Model(
+ name = 'openchat-3.6-8b',
+ base_provider = 'OpenChat',
+ best_provider = DeepInfraChat
+)
+
+
+### Phind ###
+phind_codellama_34b_v2 = Model(
+ name = 'phind-codellama-34b-v2',
+ base_provider = 'Phind',
+ best_provider = DeepInfraChat
+)
+
+
+### Cognitive Computations ###
+dolphin_2_9_1_llama_3_70b = Model(
+ name = 'dolphin-2.9.1-llama-3-70b',
+ base_provider = 'Cognitive Computations',
+ best_provider = DeepInfraChat
+)
+
+
+### x.ai ###
+grok_2 = Model(
+ name = 'grok-2',
+ base_provider = 'x.ai',
+ best_provider = Liaobots
+)
+
+grok_2_mini = Model(
+ name = 'grok-2-mini',
+ base_provider = 'x.ai',
+ best_provider = Liaobots
+)
+
+
+### Perplexity AI ###
+sonar_online = Model(
+ name = 'sonar-online',
+ base_provider = 'Perplexity AI',
+ best_provider = IterListProvider([ChatHub, PerplexityLabs])
+)
+
+sonar_chat = Model(
+ name = 'sonar-chat',
+ base_provider = 'Perplexity AI',
+ best_provider = PerplexityLabs
+)
+
+
+### Gryphe ###
+mythomax_l2_13b = Model(
+ name = 'mythomax-l2-13b',
+ base_provider = 'Gryphe',
+ best_provider = Airforce
+)
+
+
+### Pawan ###
+cosmosrp = Model(
+ name = 'cosmosrp',
+ base_provider = 'Pawan',
+ best_provider = Airforce
+)
+
+
+### TheBloke ###
+german_7b = Model(
+ name = 'german-7b',
+ base_provider = 'TheBloke',
+ best_provider = Cloudflare
+)
+
+
+### Tinyllama ###
+tinyllama_1_1b = Model(
+ name = 'tinyllama-1.1b',
+ base_provider = 'Tinyllama',
+ best_provider = Cloudflare
+)
+
+
+### Fblgit ###
+cybertron_7b = Model(
+ name = 'cybertron-7b',
+ base_provider = 'Fblgit',
+ best_provider = Cloudflare
+)
+
+### Nvidia ###
+nemotron_70b = Model(
+ name = 'nemotron-70b',
+ base_provider = 'Nvidia',
+ best_provider = IterListProvider([HuggingChat, HuggingFace])
)
@@ -425,83 +737,157 @@ deepseek = Model(
#############
### Stability AI ###
+sdxl_turbo = Model(
+ name = 'sdxl-turbo',
+ base_provider = 'Stability AI',
+ best_provider = NexraSDTurbo
+
+)
+
+sdxl_lora = Model(
+ name = 'sdxl-lora',
+ base_provider = 'Stability AI',
+ best_provider = NexraSDLora
+
+)
+
sdxl = Model(
name = 'sdxl',
base_provider = 'Stability AI',
- best_provider = IterListProvider([ReplicateHome, DeepInfraImage])
+ best_provider = IterListProvider([ReplicateHome])
+
+)
+
+sd_1_5 = Model(
+ name = 'sd-1.5',
+ base_provider = 'Stability AI',
+ best_provider = IterListProvider([NexraSD15, GizAI])
)
sd_3 = Model(
name = 'sd-3',
base_provider = 'Stability AI',
- best_provider = IterListProvider([ReplicateHome])
+ best_provider = ReplicateHome
+
+)
+
+sd_3_5 = Model(
+ name = 'sd-3.5',
+ base_provider = 'Stability AI',
+ best_provider = GizAI
)
### Playground ###
playground_v2_5 = Model(
name = 'playground-v2.5',
- base_provider = 'Stability AI',
- best_provider = IterListProvider([ReplicateHome])
+ base_provider = 'Playground AI',
+ best_provider = ReplicateHome
)
+
### Flux AI ###
flux = Model(
name = 'flux',
base_provider = 'Flux AI',
- best_provider = IterListProvider([FluxAirforce])
+ best_provider = IterListProvider([Airforce, Blackbox])
+
+)
+
+flux_pro = Model(
+ name = 'flux-pro',
+ base_provider = 'Flux AI',
+ best_provider = IterListProvider([NexraFluxPro, AmigoChat])
)
flux_realism = Model(
name = 'flux-realism',
base_provider = 'Flux AI',
- best_provider = IterListProvider([FluxAirforce])
+ best_provider = IterListProvider([Airforce, AmigoChat])
)
flux_anime = Model(
name = 'flux-anime',
base_provider = 'Flux AI',
- best_provider = IterListProvider([FluxAirforce])
+ best_provider = Airforce
)
flux_3d = Model(
name = 'flux-3d',
base_provider = 'Flux AI',
- best_provider = IterListProvider([FluxAirforce])
+ best_provider = Airforce
)
flux_disney = Model(
name = 'flux-disney',
base_provider = 'Flux AI',
- best_provider = IterListProvider([FluxAirforce])
+ best_provider = Airforce
+
+)
+
+flux_pixel = Model(
+ name = 'flux-pixel',
+ base_provider = 'Flux AI',
+ best_provider = Airforce
+
+)
+
+flux_4o = Model(
+ name = 'flux-4o',
+ base_provider = 'Flux AI',
+ best_provider = Airforce
+
+)
+
+flux_schnell = Model(
+ name = 'flux-schnell',
+ base_provider = 'Flux AI',
+ best_provider = IterListProvider([ReplicateHome, GizAI])
+
+)
+
+
+### OpenAI ###
+dalle_2 = Model(
+ name = 'dalle-2',
+ base_provider = 'OpenAI',
+ best_provider = NexraDallE2
)
-### ###
dalle = Model(
name = 'dalle',
- base_provider = '',
- best_provider = IterListProvider([Nexra])
+ base_provider = 'OpenAI',
+ best_provider = NexraDallE
)
-dalle_mini = Model(
- name = 'dalle-mini',
- base_provider = '',
- best_provider = IterListProvider([Nexra])
+### Midjourney ###
+midjourney = Model(
+ name = 'midjourney',
+ base_provider = 'Midjourney',
+ best_provider = NexraMidjourney
)
+### Other ###
emi = Model(
name = 'emi',
base_provider = '',
- best_provider = IterListProvider([Nexra])
+ best_provider = NexraEmi
+
+)
+
+any_dark = Model(
+ name = 'any-dark',
+ base_provider = '',
+ best_provider = Airforce
)
@@ -526,15 +912,23 @@ class ModelUtils:
'gpt-3.5-turbo': gpt_35_turbo,
# gpt-4
-'gpt-4o' : gpt_4o,
-'gpt-4o-mini' : gpt_4o_mini,
-'gpt-4' : gpt_4,
-'gpt-4-turbo' : gpt_4_turbo,
-
+'gpt-4o': gpt_4o,
+'gpt-4o-mini': gpt_4o_mini,
+'gpt-4': gpt_4,
+'gpt-4-turbo': gpt_4_turbo,
+
+# o1
+'o1': o1,
+'o1-mini': o1_mini,
+
### Meta ###
"meta-ai": meta,
+# llama-2
+'llama-2-7b': llama_2_7b,
+'llama-2-13b': llama_2_13b,
+
# llama-3
'llama-3-8b': llama_3_8b,
'llama-3-70b': llama_3_70b,
@@ -543,20 +937,37 @@ class ModelUtils:
'llama-3.1-8b': llama_3_1_8b,
'llama-3.1-70b': llama_3_1_70b,
'llama-3.1-405b': llama_3_1_405b,
-
+
+# llama-3.2
+'llama-3.2-1b': llama_3_2_1b,
+'llama-3.2-3b': llama_3_2_3b,
+'llama-3.2-11b': llama_3_2_11b,
+'llama-3.2-90b': llama_3_2_90b,
+
+# llamaguard
+'llamaguard-7b': llamaguard_7b,
+'llamaguard-2-8b': llamaguard_2_8b,
+
### Mistral ###
-'mixtral-8x7b': mixtral_8x7b,
'mistral-7b': mistral_7b,
-
-
-### 01-ai ###
-'yi-1.5-34b': yi_1_5_34b,
+'mixtral-8x7b': mixtral_8x7b,
+'mixtral-8x22b': mixtral_8x22b,
+'mistral-nemo': mistral_nemo,
+'mistral-large': mistral_large,
+
+
+### NousResearch ###
+'mixtral-8x7b-dpo': mixtral_8x7b_dpo,
+'hermes-3': hermes_3,
+
+'yi-34b': yi_34b,
### Microsoft ###
-'phi-3-mini-4k': phi_3_mini_4k,
-
+'phi-2': phi_2,
+'phi-3-medium-4k': phi_3_medium_4k,
+'phi-3.5-mini': phi_3_5_mini,
### Google ###
# gemini
@@ -566,25 +977,34 @@ class ModelUtils:
# gemma
'gemma-2b': gemma_2b,
+'gemma-2b-9b': gemma_2b_9b,
+'gemma-2b-27b': gemma_2b_27b,
+'gemma-7b': gemma_7b,
+
+# gemma-2
+'gemma-2': gemma_2,
+'gemma-2-27b': gemma_2_27b,
### Anthropic ###
-'claude-2': claude_2,
-'claude-2.0': claude_2_0,
'claude-2.1': claude_2_1,
-
+
+# claude 3
'claude-3-opus': claude_3_opus,
'claude-3-sonnet': claude_3_sonnet,
-'claude-3-5-sonnet': claude_3_5_sonnet,
'claude-3-haiku': claude_3_haiku,
+
+# claude 3.5
+'claude-3.5-sonnet': claude_3_5_sonnet,
### Reka AI ###
'reka-core': reka_core,
-### Blackbox ###
-'blackbox': blackbox,
+### Blackbox AI ###
+'blackboxai': blackboxai,
+'blackboxai-pro': blackboxai_pro,
### CohereForAI ###
@@ -604,14 +1024,19 @@ class ModelUtils:
### Qwen ###
+'qwen': qwen,
+'qwen-1.5-0.5b': qwen_1_5_0_5b,
+'qwen-1.5-7b': qwen_1_5_7b,
'qwen-1.5-14b': qwen_1_5_14b,
-'qwen-turbo': qwen_turbo,
+'qwen-1.5-72b': qwen_1_5_72b,
+'qwen-1.5-110b': qwen_1_5_110b,
+'qwen-1.5-1.8b': qwen_1_5_1_8b,
+'qwen-2-72b': qwen_2_72b,
### Zhipu AI ###
'glm-3-6b': glm_3_6b,
'glm-4-9b': glm_4_9b,
-'glm-4': glm_4,
### 01-ai ###
@@ -619,30 +1044,80 @@ class ModelUtils:
### Upstage ###
-'solar-1-mini': solar_1_mini,
+'solar-mini': solar_1_mini,
+'solar-10-7b': solar_10_7b,
+'solar-pro': solar_pro,
-### Pi ###
+### Inflection ###
'pi': pi,
+### DeepSeek ###
+'deepseek': deepseek,
+
+
+### Yorickvp ###
+'llava-13b': llava_13b,
-### SambaNova ###
-'samba-coe-v0.1': samba_coe_v0_1,
+### WizardLM ###
+'wizardlm-2-7b': wizardlm_2_7b,
+'wizardlm-2-8x22b': wizardlm_2_8x22b,
+
+
+### OpenBMB ###
+'minicpm-llama-3-v2.5': minicpm_llama_3_v2_5,
+
+
+### Lzlv ###
+'lzlv-70b': lzlv_70b,
+
+
+### OpenChat ###
+'openchat-3.5': openchat_3_5,
+'openchat-3.6-8b': openchat_3_6_8b,
-### Trong-Hieu Nguyen-Mau ###
-'v1olet-merged-7b': v1olet_merged_7b,
+### Phind ###
+'phind-codellama-34b-v2': phind_codellama_34b_v2,
+
+
+### Cognitive Computations ###
+'dolphin-2.9.1-llama-3-70b': dolphin_2_9_1_llama_3_70b,
+
+
+### x.ai ###
+'grok-2': grok_2,
+'grok-2-mini': grok_2_mini,
+
+
+### Perplexity AI ###
+'sonar-online': sonar_online,
+'sonar-chat': sonar_chat,
-### Macadeliccc ###
-'westlake-7b-v2': westlake_7b_v2,
+### Gryphe ###
+'mythomax-l2-13b': mythomax_l2_13b,
-### CookinAI ###
-'donutlm-v1': donutlm_v1,
+
+### Pawan ###
+'cosmosrp': cosmosrp,
+
+
+### TheBloke ###
+'german-7b': german_7b,
-### DeepSeek ###
-'deepseek': deepseek,
+
+### Tinyllama ###
+'tinyllama-1.1b': tinyllama_1_1b,
+
+
+### Fblgit ###
+'cybertron-7b': cybertron_7b,
+
+
+### Nvidia ###
+'nemotron-70b': nemotron_70b,
@@ -652,7 +1127,11 @@ class ModelUtils:
### Stability AI ###
'sdxl': sdxl,
+'sdxl-lora': sdxl_lora,
+'sdxl-turbo': sdxl_turbo,
+'sd-1.5': sd_1_5,
'sd-3': sd_3,
+'sd-3.5': sd_3_5,
### Playground ###
@@ -661,16 +1140,27 @@ class ModelUtils:
### Flux AI ###
'flux': flux,
+'flux-pro': flux_pro,
'flux-realism': flux_realism,
'flux-anime': flux_anime,
'flux-3d': flux_3d,
'flux-disney': flux_disney,
+'flux-pixel': flux_pixel,
+'flux-4o': flux_4o,
+'flux-schnell': flux_schnell,
-### ###
+### OpenAI ###
'dalle': dalle,
-'dalle-mini': dalle_mini,
+'dalle-2': dalle_2,
+
+### Midjourney ###
+'midjourney': midjourney,
+
+
+### Other ###
'emi': emi,
+'any-dark': any_dark,
}
_all_models = list(ModelUtils.convert.keys())
diff --git a/g4f/providers/types.py b/g4f/providers/types.py
index 50c14431..69941a26 100644
--- a/g4f/providers/types.py
+++ b/g4f/providers/types.py
@@ -13,9 +13,8 @@ class BaseProvider(ABC):
working (bool): Indicates if the provider is currently working.
needs_auth (bool): Indicates if the provider needs authentication.
supports_stream (bool): Indicates if the provider supports streaming.
- supports_gpt_35_turbo (bool): Indicates if the provider supports GPT-3.5 Turbo.
- supports_gpt_4 (bool): Indicates if the provider supports GPT-4.
supports_message_history (bool): Indicates if the provider supports message history.
+ supports_system_message (bool): Indicates if the provider supports system messages.
params (str): List parameters for the provider.
"""
@@ -23,8 +22,6 @@ class BaseProvider(ABC):
working: bool = False
needs_auth: bool = False
supports_stream: bool = False
- supports_gpt_35_turbo: bool = False
- supports_gpt_4: bool = False
supports_message_history: bool = False
supports_system_message: bool = False
params: str
@@ -109,4 +106,4 @@ class Streaming():
self.data = data
def __str__(self) -> str:
- return self.data \ No newline at end of file
+ return self.data
diff --git a/g4f/version.py b/g4f/version.py
index eda2b8fe..403ce370 100644
--- a/g4f/version.py
+++ b/g4f/version.py
@@ -116,4 +116,4 @@ class VersionUtils:
except Exception as e:
print(f'Failed to check g4f version: {e}')
-utils = VersionUtils() \ No newline at end of file
+utils = VersionUtils()