author     Tekky <98614666+xtekky@users.noreply.github.com>  2024-10-17 17:56:51 +0200
committer  GitHub <noreply@github.com>                        2024-10-17 17:56:51 +0200
commit     66a305998d47e724efaea696bf352428cfcd8291
tree       e372492a190abfa1254e6b05afea8d154aa48225
parent     Merge pull request #2275 from hansipie/setollamahost
parent     Update (g4f/Provider/Blackbox.py)
-rw-r--r--  README.md                      36
-rw-r--r--  docs/async_client.md            2
-rw-r--r--  docs/client.md                  3
-rw-r--r--  docs/interference.md            4
-rw-r--r--  g4f/Provider/Ai4Chat.py        70
-rw-r--r--  g4f/Provider/AiMathGPT.py      78
-rw-r--r--  g4f/Provider/Blackbox.py      325
-rw-r--r--  g4f/Provider/ChatifyAI.py       4
-rw-r--r--  g4f/Provider/Editee.py         78
-rw-r--r--  g4f/Provider/HuggingChat.py     2
-rw-r--r--  g4f/Provider/RubiksAI.py      163
-rw-r--r--  g4f/Provider/__init__.py        4
-rw-r--r--  g4f/api/__init__.py            16
-rw-r--r--  g4f/models.py                  37
14 files changed, 679 insertions(+), 143 deletions(-)
diff --git a/README.md b/README.md
index 84cfdabf..83e65cf6 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@ Written by [@xtekky](https://github.com/xtekky)
<div id="top"></div>
> [!IMPORTANT]
-> By using this repository or any code related to it, you agree to the [legal notice](https://github.com/xtekky/gpt4free/blob/main/LEGAL_NOTICE.md). The author is **not responsible for the usage of this repository nor endorses it**, nor is the author responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses.
+> By using this repository or any code related to it, you agree to the [legal notice](LEGAL_NOTICE.md). The author is **not responsible for the usage of this repository nor endorses it**, nor is the author responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses.
> [!WARNING]
> _"gpt4free"_ serves as a **PoC** (proof of concept), demonstrating the development of an API package with multi-provider requests, with features like timeouts, load balance and flow control.
@@ -126,13 +126,13 @@ By following these steps, you should be able to successfully install and run the
Run the **Webview UI** on other Platforms:
-- [/docs/guides/webview](https://github.com/xtekky/gpt4free/blob/main/docs/webview.md)
+- [/docs/guides/webview](docs/webview.md)
##### Use your smartphone:
Run the Web UI on Your Smartphone:
-- [/docs/guides/phone](https://github.com/xtekky/gpt4free/blob/main/docs/guides/phone.md)
+- [/docs/guides/phone](docs/guides/phone.md)
#### Use python
@@ -148,17 +148,17 @@ pip install -U g4f[all]
```
How do I install only parts or disable parts?
-Use partial requirements: [/docs/requirements](https://github.com/xtekky/gpt4free/blob/main/docs/requirements.md)
+Use partial requirements: [/docs/requirements](docs/requirements.md)
##### Install from source:
How do I load the project using git and installing the project requirements?
-Read this tutorial and follow it step by step: [/docs/git](https://github.com/xtekky/gpt4free/blob/main/docs/git.md)
+Read this tutorial and follow it step by step: [/docs/git](docs/git.md)
##### Install using Docker:
How do I build and run the compose image from source?
-Use docker-compose: [/docs/docker](https://github.com/xtekky/gpt4free/blob/main/docs/docker.md)
+Use docker-compose: [/docs/docker](docs/docker.md)
## 💡 Usage
@@ -171,7 +171,7 @@ client = Client()
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Hello"}],
- ...
+ # Add any other necessary parameters
)
print(response.choices[0].message.content)
```
@@ -187,20 +187,22 @@ from g4f.client import Client
client = Client()
response = client.images.generate(
- model="gemini",
- prompt="a white siamese cat",
- ...
+ model="dall-e-3",
+ prompt="a white siamese cat",
+ # Add any other necessary parameters
)
+
image_url = response.data[0].url
+print(f"Generated image URL: {image_url}")
```
-[![Image with cat](/docs/cat.jpeg)](https://github.com/xtekky/gpt4free/blob/main/docs/client.md)
+[![Image with cat](/docs/cat.jpeg)](docs/client.md)
**Full Documentation for Python API**
-- New AsyncClient API from G4F: [/docs/async_client](https://github.com/xtekky/gpt4free/blob/main/docs/async_client.md)
-- Client API like the OpenAI Python library: [/docs/client](https://github.com/xtekky/gpt4free/blob/main/docs/client.md)
-- Legacy API with python modules: [/docs/legacy](https://github.com/xtekky/gpt4free/blob/main/docs/legacy.md)
+- AsyncClient API from G4F: [/docs/async_client](docs/async_client.md)
+- Client API like the OpenAI Python library: [/docs/client](docs/client.md)
+- Legacy API with python modules: [/docs/legacy](docs/legacy.md)
#### Web UI
@@ -221,7 +223,7 @@ python -m g4f.cli gui -port 8080 -debug
You can use the Interference API to serve other OpenAI integrations with G4F.
-See docs: [/docs/interference](https://github.com/xtekky/gpt4free/blob/main/docs/interference.md)
+See docs: [/docs/interference](docs/interference.md)
Access with: http://localhost:1337/v1
@@ -781,11 +783,11 @@ We welcome contributions from the community. Whether you're adding new providers
###### Guide: How do I create a new Provider?
-- Read: [/docs/guides/create_provider](https://github.com/xtekky/gpt4free/blob/main/docs/guides/create_provider.md)
+- Read: [/docs/guides/create_provider](docs/guides/create_provider.md)
###### Guide: How can AI help me with writing code?
-- Read: [/docs/guides/help_me](https://github.com/xtekky/gpt4free/blob/main/docs/guides/help_me.md)
+- Read: [/docs/guides/help_me](docs/guides/help_me.md)
## 🙌 Contributors
diff --git a/docs/async_client.md b/docs/async_client.md
index a3f773fa..f5ac5392 100644
--- a/docs/async_client.md
+++ b/docs/async_client.md
@@ -187,7 +187,7 @@ async def main():
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Say this is a test"}],
)
- task2 = client.images.generate(
+ task2 = client.images.async_generate(
model="dall-e-3",
prompt="a white siamese cat",
)
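The corrected docs example pairs the chat request with `client.images.async_generate`, so both awaitables can be scheduled concurrently. Below is a minimal sketch of that pattern; the surrounding docs code is assumed, and the chat task here uses `async_create` for symmetry with the image call.

```python
import asyncio
from g4f.client import Client

async def main():
    client = Client()
    # Both calls return awaitables, so they can run concurrently via gather
    task1 = client.chat.completions.async_create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say this is a test"}],
    )
    task2 = client.images.async_generate(
        model="dall-e-3",
        prompt="a white siamese cat",
    )
    chat_response, image_response = await asyncio.gather(task1, task2)
    print(chat_response.choices[0].message.content)
    print(image_response.data[0].url)

asyncio.run(main())
```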
diff --git a/docs/client.md b/docs/client.md
index 5e6b79ba..e95c510d 100644
--- a/docs/client.md
+++ b/docs/client.md
@@ -61,8 +61,8 @@ You can use the `ChatCompletions` endpoint to generate text completions as follo
```python
from g4f.client import Client
-client = Client()
+client = Client()
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Say this is a test"}],
@@ -77,7 +77,6 @@ Also streaming are supported:
from g4f.client import Client
client = Client()
-
stream = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Say this is a test"}],
diff --git a/docs/interference.md b/docs/interference.md
index b140f66a..1b4f0c11 100644
--- a/docs/interference.md
+++ b/docs/interference.md
@@ -54,7 +54,7 @@ Send the POST request to /v1/chat/completions with body containing the `model` m
import requests
url = "http://localhost:1337/v1/chat/completions"
body = {
- "model": "gpt-3.5-turbo-16k",
+ "model": "gpt-3.5-turbo",
"stream": False,
"messages": [
{"role": "assistant", "content": "What can you do?"}
@@ -66,4 +66,4 @@ for choice in json_response:
print(choice.get('message', {}).get('content', ''))
```
-[Return to Home](/) \ No newline at end of file
+[Return to Home](/)
diff --git a/g4f/Provider/Ai4Chat.py b/g4f/Provider/Ai4Chat.py
new file mode 100644
index 00000000..81633b7a
--- /dev/null
+++ b/g4f/Provider/Ai4Chat.py
@@ -0,0 +1,70 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import re
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.ai4chat.co"
+ api_endpoint = "https://www.ai4chat.co/generate-response"
+ working = True
+ supports_gpt_4 = False
+ supports_stream = False
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-4'
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'cookie': 'messageCount=2',
+ 'origin': 'https://www.ai4chat.co',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://www.ai4chat.co/gpt/talkdirtytome',
+ 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ payload = {
+ "messages": [
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ]
+ }
+
+ async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+ response.raise_for_status()
+ response_data = await response.json()
+ message = response_data.get('message', '')
+ clean_message = re.sub('<[^<]+?>', '', message).strip()
+ yield clean_message
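Since `Ai4Chat.get_model` always returns the default model, any requested name resolves to `gpt-4`, and the whole reply arrives as a single chunk with HTML tags stripped. A minimal sketch of driving the new provider directly through its async generator (the prompt text is illustrative):

```python
import asyncio
from g4f.Provider import Ai4Chat

async def main():
    # Every model name collapses to the provider's default ('gpt-4')
    async for chunk in Ai4Chat.create_async_generator(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello"}],
    ):
        print(chunk)

asyncio.run(main())
```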
diff --git a/g4f/Provider/AiMathGPT.py b/g4f/Provider/AiMathGPT.py
new file mode 100644
index 00000000..4399320a
--- /dev/null
+++ b/g4f/Provider/AiMathGPT.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class AiMathGPT(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://aimathgpt.forit.ai"
+ api_endpoint = "https://aimathgpt.forit.ai/api/ai"
+ working = True
+ supports_stream = False
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'llama3'
+ models = ['llama3']
+
+ model_aliases = {"llama-3.1-70b": "llama3",}
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': cls.url,
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': f'{cls.url}/',
+ 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "messages": [
+ {
+ "role": "system",
+ "content": ""
+ },
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ],
+ "model": model
+ }
+
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ response_data = await response.json()
+ filtered_response = response_data['result']['response']
+ yield filtered_response
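AiMathGPT exposes a single `llama3` model plus a `llama-3.1-70b` alias. A short sketch of calling it the same way (prompt text is illustrative):

```python
import asyncio
from g4f.Provider import AiMathGPT

async def main():
    # 'llama-3.1-70b' is mapped onto the provider's 'llama3' model by get_model
    async for chunk in AiMathGPT.create_async_generator(
        model="llama-3.1-70b",
        messages=[{"role": "user", "content": "Integrate x^2 from 0 to 1"}],
    ):
        print(chunk)

asyncio.run(main())
```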
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 250ffe48..317df1d4 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -1,19 +1,27 @@
from __future__ import annotations
-import re
+import asyncio
+import aiohttp
import random
import string
import json
-from aiohttp import ClientSession
+import uuid
+import re
+from typing import Optional, AsyncGenerator, Union
+
+from aiohttp import ClientSession, ClientResponseError
-from ..typing import AsyncResult, Messages, ImageType
-from ..image import ImageResponse, to_data_uri
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse
+
class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Blackbox AI"
url = "https://www.blackbox.ai"
api_endpoint = "https://www.blackbox.ai/api/chat"
working = True
+ supports_gpt_4 = True
supports_stream = True
supports_system_message = True
supports_message_history = True
@@ -23,18 +31,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
models = [
default_model,
'blackboxai-pro',
-
+ *image_models,
"llama-3.1-8b",
'llama-3.1-70b',
'llama-3.1-405b',
-
'gpt-4o',
-
'gemini-pro',
'gemini-1.5-flash',
-
'claude-sonnet-3.5',
-
'PythonAgent',
'JavaAgent',
'JavaScriptAgent',
@@ -48,7 +52,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'ReactAgent',
'XcodeAgent',
'AngularJSAgent',
- *image_models,
]
agentMode = {
@@ -76,18 +79,17 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
}
-
+
userSelectedModel = {
"gpt-4o": "gpt-4o",
"gemini-pro": "gemini-pro",
'claude-sonnet-3.5': "claude-sonnet-3.5",
}
-
+
model_prefixes = {
'gpt-4o': '@GPT-4o',
'gemini-pro': '@Gemini-PRO',
'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
-
'PythonAgent': '@Python Agent',
'JavaAgent': '@Java Agent',
'JavaScriptAgent': '@JavaScript Agent',
@@ -104,14 +106,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'blackboxai-pro': '@BLACKBOXAI-PRO',
'ImageGeneration': '@Image Generation',
}
-
+
model_referers = {
- "blackboxai": f"{url}/?model=blackboxai",
- "gpt-4o": f"{url}/?model=gpt-4o",
- "gemini-pro": f"{url}/?model=gemini-pro",
- "claude-sonnet-3.5": f"{url}/?model=claude-sonnet-3.5"
+ "blackboxai": "/?model=blackboxai",
+ "gpt-4o": "/?model=gpt-4o",
+ "gemini-pro": "/?model=gemini-pro",
+ "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
}
-
+
model_aliases = {
"gemini-flash": "gemini-1.5-flash",
"claude-3.5-sonnet": "claude-sonnet-3.5",
@@ -122,68 +124,131 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
- elif model in cls.userSelectedModel:
- return model
elif model in cls.model_aliases:
return cls.model_aliases[model]
else:
return cls.default_model
+ @staticmethod
+ def generate_random_string(length: int = 7) -> str:
+ characters = string.ascii_letters + string.digits
+ return ''.join(random.choices(characters, k=length))
+
+ @staticmethod
+ def generate_next_action() -> str:
+ return uuid.uuid4().hex
+
+ @staticmethod
+ def generate_next_router_state_tree() -> str:
+ router_state = [
+ "",
+ {
+ "children": [
+ "(chat)",
+ {
+ "children": [
+ "__PAGE__",
+ {}
+ ]
+ }
+ ]
+ },
+ None,
+ None,
+ True
+ ]
+ return json.dumps(router_state)
+
+ @staticmethod
+ def clean_response(text: str) -> str:
+ pattern = r'^\$\@\$v=undefined-rv1\$\@\$'
+ cleaned_text = re.sub(pattern, '', text)
+ return cleaned_text
+
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
- proxy: str = None,
- image: ImageType = None,
- image_name: str = None,
- webSearchMode: bool = False,
+ proxy: Optional[str] = None,
+ websearch: bool = False,
**kwargs
- ) -> AsyncResult:
+ ) -> AsyncGenerator[Union[str, ImageResponse], None]:
+ """
+ Creates an asynchronous generator for streaming responses from Blackbox AI.
+
+ Parameters:
+ model (str): Model to use for generating responses.
+ messages (Messages): Message history.
+ proxy (Optional[str]): Proxy URL, if needed.
+ websearch (bool): Enables or disables web search mode.
+ **kwargs: Additional keyword arguments.
+
+ Yields:
+ Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
+ """
model = cls.get_model(model)
-
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "origin": cls.url,
- "pragma": "no-cache",
- "referer": cls.model_referers.get(model, cls.url),
- "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
- }
- if model in cls.model_prefixes:
- prefix = cls.model_prefixes[model]
- if not messages[0]['content'].startswith(prefix):
- messages[0]['content'] = f"{prefix} {messages[0]['content']}"
+ chat_id = cls.generate_random_string()
+ next_action = cls.generate_next_action()
+ next_router_state_tree = cls.generate_next_router_state_tree()
+
+ agent_mode = cls.agentMode.get(model, {})
+ trending_agent_mode = cls.trendingAgentMode.get(model, {})
+
+ prefix = cls.model_prefixes.get(model, "")
- random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
- messages[-1]['id'] = random_id
- messages[-1]['role'] = 'user'
-
- if image is not None:
- messages[-1]['data'] = {
- 'fileText': '',
- 'imageBase64': to_data_uri(image),
- 'title': image_name
- }
- messages[-1]['content'] = 'FILE:BB\n$#$\n\n$#$\n' + messages[-1]['content']
+ formatted_prompt = ""
+ for message in messages:
+ role = message.get('role', '').capitalize()
+ content = message.get('content', '')
+ if role and content:
+ formatted_prompt += f"{role}: {content}\n"
- data = {
- "messages": messages,
- "id": random_id,
+ if prefix:
+ formatted_prompt = f"{prefix} {formatted_prompt}".strip()
+
+ referer_path = cls.model_referers.get(model, f"/?model={model}")
+ referer_url = f"{cls.url}{referer_path}"
+
+ common_headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'origin': cls.url,
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
+ 'AppleWebKit/537.36 (KHTML, like Gecko) '
+ 'Chrome/129.0.0.0 Safari/537.36'
+ }
+
+ headers_api_chat = {
+ 'Content-Type': 'application/json',
+ 'Referer': referer_url
+ }
+ headers_api_chat_combined = {**common_headers, **headers_api_chat}
+
+ payload_api_chat = {
+ "messages": [
+ {
+ "id": chat_id,
+ "content": formatted_prompt,
+ "role": "user"
+ }
+ ],
+ "id": chat_id,
"previewToken": None,
"userId": None,
"codeModelMode": True,
- "agentMode": {},
- "trendingAgentMode": {},
+ "agentMode": agent_mode,
+ "trendingAgentMode": trending_agent_mode,
"isMicMode": False,
"userSystemPrompt": None,
"maxTokens": 1024,
@@ -196,47 +261,99 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"clickedForceWebSearch": False,
"visitFromDelta": False,
"mobileClient": False,
- "userSelectedModel": None,
- "webSearchMode": webSearchMode,
+ "webSearchMode": websearch,
+ "userSelectedModel": cls.userSelectedModel.get(model, model)
}
- if model in cls.agentMode:
- data["agentMode"] = cls.agentMode[model]
- elif model in cls.trendingAgentMode:
- data["trendingAgentMode"] = cls.trendingAgentMode[model]
- elif model in cls.userSelectedModel:
- data["userSelectedModel"] = cls.userSelectedModel[model]
-
- async with ClientSession(headers=headers) as session:
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- if model == 'ImageGeneration':
- response_text = await response.text()
- url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text)
- if url_match:
- image_url = url_match.group(0)
- yield ImageResponse(image_url, alt=messages[-1]['content'])
- else:
- raise Exception("Image URL not found in the response")
- else:
- full_response = ""
- search_results_json = ""
- async for chunk in response.content.iter_any():
- if chunk:
- decoded_chunk = chunk.decode()
- decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
- if decoded_chunk.strip():
- if '$~~~$' in decoded_chunk:
- search_results_json += decoded_chunk
- else:
- full_response += decoded_chunk
- yield decoded_chunk
-
- if data["webSearchMode"] and search_results_json:
- match = re.search(r'\$~~~\$(.*?)\$~~~\$', search_results_json, re.DOTALL)
+ headers_chat = {
+ 'Accept': 'text/x-component',
+ 'Content-Type': 'text/plain;charset=UTF-8',
+ 'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+ 'next-action': next_action,
+ 'next-router-state-tree': next_router_state_tree,
+ 'next-url': '/'
+ }
+ headers_chat_combined = {**common_headers, **headers_chat}
+
+ data_chat = '[]'
+
+ async with ClientSession(headers=common_headers) as session:
+ try:
+ async with session.post(
+ cls.api_endpoint,
+ headers=headers_api_chat_combined,
+ json=payload_api_chat,
+ proxy=proxy
+ ) as response_api_chat:
+ response_api_chat.raise_for_status()
+ text = await response_api_chat.text()
+ cleaned_response = cls.clean_response(text)
+
+ if model in cls.image_models:
+ match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
if match:
- search_results = json.loads(match.group(1))
- formatted_results = "\n\n**Sources:**\n"
- for i, result in enumerate(search_results[:5], 1):
- formatted_results += f"{i}. [{result['title']}]({result['link']})\n"
- yield formatted_results
+ image_url = match.group(1)
+ image_response = ImageResponse(images=image_url, alt="Generated Image")
+ yield image_response
+ else:
+ yield cleaned_response
+ else:
+ if websearch:
+ match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+ if match:
+ source_part = match.group(1).strip()
+ answer_part = cleaned_response[match.end():].strip()
+ try:
+ sources = json.loads(source_part)
+ source_formatted = "**Source:**\n"
+ for item in sources:
+ title = item.get('title', 'No Title')
+ link = item.get('link', '#')
+ position = item.get('position', '')
+ source_formatted += f"{position}. [{title}]({link})\n"
+ final_response = f"{answer_part}\n\n{source_formatted}"
+ except json.JSONDecodeError:
+ final_response = f"{answer_part}\n\nSource information is unavailable."
+ else:
+ final_response = cleaned_response
+ else:
+ if '$~~~$' in cleaned_response:
+ final_response = cleaned_response.split('$~~~$')[0].strip()
+ else:
+ final_response = cleaned_response
+
+ yield final_response
+ except ClientResponseError as e:
+ error_text = f"Error {e.status}: {e.message}"
+ try:
+ error_response = await e.response.text()
+ cleaned_error = cls.clean_response(error_response)
+ error_text += f" - {cleaned_error}"
+ except Exception:
+ pass
+ yield error_text
+ except Exception as e:
+ yield f"Unexpected error during /api/chat request: {str(e)}"
+
+ chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+
+ try:
+ async with session.post(
+ chat_url,
+ headers=headers_chat_combined,
+ data=data_chat,
+ proxy=proxy
+ ) as response_chat:
+ response_chat.raise_for_status()
+ pass
+ except ClientResponseError as e:
+ error_text = f"Error {e.status}: {e.message}"
+ try:
+ error_response = await e.response.text()
+ cleaned_error = cls.clean_response(error_response)
+ error_text += f" - {cleaned_error}"
+ except Exception:
+ pass
+ yield error_text
+ except Exception as e:
+ yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
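The rewritten Blackbox provider now formats the prompt itself, picks agent and trending modes from the model name, and yields either plain text or an `ImageResponse` for image models. A minimal sketch of consuming it, including the renamed `websearch` flag (prompt text is illustrative):

```python
import asyncio
from g4f.Provider import Blackbox
from g4f.image import ImageResponse

async def main():
    async for part in Blackbox.create_async_generator(
        model="gpt-4o",
        messages=[{"role": "user", "content": "What changed in this release?"}],
        websearch=True,  # appends a formatted "**Source:**" list when sources are returned
    ):
        if isinstance(part, ImageResponse):
            print("image response:", part)
        else:
            print(part)

asyncio.run(main())
```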
diff --git a/g4f/Provider/ChatifyAI.py b/g4f/Provider/ChatifyAI.py
index a999afac..7e43b065 100644
--- a/g4f/Provider/ChatifyAI.py
+++ b/g4f/Provider/ChatifyAI.py
@@ -65,19 +65,15 @@ class ChatifyAI(AsyncGeneratorProvider, ProviderModelMixin):
response.raise_for_status()
response_text = await response.text()
- # Filter and format the response
filtered_response = cls.filter_response(response_text)
yield filtered_response
@staticmethod
def filter_response(response_text: str) -> str:
- # Split the string into parts
parts = response_text.split('"')
- # Select only the text parts (every second part)
text_parts = parts[1::2]
- # Join the text parts
clean_text = ''.join(text_parts)
return clean_text
diff --git a/g4f/Provider/Editee.py b/g4f/Provider/Editee.py
new file mode 100644
index 00000000..6d297169
--- /dev/null
+++ b/g4f/Provider/Editee.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Editee(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Editee"
+ url = "https://editee.com"
+ api_endpoint = "https://editee.com/submit/chatgptfree"
+ working = True
+ supports_gpt_4 = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'claude'
+ models = ['claude', 'gpt4', 'gemini', 'mistrallarge']
+
+ model_aliases = {
+ "claude-3.5-sonnet": "claude",
+ "gpt-4o": "gpt4",
+ "gemini-pro": "gemini",
+ "mistral-large": "mistrallarge",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Accept": "application/json, text/plain, */*",
+ "Accept-Language": "en-US,en;q=0.9",
+ "Cache-Control": "no-cache",
+ "Content-Type": "application/json",
+ "Origin": cls.url,
+ "Pragma": "no-cache",
+ "Priority": "u=1, i",
+ "Referer": f"{cls.url}/chat-gpt",
+ "Sec-CH-UA": '"Chromium";v="129", "Not=A?Brand";v="8"',
+ "Sec-CH-UA-Mobile": '?0',
+ "Sec-CH-UA-Platform": '"Linux"',
+ "Sec-Fetch-Dest": 'empty',
+ "Sec-Fetch-Mode": 'cors',
+ "Sec-Fetch-Site": 'same-origin',
+ "User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
+ "X-Requested-With": 'XMLHttpRequest',
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "user_input": prompt,
+ "context": " ",
+ "template_id": "",
+ "selected_model": model
+ }
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ response_data = await response.json()
+ yield response_data['text']
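Editee is a non-streaming JSON endpoint, and its `model_aliases` map the common g4f names onto its internal ids. A minimal usage sketch:

```python
import asyncio
from g4f.Provider import Editee

async def main():
    # 'gpt-4o' resolves to Editee's internal 'gpt4' id via model_aliases
    async for chunk in Editee.create_async_generator(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Say this is a test"}],
    ):
        print(chunk)

asyncio.run(main())
```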
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 30e97d7d..45f3a0d2 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -17,6 +17,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
'meta-llama/Meta-Llama-3.1-70B-Instruct',
'CohereForAI/c4ai-command-r-plus-08-2024',
'Qwen/Qwen2.5-72B-Instruct',
+ 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
'meta-llama/Llama-3.2-11B-Vision-Instruct',
'NousResearch/Hermes-3-Llama-3.1-8B',
'mistralai/Mistral-Nemo-Instruct-2407',
@@ -27,6 +28,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
"command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
"qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
+ "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
"llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct",
"hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B",
"mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407",
diff --git a/g4f/Provider/RubiksAI.py b/g4f/Provider/RubiksAI.py
new file mode 100644
index 00000000..184322c8
--- /dev/null
+++ b/g4f/Provider/RubiksAI.py
@@ -0,0 +1,163 @@
+from __future__ import annotations
+
+import asyncio
+import aiohttp
+import random
+import string
+import json
+from urllib.parse import urlencode
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Rubiks AI"
+ url = "https://rubiks.ai"
+ api_endpoint = "https://rubiks.ai/search/api.php"
+ working = True
+ supports_gpt_4 = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'llama-3.1-70b-versatile'
+ models = [default_model, 'gpt-4o-mini']
+
+ model_aliases = {
+ "llama-3.1-70b": "llama-3.1-70b-versatile",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @staticmethod
+ def generate_mid() -> str:
+ """
+ Generates a 'mid' string following the pattern:
+ 6 characters - 4 characters - 4 characters - 4 characters - 12 characters
+ Example: 0r7v7b-quw4-kdy3-rvdu-ekief6xbuuq4
+ """
+ parts = [
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)),
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
+ ''.join(random.choices(string.ascii_lowercase + string.digits, k=12))
+ ]
+ return '-'.join(parts)
+
+ @staticmethod
+ def create_referer(q: str, mid: str, model: str = '') -> str:
+ """
+ Creates a Referer URL with dynamic q and mid values, using urlencode for safe parameter encoding.
+ """
+ params = {'q': q, 'model': model, 'mid': mid}
+ encoded_params = urlencode(params)
+ return f'https://rubiks.ai/search/?{encoded_params}'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ websearch: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ """
+ Creates an asynchronous generator that sends requests to the Rubiks AI API and yields the response.
+
+ Parameters:
+ - model (str): The model to use in the request.
+ - messages (Messages): The messages to send as a prompt.
+ - proxy (str, optional): Proxy URL, if needed.
+ - websearch (bool, optional): Indicates whether to include search sources in the response. Defaults to False.
+ """
+ model = cls.get_model(model)
+ prompt = format_prompt(messages)
+ q_value = prompt
+ mid_value = cls.generate_mid()
+ referer = cls.create_referer(q=q_value, mid=mid_value, model=model)
+
+ url = cls.api_endpoint
+ params = {
+ 'q': q_value,
+ 'model': model,
+ 'id': '',
+ 'mid': mid_value
+ }
+
+ headers = {
+ 'Accept': 'text/event-stream',
+ 'Accept-Language': 'en-US,en;q=0.9',
+ 'Cache-Control': 'no-cache',
+ 'Connection': 'keep-alive',
+ 'Pragma': 'no-cache',
+ 'Referer': referer,
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
+ 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"'
+ }
+
+ try:
+ timeout = aiohttp.ClientTimeout(total=None)
+ async with ClientSession(timeout=timeout) as session:
+ async with session.get(url, headers=headers, params=params, proxy=proxy) as response:
+ if response.status != 200:
+ yield f"Request ended with status code {response.status}"
+ return
+
+ assistant_text = ''
+ sources = []
+
+ async for line in response.content:
+ decoded_line = line.decode('utf-8').strip()
+ if not decoded_line.startswith('data: '):
+ continue
+ data = decoded_line[6:]
+ if data in ('[DONE]', '{"done": ""}'):
+ break
+ try:
+ json_data = json.loads(data)
+ except json.JSONDecodeError:
+ continue
+
+ if 'url' in json_data and 'title' in json_data:
+ if websearch:
+ sources.append({'title': json_data['title'], 'url': json_data['url']})
+
+ elif 'choices' in json_data:
+ for choice in json_data['choices']:
+ delta = choice.get('delta', {})
+ content = delta.get('content', '')
+ role = delta.get('role', '')
+ if role == 'assistant':
+ continue
+ assistant_text += content
+
+ if websearch and sources:
+ sources_text = '\n'.join([f"{i+1}. [{s['title']}]: {s['url']}" for i, s in enumerate(sources)])
+ assistant_text += f"\n\n**Source:**\n{sources_text}"
+
+ yield assistant_text
+
+ except asyncio.CancelledError:
+ yield "The request was cancelled."
+ except aiohttp.ClientError as e:
+ yield f"An error occurred during the request: {e}"
+ except Exception as e:
+ yield f"An unexpected error occurred: {e}"
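RubiksAI streams server-sent events and, when `websearch` is enabled, collects the `url`/`title` entries into a source list appended to the answer. A minimal sketch (query text is illustrative):

```python
import asyncio
from g4f.Provider import RubiksAI

async def main():
    # With websearch=True, collected sources are appended as a "**Source:**" list
    async for text in RubiksAI.create_async_generator(
        model="llama-3.1-70b",  # alias for 'llama-3.1-70b-versatile'
        messages=[{"role": "user", "content": "Who maintains gpt4free?"}],
        websearch=True,
    ):
        print(text)

asyncio.run(main())
```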
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 3d6539fc..c794dd0b 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -11,6 +11,7 @@ from .needs_auth import *
from .nexra import *
+from .Ai4Chat import Ai4Chat
from .AI365VIP import AI365VIP
from .AIChatFree import AIChatFree
from .AIUncensored import AIUncensored
@@ -18,6 +19,7 @@ from .Allyfy import Allyfy
from .AmigoChat import AmigoChat
from .AiChatOnline import AiChatOnline
from .AiChats import AiChats
+from .AiMathGPT import AiMathGPT
from .Airforce import Airforce
from .Aura import Aura
from .Bing import Bing
@@ -37,6 +39,7 @@ from .DDG import DDG
from .DeepInfra import DeepInfra
from .DeepInfraChat import DeepInfraChat
from .DeepInfraImage import DeepInfraImage
+from .Editee import Editee
from .FlowGpt import FlowGpt
from .Free2GPT import Free2GPT
from .FreeChatgpt import FreeChatgpt
@@ -61,6 +64,7 @@ from .Prodia import Prodia
from .Reka import Reka
from .Replicate import Replicate
from .ReplicateHome import ReplicateHome
+from .RubiksAI import RubiksAI
from .TeachAnything import TeachAnything
from .Upstage import Upstage
from .WhiteRabbitNeo import WhiteRabbitNeo
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 2c723978..da35319a 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -17,7 +17,7 @@ from typing import Union, Optional
import g4f
import g4f.debug
-from g4f.client import AsyncClient
+from g4f.client import Client
from g4f.typing import Messages
from g4f.cookies import read_cookie_files
@@ -69,7 +69,7 @@ class AppConfig():
class Api:
def __init__(self, app: FastAPI) -> None:
self.app = app
- self.client = AsyncClient()
+ self.client = Client()
self.get_g4f_api_key = APIKeyHeader(name="g4f-api-key")
def register_authorization(self):
@@ -156,7 +156,8 @@ class Api:
auth_header = auth_header.split(None, 1)[-1]
if auth_header and auth_header != "Bearer":
config.api_key = auth_header
- response = self.client.chat.completions.create(
+ # Use the asynchronous create method and await it
+ response = await self.client.chat.completions.async_create(
**{
**AppConfig.defaults,
**config.dict(exclude_none=True),
@@ -164,7 +165,7 @@ class Api:
ignored=AppConfig.ignored_providers
)
if not config.stream:
- return JSONResponse((await response).to_json())
+ return JSONResponse(response.to_json())
async def streaming():
try:
@@ -196,10 +197,11 @@ class Api:
auth_header = auth_header.split(None, 1)[-1]
if auth_header and auth_header != "Bearer":
config.api_key = auth_header
- response = self.client.images.generate(
+ # Use the asynchronous generate method and await it
+ response = await self.client.images.async_generate(
**config.dict(exclude_none=True),
)
- return JSONResponse((await response).to_json())
+ return JSONResponse(response.to_json())
except Exception as e:
logging.exception(e)
return Response(content=format_exception(e, config), status_code=500, media_type="application/json")
@@ -232,4 +234,4 @@ def run_api(
use_colors=use_colors,
factory=True,
reload=debug
- ) \ No newline at end of file
+ )
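The API server now constructs a plain `Client` and awaits its `async_create`/`async_generate` variants directly, instead of awaiting the return value of the synchronous methods. The same pattern used outside the server, as a sketch:

```python
import asyncio
from g4f.client import Client

async def demo():
    client = Client()
    # chat completion, awaited via the async variant the API now uses
    response = await client.chat.completions.async_create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)

    # image generation, likewise via async_generate
    image = await client.images.async_generate(
        model="dall-e-3",
        prompt="a white siamese cat",
    )
    print(image.data[0].url)

asyncio.run(demo())
```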
diff --git a/g4f/models.py b/g4f/models.py
index f124cf86..e84f9103 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -5,7 +5,9 @@ from dataclasses import dataclass
from .Provider import IterListProvider, ProviderType
from .Provider import (
+ Ai4Chat,
AIChatFree,
+ AiMathGPT,
Airforce,
Allyfy,
AmigoChat,
@@ -23,6 +25,7 @@ from .Provider import (
DeepInfra,
DeepInfraChat,
DeepInfraImage,
+ Editee,
Free2GPT,
FreeChatgpt,
FreeGpt,
@@ -56,6 +59,7 @@ from .Provider import (
Reka,
Replicate,
ReplicateHome,
+ RubiksAI,
TeachAnything,
Upstage,
)
@@ -101,6 +105,9 @@ default = Model(
AmigoChat,
ChatifyAI,
Cloudflare,
+ Ai4Chat,
+ Editee,
+ AiMathGPT,
])
)
@@ -127,13 +134,13 @@ gpt_35_turbo = Model(
gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'OpenAI',
- best_provider = IterListProvider([NexraChatGPT4o, Blackbox, ChatGptEs, AmigoChat, DarkAI, Liaobots, Airforce, OpenaiChat])
+ best_provider = IterListProvider([NexraChatGPT4o, Blackbox, ChatGptEs, AmigoChat, DarkAI, Editee, Liaobots, Airforce, OpenaiChat])
)
gpt_4o_mini = Model(
name = 'gpt-4o-mini',
base_provider = 'OpenAI',
- best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, MagickPen, AmigoChat, Liaobots, Airforce, ChatgptFree, Koala, OpenaiChat, ChatGpt])
+ best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, MagickPen, AmigoChat, RubiksAI, Liaobots, Airforce, ChatgptFree, Koala, OpenaiChat, ChatGpt])
)
gpt_4_turbo = Model(
@@ -145,7 +152,7 @@ gpt_4_turbo = Model(
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'OpenAI',
- best_provider = IterListProvider([NexraChatGPT, NexraChatGptV2, NexraChatGptWeb, Airforce, Chatgpt4Online, Bing, OpenaiChat])
+ best_provider = IterListProvider([NexraChatGPT, NexraChatGptV2, NexraChatGptWeb, Ai4Chat, Airforce, Chatgpt4Online, Bing, OpenaiChat])
)
# o1
@@ -213,7 +220,7 @@ llama_3_1_8b = Model(
llama_3_1_70b = Model(
name = "llama-3.1-70b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, Airforce, HuggingFace, PerplexityLabs])
+ best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, Airforce, AiMathGPT, RubiksAI, HuggingFace, PerplexityLabs])
)
llama_3_1_405b = Model(
@@ -287,6 +294,12 @@ mistral_nemo = Model(
best_provider = IterListProvider([HuggingChat, HuggingFace])
)
+mistral_large = Model(
+ name = "mistral-large",
+ base_provider = "Mistral",
+ best_provider = Editee
+)
+
### NousResearch ###
mixtral_8x7b_dpo = Model(
@@ -332,7 +345,7 @@ phi_3_5_mini = Model(
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'Google DeepMind',
- best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, GPROChat, AmigoChat, Liaobots, Airforce])
+ best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, GPROChat, AmigoChat, Editee, Liaobots, Airforce])
)
gemini_flash = Model(
@@ -416,7 +429,7 @@ claude_3_haiku = Model(
claude_3_5_sonnet = Model(
name = 'claude-3.5-sonnet',
base_provider = 'Anthropic',
- best_provider = IterListProvider([Blackbox, Airforce, AmigoChat, Liaobots])
+ best_provider = IterListProvider([Blackbox, Editee, AmigoChat, Airforce, Liaobots])
)
@@ -707,6 +720,13 @@ cybertron_7b = Model(
best_provider = Cloudflare
)
+### Nvidia ###
+nemotron_70b = Model(
+ name = 'nemotron-70b',
+ base_provider = 'Nvidia',
+ best_provider = IterListProvider([HuggingChat, HuggingFace])
+)
+
#############
@@ -909,6 +929,7 @@ class ModelUtils:
'mixtral-8x7b': mixtral_8x7b,
'mixtral-8x22b': mixtral_8x22b,
'mistral-nemo': mistral_nemo,
+'mistral-large': mistral_large,
### NousResearch ###
@@ -1070,6 +1091,10 @@ class ModelUtils:
'cybertron-7b': cybertron_7b,
+### Nvidia ###
+'nemotron-70b': nemotron_70b,
+
+
#############
### Image ###
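The new `mistral-large` and `nemotron-70b` entries are registered in the model map alongside the existing ones. A quick sketch of looking them up, assuming the registry dict is `ModelUtils.convert` as elsewhere in g4f:

```python
from g4f.models import ModelUtils

# Look up the two entries added in this commit and print their base providers
for name in ("mistral-large", "nemotron-70b"):
    model = ModelUtils.convert[name]
    print(f"{name}: base provider = {model.base_provider}")
```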