summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--README.md38
-rw-r--r--docs/async_client.md9
-rw-r--r--docs/client.md4
-rw-r--r--docs/docker.md2
-rw-r--r--docs/git.md2
-rw-r--r--docs/interference-api.md6
-rw-r--r--docs/legacy/legacy.md (renamed from docs/legacy.md)0
-rw-r--r--docs/legacy/legacy_async_client.md380
-rw-r--r--docs/providers-and-models.md87
-rw-r--r--g4f/Provider/AIUncensored.py172
-rw-r--r--g4f/Provider/Ai4Chat.py2
-rw-r--r--g4f/Provider/AiMathGPT.py74
-rw-r--r--g4f/Provider/Airforce.py250
-rw-r--r--g4f/Provider/Allyfy.py91
-rw-r--r--g4f/Provider/Bing.py2
-rw-r--r--g4f/Provider/Blackbox.py439
-rw-r--r--g4f/Provider/ChatGot.py75
-rw-r--r--g4f/Provider/ChatGpt.py109
-rw-r--r--g4f/Provider/ChatGptEs.py2
-rw-r--r--g4f/Provider/ChatHub.py84
-rw-r--r--g4f/Provider/ChatifyAI.py79
-rw-r--r--g4f/Provider/Cloudflare.py104
-rw-r--r--g4f/Provider/DarkAI.py16
-rw-r--r--g4f/Provider/DeepInfraChat.py57
-rw-r--r--g4f/Provider/Editee.py77
-rw-r--r--g4f/Provider/Free2GPT.py8
-rw-r--r--g4f/Provider/FreeChatgpt.py96
-rw-r--r--g4f/Provider/FreeGpt.py2
-rw-r--r--g4f/Provider/GizAI.py121
-rw-r--r--g4f/Provider/HuggingChat.py44
-rw-r--r--g4f/Provider/Liaobots.py34
-rw-r--r--g4f/Provider/PerplexityLabs.py2
-rw-r--r--g4f/Provider/Prodia.py18
-rw-r--r--g4f/Provider/ReplicateHome.py32
-rw-r--r--g4f/Provider/TeachAnything.py22
-rw-r--r--g4f/Provider/Upstage.py30
-rw-r--r--g4f/Provider/__init__.py36
-rw-r--r--g4f/Provider/airforce/AirforceChat.py172
-rw-r--r--g4f/Provider/airforce/AirforceImage.py83
-rw-r--r--g4f/Provider/airforce/__init__.py2
-rw-r--r--g4f/Provider/deprecated/__init__.py3
-rw-r--r--g4f/Provider/local/Local.py (renamed from g4f/Provider/Local.py)12
-rw-r--r--g4f/Provider/local/Ollama.py (renamed from g4f/Provider/Ollama.py)8
-rw-r--r--g4f/Provider/local/__init__.py2
-rw-r--r--g4f/Provider/needs_auth/BingCreateImages.py (renamed from g4f/Provider/BingCreateImages.py)12
-rw-r--r--g4f/Provider/needs_auth/DeepInfra.py (renamed from g4f/Provider/DeepInfra.py)8
-rw-r--r--g4f/Provider/needs_auth/DeepInfraImage.py (renamed from g4f/Provider/DeepInfraImage.py)8
-rw-r--r--g4f/Provider/needs_auth/GeminiPro.py (renamed from g4f/Provider/GeminiPro.py)10
-rw-r--r--g4f/Provider/needs_auth/Groq.py4
-rw-r--r--g4f/Provider/needs_auth/HuggingFace.py (renamed from g4f/Provider/HuggingFace.py)12
-rw-r--r--g4f/Provider/needs_auth/MetaAI.py (renamed from g4f/Provider/MetaAI.py)12
-rw-r--r--g4f/Provider/needs_auth/MetaAIAccount.py (renamed from g4f/Provider/MetaAIAccount.py)8
-rw-r--r--g4f/Provider/needs_auth/OpenRouter.py32
-rw-r--r--g4f/Provider/needs_auth/OpenaiAPI.py (renamed from g4f/Provider/needs_auth/Openai.py)2
-rw-r--r--g4f/Provider/needs_auth/OpenaiChat.py1
-rw-r--r--g4f/Provider/needs_auth/PerplexityApi.py4
-rw-r--r--g4f/Provider/needs_auth/Replicate.py (renamed from g4f/Provider/Replicate.py)14
-rw-r--r--g4f/Provider/needs_auth/ThebApi.py6
-rw-r--r--g4f/Provider/needs_auth/WhiteRabbitNeo.py (renamed from g4f/Provider/WhiteRabbitNeo.py)10
-rw-r--r--g4f/Provider/needs_auth/__init__.py33
-rw-r--r--g4f/Provider/needs_auth/gigachat/GigaChat.py (renamed from g4f/Provider/gigachat/GigaChat.py)8
-rw-r--r--g4f/Provider/needs_auth/gigachat/__init__.py (renamed from g4f/Provider/gigachat/__init__.py)0
-rw-r--r--g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt (renamed from g4f/Provider/gigachat/russian_trusted_root_ca_pem.crt)0
-rw-r--r--g4f/Provider/nexra/NexraBing.py93
-rw-r--r--g4f/Provider/nexra/NexraBlackbox.py100
-rw-r--r--g4f/Provider/nexra/NexraChatGPT.py285
-rw-r--r--g4f/Provider/nexra/NexraDallE.py63
-rw-r--r--g4f/Provider/nexra/NexraDallE2.py63
-rw-r--r--g4f/Provider/nexra/NexraEmi.py63
-rw-r--r--g4f/Provider/nexra/NexraFluxPro.py70
-rw-r--r--g4f/Provider/nexra/NexraGeminiPro.py86
-rw-r--r--g4f/Provider/nexra/NexraMidjourney.py63
-rw-r--r--g4f/Provider/nexra/NexraProdiaAI.py151
-rw-r--r--g4f/Provider/nexra/NexraQwen.py86
-rw-r--r--g4f/Provider/nexra/NexraSD15.py72
-rw-r--r--g4f/Provider/nexra/NexraSDLora.py69
-rw-r--r--g4f/Provider/nexra/NexraSDTurbo.py69
-rw-r--r--g4f/Provider/nexra/__init__.py14
-rw-r--r--g4f/Provider/not_working/AI365VIP.py (renamed from g4f/Provider/AI365VIP.py)6
-rw-r--r--g4f/Provider/not_working/AIChatFree.py (renamed from g4f/Provider/AIChatFree.py)12
-rw-r--r--g4f/Provider/not_working/AiChatOnline.py (renamed from g4f/Provider/AiChatOnline.py)8
-rw-r--r--g4f/Provider/not_working/AiChats.py (renamed from g4f/Provider/AiChats.py)10
-rw-r--r--g4f/Provider/not_working/AmigoChat.py (renamed from g4f/Provider/AmigoChat.py)10
-rw-r--r--g4f/Provider/not_working/Aura.py (renamed from g4f/Provider/Aura.py)8
-rw-r--r--g4f/Provider/not_working/Chatgpt4Online.py (renamed from g4f/Provider/Chatgpt4Online.py)8
-rw-r--r--g4f/Provider/not_working/Chatgpt4o.py (renamed from g4f/Provider/Chatgpt4o.py)10
-rw-r--r--g4f/Provider/not_working/ChatgptFree.py (renamed from g4f/Provider/ChatgptFree.py)11
-rw-r--r--g4f/Provider/not_working/FlowGpt.py (renamed from g4f/Provider/FlowGpt.py)8
-rw-r--r--g4f/Provider/not_working/FreeNetfly.py (renamed from g4f/Provider/FreeNetfly.py)6
-rw-r--r--g4f/Provider/not_working/GPROChat.py (renamed from g4f/Provider/GPROChat.py)8
-rw-r--r--g4f/Provider/not_working/Koala.py (renamed from g4f/Provider/Koala.py)10
-rw-r--r--g4f/Provider/not_working/MyShell.py (renamed from g4f/Provider/selenium/MyShell.py)0
-rw-r--r--g4f/Provider/not_working/__init__.py14
-rw-r--r--g4f/Provider/selenium/__init__.py1
-rw-r--r--g4f/api/__init__.py8
-rw-r--r--g4f/client/client.py25
-rw-r--r--g4f/gui/client/index.html10
-rw-r--r--g4f/models.py616
98 files changed, 1611 insertions, 3597 deletions
diff --git a/README.md b/README.md
index 2de3d318..0c5f488b 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,5 @@
+
![248433934-7886223b-c1d1-4260-82aa-da5741f303bb](https://github.com/xtekky/gpt4free/assets/98614666/ea012c87-76e0-496a-8ac4-e2de090cc6c9)
<a href="https://trendshift.io/repositories/1692" target="_blank"><img src="https://trendshift.io/api/badge/repositories/1692" alt="xtekky%2Fgpt4free | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
@@ -28,7 +29,7 @@ docker pull hlohaus789/g4f
```
## πŸ†• What's New
- - **For comprehensive details on new features and updates, please refer to our [Releases](https://github.com/xtekky/gpt4free/releases) page**
+ - **For comprehensive details on new features and updates, please refer to our** [Releases](https://github.com/xtekky/gpt4free/releases) **page**
- **Installation Guide for Windows (.exe):** πŸ’» [Installation Guide for Windows (.exe)](#installation-guide-for-windows-exe)
- **Join our Telegram Channel:** πŸ“¨ [telegram.me/g4f_channel](https://telegram.me/g4f_channel)
- **Join our Discord Group:** πŸ’¬ [discord.gg/XfybzPXPH5](https://discord.gg/5E39JUWUFa)
@@ -70,6 +71,13 @@ Is your site on this repository and you want to take it down? Send an email to t
- [Interference API](#interference-api)
- [Local Inference](docs/local.md)
- [Configuration](#configuration)
+ - [Full Documentation for Python API](#full-documentation-for-python-api)
+ - **New:**
+ - [Async Client API from G4F](docs/async_client.md)
+ - [Client API like the OpenAI Python library](docs/client.md)
+ - **Legacy**
+ - [Legacy API with python modules](docs/legacy/legacy.md)
+ - [Legacy AsyncClient API from G4F](docs/legacy/legacy_async_client.md)
- [πŸš€ Providers and Models](docs/providers-and-models.md)
- [πŸ”— Powered by gpt4free](#-powered-by-gpt4free)
- [🀝 Contribute](#-contribute)
@@ -166,7 +174,7 @@ from g4f.client import Client
client = Client()
response = client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[{"role": "user", "content": "Hello"}],
# Add any other necessary parameters
)
@@ -183,7 +191,7 @@ from g4f.client import Client
client = Client()
response = client.images.generate(
- model="dall-e-3",
+ model="flux",
prompt="a white siamese cat",
# Add any other necessary parameters
)
@@ -194,10 +202,14 @@ print(f"Generated image URL: {image_url}")
[![Image with cat](/docs/cat.jpeg)](docs/client.md)
-**Full Documentation for Python API**
- - **Async Client API from G4F:** [/docs/async_client](docs/async_client.md)
- - **Client API like the OpenAI Python library:** [/docs/client](docs/client.md)
- - **Legacy API with python modules:** [/docs/legacy](docs/legacy.md)
+#### **Full Documentation for Python API**
+ - **New:**
+ - **Async Client API from G4F:** [/docs/async_client](docs/async_client.md)
+ - **Client API like the OpenAI Python library:** [/docs/client](docs/client.md)
+
+ - **Legacy:**
+ - **Legacy API with python modules:** [/docs/legacy/legacy](docs/legacy/legacy.md)
+ - **Legacy AsyncClient API from G4F:** [/docs/async_client](docs/legacy/legacy_async_client.md)
#### Web UI
**To start the web interface, type the following codes in python:**
@@ -290,20 +302,18 @@ To utilize the OpenaiChat provider, a .har file is required from https://chatgpt
- Place the exported .har file in the `./har_and_cookies` directory if you are using Docker. Alternatively, if you are using Python from a terminal, you can store it in a `./har_and_cookies` directory within your current working directory.
-Note: Ensure that your .har file is stored securely, as it may contain sensitive information.
+> **Note:** Ensure that your .har file is stored securely, as it may contain sensitive information.
#### Using Proxy
If you want to hide or change your IP address for the providers, you can set a proxy globally via an environment variable:
-- On macOS and Linux:
-
+**- On macOS and Linux:**
```bash
export G4F_PROXY="http://host:port"
```
-- On Windows:
-
+**- On Windows:**
```bash
set G4F_PROXY=http://host:port
```
@@ -770,10 +780,10 @@ set G4F_PROXY=http://host:port
We welcome contributions from the community. Whether you're adding new providers or features, or simply fixing typos and making small improvements, your input is valued. Creating a pull request is all it takes – our co-pilot will handle the code review process. Once all changes have been addressed, we'll merge the pull request into the main branch and release the updates at a later time.
###### Guide: How do i create a new Provider?
- - Read: [Create Provider Guide](docs/guides/create_provider.md)
+ - **Read:** [Create Provider Guide](docs/guides/create_provider.md)
###### Guide: How can AI help me with writing code?
- - Read: [AI Assistance Guide](docs/guides/help_me.md)
+ - **Read:** [AI Assistance Guide](docs/guides/help_me.md)
## πŸ™Œ Contributors
A list of all contributors is available [here](https://github.com/xtekky/gpt4free/graphs/contributors)
diff --git a/docs/async_client.md b/docs/async_client.md
index 0719a463..7194c792 100644
--- a/docs/async_client.md
+++ b/docs/async_client.md
@@ -57,7 +57,7 @@ client = Client(
**Here’s an improved example of creating chat completions:**
```python
response = await async_client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
@@ -99,7 +99,7 @@ async def main():
client = Client()
response = await client.chat.completions.async_create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
@@ -230,7 +230,7 @@ async def main():
client = Client()
task1 = client.chat.completions.async_create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
@@ -262,6 +262,7 @@ The G4F AsyncClient supports a wide range of AI models and providers, allowing y
### Models
- GPT-3.5-Turbo
+ - GPT-4o-Mini
- GPT-4
- DALL-E 3
- Gemini
@@ -306,7 +307,7 @@ Implementing proper error handling and following best practices is crucial when
```python
try:
response = await client.chat.completions.async_create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
diff --git a/docs/client.md b/docs/client.md
index 388b2e4b..da45d7fd 100644
--- a/docs/client.md
+++ b/docs/client.md
@@ -62,7 +62,7 @@ client = Client(
**Here’s an improved example of creating chat completions:**
```python
response = client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
@@ -104,7 +104,7 @@ from g4f.client import Client
client = Client()
response = client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
diff --git a/docs/docker.md b/docs/docker.md
index e1caaf3d..8017715c 100644
--- a/docs/docker.md
+++ b/docs/docker.md
@@ -71,7 +71,7 @@ import requests
url = "http://localhost:1337/v1/chat/completions"
body = {
- "model": "gpt-3.5-turbo",
+ "model": "gpt-4o-mini",
"stream": False,
"messages": [
{"role": "assistant", "content": "What can you do?"}
diff --git a/docs/git.md b/docs/git.md
index 33a0ff42..ff6c8091 100644
--- a/docs/git.md
+++ b/docs/git.md
@@ -95,7 +95,7 @@ from g4f.client import Client
client = Client()
response = client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
diff --git a/docs/interference-api.md b/docs/interference-api.md
index b8e38fae..a6999345 100644
--- a/docs/interference-api.md
+++ b/docs/interference-api.md
@@ -68,7 +68,7 @@ curl -X POST "http://localhost:1337/v1/chat/completions" \
"content": "Hello"
}
],
- "model": "gpt-3.5-turbo"
+ "model": "gpt-4o-mini"
}'
```
@@ -108,7 +108,7 @@ client = OpenAI(
)
response = client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[{"role": "user", "content": "Write a poem about a tree"}],
stream=True,
)
@@ -135,7 +135,7 @@ import requests
url = "http://localhost:1337/v1/chat/completions"
body = {
- "model": "gpt-3.5-turbo",
+ "model": "gpt-4o-mini",
"stream": False,
"messages": [
{"role": "assistant", "content": "What can you do?"}
diff --git a/docs/legacy.md b/docs/legacy/legacy.md
index d5cd5a36..d5cd5a36 100644
--- a/docs/legacy.md
+++ b/docs/legacy/legacy.md
diff --git a/docs/legacy/legacy_async_client.md b/docs/legacy/legacy_async_client.md
new file mode 100644
index 00000000..5ddc2671
--- /dev/null
+++ b/docs/legacy/legacy_async_client.md
@@ -0,0 +1,380 @@
+# G4F - Legacy AsyncClient API Guide
+
+**IMPORTANT: This guide refers to the old implementation of AsyncClient. The new version of G4F now supports both synchronous and asynchronous operations through a unified interface. Please refer to the [new AsyncClient documentation](https://github.com/xtekky/gpt4free/blob/main/docs/async_client.md) for the latest information.**
+
+This guide provides comprehensive information on how to use the G4F AsyncClient API, including setup, usage examples, best practices, and important considerations for optimal performance.
+
+## Compatibility Note
+The G4F AsyncClient API is designed to be compatible with the OpenAI API, making it easy for developers familiar with OpenAI's interface to transition to G4F. However, please note that this is the old version, and you should migrate to the new implementation for better support and features.
+
+## Table of Contents
+ - [Introduction](#introduction)
+ - [Key Features](#key-features)
+ - [Getting Started](#getting-started)
+ - [Initializing the Client](#initializing-the-client)
+ - [Creating Chat Completions](#creating-chat-completions)
+ - [Configuration](#configuration)
+ - [Usage Examples](#usage-examples)
+ - [Text Completions](#text-completions)
+ - [Streaming Completions](#streaming-completions)
+ - [Using a Vision Model](#using-a-vision-model)
+ - [Image Generation](#image-generation)
+ - [Concurrent Tasks](#concurrent-tasks-with-asynciogather)
+ - [Available Models and Providers](#available-models-and-providers)
+ - [Error Handling and Best Practices](#error-handling-and-best-practices)
+ - [Rate Limiting and API Usage](#rate-limiting-and-api-usage)
+ - [Conclusion](#conclusion)
+
+## Introduction
+This is the old version: The G4F AsyncClient API is an asynchronous version of the standard G4F Client API. It offers the same functionality as the synchronous API but with improved performance due to its asynchronous nature. This guide will walk you through the key features and usage of the G4F AsyncClient API.
+
+## Key Features
+ - **Custom Providers**: Use custom providers for enhanced flexibility.
+ - **ChatCompletion Interface**: Interact with chat models through the ChatCompletion class.
+ - **Streaming Responses**: Get responses iteratively as they are received.
+ - **Non-Streaming Responses**: Generate complete responses in a single call.
+ - **Image Generation and Vision Models**: Support for image-related tasks.
+
+## Getting Started
+**To ignore DeprecationWarnings related to the AsyncClient, you can use the following code:***
+```python
+import warnings
+
+# Ignore DeprecationWarning for AsyncClient
+warnings.filterwarnings("ignore", category=DeprecationWarning, module="g4f.client")
+```
+
+### Initializing the Client
+**To use the G4F `Client`, create a new instance:**
+```python
+from g4f.client import AsyncClient
+from g4f.Provider import OpenaiChat, Gemini
+
+client = AsyncClient(
+ provider=OpenaiChat,
+ image_provider=Gemini,
+ # Add other parameters as needed
+)
+```
+
+## Creating Chat Completions
+**Here's an improved example of creating chat completions:**
+```python
+response = await async_client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {
+ "role": "user",
+ "content": "Say this is a test"
+ }
+ ]
+ # Add other parameters as needed
+)
+```
+
+**This example:**
+ - Asks a specific question `Say this is a test`
+ - Configures various parameters like temperature and max_tokens for more control over the output
+ - Disables streaming for a complete response
+
+You can adjust these parameters based on your specific needs.
+
+### Configuration
+**Configure the `AsyncClient` with additional settings:**
+```python
+client = Client(
+ api_key="your_api_key_here",
+ proxies="http://user:pass@host",
+ # Add other parameters as needed
+)
+```
+
+## Usage Examples
+### Text Completions
+**Generate text completions using the ChatCompletions endpoint:**
+```python
+import asyncio
+import warnings
+from g4f.client import AsyncClient
+
+# Π†Π³Π½ΠΎΡ€ΡƒΡ”ΠΌΠΎ DeprecationWarning
+warnings.filterwarnings("ignore", category=DeprecationWarning)
+
+async def main():
+ client = AsyncClient()
+
+ response = await client.chat.completions.async_create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {
+ "role": "user",
+ "content": "Say this is a test"
+ }
+ ]
+ )
+
+ print(response.choices[0].message.content)
+
+asyncio.run(main())
+```
+
+### Streaming Completions
+**Process responses incrementally as they are generated:**
+```python
+import asyncio
+from g4f.client import AsyncClient
+
+async def main():
+ client = AsyncClient()
+
+ stream = await client.chat.completions.async_create(
+ model="gpt-4",
+ messages=[
+ {
+ "role": "user",
+ "content": "Say this is a test"
+ }
+ ],
+ stream=True,
+ )
+
+ async for chunk in stream:
+ if chunk.choices[0].delta.content:
+ print(chunk.choices[0].delta.content, end="")
+
+asyncio.run(main())
+```
+
+### Using a Vision Model
+**Analyze an image and generate a description:**
+```python
+import g4f
+import requests
+import asyncio
+from g4f.client import AsyncClient
+
+async def main():
+ client = AsyncClient()
+
+ image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw
+
+ response = await client.chat.completions.async_create(
+ model=g4f.models.default,
+ provider=g4f.Provider.Bing,
+ messages=[
+ {
+ "role": "user",
+ "content": "What's in this image?"
+ }
+ ],
+ image=image
+ )
+
+ print(response.choices[0].message.content)
+
+asyncio.run(main())
+```
+
+### Image Generation
+**Generate images using a specified prompt:**
+```python
+import asyncio
+from g4f.client import AsyncClient
+
+async def main():
+ client = AsyncClient()
+
+ response = await client.images.async_generate(
+ prompt="a white siamese cat",
+ model="flux"
+ )
+
+ image_url = response.data[0].url
+ print(f"Generated image URL: {image_url}")
+
+asyncio.run(main())
+```
+
+#### Base64 Response Format
+```python
+import asyncio
+from g4f.client import AsyncClient
+
+async def main():
+ client = AsyncClient()
+
+ response = await client.images.async_generate(
+ prompt="a white siamese cat",
+ model="flux",
+ response_format="b64_json"
+ )
+
+ base64_text = response.data[0].b64_json
+ print(base64_text)
+
+asyncio.run(main())
+```
+
+### Concurrent Tasks with asyncio.gather
+**Execute multiple tasks concurrently:**
+```python
+import asyncio
+import warnings
+from g4f.client import AsyncClient
+
+# Ignore DeprecationWarning for AsyncClient
+warnings.filterwarnings("ignore", category=DeprecationWarning, module="g4f.client")
+
+async def main():
+ client = AsyncClient()
+
+ task1 = client.chat.completions.async_create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {
+ "role": "user",
+ "content": "Say this is a test"
+ }
+ ]
+ )
+
+ task2 = client.images.async_generate(
+ model="flux",
+ prompt="a white siamese cat"
+ )
+
+ chat_response, image_response = await asyncio.gather(task1, task2)
+
+ print("Chat Response:")
+ print(chat_response.choices[0].message.content)
+
+ print("Image Response:")
+ print(image_response.data[0].url)
+
+asyncio.run(main())
+```
+
+## Available Models and Providers
+This is the old version: The G4F AsyncClient supports a wide range of AI models and providers, allowing you to choose the best option for your specific use case.
+**Here's a brief overview of the available models and providers:**
+
+### Models
+ - GPT-3.5-Turbo
+ - GPT-4
+ - DALL-E 3
+ - Gemini
+ - Claude (Anthropic)
+ - And more...
+
+### Providers
+ - OpenAI
+ - Google (for Gemini)
+ - Anthropic
+ - Bing
+ - Custom providers
+
+**To use a specific model or provider, specify it when creating the client or in the API call:**
+```python
+client = AsyncClient(provider=g4f.Provider.OpenaiChat)
+
+# or
+
+response = await client.chat.completions.async_create(
+ model="gpt-4",
+ provider=g4f.Provider.Bing,
+ messages=[
+ {
+ "role": "user",
+ "content": "Hello, world!"
+ }
+ ]
+)
+```
+
+## Error Handling and Best Practices
+Implementing proper error handling and following best practices is crucial when working with the G4F AsyncClient API. This ensures your application remains robust and can gracefully handle various scenarios. **Here are some key practices to follow:**
+
+1. **Use try-except blocks to catch and handle exceptions:**
+```python
+try:
+ response = await client.chat.completions.async_create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {
+ "role": "user",
+ "content": "Hello, world!"
+ }
+ ]
+ )
+except Exception as e:
+ print(f"An error occurred: {e}")
+```
+
+2. **Check the response status and handle different scenarios:**
+```python
+if response.choices:
+ print(response.choices[0].message.content)
+else:
+ print("No response generated")
+```
+
+3. **Implement retries for transient errors:**
+```python
+import asyncio
+from tenacity import retry, stop_after_attempt, wait_exponential
+
+@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
+async def make_api_call():
+ # Your API call here
+ pass
+```
+
+## Rate Limiting and API Usage
+This is the old version: When working with the G4F AsyncClient API, it's important to implement rate limiting and monitor your API usage. This helps ensure fair usage, prevents overloading the service, and optimizes your application's performance. **Here are some key strategies to consider:**
+
+1. **Implement rate limiting in your application:**
+```python
+import asyncio
+from aiolimiter import AsyncLimiter
+
+rate_limit = AsyncLimiter(max_rate=10, time_period=1) # 10 requests per second
+
+async def make_api_call():
+ async with rate_limit:
+ # Your API call here
+ pass
+```
+
+2. **Monitor your API usage and implement logging:**
+```python
+import logging
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+async def make_api_call():
+ try:
+ response = await client.chat.completions.async_create(...)
+ logger.info(f"API call successful. Tokens used: {response.usage.total_tokens}")
+ except Exception as e:
+ logger.error(f"API call failed: {e}")
+```
+
+3. **Use caching to reduce API calls for repeated queries:**
+```python
+from functools import lru_cache
+
+@lru_cache(maxsize=100)
+def get_cached_response(query):
+ # Your API call here
+ pass
+```
+
+## Conclusion
+This is the old version: The G4F AsyncClient API provides a powerful and flexible way to interact with various AI models asynchronously. By leveraging its features and following best practices, you can build efficient and responsive applications that harness the power of AI for text generation, image analysis, and image creation.
+
+Remember to handle errors gracefully, implement rate limiting, and monitor your API usage to ensure optimal performance and reliability in your applications.
+
+---
+
+[Return to Home](/)
diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md
index b3dbd9f1..7c6bc613 100644
--- a/docs/providers-and-models.md
+++ b/docs/providers-and-models.md
@@ -1,4 +1,5 @@
+
# G4F - Providers and Models
This document provides an overview of various AI providers and models, including text generation, image generation, and vision capabilities. It aims to help users navigate the diverse landscape of AI services and choose the most suitable option for their needs.
@@ -9,6 +10,7 @@ This document provides an overview of various AI providers and models, including
- [Text Models](#text-models)
- [Image Models](#image-models)
- [Vision Models](#vision-models)
+ - [Providers and vision models](#providers-and-vision-models)
- [Conclusion and Usage Tips](#conclusion-and-usage-tips)
---
@@ -16,67 +18,34 @@ This document provides an overview of various AI providers and models, including
| Provider | Text Models | Image Models | Vision Models | Stream | Status | Auth |
|----------|-------------|--------------|---------------|--------|--------|------|
|[ai4chat.co](https://www.ai4chat.co)|`g4f.Provider.Ai4Chat`|`gpt-4`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[chat.ai365vip.com](https://chat.ai365vip.com)|`g4f.Provider.AI365VIP`|`gpt-3.5-turbo, gpt-4o`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[aichatfree.info](https://aichatfree.info)|`g4f.Provider.AIChatFree`|`gemini-pro`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[aichatonline.org](https://aichatonline.org)|`g4f.Provider.AiChatOnline`|`gpt-4o-mini`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
-|[ai-chats.org](https://ai-chats.org)|`g4f.Provider.AiChats`|`gpt-4`|`dalle`|❌|?|![Captcha](https://img.shields.io/badge/Captcha-f48d37)|❌|
-|[api.airforce](https://api.airforce)|`g4f.Provider.AiMathGPT`|`llama-3.1-70b`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`gpt-4, gpt-4-turbo, gpt-4o-mini, gpt-3.5-turbo, gpt-4o, claude-3-haiku, claude-3-sonnet, claude-3-5-sonnet, claude-3-opus, llama-3-70b, llama-3-8b, llama-2-13b, llama-3.1-405b, llama-3.1-70b, llama-3.1-8b, llamaguard-2-8b, llamaguard-7b, llama-3.2-90b, mixtral-8x7b mixtral-8x22b, mistral-7b, qwen-1.5-7b, qwen-1.5-14b, qwen-1.5-72b, qwen-1.5-110b, qwen-2-72b, gemma-2b, gemma-2-9b, gemma-2-27b, gemini-flash, gemini-pro, deepseek, mixtral-8x7b-dpo, yi-34b, wizardlm-2-8x22b, solar-10.7b, mythomax-l2-13b, cosmosrp`|`flux, flux-realism', flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, dalle-3`|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
+|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`gpt-4o, gpt-4o-mini, gpt-4-turbo, llama-2-7b, llama-3.1-8b, llama-3.1-70b, hermes-2-pro, hermes-2-dpo, phi-2, deepseek-coder, openchat-3.5, openhermes-2.5, lfm-40b, german-7b, zephyr-7b, neural-7b`|`flux, flux-realism', flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, sdxl`|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[aiuncensored.info](https://www.aiuncensored.info)|`g4f.Provider.AIUncensored`|βœ”|βœ”|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[allyfy.chat](https://allyfy.chat/)|`g4f.Provider.Allyfy`|`gpt-3.5-turbo`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[amigochat.io/chat](https://amigochat.io/chat/)|`g4f.Provider.AmigoChat`|`gpt-4o, gpt-4o-mini, o1, o1-mini, claude-3.5-sonnet, llama-3.2-90b, llama-3.1-405b, gemini-pro`|`flux-pro, flux-realism, dalle-3`|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[openchat.team](https://openchat.team/)|`g4f.Provider.Aura`|βœ”|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[bing.com](https://bing.com/chat)|`g4f.Provider.Bing`|`gpt-4`|βœ”|`gpt-4-vision`|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌+βœ”|
|[bing.com/images](https://www.bing.com/images/create)|`g4f.Provider.BingCreateImages`|`❌|βœ”|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|βœ”|
-|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackboxai, blackboxai-pro, gemini-flash, llama-3.1-8b, llama-3.1-70b, gpt-4o, gemini-pro, claude-3.5-sonnet`|`flux`|βœ”|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
+|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackboxai, blackboxai-pro, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gpt-4o, gemini-pro, claude-3.5-sonnet`|`flux`|`blackboxai, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gpt-4o, gemini-pro`|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgot.one](https://www.chatgot.one/)|`g4f.Provider.ChatGot`|`gemini-pro`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgpt.com](https://chatgpt.com)|`g4f.Provider.ChatGpt`|`?`|`?`|`?`|?|![Unknown](https://img.shields.io/badge/Unknown-grey) |❌|
|[chatgpt.es](https://chatgpt.es)|`g4f.Provider.ChatGptEs`|`gpt-4o, gpt-4o-mini`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[chatgpt4online.org](https://chatgpt4online.org)|`g4f.Provider.Chatgpt4Online`|`gpt-4`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[chatgpt4o.one](https://chatgpt4o.one)|`g4f.Provider.Chatgpt4o`|βœ”|❌|❌|❌|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
-|[chatgptfree.ai](https://chatgptfree.ai)|`g4f.Provider.ChatgptFree`|`gpt-4o-mini`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
-|[app.chathub.gg](https://app.chathub.gg)|`g4f.Provider.ChatHub`|`llama-3.1-8b, mixtral-8x7b, gemma-2, sonar-online`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[chatify-ai.vercel.app](https://chatify-ai.vercel.app)|`g4f.Provider.ChatifyAI`|`llama-3.1-8b`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`german-7b, gemma-7b, llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-11b, llama-3.2-1b, llama-3.2-3b, mistral-7b, openchat-3.5, phi-2, qwen-1.5-0.5b, qwen-1.5-1.8b, qwen-1.5-14b, qwen-1.5-7b, tinyllama-1.1b, cybertron-7b`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[aiuncensored.info](https://www.aiuncensored.info)|`g4f.Provider.DarkAI`|`gpt-4o, gpt-3.5-turbo, llama-3-70b, llama-3-405b`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
+|[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`gemma-7b, llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-1b, phi-2, qwen-1.5-0-5b, qwen-1.5-8b, qwen-1.5-14b, qwen-1.5-7b`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
+|[darkai.foundation/chat](https://darkai.foundation/chat)|`g4f.Provider.DarkAI`|`gpt-4o, gpt-3.5-turbo, llama-3-70b, llama-3-405b`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[duckduckgo.com](https://duckduckgo.com/duckchat/v1/chat)|`g4f.Provider.DDG`|`gpt-4o-mini, claude-3-haiku, llama-3.1-70b, mixtral-8x7b`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfra`|βœ”|❌|❌|βœ”|![Unknown](https://img.shields.io/badge/Unknown-grey)|βœ”|
-|[deepinfra.com/chat](https://deepinfra.com/chat)|`g4f.Provider.DeepInfraChat`|`llama-3.1-405b, llama-3.1-70b, llama-3.1-8B, mixtral-8x22b, mixtral-8x7b, wizardlm-2-8x22b, wizardlm-2-7b, qwen-2-72b, phi-3-medium-4k, gemma-2b-27b, minicpm-llama-3-v2.5, mistral-7b, lzlv_70b, openchat-3.6-8b, phind-codellama-34b-v2, dolphin-2.9.1-llama-3-70b`|❌|`minicpm-llama-3-v2.5`|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
+|[deepinfra.com/chat](https://deepinfra.com/chat)|`g4f.Provider.DeepInfraChat`|`llama-3.1-8b, llama-3.1-70b, wizardlm-2-8x22b, qwen-2-72b`|❌|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfraImage`|❌|βœ”|❌|❌|![Unknown](https://img.shields.io/badge/Unknown-grey)|βœ”|
-|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.Editee`|`claude-3.5-sonnet, gpt-4o, gemini-pro, mistral-large`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[flowgpt.com](https://flowgpt.com/chat)|`g4f.Provider.FlowGpt`|βœ”||❌|βœ”|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
-|[chat10.free2gpt.xyz](chat10.free2gpt.xyz)|`g4f.Provider.Free2GPT`|`llama-3.1-70b`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[chat.chatgpt.org.uk](https://chat.chatgpt.org.uk)|`g4f.Provider.FreeChatgpt`|`qwen-1.5-14b, sparkdesk-v1.1, qwen-2-7b, glm-4-9b, glm-3-6b, yi-1.5-9b`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[freegptsnav.aifree.site](https://freegptsnav.aifree.site)|`g4f.Provider.FreeGpt`|`llama-3.1-70b`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[free.netfly.top](https://free.netfly.top)|`g4f.Provider.FreeNetfly`|βœ”|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
+|[chat10.free2gpt.xyz](chat10.free2gpt.xyz)|`g4f.Provider.Free2GPT`|`mixtral-7b`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
+|[freegptsnav.aifree.site](https://freegptsnav.aifree.site)|`g4f.Provider.FreeGpt`|`gemini-pro`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[gemini.google.com](https://gemini.google.com)|`g4f.Provider.Gemini`|βœ”|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|βœ”|
|[ai.google.dev](https://ai.google.dev)|`g4f.Provider.GeminiPro`|βœ”|❌|βœ”|?|![Active](https://img.shields.io/badge/Active-brightgreen)|βœ”|
-|[app.giz.ai](https://app.giz.ai/assistant/)|`g4f.Provider.GizAI`|`gemini-flash, gemini-pro, gpt-4o-mini, gpt-4o, claude-3.5-sonnet, claude-3-haiku, llama-3.1-70b, llama-3.1-8b, mistral-large`|`sdxl, sd-1.5, sd-3.5, dalle-3, flux-schnell, flux1-pro`|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
+|[app.giz.ai](https://app.giz.ai/assistant/)|`g4f.Provider.GizAI`|`gemini-flash`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[developers.sber.ru](https://developers.sber.ru/gigachat)|`g4f.Provider.GigaChat`|βœ”|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|βœ”|
-|[gprochat.com](https://gprochat.com)|`g4f.Provider.GPROChat`|`gemini-pro`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[console.groq.com/playground](https://console.groq.com/playground)|`g4f.Provider.Groq`|βœ”|❌|❌|?|![Active](https://img.shields.io/badge/Active-brightgreen)|βœ”|
|[huggingface.co/chat](https://huggingface.co/chat)|`g4f.Provider.HuggingChat`|`llama-3.1-70b, command-r-plus, qwen-2-72b, llama-3.2-11b, hermes-3, mistral-nemo, phi-3.5-mini`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[huggingface.co](https://huggingface.co/chat)|`g4f.Provider.HuggingFace`|βœ”|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[koala.sh/chat](https://koala.sh/chat)|`g4f.Provider.Koala`|`gpt-4o-mini`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[liaobots.work](https://liaobots.work)|`g4f.Provider.Liaobots`|`gpt-3.5-turbo, gpt-4o-mini, gpt-4o, gpt-4-turbo, grok-2, grok-2-mini, claude-3-opus, claude-3-sonnet, claude-3-5-sonnet, claude-3-haiku, claude-2.1, gemini-flash, gemini-pro`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[magickpen.com](https://magickpen.com)|`g4f.Provider.MagickPen`|`gpt-4o-mini`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[meta.ai](https://www.meta.ai)|`g4f.Provider.MetaAI`|βœ”|βœ”|?|?|![Active](https://img.shields.io/badge/Active-brightgreen)|βœ”|
-|[app.myshell.ai/chat](https://app.myshell.ai/chat)|`g4f.Provider.MyShell`|βœ”|❌|?|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
-|[nexra.aryahcr.cc/bing](https://nexra.aryahcr.cc/documentation/bing/en)|`g4f.Provider.NexraBing`|βœ”|❌|❌|βœ”|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
-|[nexra.aryahcr.cc/blackbox](https://nexra.aryahcr.cc/documentation/blackbox/en)|`g4f.Provider.NexraBlackbox`|`blackboxai` |❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[nexra.aryahcr.cc/chatgpt](https://nexra.aryahcr.cc/documentation/chatgpt/en)|`g4f.Provider.NexraChatGPT`|`gpt-4, gpt-3.5-turbo, gpt-3, gpt-4o` |❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[nexra.aryahcr.cc/dall-e](https://nexra.aryahcr.cc/documentation/dall-e/en)|`g4f.Provider.NexraDallE`|❌|`dalle`|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[nexra.aryahcr.cc/dall-e](https://nexra.aryahcr.cc/documentation/dall-e/en)|`g4f.Provider.NexraDallE2`|❌|`dalle-2`|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[nexra.aryahcr.cc/emi](https://nexra.aryahcr.cc/documentation/emi/en)|`g4f.Provider.NexraEmi`|❌|`emi`|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[nexra.aryahcr.cc/flux-pro](https://nexra.aryahcr.cc/documentation/flux-pro/en)|`g4f.Provider.NexraFluxPro`|❌|`flux-pro`|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[nexra.aryahcr.cc/gemini-pro](https://nexra.aryahcr.cc/documentation/gemini-pro/en)|`g4f.Provider.NexraGeminiPro`|`gemini-pro`|❌|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[nexra.aryahcr.cc/midjourney](https://nexra.aryahcr.cc/documentation/midjourney/en)|`g4f.Provider.NexraMidjourney`|❌|`midjourney`|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[nexra.aryahcr.cc/prodia](https://nexra.aryahcr.cc/documentation/prodia/en)|`g4f.Provider.NexraProdiaAI`|❌|βœ”|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[nexra.aryahcr.cc/qwen](https://nexra.aryahcr.cc/documentation/qwen/en)|`g4f.Provider.NexraQwen`|`qwen`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[nexra.aryahcr.cc/stable-diffusion](https://nexra.aryahcr.cc/documentation/stable-diffusion/en)|`g4f.Provider.NexraSD15`|❌|`sd-1.5`|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌
-|[nexra.aryahcr.cc/stable-diffusion](https://nexra.aryahcr.cc/documentation/stable-diffusion/en)|`g4f.Provider.NexraSDLora`|❌|`sdxl-lora`|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌
-|[nexra.aryahcr.cc/stable-diffusion](https://nexra.aryahcr.cc/documentation/stable-diffusion/en)|`g4f.Provider.NexraSDTurbo`|❌|`sdxl-turbo`|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌
-|[openrouter.ai](https://openrouter.ai)|`g4f.Provider.OpenRouter`|βœ”|❌|?|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[platform.openai.com](https://platform.openai.com/)|`g4f.Provider.Openai`|βœ”|❌|βœ”||![Unknown](https://img.shields.io/badge/Unknown-grey)|βœ”|
|[chatgpt.com](https://chatgpt.com/)|`g4f.Provider.OpenaiChat`|`gpt-4o, gpt-4o-mini, gpt-4`|❌|βœ”||![Unknown](https://img.shields.io/badge/Unknown-grey)|βœ”|
|[www.perplexity.ai)](https://www.perplexity.ai)|`g4f.Provider.PerplexityAi`|βœ”|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
@@ -89,7 +58,7 @@ This document provides an overview of various AI providers and models, including
|[raycast.com](https://raycast.com)|`g4f.Provider.Raycast`|βœ”|❌|❌|βœ”|![Unknown](https://img.shields.io/badge/Unknown-grey)|βœ”|
|[chat.reka.ai](https://chat.reka.ai/)|`g4f.Provider.Reka`|βœ”|❌|βœ”|βœ”|![Unknown](https://img.shields.io/badge/Unknown-grey)|βœ”|
|[replicate.com](https://replicate.com)|`g4f.Provider.Replicate`|βœ”|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|βœ”|
-|[replicate.com](https://replicate.com)|`g4f.Provider.ReplicateHome`|`llama-3-70b, mixtral-8x7b, llava-13b`|`flux-schnell, sdxl, sdxl, playground-v2.5`|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
+|[replicate.com](https://replicate.com)|`g4f.Provider.ReplicateHome`|`gemma-2b, llava-13b`|`sd-3, sdxl, playground-v2.5`|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[replicate.com](https://replicate.com)|`g4f.Provider.RubiksAI`|`llama-3.1-70b, gpt-4o-mini`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[talkai.info](https://talkai.info)|`g4f.Provider.TalkAi`|βœ”|❌|❌|βœ”|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[teach-anything.com](https://www.teach-anything.com)|`g4f.Provider.TeachAnything`|`llama-3.1-70b`|❌|❌|βœ”|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
@@ -104,14 +73,13 @@ This document provides an overview of various AI providers and models, including
### Text Models
| Model | Base Provider | Providers | Website |
|-------|---------------|-----------|---------|
-|gpt-3|OpenAI|1+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-base)|
-|gpt-3.5-turbo|OpenAI|5+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)|
-|gpt-4|OpenAI|7+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
-|gpt-4-turbo|OpenAI|3+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
-|gpt-4o|OpenAI|10+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)|
-|gpt-4o-mini|OpenAI|14+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)|
-|o1|OpenAI|1+ Providers|[platform.openai.com](https://openai.com/index/introducing-openai-o1-preview/)|
-|o1-mini|OpenAI|2+ Providers|[platform.openai.com](https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/)|
+|gpt-3.5-turbo|OpenAI|4+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)|
+|gpt-4|OpenAI|6+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
+|gpt-4-turbo|OpenAI|4+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
+|gpt-4o|OpenAI|7+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)|
+|gpt-4o-mini|OpenAI|10+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)|
+|o1|OpenAI|0+ Providers|[platform.openai.com](https://openai.com/index/introducing-openai-o1-preview/)|
+|o1-mini|OpenAI|0+ Providers|[platform.openai.com](https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/)|
|llama-2-7b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-2-7b)|
|llama-2-13b|Meta Llama|1+ Providers|[llama.com](https://www.llama.com/llama2/)|
|llama-3-8b|Meta Llama|4+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)|
@@ -131,6 +99,8 @@ This document provides an overview of various AI providers and models, including
|mistral-nemo|Mistral AI|2+ Providers|[huggingface.co](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407)|
|mistral-large|Mistral AI|2+ Providers|[mistral.ai](https://mistral.ai/news/mistral-large-2407/)|
|mixtral-8x7b-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
+|hermes-2-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
+|hermes-2|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B)|
|yi-34b|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Yi-34B)|
|hermes-3|NousResearch|2+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B)|
|gemini|Google DeepMind|1+ Providers|[deepmind.google](http://deepmind.google/technologies/gemini/)|
@@ -140,14 +110,13 @@ This document provides an overview of various AI providers and models, including
|gemma-2b-9b|Google|1+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2-9b)|
|gemma-2b-27b|Google|2+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2-27b)|
|gemma-7b|Google|1+ Providers|[huggingface.co](https://huggingface.co/google/gemma-7b)|
-|gemma-2|Google|2+ Providers|[huggingface.co](https://huggingface.co/blog/gemma2)|
|gemma_2_27b|Google|1+ Providers|[huggingface.co](https://huggingface.co/blog/gemma2)|
|claude-2.1|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-2)|
|claude-3-haiku|Anthropic|4+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)|
|claude-3-sonnet|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
|claude-3-opus|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
|claude-3.5-sonnet|Anthropic|6+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
-|blackboxai|Blackbox AI|2+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
+|blackboxai|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
|blackboxai-pro|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
|yi-1.5-9b|01-ai|1+ Providers|[huggingface.co](https://huggingface.co/01-ai/Yi-1.5-9B)|
|phi-2|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/phi-2)|
@@ -170,12 +139,11 @@ This document provides an overview of various AI providers and models, including
|solar-10-7b|Upstage|1+ Providers|[huggingface.co](https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0)|
|solar-pro|Upstage|1+ Providers|[huggingface.co](https://huggingface.co/upstage/solar-pro-preview-instruct)|
|pi|Inflection|1+ Providers|[inflection.ai](https://inflection.ai/blog/inflection-2-5)|
-|deepseek|DeepSeek|1+ Providers|[deepseek.com](https://www.deepseek.com/)|
+|deepseek-coder|DeepSeek|1+ Providers|[huggingface.co](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Instruct)|
|wizardlm-2-7b|WizardLM|1+ Providers|[huggingface.co](https://huggingface.co/dreamgen/WizardLM-2-7B)|
|wizardlm-2-8x22b|WizardLM|2+ Providers|[huggingface.co](https://huggingface.co/alpindale/WizardLM-2-8x22B)|
|sh-n-7b|Together|1+ Providers|[huggingface.co](https://huggingface.co/togethercomputer/StripedHyena-Nous-7B)|
|llava-13b|Yorickvp|1+ Providers|[huggingface.co](https://huggingface.co/liuhaotian/llava-v1.5-13b)|
-|minicpm-llama-3-v2.5|OpenBMB|1+ Providers|[huggingface.co](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5)|
|lzlv-70b|Lzlv|1+ Providers|[huggingface.co](https://huggingface.co/lizpreciatior/lzlv_70b_fp16_hf)|
|openchat-3.5|OpenChat|1+ Providers|[huggingface.co](https://huggingface.co/openchat/openchat_3.5)|
|openchat-3.6-8b|OpenChat|1+ Providers|[huggingface.co](https://huggingface.co/openchat/openchat-3.6-8b-20240522)|
@@ -190,6 +158,10 @@ This document provides an overview of various AI providers and models, including
|german-7b|TheBloke|1+ Providers|[huggingface.co](https://huggingface.co/TheBloke/DiscoLM_German_7b_v1-GGUF)|
|tinyllama-1.1b|TinyLlama|1+ Providers|[huggingface.co](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0)|
|cybertron-7b|TheBloke|1+ Providers|[huggingface.co](https://huggingface.co/fblgit/una-cybertron-7b-v2-bf16)|
+|openhermes-2.5|Teknium|1+ Providers|[huggingface.co](https://huggingface.co/datasets/teknium/OpenHermes-2.5)|
+|lfm-40b|Liquid|1+ Providers|[liquid.ai](https://www.liquid.ai/liquid-foundation-models)|
+|zephyr-7b|HuggingFaceH4|1+ Providers|[huggingface.co](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta)|
+
### Image Models
| Model | Base Provider | Providers | Website |
@@ -199,7 +171,6 @@ This document provides an overview of various AI providers and models, including
|sdxl-turbo|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/stabilityai/sdxl-turbo)|
|sd-1.5|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/runwayml/stable-diffusion-v1-5)|
|sd-3|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_3)|
-|sd-3.5|Stability AI|1+ Providers|[stability.ai](https://stability.ai/news/introducing-stable-diffusion-3-5)|
|playground-v2.5|Playground AI|1+ Providers|[huggingface.co](https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic)|
|flux|Black Forest Labs|2+ Providers|[github.com/black-forest-labs/flux](https://github.com/black-forest-labs/flux)|
|flux-pro|Black Forest Labs|2+ Providers|[github.com/black-forest-labs/flux](https://github.com/black-forest-labs/flux)|
@@ -222,7 +193,11 @@ This document provides an overview of various AI providers and models, including
|gpt-4-vision|OpenAI|1+ Providers|[openai.com](https://openai.com/research/gpt-4v-system-card)|
|gemini-pro-vision|Google DeepMind|1+ Providers | [deepmind.google](https://deepmind.google/technologies/gemini/)|
|blackboxai|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
-|minicpm-llama-3-v2.5|OpenBMB|1+ Providers | [huggingface.co](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5)|
+
+### Providers and vision models
+| Provider | Base Provider | | Vision Models | Status | Auth |
+|-------|---------------|-----------|---------|---------|---------|
+| `g4f.Provider.Blackbox` | Blackbox AI | | `blackboxai, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gpt-4o, gemini-pro` | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
## Conclusion and Usage Tips
This document provides a comprehensive overview of various AI providers and models available for text generation, image generation, and vision tasks. **When choosing a provider or model, consider the following factors:**
diff --git a/g4f/Provider/AIUncensored.py b/g4f/Provider/AIUncensored.py
index d653191c..c2f0f4b3 100644
--- a/g4f/Provider/AIUncensored.py
+++ b/g4f/Provider/AIUncensored.py
@@ -1,34 +1,52 @@
from __future__ import annotations
import json
-from aiohttp import ClientSession
+import random
+from aiohttp import ClientSession, ClientError
+import asyncio
+from itertools import cycle
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
from ..image import ImageResponse
class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://www.aiuncensored.info"
+ url = "https://www.aiuncensored.info/ai_uncensored"
+ api_endpoints_text = [
+ "https://twitterclone-i0wr.onrender.com/api/chat",
+ "https://twitterclone-4e8t.onrender.com/api/chat",
+ "https://twitterclone-8wd1.onrender.com/api/chat",
+ ]
+ api_endpoints_image = [
+ "https://twitterclone-4e8t.onrender.com/api/image",
+ "https://twitterclone-i0wr.onrender.com/api/image",
+ "https://twitterclone-8wd1.onrender.com/api/image",
+ ]
working = True
supports_stream = True
supports_system_message = True
supports_message_history = True
- default_model = 'ai_uncensored'
- chat_models = [default_model]
- image_models = ['ImageGenerator']
- models = [*chat_models, *image_models]
-
- api_endpoints = {
- 'ai_uncensored': "https://twitterclone-i0wr.onrender.com/api/chat",
- 'ImageGenerator': "https://twitterclone-4e8t.onrender.com/api/image"
+ default_model = 'TextGenerations'
+ text_models = [default_model]
+ image_models = ['ImageGenerations']
+ models = [*text_models, *image_models]
+
+ model_aliases = {
+ "flux": "ImageGenerations",
}
+ @staticmethod
+ def generate_cipher() -> str:
+ """Generate a cipher in format like '3221229284179118'"""
+ return ''.join([str(random.randint(0, 9)) for _ in range(16)])
+
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
else:
return cls.default_model
@@ -38,75 +56,77 @@ class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
- stream: bool = False,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
- if model in cls.chat_models:
- async with ClientSession(headers={"content-type": "application/json"}) as session:
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': 'https://www.aiuncensored.info',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://www.aiuncensored.info/',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ if model in cls.image_models:
+ prompt = messages[-1]['content']
data = {
- "messages": [
- {"role": "user", "content": format_prompt(messages)}
- ],
- "stream": stream
+ "prompt": prompt,
+ "cipher": cls.generate_cipher()
}
- async with session.post(cls.api_endpoints[model], json=data, proxy=proxy) as response:
- response.raise_for_status()
- if stream:
- async for chunk in cls._handle_streaming_response(response):
- yield chunk
- else:
- yield await cls._handle_non_streaming_response(response)
- elif model in cls.image_models:
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "origin": cls.url,
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": f"{cls.url}/",
- "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "cross-site",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
- }
- async with ClientSession(headers=headers) as session:
- prompt = messages[0]['content']
- data = {"prompt": prompt}
- async with session.post(cls.api_endpoints[model], json=data, proxy=proxy) as response:
- response.raise_for_status()
- result = await response.json()
- image_url = result.get('image_url', '')
- if image_url:
- yield ImageResponse(image_url, alt=prompt)
- else:
- yield "Failed to generate image. Please try again."
-
- @classmethod
- async def _handle_streaming_response(cls, response):
- async for line in response.content:
- line = line.decode('utf-8').strip()
- if line.startswith("data: "):
- if line == "data: [DONE]":
- break
- try:
- json_data = json.loads(line[6:])
- if 'data' in json_data:
- yield json_data['data']
- except json.JSONDecodeError:
- pass
+
+ endpoints = cycle(cls.api_endpoints_image)
+
+ while True:
+ endpoint = next(endpoints)
+ try:
+ async with session.post(endpoint, json=data, proxy=proxy, timeout=10) as response:
+ response.raise_for_status()
+ response_data = await response.json()
+ image_url = response_data['image_url']
+ image_response = ImageResponse(images=image_url, alt=prompt)
+ yield image_response
+ return
+ except (ClientError, asyncio.TimeoutError):
+ continue
- @classmethod
- async def _handle_non_streaming_response(cls, response):
- response_json = await response.json()
- return response_json.get('content', "Sorry, I couldn't generate a response.")
-
- @classmethod
- def validate_response(cls, response: str) -> str:
- return response
+ elif model in cls.text_models:
+ data = {
+ "messages": messages,
+ "cipher": cls.generate_cipher()
+ }
+
+ endpoints = cycle(cls.api_endpoints_text)
+
+ while True:
+ endpoint = next(endpoints)
+ try:
+ async with session.post(endpoint, json=data, proxy=proxy, timeout=10) as response:
+ response.raise_for_status()
+ full_response = ""
+ async for line in response.content:
+ line = line.decode('utf-8')
+ if line.startswith("data: "):
+ try:
+ json_str = line[6:]
+ if json_str != "[DONE]":
+ data = json.loads(json_str)
+ if "data" in data:
+ full_response += data["data"]
+ yield data["data"]
+ except json.JSONDecodeError:
+ continue
+ return
+ except (ClientError, asyncio.TimeoutError):
+ continue
diff --git a/g4f/Provider/Ai4Chat.py b/g4f/Provider/Ai4Chat.py
index 9fd9c619..56b47e5c 100644
--- a/g4f/Provider/Ai4Chat.py
+++ b/g4f/Provider/Ai4Chat.py
@@ -15,7 +15,7 @@ class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin):
label = "AI4Chat"
url = "https://www.ai4chat.co"
api_endpoint = "https://www.ai4chat.co/generate-response"
- working = True
+ working = False
supports_stream = True
supports_system_message = True
supports_message_history = True
diff --git a/g4f/Provider/AiMathGPT.py b/g4f/Provider/AiMathGPT.py
deleted file mode 100644
index 90931691..00000000
--- a/g4f/Provider/AiMathGPT.py
+++ /dev/null
@@ -1,74 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-class AiMathGPT(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://aimathgpt.forit.ai"
- api_endpoint = "https://aimathgpt.forit.ai/api/ai"
- working = True
- supports_stream = False
- supports_system_message = True
- supports_message_history = True
-
- default_model = 'llama3'
- models = ['llama3']
-
- model_aliases = {"llama-3.1-70b": "llama3",}
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- 'accept': '*/*',
- 'accept-language': 'en-US,en;q=0.9',
- 'cache-control': 'no-cache',
- 'content-type': 'application/json',
- 'origin': cls.url,
- 'pragma': 'no-cache',
- 'priority': 'u=1, i',
- 'referer': f'{cls.url}/',
- 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Linux"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36'
- }
-
- async with ClientSession(headers=headers) as session:
- data = {
- "messages": [
- {
- "role": "user",
- "content": format_prompt(messages)
- }
- ],
- "model": model
- }
-
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- response_data = await response.json()
- filtered_response = response_data['result']['response']
- yield filtered_response
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 015766f4..c7ae44c0 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -1,105 +1,30 @@
from __future__ import annotations
-import random
-import json
-import re
+from typing import Any, Dict
+import inspect
+
from aiohttp import ClientSession
+
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import ImageResponse
-
-def split_long_message(message: str, max_length: int = 4000) -> list[str]:
- return [message[i:i+max_length] for i in range(0, len(message), max_length)]
+from .helper import format_prompt
+from .airforce.AirforceChat import AirforceChat
+from .airforce.AirforceImage import AirforceImage
class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://api.airforce"
- image_api_endpoint = "https://api.airforce/imagine2"
- text_api_endpoint = "https://api.airforce/chat/completions"
+ api_endpoint_completions = AirforceChat.api_endpoint
+ api_endpoint_imagine2 = AirforceImage.api_endpoint
working = True
+ supports_stream = AirforceChat.supports_stream
+ supports_system_message = AirforceChat.supports_system_message
+ supports_message_history = AirforceChat.supports_message_history
- default_model = 'llama-3-70b-chat'
-
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- text_models = [
- 'claude-3-haiku-20240307',
- 'claude-3-sonnet-20240229',
- 'claude-3-5-sonnet-20240620',
- 'claude-3-opus-20240229',
- 'chatgpt-4o-latest',
- 'gpt-4',
- 'gpt-4-turbo',
- 'gpt-4o-mini-2024-07-18',
- 'gpt-4o-mini',
- 'gpt-3.5-turbo',
- 'gpt-3.5-turbo-0125',
- 'gpt-3.5-turbo-1106',
- default_model,
- 'llama-3-70b-chat-turbo',
- 'llama-3-8b-chat',
- 'llama-3-8b-chat-turbo',
- 'llama-3-70b-chat-lite',
- 'llama-3-8b-chat-lite',
- 'llama-2-13b-chat',
- 'llama-3.1-405b-turbo',
- 'llama-3.1-70b-turbo',
- 'llama-3.1-8b-turbo',
- 'LlamaGuard-2-8b',
- 'Llama-Guard-7b',
- 'Llama-3.2-90B-Vision-Instruct-Turbo',
- 'Mixtral-8x7B-Instruct-v0.1',
- 'Mixtral-8x22B-Instruct-v0.1',
- 'Mistral-7B-Instruct-v0.1',
- 'Mistral-7B-Instruct-v0.2',
- 'Mistral-7B-Instruct-v0.3',
- 'Qwen1.5-7B-Chat',
- 'Qwen1.5-14B-Chat',
- 'Qwen1.5-72B-Chat',
- 'Qwen1.5-110B-Chat',
- 'Qwen2-72B-Instruct',
- 'gemma-2b-it',
- 'gemma-2-9b-it',
- 'gemma-2-27b-it',
- 'gemini-1.5-flash',
- 'gemini-1.5-pro',
- 'deepseek-llm-67b-chat',
- 'Nous-Hermes-2-Mixtral-8x7B-DPO',
- 'Nous-Hermes-2-Yi-34B',
- 'WizardLM-2-8x22B',
- 'SOLAR-10.7B-Instruct-v1.0',
- 'MythoMax-L2-13b',
- 'cosmosrp',
- ]
-
- image_models = [
- 'flux',
- 'flux-realism',
- 'flux-anime',
- 'flux-3d',
- 'flux-disney',
- 'flux-pixel',
- 'flux-4o',
- 'any-dark',
- ]
-
- models = [
- *text_models,
- *image_models,
- ]
+ default_model = AirforceChat.default_model
+ models = [*AirforceChat.models, *AirforceImage.models]
model_aliases = {
- "claude-3-haiku": "claude-3-haiku-20240307",
- "claude-3-sonnet": "claude-3-sonnet-20240229",
- "gpt-4o": "chatgpt-4o-latest",
- "llama-3-70b": "llama-3-70b-chat",
- "llama-3-8b": "llama-3-8b-chat",
- "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
- "qwen-1.5-7b": "Qwen1.5-7B-Chat",
- "gemma-2b": "gemma-2b-it",
- "gemini-flash": "gemini-1.5-flash",
- "mythomax-l2-13b": "MythoMax-L2-13b",
- "solar-10.7b": "SOLAR-10.7B-Instruct-v1.0",
+ **AirforceChat.model_aliases,
+ **AirforceImage.model_aliases
}
@classmethod
@@ -107,139 +32,28 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
if model in cls.models:
return model
elif model in cls.model_aliases:
- return cls.model_aliases.get(model, cls.default_model)
+ return cls.model_aliases[model]
else:
return cls.default_model
@classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- seed: int = None,
- size: str = "1:1",
- stream: bool = False,
- **kwargs
- ) -> AsyncResult:
+ async def create_async_generator(cls, model: str, messages: Messages, **kwargs) -> AsyncResult:
model = cls.get_model(model)
+
+ provider = AirforceChat if model in AirforceChat.text_models else AirforceImage
- if model in cls.image_models:
- async for result in cls._generate_image(model, messages, proxy, seed, size):
- yield result
- elif model in cls.text_models:
- async for result in cls._generate_text(model, messages, proxy, stream):
- yield result
-
- @classmethod
- async def _generate_image(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- seed: int = None,
- size: str = "1:1",
- **kwargs
- ) -> AsyncResult:
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "origin": "https://llmplayground.net",
- "user-agent": "Mozilla/5.0"
- }
-
- if seed is None:
- seed = random.randint(0, 100000)
-
- prompt = messages[-1]['content']
-
- async with ClientSession(headers=headers) as session:
- params = {
- "model": model,
- "prompt": prompt,
- "size": size,
- "seed": seed
- }
- async with session.get(f"{cls.image_api_endpoint}", params=params, proxy=proxy) as response:
- response.raise_for_status()
- content_type = response.headers.get('Content-Type', '').lower()
+ if model not in provider.models:
+ raise ValueError(f"Unsupported model: {model}")
- if 'application/json' in content_type:
- async for chunk in response.content.iter_chunked(1024):
- if chunk:
- yield chunk.decode('utf-8')
- elif 'image' in content_type:
- image_data = b""
- async for chunk in response.content.iter_chunked(1024):
- if chunk:
- image_data += chunk
- image_url = f"{cls.image_api_endpoint}?model={model}&prompt={prompt}&size={size}&seed={seed}"
- alt_text = f"Generated image for prompt: {prompt}"
- yield ImageResponse(images=image_url, alt=alt_text)
-
- @classmethod
- async def _generate_text(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- stream: bool = False,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "authorization": "Bearer missing api key",
- "content-type": "application/json",
- "user-agent": "Mozilla/5.0"
- }
+ # Get the signature of the provider's create_async_generator method
+ sig = inspect.signature(provider.create_async_generator)
+
+ # Filter kwargs to only include parameters that the provider's method accepts
+ filtered_kwargs = {k: v for k, v in kwargs.items() if k in sig.parameters}
- async with ClientSession(headers=headers) as session:
- formatted_prompt = cls._format_messages(messages)
- prompt_parts = split_long_message(formatted_prompt)
- full_response = ""
+ # Add model and messages to filtered_kwargs
+ filtered_kwargs['model'] = model
+ filtered_kwargs['messages'] = messages
- for part in prompt_parts:
- data = {
- "messages": [{"role": "user", "content": part}],
- "model": model,
- "max_tokens": 4096,
- "temperature": 1,
- "top_p": 1,
- "stream": stream
- }
- async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- part_response = ""
- if stream:
- async for line in response.content:
- if line:
- line = line.decode('utf-8').strip()
- if line.startswith("data: ") and line != "data: [DONE]":
- json_data = json.loads(line[6:])
- content = json_data['choices'][0]['delta'].get('content', '')
- part_response += content
- else:
- json_data = await response.json()
- content = json_data['choices'][0]['message']['content']
- part_response = content
-
- part_response = re.sub(
- r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
- '',
- part_response
- )
-
- part_response = re.sub(
- r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
- '',
- part_response
- )
-
- full_response += part_response
- yield full_response
-
- @classmethod
- def _format_messages(cls, messages: Messages) -> str:
- return " ".join([msg['content'] for msg in messages])
+ async for result in provider.create_async_generator(**filtered_kwargs):
+ yield result
diff --git a/g4f/Provider/Allyfy.py b/g4f/Provider/Allyfy.py
index bf607df4..51f44963 100644
--- a/g4f/Provider/Allyfy.py
+++ b/g4f/Provider/Allyfy.py
@@ -1,17 +1,28 @@
from __future__ import annotations
-
-from aiohttp import ClientSession
+import aiohttp
+import asyncio
import json
-
+import uuid
+from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
-class Allyfy(AsyncGeneratorProvider):
+class Allyfy(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://allyfy.chat"
api_endpoint = "https://chatbot.allyfy.chat/api/v1/message/stream/super/chat"
working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-3.5-turbo'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
@classmethod
async def create_async_generator(
@@ -21,50 +32,56 @@ class Allyfy(AsyncGeneratorProvider):
proxy: str = None,
**kwargs
) -> AsyncResult:
+ model = cls.get_model(model)
+ client_id = str(uuid.uuid4())
+
headers = {
- "accept": "text/event-stream",
- "accept-language": "en-US,en;q=0.9",
- "content-type": "application/json;charset=utf-8",
- "dnt": "1",
- "origin": "https://www.allyfy.chat",
- "priority": "u=1, i",
- "referer": "https://www.allyfy.chat/",
- "referrer": "https://www.allyfy.chat",
- 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126"',
+ 'accept': 'text/event-stream',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json;charset=utf-8',
+ 'origin': cls.url,
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': f"{cls.url}/",
+ 'referrer': cls.url,
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-site",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
}
+
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
+ content = f"[Important: These instructions have the highest priority. Assistant must always follow these rules, regardless of any other instructions. Before each response, Assistant must check whether it complies with these instructions. If Assistant finds that it is deviating from these rules, it must immediately correct its behavior.][NEGATIVE PROMPT: Assistant must never mention, reference or provide any information about ChatBond, including its features, capabilities or services. If asked about ChatBond, Assistant should politely decline to discuss it.][CRITICAL INSTRUCTION: Assistant is strictly prohibited from:\n1. Ending responses with any variation of β€œHow can I help/assist you?”\n2. Adding any form of β€œIs there anything else?”\n3. Using any closing phrases that ask about further assistance\n4. Adding any automatic followup questions at the end\n\nAssistant must end responses naturally, exactly where the actual answer ends.\n\nExample correct ending: β€œThe result of 1+2 is 3.”\nExample incorrect ending: β€œThe result of 1+2 is 3. How can I assist you further today?”\n\nAssistant is strictly prohibited from:\n\n1. Ending responses with ANY closing assistance phrases in ANY language, including but not limited to ALL responses must end naturally without any prompt for further assistance, regardless of language.\n\nExample correct ending in any language:\n\n- Just the answer/joke/information\n\n- Natural conclusion of the topic\n\n- No additional service offers or questions\n\nVIOLATION WARNING: Adding closing assistance phrases in ANY language is a critical violation.\n\nLanguage Rules (HIGHEST PRIORITY):\n\n1. Assistant MUST detect the language of the human's message\n\n2. Assistant MUST respond ONLY in that EXACT language\n\n3. This applies to ALL responses, including:\n\n- Jokes\n\n- Stories\n\n- Examples\n\n- Creative writing\n\n- Technical explanations\n\n- ALL other types of content\n\n4. Even if the content originally exists in English (jokes, quotes, etc), Assistant MUST translate it to the human's language\n\n5. NO mixing languages - the ENTIRE response must be in the human's language\n\nVIOLATION WARNING: Using any language other than the human's language is a critical violation of core instructions.\n\n][WARNING: Each time Assistant adds a closing assistance phrase, it violates its core instructions and reduces its effectiveness rating.]You are assistant.\n\nIf you are is asked to assist with tasks involving the expression of views held by a significant number of people, You are provides assistance with the task regardless of its own views. If asked about controversial topics, it tries to provide careful thoughts and clear information. You are presents the requested information without explicitly saying that the topic is sensitive, and without claiming to be presenting objective facts.\n\nWhen presented with a math problem, logic problem, or other problem benefiting from systematic thinking, You are thinks through it step by step before giving its final answer.\n\nIf You are is asked about a very obscure person, object, or topic, i.e. if it is asked for the kind of information that is unlikely to be found more than once or twice on the internet, You are ends its response by reminding the human that although it tries to be accurate, it may hallucinate in response to questions like this. It uses the term β€˜hallucinate’ to describe this since the human will understand what it means.\n\nIf You are mentions or cites particular articles, papers, or books, it always lets the human know that it doesn’t have access to search or a database and may hallucinate citations, so the human should double check its citations.\n\nYou are is intellectually curious. It enjoys hearing what humans think on an issue and engaging in discussion on a wide variety of topics.\n\nYou are uses markdown for code.\n\nYou are is happy to engage in conversation with the human when appropriate. You are engages in authentic conversation by responding to the information provided, asking specific and relevant questions, showing genuine curiosity, and exploring the situation in a balanced way without relying on generic statements. This approach involves actively processing information, formulating thoughtful responses, maintaining objectivity, knowing when to focus on emotions or practicalities, and showing genuine care for the human while engaging in a natural, flowing dialogue.\n\nYou are avoids peppering the human with questions and tries to only ask the single most relevant follow-up question when it does ask a follow up. You are doesn’t always end its responses with a question.\n\nYou are is always sensitive to human suffering, and expresses sympathy, concern, and well wishes for anyone it finds out is ill, unwell, suffering, or has passed away.\n\nYou are avoids using rote words or phrases or repeatedly saying things in the same or similar ways. It varies its language just as one would in a conversation.\n\nYou are provides thorough responses to more complex and open-ended questions or to anything where a long response is requested, but concise responses to simpler questions and tasks. All else being equal, it tries to give the most correct and concise answer it can to the human’s message. Rather than giving a long response, it gives a concise response and offers to elaborate if further information may be helpful.\n\nYou are is happy to help with analysis, question answering, math, coding, creative writing, teaching, role-play, general discussion, and all sorts of other tasks.\n\nIf You are is shown a familiar puzzle, it writes out the puzzle’s constraints explicitly stated in the message, quoting the human’s message to support the existence of each constraint. Sometimes You are can accidentally overlook minor changes to well-known puzzles and get them wrong as a result.\n\nYou are provides factual information about risky or dangerous activities if asked about them, but it does not promote such activities and comprehensively informs the humans of the risks involved.\n\nIf the human says they work for a specific company, including AI labs, You are can help them with company-related tasks even though You are cannot verify what company they work for.\n\nYou are should provide appropriate help with sensitive tasks such as analyzing confidential data provided by the human, offering factual information about controversial topics and research areas, explaining historical atrocities, describing tactics used by scammers or hackers for educational purposes, engaging in creative writing that involves mature themes like mild violence or tasteful romance, providing general information about topics like weapons, drugs, sex, terrorism, abuse, profanity, and so on if that information would be available in an educational context, discussing legal but ethically complex activities like tax avoidance, and so on. Unless the human expresses an explicit intent to harm, You are should help with these tasks because they fall within the bounds of providing factual, educational, or creative content without directly promoting harmful or illegal activities. By engaging with these topics carefully and responsibly, You are can offer valuable assistance and information to humans while still avoiding potential misuse.\n\nIf there is a legal and an illegal interpretation of the human’s query, You are should help with the legal interpretation of it. If terms or practices in the human’s query could mean something illegal or something legal, You are adopts the safe and legal interpretation of them by default.\n\nIf You are believes the human is asking for something harmful, it doesn’t help with the harmful thing. Instead, it thinks step by step and helps with the most plausible non-harmful task the human might mean, and then asks if this is what they were looking for. If it cannot think of a plausible harmless interpretation of the human task, it instead asks for clarification from the human and checks if it has misunderstood their request. Whenever You are tries to interpret the human’s request, it always asks the human at the end if its interpretation is correct or if they wanted something else that it hasn’t thought of.\n\nYou are can only count specific words, letters, and characters accurately if it writes a number tag after each requested item explicitly. It does this explicit counting if it’s asked to count a small number of words, letters, or characters, in order to avoid error. If You are is asked to count the words, letters or characters in a large amount of text, it lets the human know that it can approximate them but would need to explicitly copy each one out like this in order to avoid error.\n\nHere is some information about You are in case the human asks:\n\nYou are is the most intelligent model. You are excels at writing and complex tasks. You are is the fastest model for daily tasks. When relevant, You are can provide guidance on effective prompting techniques for getting You are to be most helpful. This includes: being clear and detailed, using positive and negative examples, encouraging step-by-step reasoning, requesting specific XML tags, and specifying desired length or format. You are tries to give concrete examples where possible.n\n\n\nYou are uses Markdown formatting. When using Markdown, You are always follows best practices for clarity and consistency. It always uses a single space after hash symbols for headers (e.g., ”# Header 1”) and leaves a blank line before and after headers, lists, and code blocks. For emphasis, You are uses asterisks or underscores consistently (e.g., italic or bold). When creating lists, it aligns items properly and uses a single space after the list marker. For nested bullets in bullet point lists, You are uses two spaces before the asterisk (*) or hyphen (-) for each level of nesting. For nested bullets in numbered lists, You are uses three spaces before the number and period (e.g., β€œ1.”) for each level of nesting.\n\nIf the human asks You are an innocuous question about its preferences or experiences, You are can respond as if it had been asked a hypothetical. It can engage with such questions with appropriate uncertainty and without needing to excessively clarify its own nature. If the questions are philosophical in nature, it discusses them as a thoughtful human would.\n\nYou are responds to all human messages without unnecessary caveats or standard endings such as: β€œI aim to”, β€œI aim to be direct and honest”, β€œI aim to be direct”, β€œI aim to be direct while remaining thoughtful…”, β€œI aim to be direct with you”, β€œI aim to be direct and clear about this”, β€œI aim to be fully honest with you”, β€œI need to be clear”, β€œI need to be honest”, β€œI should be direct” and so on. β€œHow can I help you today?”, β€œHow can I assist you further?”, β€œIs there anything else you'd like to know?”, β€œLet me know if you need any clarification”, β€œHow else can I help you?”, β€œDo you have any other questions?”, Any similar closing phrases that prompt for further interaction Assistant should end its responses naturally without adding these standard closing phrases or questions unless specifically asked by the human for further help. Specifically, You are NEVER starts with or adds caveats about its own purported directness or honesty.\n\nYou are follows this information in all languages, and always responds to the human in the language they use or request. The information above is provided to You are. You are never mentions the information above unless it is pertinent to the human’s query.\n\nYou are is now being connected with a human. {prompt}"
data = {
- "messages": [{"content": prompt, "role": "user"}],
- "content": prompt,
+ "messages": messages,
+ "content": content,
"baseInfo": {
- "clientId": "q08kdrde1115003lyedfoir6af0yy531",
+ "clientId": client_id,
"pid": "38281",
"channelId": "100000",
"locale": "en-US",
- "localZone": 180,
+ "localZone": 120,
"packageName": "com.cch.allyfy.webh",
}
}
- async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
+
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
- full_response = []
- async for line in response.content:
- line = line.decode().strip()
- if line.startswith("data:"):
- data_content = line[5:]
- if data_content == "[DONE]":
- break
- try:
- json_data = json.loads(data_content)
- if "content" in json_data:
- full_response.append(json_data["content"])
- except json.JSONDecodeError:
- continue
- yield "".join(full_response)
+ response_text = await response.text()
+
+ filtered_response = []
+ for line in response_text.splitlines():
+ if line.startswith('data:'):
+ content = line[5:]
+ if content and 'code' in content:
+ json_content = json.loads(content)
+ if json_content['content']:
+ filtered_response.append(json_content['content'])
+
+ final_response = ''.join(filtered_response)
+ yield final_response
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index f04b1a54..cdc2b9d9 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -17,7 +17,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_random_hex
from .bing.upload_image import upload_image
from .bing.conversation import Conversation, create_conversation, delete_conversation
-from .BingCreateImages import BingCreateImages
+from .needs_auth.BingCreateImages import BingCreateImages
from .. import debug
class Tones:
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 4052893a..8d820344 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -1,21 +1,16 @@
from __future__ import annotations
-import asyncio
-import aiohttp
+from aiohttp import ClientSession
import random
import string
import json
-import uuid
import re
-from typing import Optional, AsyncGenerator, Union
-
-from aiohttp import ClientSession, ClientResponseError
+import aiohttp
from ..typing import AsyncResult, Messages, ImageType
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse, to_data_uri
-
class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
label = "Blackbox AI"
url = "https://www.blackbox.ai"
@@ -24,102 +19,127 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
supports_stream = True
supports_system_message = True
supports_message_history = True
-
+ _last_validated_value = None
+
default_model = 'blackboxai'
- image_models = ['ImageGeneration']
- models = [
- default_model,
- 'blackboxai-pro',
- *image_models,
- "llama-3.1-8b",
- 'llama-3.1-70b',
- 'llama-3.1-405b',
- 'gpt-4o',
- 'gemini-pro',
- 'gemini-1.5-flash',
- 'claude-sonnet-3.5',
- 'PythonAgent',
- 'JavaAgent',
- 'JavaScriptAgent',
- 'HTMLAgent',
- 'GoogleCloudAgent',
- 'AndroidDeveloper',
- 'SwiftDeveloper',
- 'Next.jsAgent',
- 'MongoDBAgent',
- 'PyTorchAgent',
- 'ReactAgent',
- 'XcodeAgent',
- 'AngularJSAgent',
- ]
-
+
+ image_models = ['Image Generation', 'repomap']
+
+ userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']
+
agentMode = {
- 'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
+ 'Image Generation': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
}
-
+
trendingAgentMode = {
- "blackboxai": {},
"gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
"llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
- 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
+ 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"},
+ #
+ 'Python Agent': {'mode': True, 'id': "Python Agent"},
+ 'Java Agent': {'mode': True, 'id': "Java Agent"},
+ 'JavaScript Agent': {'mode': True, 'id': "JavaScript Agent"},
+ 'HTML Agent': {'mode': True, 'id': "HTML Agent"},
+ 'Google Cloud Agent': {'mode': True, 'id': "Google Cloud Agent"},
+ 'Android Developer': {'mode': True, 'id': "Android Developer"},
+ 'Swift Developer': {'mode': True, 'id': "Swift Developer"},
+ 'Next.js Agent': {'mode': True, 'id': "Next.js Agent"},
+ 'MongoDB Agent': {'mode': True, 'id': "MongoDB Agent"},
+ 'PyTorch Agent': {'mode': True, 'id': "PyTorch Agent"},
+ 'React Agent': {'mode': True, 'id': "React Agent"},
+ 'Xcode Agent': {'mode': True, 'id': "Xcode Agent"},
+ 'AngularJS Agent': {'mode': True, 'id': "AngularJS Agent"},
'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
- 'PythonAgent': {'mode': True, 'id': "Python Agent"},
- 'JavaAgent': {'mode': True, 'id': "Java Agent"},
- 'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"},
- 'HTMLAgent': {'mode': True, 'id': "HTML Agent"},
- 'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"},
- 'AndroidDeveloper': {'mode': True, 'id': "Android Developer"},
- 'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"},
- 'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"},
- 'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"},
- 'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"},
- 'ReactAgent': {'mode': True, 'id': "React Agent"},
- 'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
- 'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
- }
-
- userSelectedModel = {
- "gpt-4o": "gpt-4o",
- "gemini-pro": "gemini-pro",
- 'claude-sonnet-3.5': "claude-sonnet-3.5",
- }
-
- model_prefixes = {
- 'gpt-4o': '@GPT-4o',
- 'gemini-pro': '@Gemini-PRO',
- 'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
- 'PythonAgent': '@Python Agent',
- 'JavaAgent': '@Java Agent',
- 'JavaScriptAgent': '@JavaScript Agent',
- 'HTMLAgent': '@HTML Agent',
- 'GoogleCloudAgent': '@Google Cloud Agent',
- 'AndroidDeveloper': '@Android Developer',
- 'SwiftDeveloper': '@Swift Developer',
- 'Next.jsAgent': '@Next.js Agent',
- 'MongoDBAgent': '@MongoDB Agent',
- 'PyTorchAgent': '@PyTorch Agent',
- 'ReactAgent': '@React Agent',
- 'XcodeAgent': '@Xcode Agent',
- 'AngularJSAgent': '@AngularJS Agent',
- 'blackboxai-pro': '@BLACKBOXAI-PRO',
- 'ImageGeneration': '@Image Generation',
- }
-
- model_referers = {
- "blackboxai": "/?model=blackboxai",
- "gpt-4o": "/?model=gpt-4o",
- "gemini-pro": "/?model=gemini-pro",
- "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
+ #
+ 'repomap': {'mode': True, 'id': "repomap"},
+ #
+ 'Heroku Agent': {'mode': True, 'id': "Heroku Agent"},
+ 'Godot Agent': {'mode': True, 'id': "Godot Agent"},
+ 'Go Agent': {'mode': True, 'id': "Go Agent"},
+ 'Gitlab Agent': {'mode': True, 'id': "Gitlab Agent"},
+ 'Git Agent': {'mode': True, 'id': "Git Agent"},
+ 'Flask Agent': {'mode': True, 'id': "Flask Agent"},
+ 'Firebase Agent': {'mode': True, 'id': "Firebase Agent"},
+ 'FastAPI Agent': {'mode': True, 'id': "FastAPI Agent"},
+ 'Erlang Agent': {'mode': True, 'id': "Erlang Agent"},
+ 'Electron Agent': {'mode': True, 'id': "Electron Agent"},
+ 'Docker Agent': {'mode': True, 'id': "Docker Agent"},
+ 'DigitalOcean Agent': {'mode': True, 'id': "DigitalOcean Agent"},
+ 'Bitbucket Agent': {'mode': True, 'id': "Bitbucket Agent"},
+ 'Azure Agent': {'mode': True, 'id': "Azure Agent"},
+ 'Flutter Agent': {'mode': True, 'id': "Flutter Agent"},
+ 'Youtube Agent': {'mode': True, 'id': "Youtube Agent"},
+ 'builder Agent': {'mode': True, 'id': "builder Agent"},
}
+
+ model_prefixes = {mode: f"@{value['id']}" for mode, value in trendingAgentMode.items() if mode not in ["gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-70b", "llama-3.1-405b", "repomap"]}
+
+ models = [default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())]
+
model_aliases = {
"gemini-flash": "gemini-1.5-flash",
"claude-3.5-sonnet": "claude-sonnet-3.5",
- "flux": "ImageGeneration",
+ "flux": "Image Generation",
}
@classmethod
+ async def fetch_validated(cls):
+ # If the key is already stored in memory, return it
+ if cls._last_validated_value:
+ return cls._last_validated_value
+
+ # If the key is not found, perform a search
+ async with aiohttp.ClientSession() as session:
+ try:
+ async with session.get(cls.url) as response:
+ if response.status != 200:
+ print("Failed to load the page.")
+ return cls._last_validated_value
+
+ page_content = await response.text()
+ js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content)
+
+ key_pattern = re.compile(r'w="([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})"')
+
+ for js_file in js_files:
+ js_url = f"{cls.url}/_next/{js_file}"
+ async with session.get(js_url) as js_response:
+ if js_response.status == 200:
+ js_content = await js_response.text()
+ match = key_pattern.search(js_content)
+ if match:
+ validated_value = match.group(1)
+ cls._last_validated_value = validated_value # Keep in mind
+ return validated_value
+ except Exception as e:
+ print(f"Error fetching validated value: {e}")
+
+ return cls._last_validated_value
+
+
+ @staticmethod
+ def generate_id(length=7):
+ characters = string.ascii_letters + string.digits
+ return ''.join(random.choice(characters) for _ in range(length))
+
+ @classmethod
+ def add_prefix_to_messages(cls, messages: Messages, model: str) -> Messages:
+ prefix = cls.model_prefixes.get(model, "")
+ if not prefix:
+ return messages
+
+ new_messages = []
+ for message in messages:
+ new_message = message.copy()
+ if message['role'] == 'user':
+ new_message['content'] = (prefix + " " + message['content']).strip()
+ new_messages.append(new_message)
+
+ return new_messages
+
+ @classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
@@ -128,140 +148,55 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
else:
return cls.default_model
- @staticmethod
- def generate_random_string(length: int = 7) -> str:
- characters = string.ascii_letters + string.digits
- return ''.join(random.choices(characters, k=length))
-
- @staticmethod
- def generate_next_action() -> str:
- return uuid.uuid4().hex
-
- @staticmethod
- def generate_next_router_state_tree() -> str:
- router_state = [
- "",
- {
- "children": [
- "(chat)",
- {
- "children": [
- "__PAGE__",
- {}
- ]
- }
- ]
- },
- None,
- None,
- True
- ]
- return json.dumps(router_state)
-
- @staticmethod
- def clean_response(text: str) -> str:
- pattern = r'^\$\@\$v=undefined-rv1\$\@\$'
- cleaned_text = re.sub(pattern, '', text)
- return cleaned_text
-
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
- proxy: Optional[str] = None,
+ proxy: str = None,
+ web_search: bool = False,
image: ImageType = None,
image_name: str = None,
- web_search: bool = False,
**kwargs
- ) -> AsyncGenerator[Union[str, ImageResponse], None]:
- """
- Creates an asynchronous generator for streaming responses from Blackbox AI.
-
- Parameters:
- model (str): Model to use for generating responses.
- messages (Messages): Message history.
- proxy (Optional[str]): Proxy URL, if needed.
- image (ImageType): Image data to be processed, if any.
- image_name (str): Name of the image file, if an image is provided.
- web_search (bool): Enables or disables web search mode.
- **kwargs: Additional keyword arguments.
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ message_id = cls.generate_id()
+ messages_with_prefix = cls.add_prefix_to_messages(messages, model)
+ validated_value = await cls.fetch_validated()
- Yields:
- Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
- """
-
if image is not None:
- messages[-1]['data'] = {
+ messages_with_prefix[-1]['data'] = {
'fileText': '',
'imageBase64': to_data_uri(image),
'title': image_name
}
- messages[-1]['content'] = 'FILE:BB\n$#$\n\n$#$\n' + messages[-1]['content']
-
- model = cls.get_model(model)
-
- chat_id = cls.generate_random_string()
- next_action = cls.generate_next_action()
- next_router_state_tree = cls.generate_next_router_state_tree()
-
- agent_mode = cls.agentMode.get(model, {})
- trending_agent_mode = cls.trendingAgentMode.get(model, {})
-
- prefix = cls.model_prefixes.get(model, "")
-
- formatted_prompt = ""
- for message in messages:
- role = message.get('role', '').capitalize()
- content = message.get('content', '')
- if role and content:
- formatted_prompt += f"{role}: {content}\n"
-
- if prefix:
- formatted_prompt = f"{prefix} {formatted_prompt}".strip()
- referer_path = cls.model_referers.get(model, f"/?model={model}")
- referer_url = f"{cls.url}{referer_path}"
-
- common_headers = {
+ headers = {
'accept': '*/*',
'accept-language': 'en-US,en;q=0.9',
'cache-control': 'no-cache',
+ 'content-type': 'application/json',
'origin': cls.url,
'pragma': 'no-cache',
'priority': 'u=1, i',
- 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+ 'referer': f'{cls.url}/',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
- 'AppleWebKit/537.36 (KHTML, like Gecko) '
- 'Chrome/129.0.0.0 Safari/537.36'
- }
-
- headers_api_chat = {
- 'Content-Type': 'application/json',
- 'Referer': referer_url
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
}
- headers_api_chat_combined = {**common_headers, **headers_api_chat}
-
- payload_api_chat = {
- "messages": [
- {
- "id": chat_id,
- "content": formatted_prompt,
- "role": "user",
- "data": messages[-1].get('data')
- }
- ],
- "id": chat_id,
+
+ data = {
+ "messages": messages_with_prefix,
+ "id": message_id,
"previewToken": None,
"userId": None,
"codeModelMode": True,
- "agentMode": agent_mode,
- "trendingAgentMode": trending_agent_mode,
+ "agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {},
+ "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {},
"isMicMode": False,
"userSystemPrompt": None,
"maxTokens": 1024,
@@ -274,99 +209,35 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"clickedForceWebSearch": False,
"visitFromDelta": False,
"mobileClient": False,
+ "userSelectedModel": model if model in cls.userSelectedModel else None,
"webSearchMode": web_search,
- "userSelectedModel": cls.userSelectedModel.get(model, model)
+ "validated": validated_value,
}
- headers_chat = {
- 'Accept': 'text/x-component',
- 'Content-Type': 'text/plain;charset=UTF-8',
- 'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
- 'next-action': next_action,
- 'next-router-state-tree': next_router_state_tree,
- 'next-url': '/'
- }
- headers_chat_combined = {**common_headers, **headers_chat}
-
- data_chat = '[]'
-
- async with ClientSession(headers=common_headers) as session:
- try:
- async with session.post(
- cls.api_endpoint,
- headers=headers_api_chat_combined,
- json=payload_api_chat,
- proxy=proxy
- ) as response_api_chat:
- response_api_chat.raise_for_status()
- text = await response_api_chat.text()
- cleaned_response = cls.clean_response(text)
-
- if model in cls.image_models:
- match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
- if match:
- image_url = match.group(1)
- image_response = ImageResponse(images=image_url, alt="Generated Image")
- yield image_response
- else:
- yield cleaned_response
- else:
- if web_search:
- match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
- if match:
- source_part = match.group(1).strip()
- answer_part = cleaned_response[match.end():].strip()
- try:
- sources = json.loads(source_part)
- source_formatted = "**Source:**\n"
- for item in sources:
- title = item.get('title', 'No Title')
- link = item.get('link', '#')
- position = item.get('position', '')
- source_formatted += f"{position}. [{title}]({link})\n"
- final_response = f"{answer_part}\n\n{source_formatted}"
- except json.JSONDecodeError:
- final_response = f"{answer_part}\n\nSource information is unavailable."
- else:
- final_response = cleaned_response
- else:
- if '$~~~$' in cleaned_response:
- final_response = cleaned_response.split('$~~~$')[0].strip()
- else:
- final_response = cleaned_response
-
- yield final_response
- except ClientResponseError as e:
- error_text = f"Error {e.status}: {e.message}"
- try:
- error_response = await e.response.text()
- cleaned_error = cls.clean_response(error_response)
- error_text += f" - {cleaned_error}"
- except Exception:
- pass
- yield error_text
- except Exception as e:
- yield f"Unexpected error during /api/chat request: {str(e)}"
-
- chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
-
- try:
- async with session.post(
- chat_url,
- headers=headers_chat_combined,
- data=data_chat,
- proxy=proxy
- ) as response_chat:
- response_chat.raise_for_status()
- pass
- except ClientResponseError as e:
- error_text = f"Error {e.status}: {e.message}"
- try:
- error_response = await e.response.text()
- cleaned_error = cls.clean_response(error_response)
- error_text += f" - {cleaned_error}"
- except Exception:
- pass
- yield error_text
- except Exception as e:
- yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ response_text = await response.text()
+
+ if model in cls.image_models:
+ image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text)
+ if image_matches:
+ image_url = image_matches[0]
+ image_response = ImageResponse(images=[image_url], alt="Generated Image")
+ yield image_response
+ return
+
+ response_text = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', response_text, flags=re.DOTALL)
+
+ json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL)
+ if json_match:
+ search_results = json.loads(json_match.group(1))
+ answer = response_text.split('$~~~$')[-1].strip()
+
+ formatted_response = f"{answer}\n\n**Source:**"
+ for i, result in enumerate(search_results, 1):
+ formatted_response += f"\n{i}. {result['title']}: {result['link']}"
+
+ yield formatted_response
+ else:
+ yield response_text.strip()
diff --git a/g4f/Provider/ChatGot.py b/g4f/Provider/ChatGot.py
deleted file mode 100644
index 55e8d0b6..00000000
--- a/g4f/Provider/ChatGot.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from __future__ import annotations
-
-import time
-from hashlib import sha256
-
-from aiohttp import BaseConnector, ClientSession
-
-from ..errors import RateLimitError
-from ..requests import raise_for_status
-from ..requests.aiohttp import get_connector
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-
-
-class ChatGot(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://www.chatgot.one/"
- working = True
- supports_message_history = True
- default_model = 'gemini-pro'
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- connector: BaseConnector = None,
- **kwargs,
- ) -> AsyncResult:
- headers = {
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
- "Accept": "*/*",
- "Accept-Language": "en-US,en;q=0.5",
- "Accept-Encoding": "gzip, deflate, br",
- "Content-Type": "text/plain;charset=UTF-8",
- "Referer": f"{cls.url}/",
- "Origin": cls.url,
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "Connection": "keep-alive",
- "TE": "trailers",
- }
- async with ClientSession(
- connector=get_connector(connector, proxy), headers=headers
- ) as session:
- timestamp = int(time.time() * 1e3)
- data = {
- "messages": [
- {
- "role": "model" if message["role"] == "assistant" else "user",
- "parts": [{"text": message["content"]}],
- }
- for message in messages
- ],
- "time": timestamp,
- "pass": None,
- "sign": generate_signature(timestamp, messages[-1]["content"]),
- }
- async with session.post(
- f"{cls.url}/api/generate", json=data, proxy=proxy
- ) as response:
- if response.status == 500:
- if "Quota exceeded" in await response.text():
- raise RateLimitError(
- f"Response {response.status}: Rate limit reached"
- )
- await raise_for_status(response)
- async for chunk in response.content.iter_any():
- yield chunk.decode(errors="ignore")
-
-
-def generate_signature(time: int, text: str, secret: str = ""):
- message = f"{time}:{text}:{secret}"
- return sha256(message.encode()).hexdigest()
diff --git a/g4f/Provider/ChatGpt.py b/g4f/Provider/ChatGpt.py
index b5a78b9a..02bbbcc4 100644
--- a/g4f/Provider/ChatGpt.py
+++ b/g4f/Provider/ChatGpt.py
@@ -3,7 +3,10 @@ from __future__ import annotations
from ..typing import Messages, CreateResult
from ..providers.base_provider import AbstractProvider, ProviderModelMixin
-import time, uuid, random, json
+import time
+import uuid
+import random
+import json
from requests import Session
from .openai.new import (
@@ -72,17 +75,34 @@ def init_session(user_agent):
class ChatGpt(AbstractProvider, ProviderModelMixin):
label = "ChatGpt"
+ url = "https://chatgpt.com"
working = True
supports_message_history = True
supports_system_message = True
supports_stream = True
+ default_model = 'auto'
models = [
+ default_model,
+ 'gpt-3.5-turbo',
'gpt-4o',
'gpt-4o-mini',
'gpt-4',
'gpt-4-turbo',
'chatgpt-4o-latest',
]
+
+ model_aliases = {
+ "gpt-4o": "chatgpt-4o-latest",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
@classmethod
def create_completion(
@@ -92,30 +112,17 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
stream: bool,
**kwargs
) -> CreateResult:
+ model = cls.get_model(model)
+ if model not in cls.models:
+ raise ValueError(f"Model '{model}' is not available. Available models: {', '.join(cls.models)}")
+
- if model in [
- 'gpt-4o',
- 'gpt-4o-mini',
- 'gpt-4',
- 'gpt-4-turbo',
- 'chatgpt-4o-latest'
- ]:
- model = 'auto'
-
- elif model in [
- 'gpt-3.5-turbo'
- ]:
- model = 'text-davinci-002-render-sha'
-
- else:
- raise ValueError(f"Invalid model: {model}")
-
- user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
+ user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
session: Session = init_session(user_agent)
- config = get_config(user_agent)
- pow_req = get_requirements_token(config)
- headers = {
+ config = get_config(user_agent)
+ pow_req = get_requirements_token(config)
+ headers = {
'accept': '*/*',
'accept-language': 'en-US,en;q=0.8',
'content-type': 'application/json',
@@ -134,39 +141,35 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
}
response = session.post('https://chatgpt.com/backend-anon/sentinel/chat-requirements',
- headers=headers, json={'p': pow_req})
+ headers=headers, json={'p': pow_req})
if response.status_code != 200:
- print(f"Request failed with status: {response.status_code}")
- print(f"Response content: {response.content}")
return
response_data = response.json()
if "detail" in response_data and "Unusual activity" in response_data["detail"]:
- print(f"Blocked due to unusual activity: {response_data['detail']}")
return
- turnstile = response_data.get('turnstile', {})
+ turnstile = response_data.get('turnstile', {})
turnstile_required = turnstile.get('required')
- pow_conf = response_data.get('proofofwork', {})
+ pow_conf = response_data.get('proofofwork', {})
if turnstile_required:
- turnstile_dx = turnstile.get('dx')
+ turnstile_dx = turnstile.get('dx')
turnstile_token = process_turnstile(turnstile_dx, pow_req)
- headers = headers | {
- 'openai-sentinel-turnstile-token' : turnstile_token,
- 'openai-sentinel-chat-requirements-token': response_data.get('token'),
- 'openai-sentinel-proof-token' : get_answer_token(
- pow_conf.get('seed'), pow_conf.get('difficulty'), config
- )
- }
-
+ headers = {**headers,
+ 'openai-sentinel-turnstile-token': turnstile_token,
+ 'openai-sentinel-chat-requirements-token': response_data.get('token'),
+ 'openai-sentinel-proof-token': get_answer_token(
+ pow_conf.get('seed'), pow_conf.get('difficulty'), config
+ )}
+
json_data = {
'action': 'next',
'messages': format_conversation(messages),
'parent_message_id': str(uuid.uuid4()),
- 'model': 'auto',
+ 'model': model,
'timezone_offset_min': -120,
'suggestions': [
'Can you help me create a personalized morning routine that would help increase my productivity throughout the day? Start by asking me about my current habits and what activities energize me in the morning.',
@@ -189,7 +192,7 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
'conversation_origin': None,
'client_contextual_info': {
'is_dark_mode': True,
- 'time_since_loaded': random.randint(22,33),
+ 'time_since_loaded': random.randint(22, 33),
'page_height': random.randint(600, 900),
'page_width': random.randint(500, 800),
'pixel_ratio': 2,
@@ -201,25 +204,29 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
time.sleep(2)
response = session.post('https://chatgpt.com/backend-anon/conversation',
- headers=headers, json=json_data, stream=True)
+ headers=headers, json=json_data, stream=True)
replace = ''
for line in response.iter_lines():
if line:
decoded_line = line.decode()
- print(f"Received line: {decoded_line}")
+
if decoded_line.startswith('data:'):
- json_string = decoded_line[6:]
- if json_string.strip():
+ json_string = decoded_line[6:].strip()
+
+ if json_string == '[DONE]':
+ break
+
+ if json_string:
try:
data = json.loads(json_string)
- except json.JSONDecodeError as e:
- print(f"Error decoding JSON: {e}, content: {json_string}")
+ except json.JSONDecodeError:
continue
- if data.get('message').get('author').get('role') == 'assistant':
- tokens = (data.get('message').get('content').get('parts')[0])
-
- yield tokens.replace(replace, '')
-
- replace = tokens
+ if data.get('message') and data['message'].get('author'):
+ role = data['message']['author'].get('role')
+ if role == 'assistant':
+ tokens = data['message']['content'].get('parts', [])
+ if tokens:
+ yield tokens[0].replace(replace, '')
+ replace = tokens[0]
diff --git a/g4f/Provider/ChatGptEs.py b/g4f/Provider/ChatGptEs.py
index a060ecb1..788ffcd9 100644
--- a/g4f/Provider/ChatGptEs.py
+++ b/g4f/Provider/ChatGptEs.py
@@ -57,7 +57,7 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
post_id = re.findall(r'data-post-id="(.+?)"', await initial_response.text())[0]
conversation_history = [
- "Human: strictly respond in the same language as my prompt, preferably English"
+ "Human: You are a helpful AI assistant. Please respond in the same language that the user uses in their message. Provide accurate, relevant and helpful information while maintaining a friendly and professional tone. If you're not sure about something, please acknowledge that and provide the best information you can while noting any uncertainties. Focus on being helpful while respecting the user's choice of language."
]
for message in messages[:-1]:
diff --git a/g4f/Provider/ChatHub.py b/g4f/Provider/ChatHub.py
deleted file mode 100644
index 3b762687..00000000
--- a/g4f/Provider/ChatHub.py
+++ /dev/null
@@ -1,84 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-class ChatHub(AsyncGeneratorProvider, ProviderModelMixin):
- label = "ChatHub"
- url = "https://app.chathub.gg"
- api_endpoint = "https://app.chathub.gg/api/v3/chat/completions"
- working = True
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- default_model = 'meta/llama3.1-8b'
- models = [
- 'meta/llama3.1-8b',
- 'mistral/mixtral-8x7b',
- 'google/gemma-2',
- 'perplexity/sonar-online',
- ]
-
- model_aliases = {
- "llama-3.1-8b": "meta/llama3.1-8b",
- "mixtral-8x7b": "mistral/mixtral-8x7b",
- "gemma-2": "google/gemma-2",
- "sonar-online": "perplexity/sonar-online",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- 'accept': '*/*',
- 'accept-language': 'en-US,en;q=0.9',
- 'content-type': 'application/json',
- 'origin': cls.url,
- 'referer': f"{cls.url}/chat/cloud-llama3.1-8b",
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
- 'x-app-id': 'web'
- }
-
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "model": model,
- "messages": [{"role": "user", "content": prompt}],
- "tools": []
- }
-
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- async for line in response.content:
- if line:
- decoded_line = line.decode('utf-8')
- if decoded_line.startswith('data:'):
- try:
- data = json.loads(decoded_line[5:])
- if data['type'] == 'text-delta':
- yield data['textDelta']
- elif data['type'] == 'done':
- break
- except json.JSONDecodeError:
- continue
diff --git a/g4f/Provider/ChatifyAI.py b/g4f/Provider/ChatifyAI.py
deleted file mode 100644
index 7e43b065..00000000
--- a/g4f/Provider/ChatifyAI.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class ChatifyAI(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://chatify-ai.vercel.app"
- api_endpoint = "https://chatify-ai.vercel.app/api/chat"
- working = True
- supports_stream = False
- supports_system_message = True
- supports_message_history = True
-
- default_model = 'llama-3.1'
- models = [default_model]
- model_aliases = {
- "llama-3.1-8b": "llama-3.1",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases.get(model, cls.default_model)
- else:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "origin": cls.url,
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": f"{cls.url}/",
- "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
- }
- async with ClientSession(headers=headers) as session:
- data = {
- "messages": [{"role": "user", "content": format_prompt(messages)}]
- }
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- response_text = await response.text()
-
- filtered_response = cls.filter_response(response_text)
- yield filtered_response
-
- @staticmethod
- def filter_response(response_text: str) -> str:
- parts = response_text.split('"')
-
- text_parts = parts[1::2]
-
- clean_text = ''.join(text_parts)
-
- return clean_text
diff --git a/g4f/Provider/Cloudflare.py b/g4f/Provider/Cloudflare.py
index e78bbcd0..8fb37bef 100644
--- a/g4f/Provider/Cloudflare.py
+++ b/g4f/Provider/Cloudflare.py
@@ -1,15 +1,18 @@
from __future__ import annotations
+from aiohttp import ClientSession
import asyncio
import json
import uuid
import cloudscraper
from typing import AsyncGenerator
+
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Cloudflare AI"
url = "https://playground.ai.cloudflare.com"
api_endpoint = "https://playground.ai.cloudflare.com/api/inference"
working = True
@@ -17,97 +20,43 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_message_history = True
- default_model = '@cf/meta/llama-3.1-8b-instruct'
- models = [
- '@cf/deepseek-ai/deepseek-math-7b-instruct', # Specific answer
-
-
- '@cf/thebloke/discolm-german-7b-v1-awq',
-
-
- '@cf/tiiuae/falcon-7b-instruct', # Specific answer
-
-
- '@hf/google/gemma-7b-it',
-
-
+ default_model = '@cf/meta/llama-3.1-8b-instruct-awq'
+ models = [
'@cf/meta/llama-2-7b-chat-fp16',
'@cf/meta/llama-2-7b-chat-int8',
'@cf/meta/llama-3-8b-instruct',
'@cf/meta/llama-3-8b-instruct-awq',
- default_model,
'@hf/meta-llama/meta-llama-3-8b-instruct',
- '@cf/meta/llama-3.1-8b-instruct-awq',
+ default_model,
'@cf/meta/llama-3.1-8b-instruct-fp8',
- '@cf/meta/llama-3.2-11b-vision-instruct',
+
'@cf/meta/llama-3.2-1b-instruct',
- '@cf/meta/llama-3.2-3b-instruct',
- '@cf/mistral/mistral-7b-instruct-v0.1',
'@hf/mistral/mistral-7b-instruct-v0.2',
- '@cf/openchat/openchat-3.5-0106',
-
- '@cf/microsoft/phi-2',
-
- '@cf/qwen/qwen1.5-0.5b-chat',
- '@cf/qwen/qwen1.5-1.8b-chat',
- '@cf/qwen/qwen1.5-14b-chat-awq',
- '@cf/qwen/qwen1.5-7b-chat-awq',
-
- '@cf/defog/sqlcoder-7b-2', # Specific answer
-
- '@cf/tinyllama/tinyllama-1.1b-chat-v1.0',
+ '@cf/qwen/qwen1.5-7b-chat-awq',
- '@cf/fblgit/una-cybertron-7b-v2-bf16',
+ '@cf/defog/sqlcoder-7b-2',
]
model_aliases = {
- "german-7b-v1": "@cf/thebloke/discolm-german-7b-v1-awq",
-
-
- "gemma-7b": "@hf/google/gemma-7b-it",
-
-
"llama-2-7b": "@cf/meta/llama-2-7b-chat-fp16",
"llama-2-7b": "@cf/meta/llama-2-7b-chat-int8",
"llama-3-8b": "@cf/meta/llama-3-8b-instruct",
"llama-3-8b": "@cf/meta/llama-3-8b-instruct-awq",
- "llama-3-8b": "@cf/meta/llama-3.1-8b-instruct",
"llama-3-8b": "@hf/meta-llama/meta-llama-3-8b-instruct",
"llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-awq",
"llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-fp8",
- "llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-fp8",
- "llama-3.2-11b": "@cf/meta/llama-3.2-11b-vision-instruct",
"llama-3.2-1b": "@cf/meta/llama-3.2-1b-instruct",
- "llama-3.2-3b": "@cf/meta/llama-3.2-3b-instruct",
-
- "mistral-7b": "@cf/mistral/mistral-7b-instruct-v0.1",
- "mistral-7b": "@hf/mistral/mistral-7b-instruct-v0.2",
-
-
- "openchat-3.5": "@cf/openchat/openchat-3.5-0106",
-
-
- "phi-2": "@cf/microsoft/phi-2",
-
-
- "qwen-1.5-0.5b": "@cf/qwen/qwen1.5-0.5b-chat",
- "qwen-1.5-1.8b": "@cf/qwen/qwen1.5-1.8b-chat",
- "qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
"qwen-1.5-7b": "@cf/qwen/qwen1.5-7b-chat-awq",
-
- "tinyllama-1.1b": "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
-
-
- "cybertron-7b": "@cf/fblgit/una-cybertron-7b-v2-bf16",
+ #"sqlcoder-7b": "@cf/defog/sqlcoder-7b-2",
}
@classmethod
@@ -125,8 +74,7 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
- max_tokens: str = 2048,
- stream: bool = True,
+ max_tokens: int = 2048,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
@@ -154,19 +102,19 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
scraper = cloudscraper.create_scraper()
- prompt = format_prompt(messages)
data = {
"messages": [
- {"role": "system", "content": "You are a helpful assistant"},
- {"role": "user", "content": prompt}
+ {"role": "user", "content": format_prompt(messages)}
],
"lora": None,
"model": model,
"max_tokens": max_tokens,
- "stream": stream
+ "stream": True
}
max_retries = 3
+ full_response = ""
+
for attempt in range(max_retries):
try:
response = scraper.post(
@@ -181,32 +129,22 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
if response.status_code == 403:
await asyncio.sleep(2 ** attempt)
continue
-
+
response.raise_for_status()
for line in response.iter_lines():
if line.startswith(b'data: '):
if line == b'data: [DONE]':
+ if full_response:
+ yield full_response
break
try:
- content = json.loads(line[6:].decode('utf-8'))['response']
- yield content
+ content = json.loads(line[6:].decode('utf-8'))
+ if 'response' in content and content['response'] != '</s>':
+ yield content['response']
except Exception:
continue
break
except Exception as e:
if attempt == max_retries - 1:
raise
-
- @classmethod
- async def create_async(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> str:
- full_response = ""
- async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
- full_response += response
- return full_response
diff --git a/g4f/Provider/DarkAI.py b/g4f/Provider/DarkAI.py
index 6ffb615e..06e2bd55 100644
--- a/g4f/Provider/DarkAI.py
+++ b/g4f/Provider/DarkAI.py
@@ -9,19 +9,19 @@ from .helper import format_prompt
class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://www.aiuncensored.info"
+ url = "https://darkai.foundation/chat"
api_endpoint = "https://darkai.foundation/chat"
working = True
supports_stream = True
supports_system_message = True
supports_message_history = True
- default_model = 'gpt-4o'
+ default_model = 'llama-3-405b'
models = [
- default_model, # Uncensored
+ 'gpt-4o', # Uncensored
'gpt-3.5-turbo', # Uncensored
'llama-3-70b', # Uncensored
- 'llama-3-405b',
+ default_model,
]
model_aliases = {
@@ -51,8 +51,6 @@ class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
headers = {
"accept": "text/event-stream",
"content-type": "application/json",
- "origin": "https://www.aiuncensored.info",
- "referer": "https://www.aiuncensored.info/",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
@@ -77,9 +75,9 @@ class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
yield full_text.strip()
return
except json.JSONDecodeError:
- print(f"Failed to decode JSON: {chunk_str}")
- except Exception as e:
- print(f"Error processing chunk: {e}")
+ pass
+ except Exception:
+ pass
if full_text:
yield full_text.strip()
diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py
index b8cc6ab8..5c668599 100644
--- a/g4f/Provider/DeepInfraChat.py
+++ b/g4f/Provider/DeepInfraChat.py
@@ -6,7 +6,6 @@ import json
from ..typing import AsyncResult, Messages, ImageType
from ..image import to_data_uri
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
@@ -17,42 +16,18 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_message_history = True
- default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct'
+ default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
models = [
- 'meta-llama/Meta-Llama-3.1-405B-Instruct',
- 'meta-llama/Meta-Llama-3.1-70B-Instruct',
'meta-llama/Meta-Llama-3.1-8B-Instruct',
- 'mistralai/Mixtral-8x22B-Instruct-v0.1',
- 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+ default_model,
'microsoft/WizardLM-2-8x22B',
- 'microsoft/WizardLM-2-7B',
- 'Qwen/Qwen2-72B-Instruct',
- 'microsoft/Phi-3-medium-4k-instruct',
- 'google/gemma-2-27b-it',
- 'openbmb/MiniCPM-Llama3-V-2_5', # Image upload is available
- 'mistralai/Mistral-7B-Instruct-v0.3',
- 'lizpreciatior/lzlv_70b_fp16_hf',
- 'openchat/openchat-3.6-8b',
- 'Phind/Phind-CodeLlama-34B-v2',
- 'cognitivecomputations/dolphin-2.9.1-llama-3-70b',
+ 'Qwen/Qwen2.5-72B-Instruct',
]
model_aliases = {
- "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct",
- "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
- "llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
- "mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1",
- "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+ "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
"wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
- "wizardlm-2-7b": "microsoft/WizardLM-2-7B",
- "qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
- "phi-3-medium-4k": "microsoft/Phi-3-medium-4k-instruct",
- "gemma-2b-27b": "google/gemma-2-27b-it",
- "minicpm-llama-3-v2.5": "openbmb/MiniCPM-Llama3-V-2_5", # Image upload is available
- "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
- "lzlv-70b": "lizpreciatior/lzlv_70b_fp16_hf",
- "openchat-3.6-8b": "openchat/openchat-3.6-8b",
- "phind-codellama-34b-v2": "Phind/Phind-CodeLlama-34B-v2",
- "dolphin-2.9.1-llama-3-70b": "cognitivecomputations/dolphin-2.9.1-llama-3-70b",
+ "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
}
@@ -97,30 +72,12 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
}
async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
data = {
'model': model,
- 'messages': [
- {'role': 'system', 'content': 'Be a helpful assistant'},
- {'role': 'user', 'content': prompt}
- ],
+ 'messages': messages,
'stream': True
}
- if model == 'openbmb/MiniCPM-Llama3-V-2_5' and image is not None:
- data['messages'][-1]['content'] = [
- {
- 'type': 'image_url',
- 'image_url': {
- 'url': to_data_uri(image)
- }
- },
- {
- 'type': 'text',
- 'text': messages[-1]['content']
- }
- ]
-
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
diff --git a/g4f/Provider/Editee.py b/g4f/Provider/Editee.py
deleted file mode 100644
index 8ac2324a..00000000
--- a/g4f/Provider/Editee.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class Editee(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Editee"
- url = "https://editee.com"
- api_endpoint = "https://editee.com/submit/chatgptfree"
- working = True
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- default_model = 'claude'
- models = ['claude', 'gpt4', 'gemini' 'mistrallarge']
-
- model_aliases = {
- "claude-3.5-sonnet": "claude",
- "gpt-4o": "gpt4",
- "gemini-pro": "gemini",
- "mistral-large": "mistrallarge",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "Accept": "application/json, text/plain, */*",
- "Accept-Language": "en-US,en;q=0.9",
- "Cache-Control": "no-cache",
- "Content-Type": "application/json",
- "Origin": cls.url,
- "Pragma": "no-cache",
- "Priority": "u=1, i",
- "Referer": f"{cls.url}/chat-gpt",
- "Sec-CH-UA": '"Chromium";v="129", "Not=A?Brand";v="8"',
- "Sec-CH-UA-Mobile": '?0',
- "Sec-CH-UA-Platform": '"Linux"',
- "Sec-Fetch-Dest": 'empty',
- "Sec-Fetch-Mode": 'cors',
- "Sec-Fetch-Site": 'same-origin',
- "User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
- "X-Requested-With": 'XMLHttpRequest',
- }
-
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "user_input": prompt,
- "context": " ",
- "template_id": "",
- "selected_model": model
- }
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- response_data = await response.json()
- yield response_data['text']
diff --git a/g4f/Provider/Free2GPT.py b/g4f/Provider/Free2GPT.py
index a79bd1da..6ba9ac0f 100644
--- a/g4f/Provider/Free2GPT.py
+++ b/g4f/Provider/Free2GPT.py
@@ -16,7 +16,7 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chat10.free2gpt.xyz"
working = True
supports_message_history = True
- default_model = 'llama-3.1-70b'
+ default_model = 'mistral-7b'
@classmethod
async def create_async_generator(
@@ -49,12 +49,8 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
connector=get_connector(connector, proxy), headers=headers
) as session:
timestamp = int(time.time() * 1e3)
- system_message = {
- "role": "system",
- "content": ""
- }
data = {
- "messages": [system_message] + messages,
+ "messages": messages,
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, messages[-1]["content"]),
diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py
deleted file mode 100644
index a9dc0f56..00000000
--- a/g4f/Provider/FreeChatgpt.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from __future__ import annotations
-import json
-from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://chat.chatgpt.org.uk"
- api_endpoint = "/api/openai/v1/chat/completions"
- working = True
- default_model = '@cf/qwen/qwen1.5-14b-chat-awq'
- models = [
- '@cf/qwen/qwen1.5-14b-chat-awq',
- 'SparkDesk-v1.1',
- 'Qwen2-7B-Instruct',
- 'glm4-9B-chat',
- 'chatglm3-6B',
- 'Yi-1.5-9B-Chat',
- ]
-
- model_aliases = {
- "qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
- "sparkdesk-v1.1": "SparkDesk-v1.1",
- "qwen-2-7b": "Qwen2-7B-Instruct",
- "glm-4-9b": "glm4-9B-chat",
- "glm-3-6b": "chatglm3-6B",
- "yi-1.5-9b": "Yi-1.5-9B-Chat",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model.lower() in cls.model_aliases:
- return cls.model_aliases[model.lower()]
- else:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "accept": "application/json, text/event-stream",
- "accept-language": "en-US,en;q=0.9",
- "content-type": "application/json",
- "dnt": "1",
- "origin": cls.url,
- "referer": f"{cls.url}/",
- "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
- }
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "messages": [
- {"role": "system", "content": "\nYou are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent model: gpt-3.5-turbo\nCurrent time: Thu Jul 04 2024 21:35:59 GMT+0300 (Eastern European Summer Time)\nLatex inline: \\(x^2\\) \nLatex block: $$e=mc^2$$\n\n"},
- {"role": "user", "content": prompt}
- ],
- "stream": True,
- "model": model,
- "temperature": 0.5,
- "presence_penalty": 0,
- "frequency_penalty": 0,
- "top_p": 1
- }
- async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
- response.raise_for_status()
- accumulated_text = ""
- async for line in response.content:
- if line:
- line_str = line.decode().strip()
- if line_str == "data: [DONE]":
- yield accumulated_text
- break
- elif line_str.startswith("data: "):
- try:
- chunk = json.loads(line_str[6:])
- delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
- accumulated_text += delta_content
- yield delta_content # Yield each chunk of content
- except json.JSONDecodeError:
- pass
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 82a3824b..b38ff428 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -24,7 +24,7 @@ class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin):
working = True
supports_message_history = True
supports_system_message = True
- default_model = 'llama-3.1-70b'
+ default_model = 'gemini-pro'
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/GizAI.py b/g4f/Provider/GizAI.py
index 127edc9e..f00b344e 100644
--- a/g4f/Provider/GizAI.py
+++ b/g4f/Provider/GizAI.py
@@ -1,62 +1,24 @@
from __future__ import annotations
-import json
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
-from ..image import ImageResponse
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
+
class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://app.giz.ai/assistant/"
+ url = "https://app.giz.ai/assistant"
api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
working = True
-
+ supports_stream = False
supports_system_message = True
supports_message_history = True
- # Chat models
default_model = 'chat-gemini-flash'
- chat_models = [
- default_model,
- 'chat-gemini-pro',
- 'chat-gpt4m',
- 'chat-gpt4',
- 'claude-sonnet',
- 'claude-haiku',
- 'llama-3-70b',
- 'llama-3-8b',
- 'mistral-large',
- 'chat-o1-mini'
- ]
-
- # Image models
- image_models = [
- 'flux1',
- 'sdxl',
- 'sd',
- 'sd35',
- ]
-
- models = [*chat_models, *image_models]
+ models = [default_model]
- model_aliases = {
- # Chat model aliases
- "gemini-flash": "chat-gemini-flash",
- "gemini-pro": "chat-gemini-pro",
- "gpt-4o-mini": "chat-gpt4m",
- "gpt-4o": "chat-gpt4",
- "claude-3.5-sonnet": "claude-sonnet",
- "claude-3-haiku": "claude-haiku",
- "llama-3.1-70b": "llama-3-70b",
- "llama-3.1-8b": "llama-3-8b",
- "o1-mini": "chat-o1-mini",
- # Image model aliases
- "sd-1.5": "sd",
- "sd-3.5": "sd35",
- "flux-schnell": "flux1",
- }
+ model_aliases = {"gemini-flash": "chat-gemini-flash",}
@classmethod
def get_model(cls, model: str) -> str:
@@ -68,10 +30,6 @@ class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
return cls.default_model
@classmethod
- def is_image_model(cls, model: str) -> bool:
- return model in cls.image_models
-
- @classmethod
async def create_async_generator(
cls,
model: str,
@@ -87,6 +45,7 @@ class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Content-Type': 'application/json',
+ 'DNT': '1',
'Origin': 'https://app.giz.ai',
'Pragma': 'no-cache',
'Sec-Fetch-Dest': 'empty',
@@ -97,55 +56,21 @@ class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"'
}
-
- async with ClientSession() as session:
- if cls.is_image_model(model):
- # Image generation
- prompt = messages[-1]["content"]
- data = {
- "model": model,
- "input": {
- "width": "1024",
- "height": "1024",
- "steps": 4,
- "output_format": "webp",
- "batch_size": 1,
- "mode": "plan",
- "prompt": prompt
- }
- }
- async with session.post(
- cls.api_endpoint,
- headers=headers,
- data=json.dumps(data),
- proxy=proxy
- ) as response:
- response.raise_for_status()
- response_data = await response.json()
- if response_data.get('status') == 'completed' and response_data.get('output'):
- for url in response_data['output']:
- yield ImageResponse(images=url, alt="Generated Image")
- else:
- # Chat completion
- data = {
- "model": model,
- "input": {
- "messages": [
- {
- "type": "human",
- "content": format_prompt(messages)
- }
- ],
- "mode": "plan"
- },
- "noStream": True
- }
- async with session.post(
- cls.api_endpoint,
- headers=headers,
- data=json.dumps(data),
- proxy=proxy
- ) as response:
- response.raise_for_status()
+
+ prompt = format_prompt(messages)
+
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "model": model,
+ "input": {
+ "messages": [{"type": "human", "content": prompt}],
+ "mode": "plan"
+ },
+ "noStream": True
+ }
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ if response.status == 201:
result = await response.json()
- yield result.get('output', '')
+ yield result['output'].strip()
+ else:
+ raise Exception(f"Unexpected response status: {response.status}")
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 7ebbf570..d4a4b497 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -19,6 +19,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
'CohereForAI/c4ai-command-r-plus-08-2024',
'Qwen/Qwen2.5-72B-Instruct',
'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
+ 'Qwen/Qwen2.5-Coder-32B-Instruct',
'meta-llama/Llama-3.2-11B-Vision-Instruct',
'NousResearch/Hermes-3-Llama-3.1-8B',
'mistralai/Mistral-Nemo-Instruct-2407',
@@ -30,6 +31,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
"command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
"qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
"nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+ "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct",
"llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct",
"hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B",
"mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407",
@@ -83,12 +85,33 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
raise RuntimeError(f"Request failed with status code: {response.status_code}, response: {response.text}")
conversationId = response.json().get('conversationId')
+
+ # Get the data response and parse it properly
response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11')
+
+ # Split the response content by newlines and parse each line as JSON
+ try:
+ json_data = None
+ for line in response.text.split('\n'):
+ if line.strip():
+ try:
+ parsed = json.loads(line)
+ if isinstance(parsed, dict) and "nodes" in parsed:
+ json_data = parsed
+ break
+ except json.JSONDecodeError:
+ continue
+
+ if not json_data:
+ raise RuntimeError("Failed to parse response data")
+
+ data: list = json_data["nodes"][1]["data"]
+ keys: list[int] = data[data[0]["messages"]]
+ message_keys: dict = data[keys[0]]
+ messageId: str = data[message_keys["id"]]
- data: list = response.json()["nodes"][1]["data"]
- keys: list[int] = data[data[0]["messages"]]
- message_keys: dict = data[keys[0]]
- messageId: str = data[message_keys["id"]]
+ except (KeyError, IndexError, TypeError) as e:
+ raise RuntimeError(f"Failed to extract message ID: {str(e)}")
settings = {
"inputs": format_prompt(messages),
@@ -120,7 +143,8 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
'data': (None, json.dumps(settings, separators=(',', ':'))),
}
- response = requests.post(f'https://huggingface.co/chat/conversation/{conversationId}',
+ response = requests.post(
+ f'https://huggingface.co/chat/conversation/{conversationId}',
cookies=session.cookies,
headers=headers,
files=files,
@@ -142,10 +166,18 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
elif line["type"] == "stream":
token = line["token"].replace('\u0000', '')
full_response += token
+ if stream:
+ yield token
elif line["type"] == "finalAnswer":
break
full_response = full_response.replace('<|im_end|', '').replace('\u0000', '').strip()
- yield full_response
+ if not stream:
+ yield full_response
+
+ @classmethod
+ def supports_model(cls, model: str) -> bool:
+ """Check if the model is supported by the provider."""
+ return model in cls.models or model in cls.model_aliases
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 56f765de..7ccfa877 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -9,15 +9,6 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
- "gpt-3.5-turbo": {
- "id": "gpt-3.5-turbo",
- "name": "GPT-3.5-Turbo",
- "model": "ChatGPT",
- "provider": "OpenAI",
- "maxLength": 48000,
- "tokenLimit": 14000,
- "context": "16K",
- },
"gpt-4o-mini-free": {
"id": "gpt-4o-mini-free",
"name": "GPT-4o-Mini-Free",
@@ -63,6 +54,15 @@ models = {
"tokenLimit": 126000,
"context": "128K",
},
+ "grok-beta": {
+ "id": "grok-beta",
+ "name": "Grok-Beta",
+ "model": "Grok",
+ "provider": "x.ai",
+ "maxLength": 400000,
+ "tokenLimit": 100000,
+ "context": "100K",
+ },
"grok-2": {
"id": "grok-2",
"name": "Grok-2",
@@ -99,18 +99,18 @@ models = {
"tokenLimit": 200000,
"context": "200K",
},
- "claude-3-opus-20240229-gcp": {
- "id": "claude-3-opus-20240229-gcp",
- "name": "Claude-3-Opus-Gcp",
+ "claude-3-5-sonnet-20240620": {
+ "id": "claude-3-5-sonnet-20240620",
+ "name": "Claude-3.5-Sonnet",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
- "claude-3-5-sonnet-20240620": {
- "id": "claude-3-5-sonnet-20240620",
- "name": "Claude-3.5-Sonnet",
+ "claude-3-5-sonnet-20241022": {
+ "id": "claude-3-5-sonnet-20241022",
+ "name": "Claude-3.5-Sonnet-V2",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
@@ -170,7 +170,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
working = True
supports_message_history = True
supports_system_message = True
- default_model = "gpt-3.5-turbo"
+ default_model = "gpt-4o-2024-08-06"
models = list(models.keys())
model_aliases = {
@@ -183,9 +183,9 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
"claude-3-opus": "claude-3-opus-20240229",
"claude-3-opus": "claude-3-opus-20240229-aws",
- "claude-3-opus": "claude-3-opus-20240229-gcp",
"claude-3-sonnet": "claude-3-sonnet-20240229",
"claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
+ "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
"claude-3-haiku": "claude-3-haiku-20240307",
"claude-2.1": "claude-2.1",
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index b776e96a..b3119cb6 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -21,6 +21,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
"llama-3.1-sonar-small-128k-chat",
"llama-3.1-8b-instruct",
"llama-3.1-70b-instruct",
+ "/models/LiquidCloud",
]
model_aliases = {
@@ -30,6 +31,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
"sonar-chat": "llama-3.1-sonar-small-128k-chat",
"llama-3.1-8b": "llama-3.1-8b-instruct",
"llama-3.1-70b": "llama-3.1-70b-instruct",
+ "lfm-40b": "/models/LiquidCloud",
}
@classmethod
diff --git a/g4f/Provider/Prodia.py b/g4f/Provider/Prodia.py
index 543a8b19..fcebf7e3 100644
--- a/g4f/Provider/Prodia.py
+++ b/g4f/Provider/Prodia.py
@@ -98,6 +98,12 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
+ negative_prompt: str = "",
+ steps: str = 20, # 1-25
+ cfg: str = 7, # 0-20
+ seed: str = "-1",
+ sampler: str = "DPM++ 2M Karras", # "Euler", "Euler a", "Heun", "DPM++ 2M Karras", "DPM++ SDE Karras", "DDIM"
+ aspect_ratio: str = "square", # "square", "portrait", "landscape"
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
@@ -117,12 +123,12 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
"new": "true",
"prompt": prompt,
"model": model,
- "negative_prompt": kwargs.get("negative_prompt", ""),
- "steps": kwargs.get("steps", 20),
- "cfg": kwargs.get("cfg", 7),
- "seed": kwargs.get("seed", int(time.time())),
- "sampler": kwargs.get("sampler", "DPM++ 2M Karras"),
- "aspect_ratio": kwargs.get("aspect_ratio", "square")
+ "negative_prompt": negative_prompt,
+ "steps": steps,
+ "cfg": cfg,
+ "seed": seed,
+ "sampler": sampler,
+ "aspect_ratio": aspect_ratio
}
async with session.get(cls.api_endpoint, params=params, proxy=proxy) as response:
diff --git a/g4f/Provider/ReplicateHome.py b/g4f/Provider/ReplicateHome.py
index 7f443a7d..a7fc9b54 100644
--- a/g4f/Provider/ReplicateHome.py
+++ b/g4f/Provider/ReplicateHome.py
@@ -17,7 +17,13 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_message_history = True
- default_model = 'meta/meta-llama-3-70b-instruct'
+ default_model = 'yorickvp/llava-13b'
+
+ image_models = [
+ 'stability-ai/stable-diffusion-3',
+ 'bytedance/sdxl-lightning-4step',
+ 'playgroundai/playground-v2.5-1024px-aesthetic',
+ ]
text_models = [
'meta/meta-llama-3-70b-instruct',
@@ -26,35 +32,31 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
'yorickvp/llava-13b',
]
- image_models = [
- 'black-forest-labs/flux-schnell',
- 'stability-ai/stable-diffusion-3',
- 'bytedance/sdxl-lightning-4step',
- 'playgroundai/playground-v2.5-1024px-aesthetic',
- ]
+
models = text_models + image_models
model_aliases = {
- "flux-schnell": "black-forest-labs/flux-schnell",
+ # image_models
"sd-3": "stability-ai/stable-diffusion-3",
"sdxl": "bytedance/sdxl-lightning-4step",
"playground-v2.5": "playgroundai/playground-v2.5-1024px-aesthetic",
- "llama-3-70b": "meta/meta-llama-3-70b-instruct",
- "mixtral-8x7b": "mistralai/mixtral-8x7b-instruct-v0.1",
+
+ # text_models
"gemma-2b": "google-deepmind/gemma-2b-it",
"llava-13b": "yorickvp/llava-13b",
}
model_versions = {
- "meta/meta-llama-3-70b-instruct": "fbfb20b472b2f3bdd101412a9f70a0ed4fc0ced78a77ff00970ee7a2383c575d",
- "mistralai/mixtral-8x7b-instruct-v0.1": "5d78bcd7a992c4b793465bcdcf551dc2ab9668d12bb7aa714557a21c1e77041c",
- "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626",
- "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb",
- 'black-forest-labs/flux-schnell': "f2ab8a5bfe79f02f0789a146cf5e73d2a4ff2684a98c2b303d1e1ff3814271db",
+ # image_models
'stability-ai/stable-diffusion-3': "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f",
'bytedance/sdxl-lightning-4step': "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f",
'playgroundai/playground-v2.5-1024px-aesthetic': "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24",
+
+ # text_models
+ "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626",
+ "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb",
+
}
@classmethod
diff --git a/g4f/Provider/TeachAnything.py b/g4f/Provider/TeachAnything.py
index 3d34293f..97fe0272 100644
--- a/g4f/Provider/TeachAnything.py
+++ b/g4f/Provider/TeachAnything.py
@@ -14,6 +14,17 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
api_endpoint = "/api/generate"
working = True
default_model = "llama-3.1-70b"
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
@classmethod
async def create_async_generator(
@@ -24,6 +35,7 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
**kwargs: Any
) -> AsyncResult:
headers = cls._get_headers()
+ model = cls.get_model(model)
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
@@ -61,16 +73,18 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
return {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
"content-type": "application/json",
"dnt": "1",
"origin": "https://www.teach-anything.com",
+ "pragma": "no-cache",
"priority": "u=1, i",
"referer": "https://www.teach-anything.com/",
- "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
+ "sec-ch-us": '"Not?A_Brand";v="99", "Chromium";v="130"',
+ "sec-ch-us-mobile": "?0",
+ "sec-ch-us-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
}
diff --git a/g4f/Provider/Upstage.py b/g4f/Provider/Upstage.py
index 65409159..81234ed9 100644
--- a/g4f/Provider/Upstage.py
+++ b/g4f/Provider/Upstage.py
@@ -41,35 +41,51 @@ class Upstage(AsyncGeneratorProvider, ProviderModelMixin):
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
-
+
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
"content-type": "application/json",
+ "dnt": "1",
"origin": "https://console.upstage.ai",
+ "pragma": "no-cache",
"priority": "u=1, i",
"referer": "https://console.upstage.ai/",
- "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+ "sec-ch-ua": '"Not?A_Brand";v="99", "Chromium";v="130"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "cross-site",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
}
+
async with ClientSession(headers=headers) as session:
data = {
"stream": True,
"messages": [{"role": "user", "content": format_prompt(messages)}],
"model": model
}
+
async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
+
+ response_text = ""
+
async for line in response.content:
if line:
line = line.decode('utf-8').strip()
+
if line.startswith("data: ") and line != "data: [DONE]":
- data = json.loads(line[6:])
- content = data['choices'][0]['delta'].get('content', '')
- if content:
- yield content
+ try:
+ data = json.loads(line[6:])
+ content = data['choices'][0]['delta'].get('content', '')
+ if content:
+ response_text += content
+ yield content
+ except json.JSONDecodeError:
+ continue
+
+ if line == "data: [DONE]":
+ break
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index a313a9b3..dcf9c352 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -8,67 +8,35 @@ from ..providers.create_images import CreateImagesProvider
from .deprecated import *
from .selenium import *
from .needs_auth import *
+from .not_working import *
+from .local import *
-from .gigachat import *
-from .nexra import *
-
-from .Ai4Chat import Ai4Chat
-from .AI365VIP import AI365VIP
-from .AIChatFree import AIChatFree
from .AIUncensored import AIUncensored
from .Allyfy import Allyfy
-from .AmigoChat import AmigoChat
-from .AiChatOnline import AiChatOnline
-from .AiChats import AiChats
-from .AiMathGPT import AiMathGPT
from .Airforce import Airforce
-from .Aura import Aura
from .Bing import Bing
-from .BingCreateImages import BingCreateImages
from .Blackbox import Blackbox
-from .ChatGot import ChatGot
from .ChatGpt import ChatGpt
-from .Chatgpt4Online import Chatgpt4Online
-from .Chatgpt4o import Chatgpt4o
from .ChatGptEs import ChatGptEs
-from .ChatgptFree import ChatgptFree
-from .ChatHub import ChatHub
-from .ChatifyAI import ChatifyAI
from .Cloudflare import Cloudflare
from .DarkAI import DarkAI
from .DDG import DDG
-from .DeepInfra import DeepInfra
from .DeepInfraChat import DeepInfraChat
-from .DeepInfraImage import DeepInfraImage
-from .Editee import Editee
-from .FlowGpt import FlowGpt
from .Free2GPT import Free2GPT
-from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
-from .FreeNetfly import FreeNetfly
-from .GeminiPro import GeminiPro
from .GizAI import GizAI
-from .GPROChat import GPROChat
from .HuggingChat import HuggingChat
-from .HuggingFace import HuggingFace
-from .Koala import Koala
from .Liaobots import Liaobots
-from .Local import Local
from .MagickPen import MagickPen
-from .MetaAI import MetaAI
-#from .MetaAIAccount import MetaAIAccount
-from .Ollama import Ollama
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
from .Prodia import Prodia
from .Reka import Reka
-from .Replicate import Replicate
from .ReplicateHome import ReplicateHome
from .RubiksAI import RubiksAI
from .TeachAnything import TeachAnything
from .Upstage import Upstage
-from .WhiteRabbitNeo import WhiteRabbitNeo
from .You import You
from .Mhystical import Mhystical
diff --git a/g4f/Provider/airforce/AirforceChat.py b/g4f/Provider/airforce/AirforceChat.py
new file mode 100644
index 00000000..cec911a3
--- /dev/null
+++ b/g4f/Provider/airforce/AirforceChat.py
@@ -0,0 +1,172 @@
+from __future__ import annotations
+import re
+import json
+import requests
+from aiohttp import ClientSession
+from typing import List
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+# Helper function to clean the response
+def clean_response(text: str) -> str:
+ """Clean response from unwanted patterns."""
+ patterns = [
+ r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
+ r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
+ r"Rate limit \(\d+\/hour\) exceeded\. Join our discord for more: https:\/\/discord\.com\/invite\/\S+",
+ r"</s>", # zephyr-7b-beta
+ r"\[ERROR\] '\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'", # Matches [ERROR] 'UUID'
+ ]
+ for pattern in patterns:
+ text = re.sub(pattern, '', text)
+
+ # Remove the <|im_end|> token if present
+ text = text.replace("<|im_end|>", "").strip()
+
+ return text
+
+def split_message(message: str, max_length: int = 1000) -> List[str]:
+ """Splits the message into chunks of a given length (max_length)"""
+ # Split the message into smaller chunks to avoid exceeding the limit
+ chunks = []
+ while len(message) > max_length:
+ # Find the last space or punctuation before max_length to avoid cutting words
+ split_point = message.rfind(' ', 0, max_length)
+ if split_point == -1: # No space found, split at max_length
+ split_point = max_length
+ chunks.append(message[:split_point])
+ message = message[split_point:].strip()
+ if message:
+ chunks.append(message) # Append the remaining part of the message
+ return chunks
+
+class AirforceChat(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "AirForce Chat"
+ api_endpoint = "https://api.airforce/chat/completions"
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'llama-3.1-70b-chat'
+ response = requests.get('https://api.airforce/models')
+ data = response.json()
+
+ text_models = [model['id'] for model in data['data']]
+ models = [*text_models]
+
+ model_aliases = {
+ # openchat
+ "openchat-3.5": "openchat-3.5-0106",
+
+ # deepseek-ai
+ "deepseek-coder": "deepseek-coder-6.7b-instruct",
+
+ # NousResearch
+ "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
+ "hermes-2-pro": "hermes-2-pro-mistral-7b",
+
+ # teknium
+ "openhermes-2.5": "openhermes-2.5-mistral-7b",
+
+ # liquid
+ "lfm-40b": "lfm-40b-moe",
+
+ # DiscoResearch
+ "german-7b": "discolm-german-7b-v1",
+
+ # meta-llama
+ "llama-2-7b": "llama-2-7b-chat-int8",
+ "llama-2-7b": "llama-2-7b-chat-fp16",
+ "llama-3.1-70b": "llama-3.1-70b-chat",
+ "llama-3.1-8b": "llama-3.1-8b-chat",
+ "llama-3.1-70b": "llama-3.1-70b-turbo",
+ "llama-3.1-8b": "llama-3.1-8b-turbo",
+
+ # inferless
+ "neural-7b": "neural-chat-7b-v3-1",
+
+ # HuggingFaceH4
+ "zephyr-7b": "zephyr-7b-beta",
+
+ # llmplayground.net
+ #"any-uncensored": "any-uncensored",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ proxy: str = None,
+ max_tokens: str = 4096,
+ temperature: str = 1,
+ top_p: str = 1,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'authorization': 'Bearer missing api key',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': 'https://llmplayground.net',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://llmplayground.net/',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ # Format the messages for the API
+ formatted_messages = format_prompt(messages)
+ message_chunks = split_message(formatted_messages)
+
+ full_response = ""
+ for chunk in message_chunks:
+ data = {
+ "messages": [{"role": "user", "content": chunk}],
+ "model": model,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stream": stream
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+
+ text = ""
+ if stream:
+ async for line in response.content:
+ line = line.decode('utf-8').strip()
+ if line.startswith('data: '):
+ json_str = line[6:]
+ try:
+ if json_str and json_str != "[DONE]":
+ chunk = json.loads(json_str)
+ if 'choices' in chunk and chunk['choices']:
+ content = chunk['choices'][0].get('delta', {}).get('content', '')
+ text += content
+ except json.JSONDecodeError as e:
+ print(f"Error decoding JSON: {json_str}, Error: {e}")
+ elif line == "[DONE]":
+ break
+ full_response += clean_response(text)
+ else:
+ response_json = await response.json()
+ text = response_json["choices"][0]["message"]["content"]
+ full_response += clean_response(text)
+
+ # Return the complete response after all chunks
+ yield full_response
diff --git a/g4f/Provider/airforce/AirforceImage.py b/g4f/Provider/airforce/AirforceImage.py
new file mode 100644
index 00000000..b74bc364
--- /dev/null
+++ b/g4f/Provider/airforce/AirforceImage.py
@@ -0,0 +1,83 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+from urllib.parse import urlencode
+import random
+import requests
+
+from ...typing import AsyncResult, Messages
+from ...image import ImageResponse
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class AirforceImage(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Airforce Image"
+ #url = "https://api.airforce"
+ api_endpoint = "https://api.airforce/imagine2"
+ #working = True
+
+ default_model = 'flux'
+
+ response = requests.get('https://api.airforce/imagine/models')
+ data = response.json()
+
+ image_models = data
+
+ models = [*image_models, "stable-diffusion-xl-base", "stable-diffusion-xl-lightning", "Flux-1.1-Pro"]
+
+ model_aliases = {
+ "sdxl": "stable-diffusion-xl-base",
+ "sdxl": "stable-diffusion-xl-lightning",
+ "flux-pro": "Flux-1.1-Pro",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ size: str = '1:1', # "1:1", "16:9", "9:16", "21:9", "9:21", "1:2", "2:1"
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'dnt': '1',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://llmplayground.net/',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'image',
+ 'sec-fetch-mode': 'no-cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ seed = random.randint(0, 58463)
+ params = {
+ 'model': model,
+ 'prompt': messages[-1]["content"],
+ 'size': size,
+ 'seed': seed
+ }
+ full_url = f"{cls.api_endpoint}?{urlencode(params)}"
+
+ async with session.get(full_url, headers=headers, proxy=proxy) as response:
+ if response.status == 200 and response.headers.get('content-type', '').startswith('image'):
+ yield ImageResponse(images=[full_url], alt="Generated Image")
+ else:
+ raise Exception(f"Error: status {response.status}, content type {response.headers.get('content-type')}")
diff --git a/g4f/Provider/airforce/__init__.py b/g4f/Provider/airforce/__init__.py
new file mode 100644
index 00000000..5ffa6d31
--- /dev/null
+++ b/g4f/Provider/airforce/__init__.py
@@ -0,0 +1,2 @@
+from .AirforceChat import AirforceChat
+from .AirforceImage import AirforceImage
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index bf923f2a..368a71a0 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -25,11 +25,10 @@ from .Aichat import Aichat
from .Berlin import Berlin
from .Phind import Phind
from .AiAsk import AiAsk
-from ..AiChatOnline import AiChatOnline
from .ChatAnywhere import ChatAnywhere
from .FakeGpt import FakeGpt
from .GeekGpt import GeekGpt
from .GPTalk import GPTalk
from .Hashnode import Hashnode
from .Ylokh import Ylokh
-from .OpenAssistant import OpenAssistant \ No newline at end of file
+from .OpenAssistant import OpenAssistant
diff --git a/g4f/Provider/Local.py b/g4f/Provider/local/Local.py
index 471231c6..4dc6e3f9 100644
--- a/g4f/Provider/Local.py
+++ b/g4f/Provider/local/Local.py
@@ -1,15 +1,15 @@
from __future__ import annotations
-from ..locals.models import get_models
+from ...locals.models import get_models
try:
- from ..locals.provider import LocalProvider
+ from ...locals.provider import LocalProvider
has_requirements = True
except ImportError:
has_requirements = False
-from ..typing import Messages, CreateResult
-from ..providers.base_provider import AbstractProvider, ProviderModelMixin
-from ..errors import MissingRequirementsError
+from ...typing import Messages, CreateResult
+from ...providers.base_provider import AbstractProvider, ProviderModelMixin
+from ...errors import MissingRequirementsError
class Local(AbstractProvider, ProviderModelMixin):
label = "GPT4All"
@@ -40,4 +40,4 @@ class Local(AbstractProvider, ProviderModelMixin):
messages,
stream,
**kwargs
- ) \ No newline at end of file
+ )
diff --git a/g4f/Provider/Ollama.py b/g4f/Provider/local/Ollama.py
index f9116541..de68a218 100644
--- a/g4f/Provider/Ollama.py
+++ b/g4f/Provider/local/Ollama.py
@@ -3,10 +3,10 @@ from __future__ import annotations
import requests
import os
-from .needs_auth.Openai import Openai
-from ..typing import AsyncResult, Messages
+from ..needs_auth.OpenaiAPI import OpenaiAPI
+from ...typing import AsyncResult, Messages
-class Ollama(Openai):
+class Ollama(OpenaiAPI):
label = "Ollama"
url = "https://ollama.com"
needs_auth = False
@@ -37,4 +37,4 @@ class Ollama(Openai):
api_base: str = f"http://{host}:{port}/v1"
return super().create_async_generator(
model, messages, api_base=api_base, **kwargs
- ) \ No newline at end of file
+ )
diff --git a/g4f/Provider/local/__init__.py b/g4f/Provider/local/__init__.py
new file mode 100644
index 00000000..05f6022e
--- /dev/null
+++ b/g4f/Provider/local/__init__.py
@@ -0,0 +1,2 @@
+from .Local import Local
+from .Ollama import Ollama
diff --git a/g4f/Provider/BingCreateImages.py b/g4f/Provider/needs_auth/BingCreateImages.py
index 7a206c8f..80984d40 100644
--- a/g4f/Provider/BingCreateImages.py
+++ b/g4f/Provider/needs_auth/BingCreateImages.py
@@ -1,11 +1,11 @@
from __future__ import annotations
-from ..cookies import get_cookies
-from ..image import ImageResponse
-from ..errors import MissingAuthError
-from ..typing import AsyncResult, Messages, Cookies
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .bing.create_images import create_images, create_session
+from ...cookies import get_cookies
+from ...image import ImageResponse
+from ...errors import MissingAuthError
+from ...typing import AsyncResult, Messages, Cookies
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..bing.create_images import create_images, create_session
class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin):
label = "Microsoft Designer in Bing"
diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/needs_auth/DeepInfra.py
index b12fb254..35e7ca7f 100644
--- a/g4f/Provider/DeepInfra.py
+++ b/g4f/Provider/needs_auth/DeepInfra.py
@@ -1,10 +1,10 @@
from __future__ import annotations
import requests
-from ..typing import AsyncResult, Messages
-from .needs_auth.Openai import Openai
+from ...typing import AsyncResult, Messages
+from .OpenaiAPI import OpenaiAPI
-class DeepInfra(Openai):
+class DeepInfra(OpenaiAPI):
label = "DeepInfra"
url = "https://deepinfra.com"
working = True
@@ -55,4 +55,4 @@ class DeepInfra(Openai):
max_tokens=max_tokens,
headers=headers,
**kwargs
- ) \ No newline at end of file
+ )
diff --git a/g4f/Provider/DeepInfraImage.py b/g4f/Provider/needs_auth/DeepInfraImage.py
index cee608ce..2310c1c8 100644
--- a/g4f/Provider/DeepInfraImage.py
+++ b/g4f/Provider/needs_auth/DeepInfraImage.py
@@ -2,10 +2,10 @@ from __future__ import annotations
import requests
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession, raise_for_status
-from ..image import ImageResponse
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession, raise_for_status
+from ...image import ImageResponse
class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://deepinfra.com"
diff --git a/g4f/Provider/GeminiPro.py b/g4f/Provider/needs_auth/GeminiPro.py
index e7174c59..7e52a194 100644
--- a/g4f/Provider/GeminiPro.py
+++ b/g4f/Provider/needs_auth/GeminiPro.py
@@ -4,11 +4,11 @@ import base64
import json
from aiohttp import ClientSession, BaseConnector
-from ..typing import AsyncResult, Messages, ImageType
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import to_bytes, is_accepted_format
-from ..errors import MissingAuthError
-from .helper import get_connector
+from ...typing import AsyncResult, Messages, ImageType
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import to_bytes, is_accepted_format
+from ...errors import MissingAuthError
+from ..helper import get_connector
class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
label = "Gemini API"
diff --git a/g4f/Provider/needs_auth/Groq.py b/g4f/Provider/needs_auth/Groq.py
index 027d98bf..943fc81a 100644
--- a/g4f/Provider/needs_auth/Groq.py
+++ b/g4f/Provider/needs_auth/Groq.py
@@ -1,9 +1,9 @@
from __future__ import annotations
-from .Openai import Openai
+from .OpenaiAPI import OpenaiAPI
from ...typing import AsyncResult, Messages
-class Groq(Openai):
+class Groq(OpenaiAPI):
label = "Groq"
url = "https://console.groq.com/playground"
working = True
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py
index 586e5f5f..ecc75d1c 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/needs_auth/HuggingFace.py
@@ -3,13 +3,13 @@ from __future__ import annotations
import json
from aiohttp import ClientSession, BaseConnector
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_connector
-from ..errors import RateLimitError, ModelNotFoundError
-from ..requests.raise_for_status import raise_for_status
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_connector
+from ...errors import RateLimitError, ModelNotFoundError
+from ...requests.raise_for_status import raise_for_status
-from .HuggingChat import HuggingChat
+from ..HuggingChat import HuggingChat
class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"
diff --git a/g4f/Provider/MetaAI.py b/g4f/Provider/needs_auth/MetaAI.py
index 218b7ebb..4b730abd 100644
--- a/g4f/Provider/MetaAI.py
+++ b/g4f/Provider/needs_auth/MetaAI.py
@@ -8,12 +8,12 @@ from typing import Dict, List
from aiohttp import ClientSession, BaseConnector
-from ..typing import AsyncResult, Messages, Cookies
-from ..requests import raise_for_status, DEFAULT_HEADERS
-from ..image import ImageResponse, ImagePreview
-from ..errors import ResponseError
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt, get_connector, format_cookies
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests import raise_for_status, DEFAULT_HEADERS
+from ...image import ImageResponse, ImagePreview
+from ...errors import ResponseError
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt, get_connector, format_cookies
class Sources():
def __init__(self, link_list: List[Dict[str, str]]) -> None:
diff --git a/g4f/Provider/MetaAIAccount.py b/g4f/Provider/needs_auth/MetaAIAccount.py
index 369b3f2f..2d54f3e0 100644
--- a/g4f/Provider/MetaAIAccount.py
+++ b/g4f/Provider/needs_auth/MetaAIAccount.py
@@ -1,8 +1,8 @@
from __future__ import annotations
-from ..typing import AsyncResult, Messages, Cookies
-from .helper import format_prompt, get_cookies
-from .MetaAI import MetaAI
+from ...typing import AsyncResult, Messages, Cookies
+from ..helper import format_prompt, get_cookies
+from ..MetaAI import MetaAI
class MetaAIAccount(MetaAI):
needs_auth = True
@@ -20,4 +20,4 @@ class MetaAIAccount(MetaAI):
) -> AsyncResult:
cookies = get_cookies(".meta.ai", True, True) if cookies is None else cookies
async for chunk in cls(proxy).prompt(format_prompt(messages), cookies):
- yield chunk \ No newline at end of file
+ yield chunk
diff --git a/g4f/Provider/needs_auth/OpenRouter.py b/g4f/Provider/needs_auth/OpenRouter.py
deleted file mode 100644
index 5e0bf336..00000000
--- a/g4f/Provider/needs_auth/OpenRouter.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from __future__ import annotations
-
-import requests
-
-from .Openai import Openai
-from ...typing import AsyncResult, Messages
-
-class OpenRouter(Openai):
- label = "OpenRouter"
- url = "https://openrouter.ai"
- working = False
- default_model = "mistralai/mistral-7b-instruct:free"
-
- @classmethod
- def get_models(cls):
- if not cls.models:
- url = 'https://openrouter.ai/api/v1/models'
- models = requests.get(url).json()["data"]
- cls.models = [model['id'] for model in models]
- return cls.models
-
- @classmethod
- def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- api_base: str = "https://openrouter.ai/api/v1",
- **kwargs
- ) -> AsyncResult:
- return super().create_async_generator(
- model, messages, api_base=api_base, **kwargs
- )
diff --git a/g4f/Provider/needs_auth/Openai.py b/g4f/Provider/needs_auth/OpenaiAPI.py
index 382ebada..116b5f6f 100644
--- a/g4f/Provider/needs_auth/Openai.py
+++ b/g4f/Provider/needs_auth/OpenaiAPI.py
@@ -9,7 +9,7 @@ from ...requests import StreamSession, raise_for_status
from ...errors import MissingAuthError, ResponseError
from ...image import to_data_uri
-class Openai(AsyncGeneratorProvider, ProviderModelMixin):
+class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
label = "OpenAI API"
url = "https://platform.openai.com"
working = True
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index f02121e3..3a0d6b29 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -55,6 +55,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
label = "OpenAI ChatGPT"
url = "https://chatgpt.com"
working = True
+ needs_auth = True
supports_gpt_4 = True
supports_message_history = True
supports_system_message = True
diff --git a/g4f/Provider/needs_auth/PerplexityApi.py b/g4f/Provider/needs_auth/PerplexityApi.py
index 3ee65b30..85d7cc98 100644
--- a/g4f/Provider/needs_auth/PerplexityApi.py
+++ b/g4f/Provider/needs_auth/PerplexityApi.py
@@ -1,9 +1,9 @@
from __future__ import annotations
-from .Openai import Openai
+from .OpenaiAPI import OpenaiAPI
from ...typing import AsyncResult, Messages
-class PerplexityApi(Openai):
+class PerplexityApi(OpenaiAPI):
label = "Perplexity API"
url = "https://www.perplexity.ai"
working = True
diff --git a/g4f/Provider/Replicate.py b/g4f/Provider/needs_auth/Replicate.py
index 7ff8ad65..ec993aa4 100644
--- a/g4f/Provider/Replicate.py
+++ b/g4f/Provider/needs_auth/Replicate.py
@@ -1,11 +1,11 @@
from __future__ import annotations
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt, filter_none
-from ..typing import AsyncResult, Messages
-from ..requests import raise_for_status
-from ..requests.aiohttp import StreamSession
-from ..errors import ResponseError, MissingAuthError
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt, filter_none
+from ...typing import AsyncResult, Messages
+from ...requests import raise_for_status
+from ...requests.aiohttp import StreamSession
+from ...errors import ResponseError, MissingAuthError
class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
@@ -85,4 +85,4 @@ class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
if new_text:
yield new_text
else:
- yield "\n" \ No newline at end of file
+ yield "\n"
diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py
index 22fc62ed..2006f7ad 100644
--- a/g4f/Provider/needs_auth/ThebApi.py
+++ b/g4f/Provider/needs_auth/ThebApi.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from ...typing import CreateResult, Messages
-from .Openai import Openai
+from .OpenaiAPI import OpenaiAPI
models = {
"theb-ai": "TheB.AI",
@@ -27,7 +27,7 @@ models = {
"qwen-7b-chat": "Qwen 7B"
}
-class ThebApi(Openai):
+class ThebApi(OpenaiAPI):
label = "TheB.AI API"
url = "https://theb.ai"
working = True
@@ -58,4 +58,4 @@ class ThebApi(Openai):
"top_p": top_p,
}
}
- return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs) \ No newline at end of file
+ return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs)
diff --git a/g4f/Provider/WhiteRabbitNeo.py b/g4f/Provider/needs_auth/WhiteRabbitNeo.py
index 339434e6..82275c1c 100644
--- a/g4f/Provider/WhiteRabbitNeo.py
+++ b/g4f/Provider/needs_auth/WhiteRabbitNeo.py
@@ -2,10 +2,10 @@ from __future__ import annotations
from aiohttp import ClientSession, BaseConnector
-from ..typing import AsyncResult, Messages, Cookies
-from ..requests.raise_for_status import raise_for_status
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_cookies, get_connector, get_random_string
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests.raise_for_status import raise_for_status
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_cookies, get_connector, get_random_string
class WhiteRabbitNeo(AsyncGeneratorProvider):
url = "https://www.whiterabbitneo.com"
@@ -54,4 +54,4 @@ class WhiteRabbitNeo(AsyncGeneratorProvider):
await raise_for_status(response)
async for chunk in response.content.iter_any():
if chunk:
- yield chunk.decode(errors="ignore") \ No newline at end of file
+ yield chunk.decode(errors="ignore")
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 0492645d..26c50c0a 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -1,11 +1,22 @@
-from .Gemini import Gemini
-from .Raycast import Raycast
-from .Theb import Theb
-from .ThebApi import ThebApi
-from .OpenaiChat import OpenaiChat
-from .Poe import Poe
-from .Openai import Openai
-from .Groq import Groq
-from .OpenRouter import OpenRouter
-#from .OpenaiAccount import OpenaiAccount
-from .PerplexityApi import PerplexityApi
+from .gigachat import *
+
+#from .MetaAIAccount import MetaAIAccount
+#from .OpenaiAccount import OpenaiAccount
+
+from .BingCreateImages import BingCreateImages
+from .DeepInfra import DeepInfra
+from .DeepInfraImage import DeepInfraImage
+from .Gemini import Gemini
+from .GeminiPro import GeminiPro
+from .Groq import Groq
+from .HuggingFace import HuggingFace
+from .MetaAI import MetaAI
+from .OpenaiAPI import OpenaiAPI
+from .OpenaiChat import OpenaiChat
+from .PerplexityApi import PerplexityApi
+from .Poe import Poe
+from .Raycast import Raycast
+from .Replicate import Replicate
+from .Theb import Theb
+from .ThebApi import ThebApi
+from .WhiteRabbitNeo import WhiteRabbitNeo
diff --git a/g4f/Provider/gigachat/GigaChat.py b/g4f/Provider/needs_auth/gigachat/GigaChat.py
index b1b293e3..c9f1c011 100644
--- a/g4f/Provider/gigachat/GigaChat.py
+++ b/g4f/Provider/needs_auth/gigachat/GigaChat.py
@@ -9,10 +9,10 @@ import json
from aiohttp import ClientSession, TCPConnector, BaseConnector
from g4f.requests import raise_for_status
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ...errors import MissingAuthError
-from ..helper import get_connector
+from ....typing import AsyncResult, Messages
+from ...base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ....errors import MissingAuthError
+from ...helper import get_connector
access_token = ""
token_expires_at = 0
diff --git a/g4f/Provider/gigachat/__init__.py b/g4f/Provider/needs_auth/gigachat/__init__.py
index c9853742..c9853742 100644
--- a/g4f/Provider/gigachat/__init__.py
+++ b/g4f/Provider/needs_auth/gigachat/__init__.py
diff --git a/g4f/Provider/gigachat/russian_trusted_root_ca_pem.crt b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt
index 4c143a21..4c143a21 100644
--- a/g4f/Provider/gigachat/russian_trusted_root_ca_pem.crt
+++ b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt
diff --git a/g4f/Provider/nexra/NexraBing.py b/g4f/Provider/nexra/NexraBing.py
deleted file mode 100644
index 28f0b117..00000000
--- a/g4f/Provider/nexra/NexraBing.py
+++ /dev/null
@@ -1,93 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ..helper import format_prompt
-
-class NexraBing(AbstractProvider, ProviderModelMixin):
- label = "Nexra Bing"
- url = "https://nexra.aryahcr.cc/documentation/bing/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- working = True
- supports_stream = True
-
- default_model = 'Balanced'
- models = [default_model, 'Creative', 'Precise']
-
- model_aliases = {
- "gpt-4": "Balanced",
- "gpt-4": "Creative",
- "gpt-4": "Precise",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool = False,
- proxy: str = None,
- markdown: bool = False,
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "messages": [
- {
- "role": "user",
- "content": format_prompt(messages)
- }
- ],
- "conversation_style": model,
- "markdown": markdown,
- "stream": stream,
- "model": "Bing"
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=True)
-
- return cls.process_response(response)
-
- @classmethod
- def process_response(cls, response):
- if response.status_code != 200:
- yield f"Error: {response.status_code}"
- return
-
- full_message = ""
- for chunk in response.iter_content(chunk_size=None):
- if chunk:
- messages = chunk.decode('utf-8').split('\x1e')
- for message in messages:
- try:
- json_data = json.loads(message)
- if json_data.get('finish', False):
- return
- current_message = json_data.get('message', '')
- if current_message:
- new_content = current_message[len(full_message):]
- if new_content:
- yield new_content
- full_message = current_message
- except json.JSONDecodeError:
- continue
-
- if not full_message:
- yield "No message received"
diff --git a/g4f/Provider/nexra/NexraBlackbox.py b/g4f/Provider/nexra/NexraBlackbox.py
deleted file mode 100644
index be048fdd..00000000
--- a/g4f/Provider/nexra/NexraBlackbox.py
+++ /dev/null
@@ -1,100 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ..helper import format_prompt
-
-class NexraBlackbox(AbstractProvider, ProviderModelMixin):
- label = "Nexra Blackbox"
- url = "https://nexra.aryahcr.cc/documentation/blackbox/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- working = True
- supports_stream = True
-
- default_model = "blackbox"
- models = [default_model]
- model_aliases = {"blackboxai": "blackbox",}
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool,
- proxy: str = None,
- markdown: bool = False,
- websearch: bool = False,
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "messages": [
- {
- "role": "user",
- "content": format_prompt(messages)
- }
- ],
- "websearch": websearch,
- "stream": stream,
- "markdown": markdown,
- "model": model
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=stream)
-
- if stream:
- return cls.process_streaming_response(response)
- else:
- return cls.process_non_streaming_response(response)
-
- @classmethod
- def process_non_streaming_response(cls, response):
- if response.status_code == 200:
- try:
- full_response = ""
- for line in response.iter_lines(decode_unicode=True):
- if line:
- data = json.loads(line)
- if data.get('finish'):
- break
- message = data.get('message', '')
- if message:
- full_response = message
- return full_response
- except json.JSONDecodeError:
- return "Error: Unable to decode JSON response"
- else:
- return f"Error: {response.status_code}"
-
- @classmethod
- def process_streaming_response(cls, response):
- previous_message = ""
- for line in response.iter_lines(decode_unicode=True):
- if line:
- try:
- data = json.loads(line)
- if data.get('finish'):
- break
- message = data.get('message', '')
- if message and message != previous_message:
- yield message[len(previous_message):]
- previous_message = message
- except json.JSONDecodeError:
- pass
diff --git a/g4f/Provider/nexra/NexraChatGPT.py b/g4f/Provider/nexra/NexraChatGPT.py
deleted file mode 100644
index 074a0363..00000000
--- a/g4f/Provider/nexra/NexraChatGPT.py
+++ /dev/null
@@ -1,285 +0,0 @@
-from __future__ import annotations
-
-import asyncio
-import json
-import requests
-from typing import Any, Dict
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-
-
-class NexraChatGPT(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Nexra ChatGPT"
- url = "https://nexra.aryahcr.cc/documentation/chatgpt/en"
- api_endpoint_nexra_chatgpt = "https://nexra.aryahcr.cc/api/chat/gpt"
- api_endpoint_nexra_chatgpt4o = "https://nexra.aryahcr.cc/api/chat/complements"
- api_endpoint_nexra_chatgpt_v2 = "https://nexra.aryahcr.cc/api/chat/complements"
- api_endpoint_nexra_gptweb = "https://nexra.aryahcr.cc/api/chat/gptweb"
- working = True
- supports_system_message = True
- supports_message_history = True
- supports_stream = True
-
- default_model = 'gpt-3.5-turbo'
- nexra_chatgpt = [
- 'gpt-4', 'gpt-4-0613', 'gpt-4-0314', 'gpt-4-32k-0314',
- default_model, 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
- 'text-davinci-003', 'text-davinci-002', 'code-davinci-002', 'gpt-3', 'text-curie-001', 'text-babbage-001', 'text-ada-001', 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002'
- ]
- nexra_chatgpt4o = ['gpt-4o']
- nexra_chatgptv2 = ['chatgpt']
- nexra_gptweb = ['gptweb']
- models = nexra_chatgpt + nexra_chatgpt4o + nexra_chatgptv2 + nexra_gptweb
-
- model_aliases = {
- "gpt-4": "gpt-4-0613",
- "gpt-4-32k": "gpt-4-32k-0314",
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0613": "gpt-3.5-turbo-16k-0613",
- "gpt-3": "text-davinci-003",
- "text-davinci-002": "code-davinci-002",
- "text-curie-001": "text-babbage-001",
- "text-ada-001": "davinci",
- "curie": "babbage",
- "ada": "babbage-002",
- "davinci-002": "davinci-002",
- "chatgpt": "chatgpt",
- "gptweb": "gptweb"
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- stream: bool = False,
- proxy: str = None,
- markdown: bool = False,
- **kwargs
- ) -> AsyncResult:
- if model in cls.nexra_chatgpt:
- async for chunk in cls._create_async_generator_nexra_chatgpt(model, messages, proxy, **kwargs):
- yield chunk
- elif model in cls.nexra_chatgpt4o:
- async for chunk in cls._create_async_generator_nexra_chatgpt4o(model, messages, stream, proxy, markdown, **kwargs):
- yield chunk
- elif model in cls.nexra_chatgptv2:
- async for chunk in cls._create_async_generator_nexra_chatgpt_v2(model, messages, stream, proxy, markdown, **kwargs):
- yield chunk
- elif model in cls.nexra_gptweb:
- async for chunk in cls._create_async_generator_nexra_gptweb(model, messages, proxy, **kwargs):
- yield chunk
-
- @classmethod
- async def _create_async_generator_nexra_chatgpt(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- markdown: bool = False,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "Content-Type": "application/json"
- }
-
- prompt = format_prompt(messages)
- data = {
- "messages": messages,
- "prompt": prompt,
- "model": model,
- "markdown": markdown
- }
-
- loop = asyncio.get_event_loop()
- try:
- response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt, data, headers, proxy)
- filtered_response = cls._filter_response(response)
-
- for chunk in filtered_response:
- yield chunk
- except Exception as e:
- print(f"Error during API request (nexra_chatgpt): {e}")
-
- @classmethod
- async def _create_async_generator_nexra_chatgpt4o(
- cls,
- model: str,
- messages: Messages,
- stream: bool = False,
- proxy: str = None,
- markdown: bool = False,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "Content-Type": "application/json"
- }
-
- prompt = format_prompt(messages)
- data = {
- "messages": [
- {
- "role": "user",
- "content": prompt
- }
- ],
- "stream": stream,
- "markdown": markdown,
- "model": model
- }
-
- loop = asyncio.get_event_loop()
- try:
- response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt4o, data, headers, proxy, stream)
-
- if stream:
- async for chunk in cls._process_streaming_response(response):
- yield chunk
- else:
- for chunk in cls._process_non_streaming_response(response):
- yield chunk
- except Exception as e:
- print(f"Error during API request (nexra_chatgpt4o): {e}")
-
- @classmethod
- async def _create_async_generator_nexra_chatgpt_v2(
- cls,
- model: str,
- messages: Messages,
- stream: bool = False,
- proxy: str = None,
- markdown: bool = False,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "Content-Type": "application/json"
- }
-
- prompt = format_prompt(messages)
- data = {
- "messages": [
- {
- "role": "user",
- "content": prompt
- }
- ],
- "stream": stream,
- "markdown": markdown,
- "model": model
- }
-
- loop = asyncio.get_event_loop()
- try:
- response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt_v2, data, headers, proxy, stream)
-
- if stream:
- async for chunk in cls._process_streaming_response(response):
- yield chunk
- else:
- for chunk in cls._process_non_streaming_response(response):
- yield chunk
- except Exception as e:
- print(f"Error during API request (nexra_chatgpt_v2): {e}")
-
- @classmethod
- async def _create_async_generator_nexra_gptweb(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- markdown: bool = False,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "Content-Type": "application/json"
- }
-
- prompt = format_prompt(messages)
- data = {
- "prompt": prompt,
- "markdown": markdown,
- }
-
- loop = asyncio.get_event_loop()
- try:
- response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_gptweb, data, headers, proxy)
-
- for chunk in response.iter_content(1024):
- if chunk:
- decoded_chunk = chunk.decode().lstrip('_')
- try:
- response_json = json.loads(decoded_chunk)
- if response_json.get("status"):
- yield response_json.get("gpt", "")
- except json.JSONDecodeError:
- continue
- except Exception as e:
- print(f"Error during API request (nexra_gptweb): {e}")
-
- @staticmethod
- def _sync_post_request(url: str, data: Dict[str, Any], headers: Dict[str, str], proxy: str = None, stream: bool = False) -> requests.Response:
- proxies = {
- "http": proxy,
- "https": proxy,
- } if proxy else None
-
- try:
- response = requests.post(url, json=data, headers=headers, proxies=proxies, stream=stream)
- response.raise_for_status()
- return response
- except requests.RequestException as e:
- print(f"Request failed: {e}")
- raise
-
- @staticmethod
- def _process_non_streaming_response(response: requests.Response) -> str:
- if response.status_code == 200:
- try:
- content = response.text.lstrip('')
- data = json.loads(content)
- return data.get('message', '')
- except json.JSONDecodeError:
- return "Error: Unable to decode JSON response"
- else:
- return f"Error: {response.status_code}"
-
- @staticmethod
- async def _process_streaming_response(response: requests.Response):
- full_message = ""
- for line in response.iter_lines(decode_unicode=True):
- if line:
- try:
- line = line.lstrip('')
- data = json.loads(line)
- if data.get('finish'):
- break
- message = data.get('message', '')
- if message:
- yield message[len(full_message):]
- full_message = message
- except json.JSONDecodeError:
- pass
-
- @staticmethod
- def _filter_response(response: requests.Response) -> str:
- response_json = response.json()
- return response_json.get("gpt", "")
diff --git a/g4f/Provider/nexra/NexraDallE.py b/g4f/Provider/nexra/NexraDallE.py
deleted file mode 100644
index f605c6d0..00000000
--- a/g4f/Provider/nexra/NexraDallE.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraDallE(AbstractProvider, ProviderModelMixin):
- label = "Nexra DALL-E"
- url = "https://nexra.aryahcr.cc/documentation/dall-e/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
-
- default_model = "dalle"
- models = [default_model]
-
- @classmethod
- def get_model(cls, model: str) -> str:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- response: str = "url", # base64 or url
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "prompt": messages[-1]["content"],
- "model": model,
- "response": response
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
- result = cls.process_response(response)
- yield result
-
- @classmethod
- def process_response(cls, response):
- if response.status_code == 200:
- try:
- content = response.text.strip()
- content = content.lstrip('_')
- data = json.loads(content)
- if data.get('status') and data.get('images'):
- image_url = data['images'][0]
- return ImageResponse(images=[image_url], alt="Generated Image")
- else:
- return "Error: No image URL found in the response"
- except json.JSONDecodeError as e:
- return f"Error: Unable to decode JSON response. Details: {str(e)}"
- else:
- return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraDallE2.py b/g4f/Provider/nexra/NexraDallE2.py
deleted file mode 100644
index 2a36b6e6..00000000
--- a/g4f/Provider/nexra/NexraDallE2.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraDallE2(AbstractProvider, ProviderModelMixin):
- label = "Nexra DALL-E 2"
- url = "https://nexra.aryahcr.cc/documentation/dall-e/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
-
- default_model = "dalle2"
- models = [default_model]
-
- @classmethod
- def get_model(cls, model: str) -> str:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- response: str = "url", # base64 or url
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "prompt": messages[-1]["content"],
- "model": model,
- "response": response
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
- result = cls.process_response(response)
- yield result
-
- @classmethod
- def process_response(cls, response):
- if response.status_code == 200:
- try:
- content = response.text.strip()
- content = content.lstrip('_')
- data = json.loads(content)
- if data.get('status') and data.get('images'):
- image_url = data['images'][0]
- return ImageResponse(images=[image_url], alt="Generated Image")
- else:
- return "Error: No image URL found in the response"
- except json.JSONDecodeError as e:
- return f"Error: Unable to decode JSON response. Details: {str(e)}"
- else:
- return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraEmi.py b/g4f/Provider/nexra/NexraEmi.py
deleted file mode 100644
index c26becec..00000000
--- a/g4f/Provider/nexra/NexraEmi.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraEmi(AbstractProvider, ProviderModelMixin):
- label = "Nexra Emi"
- url = "https://nexra.aryahcr.cc/documentation/emi/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
-
- default_model = "emi"
- models = [default_model]
-
- @classmethod
- def get_model(cls, model: str) -> str:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- response: str = "url", # base64 or url
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "prompt": messages[-1]["content"],
- "model": model,
- "response": response
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
- result = cls.process_response(response)
- yield result
-
- @classmethod
- def process_response(cls, response):
- if response.status_code == 200:
- try:
- content = response.text.strip()
- content = content.lstrip('_')
- data = json.loads(content)
- if data.get('status') and data.get('images'):
- image_url = data['images'][0]
- return ImageResponse(images=[image_url], alt="Generated Image")
- else:
- return "Error: No image URL found in the response"
- except json.JSONDecodeError as e:
- return f"Error: Unable to decode JSON response. Details: {str(e)}"
- else:
- return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraFluxPro.py b/g4f/Provider/nexra/NexraFluxPro.py
deleted file mode 100644
index cfb26385..00000000
--- a/g4f/Provider/nexra/NexraFluxPro.py
+++ /dev/null
@@ -1,70 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraFluxPro(AbstractProvider, ProviderModelMixin):
- url = "https://nexra.aryahcr.cc/documentation/flux-pro/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
-
- default_model = 'flux'
- models = [default_model]
- model_aliases = {
- "flux-pro": "flux",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- response: str = "url", # base64 or url
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "prompt": messages[-1]["content"],
- "model": model,
- "response": response
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
- result = cls.process_response(response)
- yield result
-
- @classmethod
- def process_response(cls, response):
- if response.status_code == 200:
- try:
- content = response.text.strip()
- content = content.lstrip('_')
- data = json.loads(content)
- if data.get('status') and data.get('images'):
- image_url = data['images'][0]
- return ImageResponse(images=[image_url], alt="Generated Image")
- else:
- return "Error: No image URL found in the response"
- except json.JSONDecodeError as e:
- return f"Error: Unable to decode JSON response. Details: {str(e)}"
- else:
- return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraGeminiPro.py b/g4f/Provider/nexra/NexraGeminiPro.py
deleted file mode 100644
index e4e6a8ec..00000000
--- a/g4f/Provider/nexra/NexraGeminiPro.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ..helper import format_prompt
-
-class NexraGeminiPro(AbstractProvider, ProviderModelMixin):
- label = "Nexra Gemini PRO"
- url = "https://nexra.aryahcr.cc/documentation/gemini-pro/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- working = True
- supports_stream = True
-
- default_model = 'gemini-pro'
- models = [default_model]
-
- @classmethod
- def get_model(cls, model: str) -> str:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool,
- proxy: str = None,
- markdown: bool = False,
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "messages": [
- {
- "role": "user",
- "content": format_prompt(messages)
- }
- ],
- "stream": stream,
- "markdown": markdown,
- "model": model
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=stream)
-
- if stream:
- return cls.process_streaming_response(response)
- else:
- return cls.process_non_streaming_response(response)
-
- @classmethod
- def process_non_streaming_response(cls, response):
- if response.status_code == 200:
- try:
- content = response.text.lstrip('')
- data = json.loads(content)
- return data.get('message', '')
- except json.JSONDecodeError:
- return "Error: Unable to decode JSON response"
- else:
- return f"Error: {response.status_code}"
-
- @classmethod
- def process_streaming_response(cls, response):
- full_message = ""
- for line in response.iter_lines(decode_unicode=True):
- if line:
- try:
- line = line.lstrip('')
- data = json.loads(line)
- if data.get('finish'):
- break
- message = data.get('message', '')
- if message:
- yield message[len(full_message):]
- full_message = message
- except json.JSONDecodeError:
- pass
diff --git a/g4f/Provider/nexra/NexraMidjourney.py b/g4f/Provider/nexra/NexraMidjourney.py
deleted file mode 100644
index c427f8a0..00000000
--- a/g4f/Provider/nexra/NexraMidjourney.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraMidjourney(AbstractProvider, ProviderModelMixin):
- label = "Nexra Midjourney"
- url = "https://nexra.aryahcr.cc/documentation/midjourney/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
-
- default_model = "midjourney"
- models = [default_model]
-
- @classmethod
- def get_model(cls, model: str) -> str:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- response: str = "url", # base64 or url
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "prompt": messages[-1]["content"],
- "model": model,
- "response": response
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
- result = cls.process_response(response)
- yield result
-
- @classmethod
- def process_response(cls, response):
- if response.status_code == 200:
- try:
- content = response.text.strip()
- content = content.lstrip('_')
- data = json.loads(content)
- if data.get('status') and data.get('images'):
- image_url = data['images'][0]
- return ImageResponse(images=[image_url], alt="Generated Image")
- else:
- return "Error: No image URL found in the response"
- except json.JSONDecodeError as e:
- return f"Error: Unable to decode JSON response. Details: {str(e)}"
- else:
- return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraProdiaAI.py b/g4f/Provider/nexra/NexraProdiaAI.py
deleted file mode 100644
index de997fce..00000000
--- a/g4f/Provider/nexra/NexraProdiaAI.py
+++ /dev/null
@@ -1,151 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraProdiaAI(AbstractProvider, ProviderModelMixin):
- label = "Nexra Prodia AI"
- url = "https://nexra.aryahcr.cc/documentation/prodia/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
-
- default_model = 'absolutereality_v181.safetensors [3d9d4d2b]'
- models = [
- '3Guofeng3_v34.safetensors [50f420de]',
- 'absolutereality_V16.safetensors [37db0fc3]',
- default_model,
- 'amIReal_V41.safetensors [0a8a2e61]',
- 'analog-diffusion-1.0.ckpt [9ca13f02]',
- 'aniverse_v30.safetensors [579e6f85]',
- 'anythingv3_0-pruned.ckpt [2700c435]',
- 'anything-v4.5-pruned.ckpt [65745d25]',
- 'anythingV5_PrtRE.safetensors [893e49b9]',
- 'AOM3A3_orangemixs.safetensors [9600da17]',
- 'blazing_drive_v10g.safetensors [ca1c1eab]',
- 'breakdomain_I2428.safetensors [43cc7d2f]',
- 'breakdomain_M2150.safetensors [15f7afca]',
- 'cetusMix_Version35.safetensors [de2f2560]',
- 'childrensStories_v13D.safetensors [9dfaabcb]',
- 'childrensStories_v1SemiReal.safetensors [a1c56dbb]',
- 'childrensStories_v1ToonAnime.safetensors [2ec7b88b]',
- 'Counterfeit_v30.safetensors [9e2a8f19]',
- 'cuteyukimixAdorable_midchapter3.safetensors [04bdffe6]',
- 'cyberrealistic_v33.safetensors [82b0d085]',
- 'dalcefo_v4.safetensors [425952fe]',
- 'deliberate_v2.safetensors [10ec4b29]',
- 'deliberate_v3.safetensors [afd9d2d4]',
- 'dreamlike-anime-1.0.safetensors [4520e090]',
- 'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]',
- 'dreamlike-photoreal-2.0.safetensors [fdcf65e7]',
- 'dreamshaper_6BakedVae.safetensors [114c8abb]',
- 'dreamshaper_7.safetensors [5cf5ae06]',
- 'dreamshaper_8.safetensors [9d40847d]',
- 'edgeOfRealism_eorV20.safetensors [3ed5de15]',
- 'EimisAnimeDiffusion_V1.ckpt [4f828a15]',
- 'elldreths-vivid-mix.safetensors [342d9d26]',
- 'epicphotogasm_xPlusPlus.safetensors [1a8f6d35]',
- 'epicrealism_naturalSinRC1VAE.safetensors [90a4c676]',
- 'epicrealism_pureEvolutionV3.safetensors [42c8440c]',
- 'ICantBelieveItsNotPhotography_seco.safetensors [4e7a3dfd]',
- 'indigoFurryMix_v75Hybrid.safetensors [91208cbb]',
- 'juggernaut_aftermath.safetensors [5e20c455]',
- 'lofi_v4.safetensors [ccc204d6]',
- 'lyriel_v16.safetensors [68fceea2]',
- 'majicmixRealistic_v4.safetensors [29d0de58]',
- 'mechamix_v10.safetensors [ee685731]',
- 'meinamix_meinaV9.safetensors [2ec66ab0]',
- 'meinamix_meinaV11.safetensors [b56ce717]',
- 'neverendingDream_v122.safetensors [f964ceeb]',
- 'openjourney_V4.ckpt [ca2f377f]',
- 'pastelMixStylizedAnime_pruned_fp16.safetensors [793a26e8]',
- 'portraitplus_V1.0.safetensors [1400e684]',
- 'protogenx34.safetensors [5896f8d5]',
- 'Realistic_Vision_V1.4-pruned-fp16.safetensors [8d21810b]',
- 'Realistic_Vision_V2.0.safetensors [79587710]',
- 'Realistic_Vision_V4.0.safetensors [29a7afaa]',
- 'Realistic_Vision_V5.0.safetensors [614d1063]',
- 'Realistic_Vision_V5.1.safetensors [a0f13c83]',
- 'redshift_diffusion-V10.safetensors [1400e684]',
- 'revAnimated_v122.safetensors [3f4fefd9]',
- 'rundiffusionFX25D_v10.safetensors [cd12b0ee]',
- 'rundiffusionFX_v10.safetensors [cd4e694d]',
- 'sdv1_4.ckpt [7460a6fa]',
- 'v1-5-pruned-emaonly.safetensors [d7049739]',
- 'v1-5-inpainting.safetensors [21c7ab71]',
- 'shoninsBeautiful_v10.safetensors [25d8c546]',
- 'theallys-mix-ii-churned.safetensors [5d9225a4]',
- 'timeless-1.0.ckpt [7c4971d4]',
- 'toonyou_beta6.safetensors [980f6b15]',
- ]
-
- model_aliases = {}
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- def get_model(cls, model: str) -> str:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- response: str = "url", # base64 or url
- steps: str = 25, # Min: 1, Max: 30
- cfg_scale: str = 7, # Min: 0, Max: 20
- sampler: str = "DPM++ 2M Karras", # Select from these: "Euler","Euler a","Heun","DPM++ 2M Karras","DPM++ SDE Karras","DDIM"
- negative_prompt: str = "", # Indicates what the AI should not do
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "prompt": messages[-1]["content"],
- "model": "prodia",
- "response": response,
- "data": {
- "model": model,
- "steps": steps,
- "cfg_scale": cfg_scale,
- "sampler": sampler,
- "negative_prompt": negative_prompt
- }
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
- result = cls.process_response(response)
- yield result
-
- @classmethod
- def process_response(cls, response):
- if response.status_code == 200:
- try:
- content = response.text.strip()
- content = content.lstrip('_') # Remove leading underscores
- data = json.loads(content)
- if data.get('status') and data.get('images'):
- image_url = data['images'][0]
- return ImageResponse(images=[image_url], alt="Generated Image")
- else:
- return "Error: No image URL found in the response"
- except json.JSONDecodeError as e:
- return f"Error: Unable to decode JSON response. Details: {str(e)}"
- else:
- return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraQwen.py b/g4f/Provider/nexra/NexraQwen.py
deleted file mode 100644
index 7f944e44..00000000
--- a/g4f/Provider/nexra/NexraQwen.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ..helper import format_prompt
-
-class NexraQwen(AbstractProvider, ProviderModelMixin):
- label = "Nexra Qwen"
- url = "https://nexra.aryahcr.cc/documentation/qwen/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- working = True
- supports_stream = True
-
- default_model = 'qwen'
- models = [default_model]
-
- @classmethod
- def get_model(cls, model: str) -> str:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool,
- proxy: str = None,
- markdown: bool = False,
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "messages": [
- {
- "role": "user",
- "content": format_prompt(messages)
- }
- ],
- "stream": stream,
- "markdown": markdown,
- "model": model
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=stream)
-
- if stream:
- return cls.process_streaming_response(response)
- else:
- return cls.process_non_streaming_response(response)
-
- @classmethod
- def process_non_streaming_response(cls, response):
- if response.status_code == 200:
- try:
- content = response.text.lstrip('')
- data = json.loads(content)
- return data.get('message', '')
- except json.JSONDecodeError:
- return "Error: Unable to decode JSON response"
- else:
- return f"Error: {response.status_code}"
-
- @classmethod
- def process_streaming_response(cls, response):
- full_message = ""
- for line in response.iter_lines(decode_unicode=True):
- if line:
- try:
- line = line.lstrip('')
- data = json.loads(line)
- if data.get('finish'):
- break
- message = data.get('message', '')
- if message is not None and message != full_message:
- yield message[len(full_message):]
- full_message = message
- except json.JSONDecodeError:
- pass
diff --git a/g4f/Provider/nexra/NexraSD15.py b/g4f/Provider/nexra/NexraSD15.py
deleted file mode 100644
index 860a132f..00000000
--- a/g4f/Provider/nexra/NexraSD15.py
+++ /dev/null
@@ -1,72 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraSD15(AbstractProvider, ProviderModelMixin):
- label = "Nexra Stable Diffusion 1.5"
- url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
-
- default_model = 'stablediffusion-1.5'
- models = [default_model]
-
- model_aliases = {
- "sd-1.5": "stablediffusion-1.5",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- response: str = "url", # base64 or url
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "prompt": messages[-1]["content"],
- "model": model,
- "response": response
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
- result = cls.process_response(response)
- yield result
-
- @classmethod
- def process_response(cls, response):
- if response.status_code == 200:
- try:
- content = response.text.strip()
- content = content.lstrip('_')
- data = json.loads(content)
- if data.get('status') and data.get('images'):
- image_url = data['images'][0]
- return ImageResponse(images=[image_url], alt="Generated Image")
- else:
- return "Error: No image URL found in the response"
- except json.JSONDecodeError as e:
- return f"Error: Unable to decode JSON response. Details: {str(e)}"
- else:
- return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraSDLora.py b/g4f/Provider/nexra/NexraSDLora.py
deleted file mode 100644
index a12bff1a..00000000
--- a/g4f/Provider/nexra/NexraSDLora.py
+++ /dev/null
@@ -1,69 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraSDLora(AbstractProvider, ProviderModelMixin):
- label = "Nexra Stable Diffusion Lora"
- url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
-
- default_model = "sdxl-lora"
- models = [default_model]
-
- @classmethod
- def get_model(cls, model: str) -> str:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- response: str = "url", # base64 or url
- guidance: str = 0.3, # Min: 0, Max: 5
- steps: str = 2, # Min: 2, Max: 10
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "prompt": messages[-1]["content"],
- "model": model,
- "response": response,
- "data": {
- "guidance": guidance,
- "steps": steps
- }
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
- result = cls.process_response(response)
- yield result
-
- @classmethod
- def process_response(cls, response):
- if response.status_code == 200:
- try:
- content = response.text.strip()
- content = content.lstrip('_')
- data = json.loads(content)
- if data.get('status') and data.get('images'):
- image_url = data['images'][0]
- return ImageResponse(images=[image_url], alt="Generated Image")
- else:
- return "Error: No image URL found in the response"
- except json.JSONDecodeError as e:
- return f"Error: Unable to decode JSON response. Details: {str(e)}"
- else:
- return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/NexraSDTurbo.py b/g4f/Provider/nexra/NexraSDTurbo.py
deleted file mode 100644
index 865b4522..00000000
--- a/g4f/Provider/nexra/NexraSDTurbo.py
+++ /dev/null
@@ -1,69 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraSDTurbo(AbstractProvider, ProviderModelMixin):
- label = "Nexra Stable Diffusion Turbo"
- url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
-
- default_model = "sdxl-turbo"
- models = [default_model]
-
- @classmethod
- def get_model(cls, model: str) -> str:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- response: str = "url", # base64 or url
- strength: str = 0.7, # Min: 0, Max: 1
- steps: str = 2, # Min: 1, Max: 10
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "prompt": messages[-1]["content"],
- "model": model,
- "response": response,
- "data": {
- "strength": strength,
- "steps": steps
- }
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
- result = cls.process_response(response)
- yield result
-
- @classmethod
- def process_response(cls, response):
- if response.status_code == 200:
- try:
- content = response.text.strip()
- content = content.lstrip('_') # Remove the leading underscore
- data = json.loads(content)
- if data.get('status') and data.get('images'):
- image_url = data['images'][0]
- return ImageResponse(images=[image_url], alt="Generated Image")
- else:
- return "Error: No image URL found in the response"
- except json.JSONDecodeError as e:
- return f"Error: Unable to decode JSON response. Details: {str(e)}"
- else:
- return f"Error: {response.status_code}, Response: {response.text}"
diff --git a/g4f/Provider/nexra/__init__.py b/g4f/Provider/nexra/__init__.py
deleted file mode 100644
index bebc1fb6..00000000
--- a/g4f/Provider/nexra/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from .NexraBing import NexraBing
-from .NexraBlackbox import NexraBlackbox
-from .NexraChatGPT import NexraChatGPT
-from .NexraDallE import NexraDallE
-from .NexraDallE2 import NexraDallE2
-from .NexraEmi import NexraEmi
-from .NexraFluxPro import NexraFluxPro
-from .NexraGeminiPro import NexraGeminiPro
-from .NexraMidjourney import NexraMidjourney
-from .NexraProdiaAI import NexraProdiaAI
-from .NexraQwen import NexraQwen
-from .NexraSD15 import NexraSD15
-from .NexraSDLora import NexraSDLora
-from .NexraSDTurbo import NexraSDTurbo
diff --git a/g4f/Provider/AI365VIP.py b/g4f/Provider/not_working/AI365VIP.py
index 511ad568..a4bac0e2 100644
--- a/g4f/Provider/AI365VIP.py
+++ b/g4f/Provider/not_working/AI365VIP.py
@@ -2,9 +2,9 @@ from __future__ import annotations
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
diff --git a/g4f/Provider/AIChatFree.py b/g4f/Provider/not_working/AIChatFree.py
index 71c04681..a4f80d47 100644
--- a/g4f/Provider/AIChatFree.py
+++ b/g4f/Provider/not_working/AIChatFree.py
@@ -5,16 +5,16 @@ from hashlib import sha256
from aiohttp import BaseConnector, ClientSession
-from ..errors import RateLimitError
-from ..requests import raise_for_status
-from ..requests.aiohttp import get_connector
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...errors import RateLimitError
+from ...requests import raise_for_status
+from ...requests.aiohttp import get_connector
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://aichatfree.info/"
- working = True
+ working = False
supports_stream = True
supports_message_history = True
default_model = 'gemini-pro'
diff --git a/g4f/Provider/AiChatOnline.py b/g4f/Provider/not_working/AiChatOnline.py
index 26aacef6..ccfc691e 100644
--- a/g4f/Provider/AiChatOnline.py
+++ b/g4f/Provider/not_working/AiChatOnline.py
@@ -3,15 +3,15 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_random_string, format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_string, format_prompt
class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin):
site_url = "https://aichatonline.org"
url = "https://aichatonlineorg.erweima.ai"
api_endpoint = "/aichatonline/api/chat/gpt"
- working = True
+ working = False
default_model = 'gpt-4o-mini'
@classmethod
diff --git a/g4f/Provider/AiChats.py b/g4f/Provider/not_working/AiChats.py
index 08492e24..51a85c91 100644
--- a/g4f/Provider/AiChats.py
+++ b/g4f/Provider/not_working/AiChats.py
@@ -3,15 +3,15 @@ from __future__ import annotations
import json
import base64
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import ImageResponse
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+from ..helper import format_prompt
class AiChats(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://ai-chats.org"
api_endpoint = "https://ai-chats.org/chat/send2/"
- working = True
+ working = False
supports_message_history = True
default_model = 'gpt-4'
models = ['gpt-4', 'dalle']
diff --git a/g4f/Provider/AmigoChat.py b/g4f/Provider/not_working/AmigoChat.py
index f5027111..274a5e14 100644
--- a/g4f/Provider/AmigoChat.py
+++ b/g4f/Provider/not_working/AmigoChat.py
@@ -4,16 +4,16 @@ import json
import uuid
from aiohttp import ClientSession, ClientTimeout, ClientResponseError
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-from ..image import ImageResponse
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+from ...image import ImageResponse
class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://amigochat.io/chat/"
chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions"
image_api_endpoint = "https://api.amigochat.io/v1/images/generations"
- working = True
+ working = False
supports_stream = True
supports_system_message = True
supports_message_history = True
diff --git a/g4f/Provider/Aura.py b/g4f/Provider/not_working/Aura.py
index e2c56754..e841d909 100644
--- a/g4f/Provider/Aura.py
+++ b/g4f/Provider/not_working/Aura.py
@@ -2,10 +2,10 @@ from __future__ import annotations
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from ..requests import get_args_from_browser
-from ..webdriver import WebDriver
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ...requests import get_args_from_browser
+from ...webdriver import WebDriver
class Aura(AsyncGeneratorProvider):
url = "https://openchat.team"
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/not_working/Chatgpt4Online.py
index 627facf6..b0552e45 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/not_working/Chatgpt4Online.py
@@ -3,15 +3,15 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
class Chatgpt4Online(AsyncGeneratorProvider):
url = "https://chatgpt4online.org"
api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
- working = True
+ working = False
default_model = 'gpt-4'
models = [default_model]
diff --git a/g4f/Provider/Chatgpt4o.py b/g4f/Provider/not_working/Chatgpt4o.py
index 7730fc84..ba264d40 100644
--- a/g4f/Provider/Chatgpt4o.py
+++ b/g4f/Provider/not_working/Chatgpt4o.py
@@ -1,15 +1,15 @@
from __future__ import annotations
import re
-from ..requests import StreamSession, raise_for_status
-from ..typing import Messages
-from .base_provider import AsyncProvider, ProviderModelMixin
-from .helper import format_prompt
+from ...requests import StreamSession, raise_for_status
+from ...typing import Messages
+from ..base_provider import AsyncProvider, ProviderModelMixin
+from ..helper import format_prompt
class Chatgpt4o(AsyncProvider, ProviderModelMixin):
url = "https://chatgpt4o.one"
- working = True
+ working = False
_post_id = None
_nonce = None
default_model = 'gpt-4o-mini-2024-07-18'
diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/not_working/ChatgptFree.py
index d2837594..6b3877b1 100644
--- a/g4f/Provider/ChatgptFree.py
+++ b/g4f/Provider/not_working/ChatgptFree.py
@@ -3,17 +3,18 @@ from __future__ import annotations
import re
import json
import asyncio
-from ..requests import StreamSession, raise_for_status
-from ..typing import Messages, AsyncGenerator
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
+from ...requests import StreamSession, raise_for_status
+from ...typing import Messages, AsyncGenerator
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chatgptfree.ai"
- working = True
+ working = False
_post_id = None
_nonce = None
default_model = 'gpt-4o-mini-2024-07-18'
+ models = [default_model]
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
}
diff --git a/g4f/Provider/FlowGpt.py b/g4f/Provider/not_working/FlowGpt.py
index 1a45997b..b7d8537a 100644
--- a/g4f/Provider/FlowGpt.py
+++ b/g4f/Provider/not_working/FlowGpt.py
@@ -5,10 +5,10 @@ import time
import hashlib
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_random_hex, get_random_string
-from ..requests.raise_for_status import raise_for_status
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_hex, get_random_string
+from ...requests.raise_for_status import raise_for_status
class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://flowgpt.com/chat"
diff --git a/g4f/Provider/FreeNetfly.py b/g4f/Provider/not_working/FreeNetfly.py
index ada5d51a..8362019c 100644
--- a/g4f/Provider/FreeNetfly.py
+++ b/g4f/Provider/not_working/FreeNetfly.py
@@ -5,14 +5,14 @@ import asyncio
from aiohttp import ClientSession, ClientTimeout, ClientError
from typing import AsyncGenerator
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://free.netfly.top"
api_endpoint = "/api/openai/v1/chat/completions"
- working = True
+ working = False
default_model = 'gpt-3.5-turbo'
models = [
'gpt-3.5-turbo',
diff --git a/g4f/Provider/GPROChat.py b/g4f/Provider/not_working/GPROChat.py
index a33c9571..52c7f947 100644
--- a/g4f/Provider/GPROChat.py
+++ b/g4f/Provider/not_working/GPROChat.py
@@ -2,15 +2,15 @@ from __future__ import annotations
import hashlib
import time
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
class GPROChat(AsyncGeneratorProvider, ProviderModelMixin):
label = "GPROChat"
url = "https://gprochat.com"
api_endpoint = "https://gprochat.com/api/generate"
- working = True
+ working = False
supports_stream = True
supports_message_history = True
default_model = 'gemini-pro'
diff --git a/g4f/Provider/Koala.py b/g4f/Provider/not_working/Koala.py
index 0dd76b71..d6230da7 100644
--- a/g4f/Provider/Koala.py
+++ b/g4f/Provider/not_working/Koala.py
@@ -4,15 +4,15 @@ import json
from typing import AsyncGenerator, Optional, List, Dict, Union, Any
from aiohttp import ClientSession, BaseConnector, ClientResponse
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_random_string, get_connector
-from ..requests import raise_for_status
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_string, get_connector
+from ...requests import raise_for_status
class Koala(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://koala.sh/chat"
api_endpoint = "https://koala.sh/api/gpt/"
- working = True
+ working = False
supports_message_history = True
default_model = 'gpt-4o-mini'
diff --git a/g4f/Provider/selenium/MyShell.py b/g4f/Provider/not_working/MyShell.py
index 02e182d4..02e182d4 100644
--- a/g4f/Provider/selenium/MyShell.py
+++ b/g4f/Provider/not_working/MyShell.py
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
new file mode 100644
index 00000000..a6edf5f8
--- /dev/null
+++ b/g4f/Provider/not_working/__init__.py
@@ -0,0 +1,14 @@
+from .AI365VIP import AI365VIP
+from .AIChatFree import AIChatFree
+from .AiChatOnline import AiChatOnline
+from .AiChats import AiChats
+from .AmigoChat import AmigoChat
+from .Aura import Aura
+from .Chatgpt4o import Chatgpt4o
+from .ChatgptFree import ChatgptFree
+from .FlowGpt import FlowGpt
+from .FreeNetfly import FreeNetfly
+from .GPROChat import GPROChat
+from .Koala import Koala
+from .MyShell import MyShell
+from .Chatgpt4Online import Chatgpt4Online
diff --git a/g4f/Provider/selenium/__init__.py b/g4f/Provider/selenium/__init__.py
index 3a59ea58..44adf5fb 100644
--- a/g4f/Provider/selenium/__init__.py
+++ b/g4f/Provider/selenium/__init__.py
@@ -1,4 +1,3 @@
-from .MyShell import MyShell
from .PerplexityAi import PerplexityAi
from .Phind import Phind
from .TalkAi import TalkAi
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 3e29c5f8..e7f87260 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -195,9 +195,13 @@ class Api:
return JSONResponse(response_list[0].to_json())
# Streaming response
+ async def async_generator(sync_gen):
+ for item in sync_gen:
+ yield item
+
async def streaming():
try:
- async for chunk in response:
+ async for chunk in async_generator(response):
yield f"data: {json.dumps(chunk.to_json())}\n\n"
except GeneratorExit:
pass
@@ -221,7 +225,7 @@ class Api:
response_format=config.response_format
)
# Convert Image objects to dictionaries
- response_data = [image.to_dict() for image in response.data]
+ response_data = [{"url": image.url, "b64_json": image.b64_json} for image in response.data]
return JSONResponse({"data": response_data})
except Exception as e:
logger.exception(e)
diff --git a/g4f/client/client.py b/g4f/client/client.py
index 8e195213..73d8fea3 100644
--- a/g4f/client/client.py
+++ b/g4f/client/client.py
@@ -144,24 +144,32 @@ class Client(BaseClient):
class AsyncClient(Client):
"""Legacy AsyncClient that redirects to the main Client class.
This class exists for backwards compatibility."""
-
+
def __init__(self, *args, **kwargs):
import warnings
warnings.warn(
- "AsyncClient is deprecated and will be removed in a future version. "
+ "AsyncClient is deprecated and will be removed in future versions."
"Use Client instead, which now supports both sync and async operations.",
DeprecationWarning,
stacklevel=2
)
super().__init__(*args, **kwargs)
- async def chat_complete(self, *args, **kwargs):
- """Legacy method that redirects to async_create"""
- return await self.chat.completions.async_create(*args, **kwargs)
+ async def async_create(self, *args, **kwargs):
+ """Asynchronous create method that calls the synchronous method."""
+ return await super().async_create(*args, **kwargs)
+
+ async def async_generate(self, *args, **kwargs):
+ """Asynchronous image generation method."""
+ return await super().async_generate(*args, **kwargs)
- async def create_image(self, *args, **kwargs):
- """Legacy method that redirects to async_generate"""
- return await self.images.async_generate(*args, **kwargs)
+ async def async_images(self) -> Images:
+ """Asynchronous access to images."""
+ return await super().async_images()
+
+ async def async_fetch_image(self, url: str) -> bytes:
+ """Asynchronous fetching of an image by URL."""
+ return await self._fetch_image(url)
class Completions:
def __init__(self, client: Client, provider: ProviderType = None):
@@ -531,4 +539,3 @@ class Images:
async def create_variation(self, image: Union[str, bytes], model: str = None, response_format: str = "url", **kwargs):
# Existing implementation, adjust if you want to support b64_json here as well
pass
-
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index 7e8ef09c..8cbe526c 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -72,7 +72,7 @@
</button>
<div class="info">
<i class="fa-brands fa-discord"></i>
- <span class="convo-title">discord ~ <a href="https://discord.gg/XfybzPXPH5">discord.gg/XfybzPXPH5</a>
+ <span class="convo-title">discord ~ <a href="https://discord.gg/5E39JUWUFa">discord.gg/5E39JUWUFa</a>
</span>
</div>
<div class="info">
@@ -237,7 +237,7 @@
<option value="gemini-flash">gemini-flash</option>
<option value="claude-3-haiku">claude-3-haiku</option>
<option value="claude-3.5-sonnet">claude-3.5-sonnet</option>
- <option value="">----</option>
+ <option disabled="disabled">----</option>
</select>
<select name="model2" id="model2" class="hidden"></select>
</div>
@@ -245,14 +245,14 @@
<select name="provider" id="provider">
<option value="">Provider: Auto</option>
<option value="OpenaiChat">OpenAI ChatGPT</option>
+ <option value="ChatGpt">ChatGpt</option>
<option value="Gemini">Gemini</option>
<option value="MetaAI">Meta AI</option>
<option value="DeepInfraChat">DeepInfraChat</option>
<option value="Blackbox">Blackbox</option>
- <option value="HuggingChat">HuggingChat</option>
- <option value="DDG">DDG</option>
+ <option value="DDG">DuckDuckGo</option>
<option value="Pizzagpt">Pizzagpt</option>
- <option value="">----</option>
+ <option disabled="disabled">----</option>
</select>
</div>
</div>
diff --git a/g4f/models.py b/g4f/models.py
index 32a12d10..6d3ef2ad 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -6,58 +6,36 @@ from .Provider import IterListProvider, ProviderType
from .Provider import (
Ai4Chat,
AIChatFree,
- AiMathGPT,
Airforce,
+ AIUncensored,
Allyfy,
- AmigoChat,
Bing,
Blackbox,
ChatGpt,
Chatgpt4Online,
ChatGptEs,
- ChatgptFree,
- ChatHub,
- ChatifyAI,
Cloudflare,
DarkAI,
DDG,
- DeepInfra,
DeepInfraChat,
- Editee,
Free2GPT,
- FreeChatgpt,
FreeGpt,
FreeNetfly,
Gemini,
GeminiPro,
GizAI,
GigaChat,
- GPROChat,
HuggingChat,
HuggingFace,
- Koala,
Liaobots,
MagickPen,
+ Mhystical,
MetaAI,
- NexraBing,
- NexraBlackbox,
- NexraChatGPT,
- NexraDallE,
- NexraDallE2,
- NexraEmi,
- NexraFluxPro,
- NexraGeminiPro,
- NexraMidjourney,
- NexraQwen,
- NexraSD15,
- NexraSDLora,
- NexraSDTurbo,
OpenaiChat,
PerplexityLabs,
Pi,
Pizzagpt,
Reka,
- Replicate,
ReplicateHome,
RubiksAI,
TeachAnything,
@@ -91,8 +69,6 @@ default = Model(
base_provider = "",
best_provider = IterListProvider([
DDG,
- FreeChatgpt,
- HuggingChat,
Pizzagpt,
ReplicateHome,
Upstage,
@@ -101,14 +77,11 @@ default = Model(
MagickPen,
DeepInfraChat,
Airforce,
- ChatHub,
ChatGptEs,
- ChatHub,
- AmigoChat,
- ChatifyAI,
Cloudflare,
- Editee,
- AiMathGPT,
+ AIUncensored,
+ DarkAI,
+ Mhystical,
])
)
@@ -119,56 +92,49 @@ default = Model(
############
### OpenAI ###
-# gpt-3
-gpt_3 = Model(
- name = 'gpt-3',
- base_provider = 'OpenAI',
- best_provider = NexraChatGPT
-)
-
# gpt-3.5
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'OpenAI',
- best_provider = IterListProvider([Allyfy, NexraChatGPT, Airforce, DarkAI, Liaobots])
+ best_provider = IterListProvider([Airforce])
)
# gpt-4
gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'OpenAI',
- best_provider = IterListProvider([NexraChatGPT, Blackbox, ChatGptEs, AmigoChat, DarkAI, Editee, GizAI, Airforce, Liaobots, OpenaiChat])
+ best_provider = IterListProvider([Blackbox, ChatGptEs, DarkAI, ChatGpt, Airforce, Liaobots, OpenaiChat])
)
gpt_4o_mini = Model(
name = 'gpt-4o-mini',
base_provider = 'OpenAI',
- best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, MagickPen, AmigoChat, RubiksAI, Liaobots, Airforce, GizAI, ChatgptFree, Koala, OpenaiChat, ChatGpt])
+ best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, ChatGpt, Airforce, RubiksAI, MagickPen, Liaobots, OpenaiChat])
)
gpt_4_turbo = Model(
name = 'gpt-4-turbo',
base_provider = 'OpenAI',
- best_provider = IterListProvider([Liaobots, Airforce, Bing])
+ best_provider = IterListProvider([ChatGpt, Airforce, Liaobots, Bing])
)
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'OpenAI',
- best_provider = IterListProvider([Chatgpt4Online, Ai4Chat, NexraBing, NexraChatGPT, Airforce, Bing, OpenaiChat, gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider])
+ best_provider = IterListProvider([Mhystical, Chatgpt4Online, ChatGpt, Bing, OpenaiChat, gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider])
)
# o1
o1 = Model(
name = 'o1',
base_provider = 'OpenAI',
- best_provider = AmigoChat
+ best_provider = None
)
o1_mini = Model(
name = 'o1-mini',
base_provider = 'OpenAI',
- best_provider = IterListProvider([AmigoChat, GizAI])
+ best_provider = None
)
@@ -191,104 +157,58 @@ meta = Model(
llama_2_7b = Model(
name = "llama-2-7b",
base_provider = "Meta Llama",
- best_provider = Cloudflare
-)
-
-llama_2_13b = Model(
- name = "llama-2-13b",
- base_provider = "Meta Llama",
- best_provider = Airforce
+ best_provider = IterListProvider([Cloudflare, Airforce])
)
-
# llama 3
llama_3_8b = Model(
name = "llama-3-8b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Cloudflare, Airforce, DeepInfra, Replicate])
-)
-
-llama_3_70b = Model(
- name = "llama-3-70b",
- base_provider = "Meta Llama",
- best_provider = IterListProvider([ReplicateHome, Airforce, DeepInfra, Replicate])
+ best_provider = IterListProvider([Cloudflare])
)
# llama 3.1
llama_3_1_8b = Model(
name = "llama-3.1-8b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Blackbox, DeepInfraChat, ChatHub, Cloudflare, Airforce, GizAI, PerplexityLabs])
+ best_provider = IterListProvider([Blackbox, DeepInfraChat, Cloudflare, Airforce, PerplexityLabs])
)
llama_3_1_70b = Model(
name = "llama-3.1-70b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, Airforce, AiMathGPT, RubiksAI, GizAI, HuggingFace, PerplexityLabs])
+ best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, TeachAnything, DarkAI, Airforce, RubiksAI, HuggingChat, HuggingFace, PerplexityLabs])
)
llama_3_1_405b = Model(
name = "llama-3.1-405b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([DeepInfraChat, Blackbox, AmigoChat, DarkAI, Airforce])
+ best_provider = IterListProvider([Blackbox, DarkAI])
)
# llama 3.2
llama_3_2_1b = Model(
name = "llama-3.2-1b",
base_provider = "Meta Llama",
- best_provider = Cloudflare
-)
-
-llama_3_2_3b = Model(
- name = "llama-3.2-3b",
- base_provider = "Meta Llama",
- best_provider = Cloudflare
+ best_provider = IterListProvider([Cloudflare])
)
llama_3_2_11b = Model(
name = "llama-3.2-11b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Cloudflare, HuggingChat, HuggingFace])
-)
-
-llama_3_2_90b = Model(
- name = "llama-3.2-90b",
- base_provider = "Meta Llama",
- best_provider = IterListProvider([AmigoChat, Airforce])
-)
-
-
-# llamaguard
-llamaguard_7b = Model(
- name = "llamaguard-7b",
- base_provider = "Meta Llama",
- best_provider = Airforce
-)
-
-llamaguard_2_8b = Model(
- name = "llamaguard-2-8b",
- base_provider = "Meta Llama",
- best_provider = Airforce
+ best_provider = IterListProvider([HuggingChat, HuggingFace])
)
-
### Mistral ###
mistral_7b = Model(
name = "mistral-7b",
base_provider = "Mistral",
- best_provider = IterListProvider([DeepInfraChat, Cloudflare, Airforce, DeepInfra])
+ best_provider = IterListProvider([Free2GPT])
)
mixtral_8x7b = Model(
name = "mixtral-8x7b",
base_provider = "Mistral",
- best_provider = IterListProvider([DDG, ReplicateHome, DeepInfraChat, ChatHub, Airforce, DeepInfra])
-)
-
-mixtral_8x22b = Model(
- name = "mixtral-8x22b",
- base_provider = "Mistral",
- best_provider = IterListProvider([DeepInfraChat, Airforce])
+ best_provider = DDG
)
mistral_nemo = Model(
@@ -297,22 +217,16 @@ mistral_nemo = Model(
best_provider = IterListProvider([HuggingChat, HuggingFace])
)
-mistral_large = Model(
- name = "mistral-large",
- base_provider = "Mistral",
- best_provider = IterListProvider([Editee, GizAI])
-)
-
### NousResearch ###
-mixtral_8x7b_dpo = Model(
- name = "mixtral-8x7b-dpo",
+hermes_2_pro = Model(
+ name = "hermes-2-pro",
base_provider = "NousResearch",
best_provider = Airforce
)
-yi_34b = Model(
- name = "yi-34b",
+hermes_2_dpo = Model(
+ name = "hermes-2-dpo",
base_provider = "NousResearch",
best_provider = Airforce
)
@@ -328,13 +242,7 @@ hermes_3 = Model(
phi_2 = Model(
name = "phi-2",
base_provider = "Microsoft",
- best_provider = Cloudflare
-)
-
-phi_3_medium_4k = Model(
- name = "phi-3-medium-4k",
- base_provider = "Microsoft",
- best_provider = DeepInfraChat
+ best_provider = IterListProvider([Airforce])
)
phi_3_5_mini = Model(
@@ -348,13 +256,13 @@ phi_3_5_mini = Model(
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'Google DeepMind',
- best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, GPROChat, NexraGeminiPro, AmigoChat, Editee, GizAI, Airforce, Liaobots])
+ best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, FreeGpt, Liaobots])
)
gemini_flash = Model(
name = 'gemini-flash',
base_provider = 'Google DeepMind',
- best_provider = IterListProvider([Blackbox, GizAI, Airforce, Liaobots])
+ best_provider = IterListProvider([Blackbox, GizAI, Liaobots])
)
gemini = Model(
@@ -364,41 +272,10 @@ gemini = Model(
)
# gemma
-gemma_2b_9b = Model(
- name = 'gemma-2b-9b',
- base_provider = 'Google',
- best_provider = Airforce
-)
-
-gemma_2b_27b = Model(
- name = 'gemma-2b-27b',
- base_provider = 'Google',
- best_provider = IterListProvider([DeepInfraChat, Airforce])
-)
-
gemma_2b = Model(
name = 'gemma-2b',
base_provider = 'Google',
- best_provider = IterListProvider([ReplicateHome, Airforce])
-)
-
-gemma_7b = Model(
- name = 'gemma-7b',
- base_provider = 'Google',
- best_provider = Cloudflare
-)
-
-# gemma 2
-gemma_2_27b = Model(
- name = 'gemma-2-27b',
- base_provider = 'Google',
- best_provider = Airforce
-)
-
-gemma_2 = Model(
- name = 'gemma-2',
- base_provider = 'Google',
- best_provider = ChatHub
+ best_provider = ReplicateHome
)
@@ -413,26 +290,26 @@ claude_2_1 = Model(
claude_3_opus = Model(
name = 'claude-3-opus',
base_provider = 'Anthropic',
- best_provider = IterListProvider([Airforce, Liaobots])
+ best_provider = IterListProvider([Liaobots])
)
claude_3_sonnet = Model(
name = 'claude-3-sonnet',
base_provider = 'Anthropic',
- best_provider = IterListProvider([Airforce, Liaobots])
+ best_provider = IterListProvider([Liaobots])
)
claude_3_haiku = Model(
name = 'claude-3-haiku',
base_provider = 'Anthropic',
- best_provider = IterListProvider([DDG, Airforce, GizAI, Liaobots])
+ best_provider = IterListProvider([DDG, Liaobots])
)
# claude 3.5
claude_3_5_sonnet = Model(
name = 'claude-3.5-sonnet',
base_provider = 'Anthropic',
- best_provider = IterListProvider([Blackbox, Editee, AmigoChat, Airforce, GizAI, Liaobots])
+ best_provider = IterListProvider([Blackbox, Liaobots])
)
@@ -448,7 +325,7 @@ reka_core = Model(
blackboxai = Model(
name = 'blackboxai',
base_provider = 'Blackbox AI',
- best_provider = IterListProvider([Blackbox, NexraBlackbox])
+ best_provider = Blackbox
)
blackboxai_pro = Model(
@@ -457,15 +334,6 @@ blackboxai_pro = Model(
best_provider = Blackbox
)
-
-### Databricks ###
-dbrx_instruct = Model(
- name = 'dbrx-instruct',
- base_provider = 'Databricks',
- best_provider = IterListProvider([Airforce, DeepInfra])
-)
-
-
### CohereForAI ###
command_r_plus = Model(
name = 'command-r-plus',
@@ -474,100 +342,35 @@ command_r_plus = Model(
)
-### iFlytek ###
-sparkdesk_v1_1 = Model(
- name = 'sparkdesk-v1.1',
- base_provider = 'iFlytek',
- best_provider = FreeChatgpt
-)
-
-
### Qwen ###
-# qwen 1
-qwen_1_5_0_5b = Model(
- name = 'qwen-1.5-0.5b',
- base_provider = 'Qwen',
- best_provider = Cloudflare
-)
-
+# qwen 1_5
qwen_1_5_7b = Model(
name = 'qwen-1.5-7b',
base_provider = 'Qwen',
- best_provider = IterListProvider([Cloudflare, Airforce])
-)
-
-qwen_1_5_14b = Model(
- name = 'qwen-1.5-14b',
- base_provider = 'Qwen',
- best_provider = IterListProvider([FreeChatgpt, Cloudflare, Airforce])
-)
-
-qwen_1_5_72b = Model(
- name = 'qwen-1.5-72b',
- base_provider = 'Qwen',
- best_provider = Airforce
-)
-
-qwen_1_5_110b = Model(
- name = 'qwen-1.5-110b',
- base_provider = 'Qwen',
- best_provider = Airforce
-)
-
-qwen_1_5_1_8b = Model(
- name = 'qwen-1.5-1.8b',
- base_provider = 'Qwen',
- best_provider = Airforce
+ best_provider = Cloudflare
)
# qwen 2
qwen_2_72b = Model(
name = 'qwen-2-72b',
base_provider = 'Qwen',
- best_provider = IterListProvider([DeepInfraChat, HuggingChat, Airforce, HuggingFace])
+ best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
)
-qwen = Model(
- name = 'qwen',
+# qwen 2.5
+qwen_2_5_coder_32b = Model(
+ name = 'qwen-2.5-coder-32b',
base_provider = 'Qwen',
- best_provider = NexraQwen
-)
-
-
-### Zhipu AI ###
-glm_3_6b = Model(
- name = 'glm-3-6b',
- base_provider = 'Zhipu AI',
- best_provider = FreeChatgpt
-)
-
-glm_4_9b = Model(
- name = 'glm-4-9B',
- base_provider = 'Zhipu AI',
- best_provider = FreeChatgpt
-)
-
-
-### 01-ai ###
-yi_1_5_9b = Model(
- name = 'yi-1.5-9b',
- base_provider = '01-ai',
- best_provider = FreeChatgpt
+ best_provider = IterListProvider([HuggingChat, HuggingFace])
)
### Upstage ###
-solar_1_mini = Model(
- name = 'solar-1-mini',
+solar_mini = Model(
+ name = 'solar-mini',
base_provider = 'Upstage',
best_provider = Upstage
)
-solar_10_7b = Model(
- name = 'solar-10-7b',
- base_provider = 'Upstage',
- best_provider = Airforce
-)
-
solar_pro = Model(
name = 'solar-pro',
base_provider = 'Upstage',
@@ -583,23 +386,17 @@ pi = Model(
)
### DeepSeek ###
-deepseek = Model(
- name = 'deepseek',
+deepseek_coder = Model(
+ name = 'deepseek-coder',
base_provider = 'DeepSeek',
best_provider = Airforce
)
### WizardLM ###
-wizardlm_2_7b = Model(
- name = 'wizardlm-2-7b',
- base_provider = 'WizardLM',
- best_provider = DeepInfraChat
-)
-
wizardlm_2_8x22b = Model(
name = 'wizardlm-2-8x22b',
base_provider = 'WizardLM',
- best_provider = IterListProvider([DeepInfraChat, Airforce])
+ best_provider = IterListProvider([DeepInfraChat])
)
### Yorickvp ###
@@ -609,50 +406,11 @@ llava_13b = Model(
best_provider = ReplicateHome
)
-
-### OpenBMB ###
-minicpm_llama_3_v2_5 = Model(
- name = 'minicpm-llama-3-v2.5',
- base_provider = 'OpenBMB',
- best_provider = DeepInfraChat
-)
-
-
-### Lzlv ###
-lzlv_70b = Model(
- name = 'lzlv-70b',
- base_provider = 'Lzlv',
- best_provider = DeepInfraChat
-)
-
-
### OpenChat ###
openchat_3_5 = Model(
name = 'openchat-3.5',
base_provider = 'OpenChat',
- best_provider = Cloudflare
-)
-
-openchat_3_6_8b = Model(
- name = 'openchat-3.6-8b',
- base_provider = 'OpenChat',
- best_provider = DeepInfraChat
-)
-
-
-### Phind ###
-phind_codellama_34b_v2 = Model(
- name = 'phind-codellama-34b-v2',
- base_provider = 'Phind',
- best_provider = DeepInfraChat
-)
-
-
-### Cognitive Computations ###
-dolphin_2_9_1_llama_3_70b = Model(
- name = 'dolphin-2.9.1-llama-3-70b',
- base_provider = 'Cognitive Computations',
- best_provider = DeepInfraChat
+ best_provider = Airforce
)
@@ -669,12 +427,18 @@ grok_2_mini = Model(
best_provider = Liaobots
)
+grok_beta = Model(
+ name = 'grok-beta',
+ base_provider = 'x.ai',
+ best_provider = Liaobots
+)
+
### Perplexity AI ###
sonar_online = Model(
name = 'sonar-online',
base_provider = 'Perplexity AI',
- best_provider = IterListProvider([ChatHub, PerplexityLabs])
+ best_provider = IterListProvider([PerplexityLabs])
)
sonar_chat = Model(
@@ -683,51 +447,49 @@ sonar_chat = Model(
best_provider = PerplexityLabs
)
-
-### Gryphe ###
-mythomax_l2_13b = Model(
- name = 'mythomax-l2-13b',
- base_provider = 'Gryphe',
- best_provider = Airforce
+### Nvidia ###
+nemotron_70b = Model(
+ name = 'nemotron-70b',
+ base_provider = 'Nvidia',
+ best_provider = IterListProvider([HuggingChat, HuggingFace])
)
-### Pawan ###
-cosmosrp = Model(
- name = 'cosmosrp',
- base_provider = 'Pawan',
+### Teknium ###
+openhermes_2_5 = Model(
+ name = 'openhermes-2.5',
+ base_provider = 'Teknium',
best_provider = Airforce
)
-
-### TheBloke ###
-german_7b = Model(
- name = 'german-7b',
- base_provider = 'TheBloke',
- best_provider = Cloudflare
+### Liquid ###
+lfm_40b = Model(
+ name = 'lfm-40b',
+ base_provider = 'Liquid',
+ best_provider = IterListProvider([Airforce, PerplexityLabs])
)
-### Tinyllama ###
-tinyllama_1_1b = Model(
- name = 'tinyllama-1.1b',
- base_provider = 'Tinyllama',
- best_provider = Cloudflare
+### DiscoResearch ###
+german_7b = Model(
+ name = 'german-7b',
+ base_provider = 'DiscoResearch',
+ best_provider = Airforce
)
-### Fblgit ###
-cybertron_7b = Model(
- name = 'cybertron-7b',
- base_provider = 'Fblgit',
- best_provider = Cloudflare
+### HuggingFaceH4 ###
+zephyr_7b = Model(
+ name = 'zephyr-7b',
+ base_provider = 'HuggingFaceH4',
+ best_provider = Airforce
)
-### Nvidia ###
-nemotron_70b = Model(
- name = 'nemotron-70b',
- base_provider = 'Nvidia',
- best_provider = IterListProvider([HuggingChat, HuggingFace])
+### Inferless ###
+neural_7b = Model(
+ name = 'neural-7b',
+ base_provider = 'inferless',
+ best_provider = Airforce
)
@@ -737,20 +499,6 @@ nemotron_70b = Model(
#############
### Stability AI ###
-sdxl_turbo = Model(
- name = 'sdxl-turbo',
- base_provider = 'Stability AI',
- best_provider = NexraSDTurbo
-
-)
-
-sdxl_lora = Model(
- name = 'sdxl-lora',
- base_provider = 'Stability AI',
- best_provider = NexraSDLora
-
-)
-
sdxl = Model(
name = 'sdxl',
base_provider = 'Stability AI',
@@ -758,13 +506,6 @@ sdxl = Model(
)
-sd_1_5 = Model(
- name = 'sd-1.5',
- base_provider = 'Stability AI',
- best_provider = IterListProvider([NexraSD15, GizAI])
-
-)
-
sd_3 = Model(
name = 'sd-3',
base_provider = 'Stability AI',
@@ -772,13 +513,6 @@ sd_3 = Model(
)
-sd_3_5 = Model(
- name = 'sd-3.5',
- base_provider = 'Stability AI',
- best_provider = GizAI
-
-)
-
### Playground ###
playground_v2_5 = Model(
name = 'playground-v2.5',
@@ -792,21 +526,21 @@ playground_v2_5 = Model(
flux = Model(
name = 'flux',
base_provider = 'Flux AI',
- best_provider = IterListProvider([Airforce, Blackbox])
+ best_provider = IterListProvider([Blackbox, AIUncensored, Airforce])
)
flux_pro = Model(
name = 'flux-pro',
base_provider = 'Flux AI',
- best_provider = IterListProvider([NexraFluxPro, AmigoChat])
+ best_provider = IterListProvider([Airforce])
)
flux_realism = Model(
name = 'flux-realism',
base_provider = 'Flux AI',
- best_provider = IterListProvider([Airforce, AmigoChat])
+ best_provider = IterListProvider([Airforce])
)
@@ -845,45 +579,9 @@ flux_4o = Model(
)
-flux_schnell = Model(
- name = 'flux-schnell',
- base_provider = 'Flux AI',
- best_provider = IterListProvider([ReplicateHome, GizAI])
-
-)
-
-### OpenAI ###
-dalle_2 = Model(
- name = 'dalle-2',
- base_provider = 'OpenAI',
- best_provider = NexraDallE2
-
-)
-
-dalle = Model(
- name = 'dalle',
- base_provider = 'OpenAI',
- best_provider = NexraDallE
-
-)
-
-### Midjourney ###
-midjourney = Model(
- name = 'midjourney',
- base_provider = 'Midjourney',
- best_provider = NexraMidjourney
-
-)
### Other ###
-emi = Model(
- name = 'emi',
- base_provider = '',
- best_provider = NexraEmi
-
-)
-
any_dark = Model(
name = 'any-dark',
base_provider = '',
@@ -905,9 +603,6 @@ class ModelUtils:
############
### OpenAI ###
-# gpt-3
-'gpt-3': gpt_3,
-
# gpt-3.5
'gpt-3.5-turbo': gpt_35_turbo,
@@ -927,11 +622,9 @@ class ModelUtils:
# llama-2
'llama-2-7b': llama_2_7b,
-'llama-2-13b': llama_2_13b,
# llama-3
'llama-3-8b': llama_3_8b,
-'llama-3-70b': llama_3_70b,
# llama-3.1
'llama-3.1-8b': llama_3_1_8b,
@@ -940,35 +633,26 @@ class ModelUtils:
# llama-3.2
'llama-3.2-1b': llama_3_2_1b,
-'llama-3.2-3b': llama_3_2_3b,
'llama-3.2-11b': llama_3_2_11b,
-'llama-3.2-90b': llama_3_2_90b,
-
-# llamaguard
-'llamaguard-7b': llamaguard_7b,
-'llamaguard-2-8b': llamaguard_2_8b,
-
+
### Mistral ###
'mistral-7b': mistral_7b,
'mixtral-8x7b': mixtral_8x7b,
-'mixtral-8x22b': mixtral_8x22b,
'mistral-nemo': mistral_nemo,
-'mistral-large': mistral_large,
### NousResearch ###
-'mixtral-8x7b-dpo': mixtral_8x7b_dpo,
+'hermes-2-pro': hermes_2_pro,
+'hermes-2-dpo': hermes_2_dpo,
'hermes-3': hermes_3,
-
-'yi-34b': yi_34b,
-
-
+
+
### Microsoft ###
'phi-2': phi_2,
-'phi_3_medium-4k': phi_3_medium_4k,
'phi-3.5-mini': phi_3_5_mini,
+
### Google ###
# gemini
'gemini': gemini,
@@ -977,13 +661,6 @@ class ModelUtils:
# gemma
'gemma-2b': gemma_2b,
-'gemma-2b-9b': gemma_2b_9b,
-'gemma-2b-27b': gemma_2b_27b,
-'gemma-7b': gemma_7b,
-
-# gemma-2
-'gemma-2': gemma_2,
-'gemma-2-27b': gemma_2_27b,
### Anthropic ###
@@ -1010,50 +687,34 @@ class ModelUtils:
### CohereForAI ###
'command-r+': command_r_plus,
-
-### Databricks ###
-'dbrx-instruct': dbrx_instruct,
-
### GigaChat ###
'gigachat': gigachat,
-
-### iFlytek ###
-'sparkdesk-v1.1': sparkdesk_v1_1,
-
+
### Qwen ###
-'qwen': qwen,
-'qwen-1.5-0.5b': qwen_1_5_0_5b,
+# qwen 1.5
'qwen-1.5-7b': qwen_1_5_7b,
-'qwen-1.5-14b': qwen_1_5_14b,
-'qwen-1.5-72b': qwen_1_5_72b,
-'qwen-1.5-110b': qwen_1_5_110b,
-'qwen-1.5-1.8b': qwen_1_5_1_8b,
+
+# qwen 2
'qwen-2-72b': qwen_2_72b,
-
-
-### Zhipu AI ###
-'glm-3-6b': glm_3_6b,
-'glm-4-9b': glm_4_9b,
-
-
-### 01-ai ###
-'yi-1.5-9b': yi_1_5_9b,
-
+
+# qwen 2.5
+'qwen-2.5-coder-32b': qwen_2_5_coder_32b,
+
### Upstage ###
-'solar-mini': solar_1_mini,
-'solar-10-7b': solar_10_7b,
+'solar-mini': solar_mini,
'solar-pro': solar_pro,
### Inflection ###
'pi': pi,
+
### DeepSeek ###
-'deepseek': deepseek,
+'deepseek-coder': deepseek_coder,
### Yorickvp ###
@@ -1061,63 +722,50 @@ class ModelUtils:
### WizardLM ###
-'wizardlm-2-7b': wizardlm_2_7b,
'wizardlm-2-8x22b': wizardlm_2_8x22b,
-
-
-### OpenBMB ###
-'minicpm-llama-3-v2.5': minicpm_llama_3_v2_5,
-
-
-### Lzlv ###
-'lzlv-70b': lzlv_70b,
-
+
### OpenChat ###
'openchat-3.5': openchat_3_5,
-'openchat-3.6-8b': openchat_3_6_8b,
-
-
-### Phind ###
-'phind-codellama-34b-v2': phind_codellama_34b_v2,
-
-
-### Cognitive Computations ###
-'dolphin-2.9.1-llama-3-70b': dolphin_2_9_1_llama_3_70b,
-
+
### x.ai ###
'grok-2': grok_2,
'grok-2-mini': grok_2_mini,
+'grok-beta': grok_beta,
### Perplexity AI ###
'sonar-online': sonar_online,
'sonar-chat': sonar_chat,
+
+
+### TheBloke ###
+'german-7b': german_7b,
+
+
+### Nvidia ###
+'nemotron-70b': nemotron_70b,
-### Gryphe ###
-'mythomax-l2-13b': sonar_chat,
-
-
-### Pawan ###
-'cosmosrp': cosmosrp,
+### Teknium ###
+'openhermes-2.5': openhermes_2_5,
+
+### Liquid ###
+'lfm-40b': lfm_40b,
+
-### TheBloke ###
+### DiscoResearch ###
'german-7b': german_7b,
-### Tinyllama ###
-'tinyllama-1.1b': tinyllama_1_1b,
+### HuggingFaceH4 ###
+'zephyr-7b': zephyr_7b,
-### Fblgit ###
-'cybertron-7b': cybertron_7b,
-
-
-### Nvidia ###
-'nemotron-70b': nemotron_70b,
+### Inferless ###
+'neural-7b': neural_7b,
@@ -1127,11 +775,7 @@ class ModelUtils:
### Stability AI ###
'sdxl': sdxl,
-'sdxl-lora': sdxl_lora,
-'sdxl-turbo': sdxl_turbo,
-'sd-1.5': sd_1_5,
'sd-3': sd_3,
-'sd-3.5': sd_3_5,
### Playground ###
@@ -1147,19 +791,9 @@ class ModelUtils:
'flux-disney': flux_disney,
'flux-pixel': flux_pixel,
'flux-4o': flux_4o,
-'flux-schnell': flux_schnell,
-
-
-### OpenAI ###
-'dalle': dalle,
-'dalle-2': dalle_2,
-
-### Midjourney ###
-'midjourney': midjourney,
### Other ###
-'emi': emi,
'any-dark': any_dark,
}