summary refs log tree commit diff stats
path: root/g4f/Provider
diff options
context:
space:
mode:
author	rkihacker <rkihacker@gmail.com>	2024-11-02 17:07:48 +0100
committer	GitHub <noreply@github.com>	2024-11-02 17:07:48 +0100
commit	56d696cf10f0b436e7212cbeb67929ae3639c311 (patch)
tree	858f25d175297d5ad613f4bd3d1762eec82ac9ee /g4f/Provider
parent	remove model prefix for claude (diff)
parent	Update (docs/providers-and-models.md) (diff)
download	gpt4free-56d696cf10f0b436e7212cbeb67929ae3639c311.tar
gpt4free-56d696cf10f0b436e7212cbeb67929ae3639c311.tar.gz
gpt4free-56d696cf10f0b436e7212cbeb67929ae3639c311.tar.bz2
gpt4free-56d696cf10f0b436e7212cbeb67929ae3639c311.tar.lz
gpt4free-56d696cf10f0b436e7212cbeb67929ae3639c311.tar.xz
gpt4free-56d696cf10f0b436e7212cbeb67929ae3639c311.tar.zst
gpt4free-56d696cf10f0b436e7212cbeb67929ae3639c311.zip
Diffstat (limited to 'g4f/Provider')
-rw-r--r--	g4f/Provider/AIChatFree.py	2
-rw-r--r--	g4f/Provider/AIUncensored.py	148
-rw-r--r--	g4f/Provider/AiChats.py	2
-rw-r--r--	g4f/Provider/Airforce.py	250
-rw-r--r--	g4f/Provider/Allyfy.py	88
-rw-r--r--	g4f/Provider/AmigoChat.py	2
-rw-r--r--	g4f/Provider/Blackbox.py	3
-rw-r--r--	g4f/Provider/airforce/AirforceChat.py	375
-rw-r--r--	g4f/Provider/airforce/AirforceImage.py	97
-rw-r--r--	g4f/Provider/airforce/__init__.py	2
10 files changed, 639 insertions, 330 deletions
diff --git a/g4f/Provider/AIChatFree.py b/g4f/Provider/AIChatFree.py
index 71c04681..6f4b8560 100644
--- a/g4f/Provider/AIChatFree.py
+++ b/g4f/Provider/AIChatFree.py
@@ -14,7 +14,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://aichatfree.info/"
- working = True
+ working = False
supports_stream = True
supports_message_history = True
default_model = 'gemini-pro'
diff --git a/g4f/Provider/AIUncensored.py b/g4f/Provider/AIUncensored.py
index d653191c..ce492b38 100644
--- a/g4f/Provider/AIUncensored.py
+++ b/g4f/Provider/AIUncensored.py
@@ -2,33 +2,49 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
+from itertools import cycle
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ..image import ImageResponse
+
class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.aiuncensored.info"
+ api_endpoints_text = [
+ "https://twitterclone-i0wr.onrender.com/api/chat",
+ "https://twitterclone-4e8t.onrender.com/api/chat",
+ "https://twitterclone-8wd1.onrender.com/api/chat",
+ ]
+ api_endpoints_image = [
+ "https://twitterclone-4e8t.onrender.com/api/image",
+ "https://twitterclone-i0wr.onrender.com/api/image",
+ "https://twitterclone-8wd1.onrender.com/api/image",
+ ]
+ api_endpoints_cycle_text = cycle(api_endpoints_text)
+ api_endpoints_cycle_image = cycle(api_endpoints_image)
working = True
supports_stream = True
supports_system_message = True
supports_message_history = True
- default_model = 'ai_uncensored'
- chat_models = [default_model]
- image_models = ['ImageGenerator']
- models = [*chat_models, *image_models]
-
- api_endpoints = {
- 'ai_uncensored': "https://twitterclone-i0wr.onrender.com/api/chat",
- 'ImageGenerator': "https://twitterclone-4e8t.onrender.com/api/image"
+ default_model = 'TextGenerations'
+ text_models = [default_model]
+ image_models = ['ImageGenerations']
+ models = [*text_models, *image_models]
+
+ model_aliases = {
+ #"": "TextGenerations",
+ "flux": "ImageGenerations",
}
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
else:
return cls.default_model
@@ -38,75 +54,63 @@ class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
- stream: bool = False,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
- if model in cls.chat_models:
- async with ClientSession(headers={"content-type": "application/json"}) as session:
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': 'https://www.aiuncensored.info',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://www.aiuncensored.info/',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ if model in cls.image_models:
+ prompt = messages[-1]['content']
data = {
- "messages": [
- {"role": "user", "content": format_prompt(messages)}
- ],
- "stream": stream
+ "prompt": prompt,
}
- async with session.post(cls.api_endpoints[model], json=data, proxy=proxy) as response:
+ api_endpoint = next(cls.api_endpoints_cycle_image)
+ async with session.post(api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
- if stream:
- async for chunk in cls._handle_streaming_response(response):
- yield chunk
- else:
- yield await cls._handle_non_streaming_response(response)
- elif model in cls.image_models:
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "origin": cls.url,
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": f"{cls.url}/",
- "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "cross-site",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
- }
- async with ClientSession(headers=headers) as session:
- prompt = messages[0]['content']
- data = {"prompt": prompt}
- async with session.post(cls.api_endpoints[model], json=data, proxy=proxy) as response:
+ response_data = await response.json()
+ image_url = response_data['image_url']
+ image_response = ImageResponse(images=image_url, alt=prompt)
+ yield image_response
+ elif model in cls.text_models:
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ]
+ }
+ api_endpoint = next(cls.api_endpoints_cycle_text)
+ async with session.post(api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
- result = await response.json()
- image_url = result.get('image_url', '')
- if image_url:
- yield ImageResponse(image_url, alt=prompt)
- else:
- yield "Failed to generate image. Please try again."
-
- @classmethod
- async def _handle_streaming_response(cls, response):
- async for line in response.content:
- line = line.decode('utf-8').strip()
- if line.startswith("data: "):
- if line == "data: [DONE]":
- break
- try:
- json_data = json.loads(line[6:])
- if 'data' in json_data:
- yield json_data['data']
- except json.JSONDecodeError:
- pass
-
- @classmethod
- async def _handle_non_streaming_response(cls, response):
- response_json = await response.json()
- return response_json.get('content', "Sorry, I couldn't generate a response.")
-
- @classmethod
- def validate_response(cls, response: str) -> str:
- return response
+ full_response = ""
+ async for line in response.content:
+ line = line.decode('utf-8')
+ if line.startswith("data: "):
+ try:
+ json_str = line[6:]
+ if json_str != "[DONE]":
+ data = json.loads(json_str)
+ if "data" in data:
+ full_response += data["data"]
+ yield data["data"]
+ except json.JSONDecodeError:
+ continue
diff --git a/g4f/Provider/AiChats.py b/g4f/Provider/AiChats.py
index 08492e24..7ff25639 100644
--- a/g4f/Provider/AiChats.py
+++ b/g4f/Provider/AiChats.py
@@ -11,7 +11,7 @@ from .helper import format_prompt
class AiChats(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://ai-chats.org"
api_endpoint = "https://ai-chats.org/chat/send2/"
- working = True
+ working = False
supports_message_history = True
default_model = 'gpt-4'
models = ['gpt-4', 'dalle']
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 015766f4..b7819f9a 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -1,105 +1,30 @@
from __future__ import annotations
-import random
-import json
-import re
+from typing import Any, Dict
+import inspect
+
from aiohttp import ClientSession
+
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import ImageResponse
-
-def split_long_message(message: str, max_length: int = 4000) -> list[str]:
- return [message[i:i+max_length] for i in range(0, len(message), max_length)]
+from .helper import format_prompt
+from .airforce.AirforceChat import AirforceChat
+from .airforce.AirforceImage import AirforceImage
class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://api.airforce"
- image_api_endpoint = "https://api.airforce/imagine2"
- text_api_endpoint = "https://api.airforce/chat/completions"
+ api_endpoint_completions = AirforceChat.api_endpoint_completions
+ api_endpoint_imagine2 = AirforceImage.api_endpoint_imagine2
working = True
+ supports_stream = AirforceChat.supports_stream
+ supports_system_message = AirforceChat.supports_system_message
+ supports_message_history = AirforceChat.supports_message_history
- default_model = 'llama-3-70b-chat'
-
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- text_models = [
- 'claude-3-haiku-20240307',
- 'claude-3-sonnet-20240229',
- 'claude-3-5-sonnet-20240620',
- 'claude-3-opus-20240229',
- 'chatgpt-4o-latest',
- 'gpt-4',
- 'gpt-4-turbo',
- 'gpt-4o-mini-2024-07-18',
- 'gpt-4o-mini',
- 'gpt-3.5-turbo',
- 'gpt-3.5-turbo-0125',
- 'gpt-3.5-turbo-1106',
- default_model,
- 'llama-3-70b-chat-turbo',
- 'llama-3-8b-chat',
- 'llama-3-8b-chat-turbo',
- 'llama-3-70b-chat-lite',
- 'llama-3-8b-chat-lite',
- 'llama-2-13b-chat',
- 'llama-3.1-405b-turbo',
- 'llama-3.1-70b-turbo',
- 'llama-3.1-8b-turbo',
- 'LlamaGuard-2-8b',
- 'Llama-Guard-7b',
- 'Llama-3.2-90B-Vision-Instruct-Turbo',
- 'Mixtral-8x7B-Instruct-v0.1',
- 'Mixtral-8x22B-Instruct-v0.1',
- 'Mistral-7B-Instruct-v0.1',
- 'Mistral-7B-Instruct-v0.2',
- 'Mistral-7B-Instruct-v0.3',
- 'Qwen1.5-7B-Chat',
- 'Qwen1.5-14B-Chat',
- 'Qwen1.5-72B-Chat',
- 'Qwen1.5-110B-Chat',
- 'Qwen2-72B-Instruct',
- 'gemma-2b-it',
- 'gemma-2-9b-it',
- 'gemma-2-27b-it',
- 'gemini-1.5-flash',
- 'gemini-1.5-pro',
- 'deepseek-llm-67b-chat',
- 'Nous-Hermes-2-Mixtral-8x7B-DPO',
- 'Nous-Hermes-2-Yi-34B',
- 'WizardLM-2-8x22B',
- 'SOLAR-10.7B-Instruct-v1.0',
- 'MythoMax-L2-13b',
- 'cosmosrp',
- ]
-
- image_models = [
- 'flux',
- 'flux-realism',
- 'flux-anime',
- 'flux-3d',
- 'flux-disney',
- 'flux-pixel',
- 'flux-4o',
- 'any-dark',
- ]
-
- models = [
- *text_models,
- *image_models,
- ]
+ default_model = AirforceChat.default_model
+ models = [*AirforceChat.text_models, *AirforceImage.image_models]
model_aliases = {
- "claude-3-haiku": "claude-3-haiku-20240307",
- "claude-3-sonnet": "claude-3-sonnet-20240229",
- "gpt-4o": "chatgpt-4o-latest",
- "llama-3-70b": "llama-3-70b-chat",
- "llama-3-8b": "llama-3-8b-chat",
- "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
- "qwen-1.5-7b": "Qwen1.5-7B-Chat",
- "gemma-2b": "gemma-2b-it",
- "gemini-flash": "gemini-1.5-flash",
- "mythomax-l2-13b": "MythoMax-L2-13b",
- "solar-10.7b": "SOLAR-10.7B-Instruct-v1.0",
+ **AirforceChat.model_aliases,
+ **AirforceImage.model_aliases
}
@classmethod
@@ -107,139 +32,28 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
if model in cls.models:
return model
elif model in cls.model_aliases:
- return cls.model_aliases.get(model, cls.default_model)
+ return cls.model_aliases[model]
else:
return cls.default_model
@classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- seed: int = None,
- size: str = "1:1",
- stream: bool = False,
- **kwargs
- ) -> AsyncResult:
+ async def create_async_generator(cls, model: str, messages: Messages, **kwargs) -> AsyncResult:
model = cls.get_model(model)
+
+ provider = AirforceChat if model in AirforceChat.text_models else AirforceImage
- if model in cls.image_models:
- async for result in cls._generate_image(model, messages, proxy, seed, size):
- yield result
- elif model in cls.text_models:
- async for result in cls._generate_text(model, messages, proxy, stream):
- yield result
-
- @classmethod
- async def _generate_image(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- seed: int = None,
- size: str = "1:1",
- **kwargs
- ) -> AsyncResult:
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "origin": "https://llmplayground.net",
- "user-agent": "Mozilla/5.0"
- }
-
- if seed is None:
- seed = random.randint(0, 100000)
-
- prompt = messages[-1]['content']
-
- async with ClientSession(headers=headers) as session:
- params = {
- "model": model,
- "prompt": prompt,
- "size": size,
- "seed": seed
- }
- async with session.get(f"{cls.image_api_endpoint}", params=params, proxy=proxy) as response:
- response.raise_for_status()
- content_type = response.headers.get('Content-Type', '').lower()
+ if model not in provider.models:
+ raise ValueError(f"Unsupported model: {model}")
- if 'application/json' in content_type:
- async for chunk in response.content.iter_chunked(1024):
- if chunk:
- yield chunk.decode('utf-8')
- elif 'image' in content_type:
- image_data = b""
- async for chunk in response.content.iter_chunked(1024):
- if chunk:
- image_data += chunk
- image_url = f"{cls.image_api_endpoint}?model={model}&prompt={prompt}&size={size}&seed={seed}"
- alt_text = f"Generated image for prompt: {prompt}"
- yield ImageResponse(images=image_url, alt=alt_text)
-
- @classmethod
- async def _generate_text(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- stream: bool = False,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "authorization": "Bearer missing api key",
- "content-type": "application/json",
- "user-agent": "Mozilla/5.0"
- }
+ # Get the signature of the provider's create_async_generator method
+ sig = inspect.signature(provider.create_async_generator)
+
+ # Filter kwargs to only include parameters that the provider's method accepts
+ filtered_kwargs = {k: v for k, v in kwargs.items() if k in sig.parameters}
- async with ClientSession(headers=headers) as session:
- formatted_prompt = cls._format_messages(messages)
- prompt_parts = split_long_message(formatted_prompt)
- full_response = ""
+ # Add model and messages to filtered_kwargs
+ filtered_kwargs['model'] = model
+ filtered_kwargs['messages'] = messages
- for part in prompt_parts:
- data = {
- "messages": [{"role": "user", "content": part}],
- "model": model,
- "max_tokens": 4096,
- "temperature": 1,
- "top_p": 1,
- "stream": stream
- }
- async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- part_response = ""
- if stream:
- async for line in response.content:
- if line:
- line = line.decode('utf-8').strip()
- if line.startswith("data: ") and line != "data: [DONE]":
- json_data = json.loads(line[6:])
- content = json_data['choices'][0]['delta'].get('content', '')
- part_response += content
- else:
- json_data = await response.json()
- content = json_data['choices'][0]['message']['content']
- part_response = content
-
- part_response = re.sub(
- r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
- '',
- part_response
- )
-
- part_response = re.sub(
- r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
- '',
- part_response
- )
-
- full_response += part_response
- yield full_response
-
- @classmethod
- def _format_messages(cls, messages: Messages) -> str:
- return " ".join([msg['content'] for msg in messages])
+ async for result in provider.create_async_generator(**filtered_kwargs):
+ yield result
diff --git a/g4f/Provider/Allyfy.py b/g4f/Provider/Allyfy.py
index bf607df4..53cf1da1 100644
--- a/g4f/Provider/Allyfy.py
+++ b/g4f/Provider/Allyfy.py
@@ -1,17 +1,28 @@
from __future__ import annotations
-
-from aiohttp import ClientSession
+import aiohttp
+import asyncio
import json
-
+import uuid
+from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
-class Allyfy(AsyncGeneratorProvider):
+class Allyfy(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://allyfy.chat"
api_endpoint = "https://chatbot.allyfy.chat/api/v1/message/stream/super/chat"
working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-3.5-turbo'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
@classmethod
async def create_async_generator(
@@ -21,50 +32,55 @@ class Allyfy(AsyncGeneratorProvider):
proxy: str = None,
**kwargs
) -> AsyncResult:
+ model = cls.get_model(model)
+ client_id = str(uuid.uuid4())
+
headers = {
- "accept": "text/event-stream",
- "accept-language": "en-US,en;q=0.9",
- "content-type": "application/json;charset=utf-8",
- "dnt": "1",
- "origin": "https://www.allyfy.chat",
- "priority": "u=1, i",
- "referer": "https://www.allyfy.chat/",
- "referrer": "https://www.allyfy.chat",
- 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126"',
+ 'accept': 'text/event-stream',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json;charset=utf-8',
+ 'origin': cls.url,
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': f"{cls.url}/",
+ 'referrer': cls.url,
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-site",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
}
+
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
- "messages": [{"content": prompt, "role": "user"}],
+ "messages": messages,
"content": prompt,
"baseInfo": {
- "clientId": "q08kdrde1115003lyedfoir6af0yy531",
+ "clientId": client_id,
"pid": "38281",
"channelId": "100000",
"locale": "en-US",
- "localZone": 180,
+ "localZone": 120,
"packageName": "com.cch.allyfy.webh",
}
}
- async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
+
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
- full_response = []
- async for line in response.content:
- line = line.decode().strip()
- if line.startswith("data:"):
- data_content = line[5:]
- if data_content == "[DONE]":
- break
- try:
- json_data = json.loads(data_content)
- if "content" in json_data:
- full_response.append(json_data["content"])
- except json.JSONDecodeError:
- continue
- yield "".join(full_response)
+ response_text = await response.text()
+
+ filtered_response = []
+ for line in response_text.splitlines():
+ if line.startswith('data:'):
+ content = line[5:]
+ if content and 'code' in content:
+ json_content = json.loads(content)
+ if json_content['content']:
+ filtered_response.append(json_content['content'])
+
+ final_response = ''.join(filtered_response)
+ yield final_response
diff --git a/g4f/Provider/AmigoChat.py b/g4f/Provider/AmigoChat.py
index f5027111..b086d5e1 100644
--- a/g4f/Provider/AmigoChat.py
+++ b/g4f/Provider/AmigoChat.py
@@ -13,7 +13,7 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://amigochat.io/chat/"
chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions"
image_api_endpoint = "https://api.amigochat.io/v1/images/generations"
- working = True
+ working = False
supports_stream = True
supports_system_message = True
supports_message_history = True
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 0013800e..e2595b02 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -274,7 +274,8 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"visitFromDelta": False,
"mobileClient": False,
"webSearchMode": web_search,
- "userSelectedModel": cls.userSelectedModel.get(model, model)
+ "userSelectedModel": cls.userSelectedModel.get(model, model),
+ "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc"
}
headers_chat = {
diff --git a/g4f/Provider/airforce/AirforceChat.py b/g4f/Provider/airforce/AirforceChat.py
new file mode 100644
index 00000000..b4b1eca3
--- /dev/null
+++ b/g4f/Provider/airforce/AirforceChat.py
@@ -0,0 +1,375 @@
+from __future__ import annotations
+import re
+from aiohttp import ClientSession
+import json
+from typing import List
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+def clean_response(text: str) -> str:
+ """Clean response from unwanted patterns."""
+ patterns = [
+ r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
+ r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
+ r"Rate limit \(\d+\/hour\) exceeded\. Join our discord for more: https:\/\/discord\.com\/invite\/\S+",
+ r"</s>", # zephyr-7b-beta
+ ]
+
+ for pattern in patterns:
+ text = re.sub(pattern, '', text)
+ return text.strip()
+
+def split_message(message: dict, chunk_size: int = 995) -> List[dict]:
+ """Split a message into chunks of specified size."""
+ content = message.get('content', '')
+ if len(content) <= chunk_size:
+ return [message]
+
+ chunks = []
+ while content:
+ chunk = content[:chunk_size]
+ content = content[chunk_size:]
+ chunks.append({
+ 'role': message['role'],
+ 'content': chunk
+ })
+ return chunks
+
+def split_messages(messages: Messages, chunk_size: int = 995) -> Messages:
+ """Split all messages that exceed chunk_size into smaller messages."""
+ result = []
+ for message in messages:
+ result.extend(split_message(message, chunk_size))
+ return result
+
+class AirforceChat(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "AirForce Chat"
+ api_endpoint_completions = "https://api.airforce/chat/completions" # Замініть на реальний ендпоінт
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'llama-3-70b-chat'
+ text_models = [
+ # anthropic
+ 'claude-3-haiku-20240307',
+ 'claude-3-sonnet-20240229',
+ 'claude-3-5-sonnet-20240620',
+ 'claude-3-5-sonnet-20241022',
+ 'claude-3-opus-20240229',
+
+ # openai
+ 'chatgpt-4o-latest',
+ 'gpt-4',
+ 'gpt-4-turbo',
+ 'gpt-4o-2024-05-13',
+ 'gpt-4o-mini-2024-07-18',
+ 'gpt-4o-mini',
+ 'gpt-4o-2024-08-06',
+ 'gpt-3.5-turbo',
+ 'gpt-3.5-turbo-0125',
+ 'gpt-3.5-turbo-1106',
+ 'gpt-4o',
+ 'gpt-4-turbo-2024-04-09',
+ 'gpt-4-0125-preview',
+ 'gpt-4-1106-preview',
+
+ # meta-llama
+ default_model,
+ 'llama-3-70b-chat-turbo',
+ 'llama-3-8b-chat',
+ 'llama-3-8b-chat-turbo',
+ 'llama-3-70b-chat-lite',
+ 'llama-3-8b-chat-lite',
+ 'llama-2-13b-chat',
+ 'llama-3.1-405b-turbo',
+ 'llama-3.1-70b-turbo',
+ 'llama-3.1-8b-turbo',
+ 'LlamaGuard-2-8b',
+ 'llamaguard-7b',
+ 'Llama-Vision-Free',
+ 'Llama-Guard-7b',
+ 'Llama-3.2-90B-Vision-Instruct-Turbo',
+ 'Meta-Llama-Guard-3-8B',
+ 'Llama-3.2-11B-Vision-Instruct-Turbo',
+ 'Llama-Guard-3-11B-Vision-Turbo',
+ 'Llama-3.2-3B-Instruct-Turbo',
+ 'Llama-3.2-1B-Instruct-Turbo',
+ 'llama-2-7b-chat-int8',
+ 'llama-2-7b-chat-fp16',
+ 'Llama 3.1 405B Instruct',
+ 'Llama 3.1 70B Instruct',
+ 'Llama 3.1 8B Instruct',
+
+ # mistral-ai
+ 'Mixtral-8x7B-Instruct-v0.1',
+ 'Mixtral-8x22B-Instruct-v0.1',
+ 'Mistral-7B-Instruct-v0.1',
+ 'Mistral-7B-Instruct-v0.2',
+ 'Mistral-7B-Instruct-v0.3',
+
+ # Gryphe
+ 'MythoMax-L2-13b-Lite',
+ 'MythoMax-L2-13b',
+
+ # openchat
+ 'openchat-3.5-0106',
+
+ # qwen
+ #'Qwen1.5-72B-Chat', Пуста відповідь
+ #'Qwen1.5-110B-Chat', Пуста відповідь
+ 'Qwen2-72B-Instruct',
+ 'Qwen2.5-7B-Instruct-Turbo',
+ 'Qwen2.5-72B-Instruct-Turbo',
+
+ # google
+ 'gemma-2b-it',
+ 'gemma-2-9b-it',
+ 'gemma-2-27b-it',
+
+ # gemini
+ 'gemini-1.5-flash',
+ 'gemini-1.5-pro',
+
+ # databricks
+ 'dbrx-instruct',
+
+ # deepseek-ai
+ 'deepseek-coder-6.7b-base',
+ 'deepseek-coder-6.7b-instruct',
+ 'deepseek-math-7b-instruct',
+
+ # NousResearch
+ 'deepseek-math-7b-instruct',
+ 'Nous-Hermes-2-Mixtral-8x7B-DPO',
+ 'hermes-2-pro-mistral-7b',
+
+ # teknium
+ 'openhermes-2.5-mistral-7b',
+
+ # microsoft
+ 'WizardLM-2-8x22B',
+ 'phi-2',
+
+ # upstage
+ 'SOLAR-10.7B-Instruct-v1.0',
+
+ # pawan
+ 'cosmosrp',
+
+ # liquid
+ 'lfm-40b-moe',
+
+ # DiscoResearch
+ 'discolm-german-7b-v1',
+
+ # tiiuae
+ 'falcon-7b-instruct',
+
+ # defog
+ 'sqlcoder-7b-2',
+
+ # tinyllama
+ 'tinyllama-1.1b-chat',
+
+ # HuggingFaceH4
+ 'zephyr-7b-beta',
+ ]
+
+ models = [*text_models]
+
+ model_aliases = {
+ # anthropic
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ "claude-3-sonnet": "claude-3-sonnet-20240229",
+ "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
+ "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
+ "claude-3-opus": "claude-3-opus-20240229",
+
+ # openai
+ "gpt-4o": "chatgpt-4o-latest",
+ #"gpt-4": "gpt-4",
+ #"gpt-4-turbo": "gpt-4-turbo",
+ "gpt-4o": "gpt-4o-2024-05-13",
+ "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+ #"gpt-4o-mini": "gpt-4o-mini",
+ "gpt-4o": "gpt-4o-2024-08-06",
+ "gpt-3.5-turbo": "gpt-3.5-turbo",
+ "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
+ "gpt-3.5-turbo": "gpt-3.5-turbo-1106",
+ #"gpt-4o": "gpt-4o",
+ "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
+ "gpt-4": "gpt-4-0125-preview",
+ "gpt-4": "gpt-4-1106-preview",
+
+ # meta-llama
+ "llama-3-70b": "llama-3-70b-chat",
+ "llama-3-8b": "llama-3-8b-chat",
+ "llama-3-8b": "llama-3-8b-chat-turbo",
+ "llama-3-70b": "llama-3-70b-chat-lite",
+ "llama-3-8b": "llama-3-8b-chat-lite",
+ "llama-2-13b": "llama-2-13b-chat",
+ "llama-3.1-405b": "llama-3.1-405b-turbo",
+ "llama-3.1-70b": "llama-3.1-70b-turbo",
+ "llama-3.1-8b": "llama-3.1-8b-turbo",
+ "llamaguard-2-8b": "LlamaGuard-2-8b",
+ "llamaguard-7b": "llamaguard-7b",
+ #"llama_vision_free": "Llama-Vision-Free", # Unknown
+ "llamaguard-7b": "Llama-Guard-7b",
+ "llama-3.2-90b": "Llama-3.2-90B-Vision-Instruct-Turbo",
+ "llamaguard-3-8b": "Meta-Llama-Guard-3-8B",
+ "llama-3.2-11b": "Llama-3.2-11B-Vision-Instruct-Turbo",
+ "llamaguard-3-11b": "Llama-Guard-3-11B-Vision-Turbo",
+ "llama-3.2-3b": "Llama-3.2-3B-Instruct-Turbo",
+ "llama-3.2-1b": "Llama-3.2-1B-Instruct-Turbo",
+ "llama-2-7b": "llama-2-7b-chat-int8",
+ "llama-2-7b": "llama-2-7b-chat-fp16",
+ "llama-3.1-405b": "Llama 3.1 405B Instruct",
+ "llama-3.1-70b": "Llama 3.1 70B Instruct",
+ "llama-3.1-8b": "Llama 3.1 8B Instruct",
+
+ # mistral-ai
+ "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
+ "mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1",
+ "mixtral-8x7b": "Mistral-7B-Instruct-v0.1",
+ "mixtral-8x7b": "Mistral-7B-Instruct-v0.2",
+ "mixtral-8x7b": "Mistral-7B-Instruct-v0.3",
+
+ # Gryphe
+ "mythomax-13b": "MythoMax-L2-13b-Lite",
+ "mythomax-13b": "MythoMax-L2-13b",
+
+ # openchat
+ "openchat-3.5": "openchat-3.5-0106",
+
+ # qwen
+ #"qwen-1.5-72b": "Qwen1.5-72B-Chat", # Empty answer
+ #"qwen-1.5-110b": "Qwen1.5-110B-Chat", # Empty answer
+ "qwen-2-72b": "Qwen2-72B-Instruct",
+ "qwen-2-5-7b": "Qwen2.5-7B-Instruct-Turbo",
+ "qwen-2-5-72b": "Qwen2.5-72B-Instruct-Turbo",
+
+ # google
+ "gemma-2b": "gemma-2b-it",
+ "gemma-2-9b": "gemma-2-9b-it",
+ "gemma-2b-27b": "gemma-2-27b-it",
+
+ # gemini
+ "gemini-flash": "gemini-1.5-flash",
+ "gemini-pro": "gemini-1.5-pro",
+
+ # databricks
+ "dbrx-instruct": "dbrx-instruct",
+
+ # deepseek-ai
+ #"deepseek-coder": "deepseek-coder-6.7b-base",
+ "deepseek-coder": "deepseek-coder-6.7b-instruct",
+ #"deepseek-math": "deepseek-math-7b-instruct",
+
+ # NousResearch
+ #"deepseek-math": "deepseek-math-7b-instruct",
+ "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
+ "hermes-2": "hermes-2-pro-mistral-7b",
+
+ # teknium
+ "openhermes-2.5": "openhermes-2.5-mistral-7b",
+
+ # microsoft
+ "wizardlm-2-8x22b": "WizardLM-2-8x22B",
+ #"phi-2": "phi-2",
+
+ # upstage
+ "solar-10-7b": "SOLAR-10.7B-Instruct-v1.0",
+
+ # pawan
+ #"cosmosrp": "cosmosrp",
+
+ # liquid
+ "lfm-40b": "lfm-40b-moe",
+
+ # DiscoResearch
+ "german-7b": "discolm-german-7b-v1",
+
+ # tiiuae
+ #"falcon-7b": "falcon-7b-instruct",
+
+ # defog
+ #"sqlcoder-7b": "sqlcoder-7b-2",
+
+ # tinyllama
+ #"tinyllama-1b": "tinyllama-1.1b-chat",
+
+ # HuggingFaceH4
+ "zephyr-7b": "zephyr-7b-beta",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ proxy: str = None,
+ max_tokens: str = 4096,
+ temperature: str = 1,
+ top_p: str = 1,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ chunked_messages = split_messages(messages)
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'authorization': 'Bearer missing api key',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': 'https://llmplayground.net',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://llmplayground.net/',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ data = {
+ "messages": chunked_messages,
+ "model": model,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stream": stream
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint_completions, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ text = ""
+ if stream:
+ async for line in response.content:
+ line = line.decode('utf-8')
+ if line.startswith('data: '):
+ json_str = line[6:]
+ try:
+ chunk = json.loads(json_str)
+ if 'choices' in chunk and chunk['choices']:
+ content = chunk['choices'][0].get('delta', {}).get('content', '')
+ text += content # Збираємо дельти
+ except json.JSONDecodeError as e:
+ print(f"Error decoding JSON: {json_str}, Error: {e}")
+ elif line.strip() == "[DONE]":
+ break
+ yield clean_response(text)
+ else:
+ response_json = await response.json()
+ text = response_json["choices"][0]["message"]["content"]
+ yield clean_response(text)
+
diff --git a/g4f/Provider/airforce/AirforceImage.py b/g4f/Provider/airforce/AirforceImage.py
new file mode 100644
index 00000000..010d1a94
--- /dev/null
+++ b/g4f/Provider/airforce/AirforceImage.py
@@ -0,0 +1,97 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import random
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+
+
+class AirforceImage(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Airforce Image"
+ #url = "https://api.airforce"
+ api_endpoint_imagine2 = "https://api.airforce/imagine2"
+ #working = True
+
+ default_model = 'flux'
+ image_models = [
+ 'flux',
+ 'flux-realism',
+ 'flux-anime',
+ 'flux-3d',
+ 'flux-disney',
+ 'flux-pixel',
+ 'flux-4o',
+ 'any-dark',
+ 'stable-diffusion-xl-base',
+ 'stable-diffusion-xl-lightning',
+ ]
+ models = [*image_models]
+
+ model_aliases = {
+ "sdxl": "stable-diffusion-xl-base",
+ "sdxl": "stable-diffusion-xl-lightning",
+ }
+
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ size: str = '1:1',
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'authorization': 'Bearer missing api key',
+ 'cache-control': 'no-cache',
+ 'origin': 'https://llmplayground.net',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://llmplayground.net/',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = messages[-1]['content']
+ seed = random.randint(0, 4294967295)
+ params = {
+ 'model': model,
+ 'prompt': prompt,
+ 'size': size,
+ 'seed': str(seed)
+ }
+ async with session.get(cls.api_endpoint_imagine2, params=params, proxy=proxy) as response:
+ response.raise_for_status()
+ if response.status == 200:
+ content_type = response.headers.get('Content-Type', '')
+ if 'image' in content_type:
+ image_url = str(response.url)
+ yield ImageResponse(image_url, alt="Airforce generated image")
+ else:
+ content = await response.text()
+ yield f"Unexpected content type: {content_type}\nResponse content: {content}"
+ else:
+ error_content = await response.text()
+ yield f"Error: {error_content}"
diff --git a/g4f/Provider/airforce/__init__.py b/g4f/Provider/airforce/__init__.py
new file mode 100644
index 00000000..5ffa6d31
--- /dev/null
+++ b/g4f/Provider/airforce/__init__.py
@@ -0,0 +1,2 @@
+from .AirforceChat import AirforceChat
+from .AirforceImage import AirforceImage