author    kqlio67 <kqlio67@users.noreply.github.com>  2024-10-02 23:25:26 +0200
committer kqlio67 <kqlio67@users.noreply.github.com>  2024-10-02 23:25:26 +0200
commit    6bf4c4bc37b71a7833b5450c5c66de6fdfa0eda3
tree      b51f553728c4e80a6c4fb4f1b98403ba62d0cf0c
parent    feat(g4f/client/async_client.py): improve async generator handling and logging
-rw-r--r--  g4f/Provider/Airforce.py  492
1 file changed, 332 insertions(+), 160 deletions(-)
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 51f8ba55..986281a6 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -1,76 +1,200 @@
from __future__ import annotations
-
-from aiohttp import ClientSession, ClientResponseError
+import random
import json
+from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse
-from .helper import format_prompt
-from ..errors import ResponseStatusError
+
+def split_long_message(message: str, max_length: int = 4000) -> list[str]:
+    return [message[i:i+max_length] for i in range(0, len(message), max_length)]
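+# Example (illustrative): a 9000-char message splits into chunks of
+# 4000, 4000 and 1000 characters, each sent as its own request below.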
class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Airforce"
    url = "https://api.airforce"
-    text_api_endpoint = "https://api.airforce/chat/completions"
    image_api_endpoint = "https://api.airforce/imagine2"
+    text_api_endpoint = "https://api.airforce/chat/completions"
    working = True
+
+    default_model = 'llama-3-70b-chat'
+
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True
-    default_model = 'llama-3-70b-chat'
+
    text_models = [
-        # Open source models
-        'llama-2-13b-chat',
-        'llama-3-70b-chat',
-        'llama-3-70b-chat-turbo',
-        'llama-3-70b-chat-lite',
-        'llama-3-8b-chat',
-        'llama-3-8b-chat-turbo',
-        'llama-3-8b-chat-lite',
-        'llama-3.1-405b-turbo',
-        'llama-3.1-70b-turbo',
-        'llama-3.1-8b-turbo',
-        'LlamaGuard-2-8b',
-        'Llama-Guard-7b',
-        'Meta-Llama-Guard-3-8B',
-        'Mixtral-8x7B-Instruct-v0.1',
-        'Mixtral-8x22B-Instruct-v0.1',
-        'Mistral-7B-Instruct-v0.1',
-        'Mistral-7B-Instruct-v0.2',
-        'Mistral-7B-Instruct-v0.3',
-        'Qwen1.5-72B-Chat',
-        'Qwen1.5-110B-Chat',
-        'Qwen2-72B-Instruct',
-        'gemma-2b-it',
-        'gemma-2-9b-it',
-        'gemma-2-27b-it',
-        'dbrx-instruct',
-        'deepseek-llm-67b-chat',
-        'Nous-Hermes-2-Mixtral-8x7B-DPO',
-        'Nous-Hermes-2-Yi-34B',
-        'WizardLM-2-8x22B',
-        'SOLAR-10.7B-Instruct-v1.0',
-        'StripedHyena-Nous-7B',
-        'sparkdesk',
-
-        # Other models
-        'chatgpt-4o-latest',
-        'gpt-4',
-        'gpt-4-turbo',
-        'gpt-4o-mini-2024-07-18',
-        'gpt-4o-mini',
-        'gpt-4o',
-        'gpt-3.5-turbo',
-        'gpt-3.5-turbo-0125',
-        'gpt-3.5-turbo-1106',
-        'gpt-3.5-turbo-16k',
-        'gpt-3.5-turbo-0613',
-        'gpt-3.5-turbo-16k-0613',
-        'gemini-1.5-flash',
-        'gemini-1.5-pro',
+        # anthropic
+        'claude-3-haiku-20240307',
+        'claude-3-sonnet-20240229',
+        'claude-3-5-sonnet-20240620',
+        'claude-3-opus-20240229',
+
+        # openai
+        'chatgpt-4o-latest',
+        'gpt-4',
+        #'gpt-4-0613',
+        'gpt-4-turbo',
+        'gpt-4o-mini-2024-07-18',
+        'gpt-4o-mini',
+        'gpt-3.5-turbo',
+        'gpt-3.5-turbo-0125',
+        'gpt-3.5-turbo-1106',
+        #'gpt-3.5-turbo-16k', # No response from the API.
+        #'gpt-3.5-turbo-0613', # No response from the API.
+        #'gpt-3.5-turbo-16k-0613', # No response from the API.
+        'gpt-4o',
+        #'o1-mini', # No response from the API.
+
+        # meta-llama
+        'llama-3-70b-chat',
+        'llama-3-70b-chat-turbo',
+        'llama-3-8b-chat',
+        'llama-3-8b-chat-turbo',
+        'llama-3-70b-chat-lite',
+        'llama-3-8b-chat-lite',
+        #'llama-2-70b-chat', # Failed to load response after multiple retries.
+        'llama-2-13b-chat',
+        #'llama-2-7b-chat', # Failed to load response after multiple retries.
+        'llama-3.1-405b-turbo',
+        'llama-3.1-70b-turbo',
+        'llama-3.1-8b-turbo',
+        'LlamaGuard-2-8b',
+        'Llama-Guard-7b',
+        'Llama-3.2-90B-Vision-Instruct-Turbo',
+
+        # codellama
+        #'CodeLlama-7b-Python-hf', # Failed to load response after multiple retries.
+        #'CodeLlama-7b-Python',
+        #'CodeLlama-13b-Python-hf', # Failed to load response after multiple retries.
+        #'CodeLlama-34b-Python-hf', # Failed to load response after multiple retries.
+        #'CodeLlama-70b-Python-hf', # Failed to load response after multiple retries.
+
+        # 01-ai
+        #'Yi-34B-Chat', # Failed to load response after multiple retries.
+        #'Yi-34B', # Failed to load response after multiple retries.
+        #'Yi-6B', # Failed to load response after multiple retries.
+
+        # mistral-ai
+        #'Mixtral-8x7B-v0.1',
+        #'Mixtral-8x22B', # Failed to load response after multiple retries.
+        'Mixtral-8x7B-Instruct-v0.1',
+        'Mixtral-8x22B-Instruct-v0.1',
+        'Mistral-7B-Instruct-v0.1',
+        'Mistral-7B-Instruct-v0.2',
+        'Mistral-7B-Instruct-v0.3',
+
+        # openchat
+        #'openchat-3.5', # Failed to load response after multiple retries.
+
+        # wizardlm
+        #'WizardLM-13B-V1.2', # Failed to load response after multiple retries.
+        #'WizardCoder-Python-34B-V1.0', # Failed to load response after multiple retries.
+
+        # qwen
+        #'Qwen1.5-0.5B-Chat', # Failed to load response after multiple retries.
+        #'Qwen1.5-1.8B-Chat', # Failed to load response after multiple retries.
+        #'Qwen1.5-4B-Chat', # Failed to load response after multiple retries.
+        'Qwen1.5-7B-Chat',
+        'Qwen1.5-14B-Chat',
+        'Qwen1.5-72B-Chat',
+        'Qwen1.5-110B-Chat',
+        'Qwen2-72B-Instruct',
+
+        # google
+        'gemma-2b-it',
+        #'gemma-7b-it', # Failed to load response after multiple retries.
+        #'gemma-2b', # Failed to load response after multiple retries.
+        #'gemma-7b', # Failed to load response after multiple retries.
+        'gemma-2-9b-it', # fix bug
+        'gemma-2-27b-it',
+
+        # gemini
+        'gemini-1.5-flash',
+        'gemini-1.5-pro',
+
+        # databricks
+        'dbrx-instruct',
+
+        # lmsys
+        #'vicuna-7b-v1.5', # Failed to load response after multiple retries.
+        #'vicuna-13b-v1.5', # Failed to load response after multiple retries.
+
+        # cognitivecomputations
+        #'dolphin-2.5-mixtral-8x7b', # Failed to load response after multiple retries.
+
+        # deepseek-ai
+        #'deepseek-coder-33b-instruct', # No response from the API.
+        #'deepseek-coder-67b-instruct', # Failed to load response after multiple retries.
+        'deepseek-llm-67b-chat',
+
+        # NousResearch
+        #'Nous-Capybara-7B-V1p9', # Failed to load response after multiple retries.
+        'Nous-Hermes-2-Mixtral-8x7B-DPO',
+        #'Nous-Hermes-2-Mixtral-8x7B-SFT', # Failed to load response after multiple retries.
+        #'Nous-Hermes-llama-2-7b', # Failed to load response after multiple retries.
+        #'Nous-Hermes-Llama2-13b', # Failed to load response after multiple retries.
+        'Nous-Hermes-2-Yi-34B',
+
+        # Open-Orca
+        #'Mistral-7B-OpenOrca', # Failed to load response after multiple retries.
+
+        # togethercomputer
+        #'alpaca-7b', # Failed to load response after multiple retries.
+
+        # teknium
+        #'OpenHermes-2-Mistral-7B', # Failed to load response after multiple retries.
+        #'OpenHermes-2.5-Mistral-7B', # Failed to load response after multiple retries.
+
+        # microsoft
+        'WizardLM-2-8x22B',
+
+        # Nexusflow
+        #'NexusRaven-V2-13B', # Failed to load response after multiple retries.
+
+        # Phind
+        #'Phind-CodeLlama-34B-v2', # Failed to load response after multiple retries.
+
+        # Snowflake
+        #'snowflake-arctic-instruct', # No response from the API.
+
+        # upstage
+        'SOLAR-10.7B-Instruct-v1.0',
+
+        # togethercomputer
+        #'StripedHyena-Hessian-7B', # Failed to load response after multiple retries.
+        #'StripedHyena-Nous-7B', # Failed to load response after multiple retries.
+        #'Llama-2-7B-32K-Instruct', # Failed to load response after multiple retries.
+        #'CodeLlama-13b-Instruct', # No response from the API.
+        #'evo-1-131k-base', # Failed to load response after multiple retries.
+        #'OLMo-7B-Instruct', # Failed to load response after multiple retries.
+
+        # garage-bAInd
+        #'Platypus2-70B-instruct', # Failed to load response after multiple retries.
+
+        # snorkelai
+        #'Snorkel-Mistral-PairRM-DPO', # Failed to load response after multiple retries.
+
+        # Undi95
+        #'ReMM-SLERP-L2-13B', # Failed to load response after multiple retries.
+
+        # Gryphe
+        'MythoMax-L2-13b',
+
+        # Austism
+        #'chronos-hermes-13b', # Failed to load response after multiple retries.
+
+        # Undi95
+        #'Toppy-M-7B', # Failed to load response after multiple retries.
+
+        # iFlytek
+        #'sparkdesk', # Failed to load response after multiple retries.
+
+        # pawan
+        'cosmosrp',
+
    ]
-
    image_models = [
        'flux',
        'flux-realism',
@@ -85,158 +209,206 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
    models = [
        *text_models,
-        *image_models
+        *image_models,
    ]
    model_aliases = {
-        # Open source models
-        "llama-2-13b": "llama-2-13b-chat",
+        # anthropic
+        "claude-3-haiku": "claude-3-haiku-20240307",
+        "claude-3-sonnet": "claude-3-sonnet-20240229",
+        "claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
+        "claude-3-opus": "claude-3-opus-20240229",
+
+        # openai
+        "gpt-4o": "chatgpt-4o-latest",
+        "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+        "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
+        "gpt-3.5-turbo": "gpt-3.5-turbo-1106",
+
+        # meta-llama
        "llama-3-70b": "llama-3-70b-chat",
        "llama-3-70b": "llama-3-70b-chat-turbo",
-        "llama-3-70b": "llama-3-70b-chat-lite",
        "llama-3-8b": "llama-3-8b-chat",
        "llama-3-8b": "llama-3-8b-chat-turbo",
+        "llama-3-70b": "llama-3-70b-chat-lite",
        "llama-3-8b": "llama-3-8b-chat-lite",
+        "llama-2-13b": "llama-2-13b-chat",
        "llama-3.1-405b": "llama-3.1-405b-turbo",
        "llama-3.1-70b": "llama-3.1-70b-turbo",
        "llama-3.1-8b": "llama-3.1-8b-turbo",
+        "llamaguard-2-8b": "LlamaGuard-2-8b",
+        "llamaguard-7b": "Llama-Guard-7b",
+        "llama-3.2-90b": "Llama-3.2-90B-Vision-Instruct-Turbo",
+
+        # mistral-ai
        "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
        "mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1",
        "mistral-7b": "Mistral-7B-Instruct-v0.1",
        "mistral-7b": "Mistral-7B-Instruct-v0.2",
        "mistral-7b": "Mistral-7B-Instruct-v0.3",
-        "mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
+
+        # qwen
+        "qwen-1.5-7b": "Qwen1.5-7B-Chat",
+        "qwen-1.5-14b": "Qwen1.5-14B-Chat",
        "qwen-1.5-72b": "Qwen1.5-72B-Chat",
        "qwen-1.5-110b": "Qwen1.5-110B-Chat",
        "qwen-2-72b": "Qwen2-72B-Instruct",
+
+        # google
        "gemma-2b": "gemma-2b-it",
-        "gemma-2b-9b": "gemma-2-9b-it",
-        "gemma-2b-27b": "gemma-2-27b-it",
+        "gemma-2-9b": "gemma-2-9b-it",
+        "gemma-2-27b": "gemma-2-27b-it",
+
+        # gemini
+        "gemini-flash": "gemini-1.5-flash",
+        "gemini-pro": "gemini-1.5-pro",
+
+        # deepseek-ai
        "deepseek": "deepseek-llm-67b-chat",
+
+        # NousResearch
+        "mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
        "yi-34b": "Nous-Hermes-2-Yi-34B",
+
+        # microsoft
        "wizardlm-2-8x22b": "WizardLM-2-8x22B",
-        "solar-10-7b": "SOLAR-10.7B-Instruct-v1.0",
-        "sh-n-7b": "StripedHyena-Nous-7B",
-        "sparkdesk-v1.1": "sparkdesk",
-        # Other models
-        "gpt-4o": "chatgpt-4o-latest",
-        "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
-        "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
-        "gpt-3.5-turbo": "gpt-3.5-turbo-1106",
-        "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
-        "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
-        "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
-        "gemini-flash": "gemini-1.5-flash",
-        "gemini-pro": "gemini-1.5-pro",
+        # upstage
+        "solar-10.7b": "SOLAR-10.7B-Instruct-v1.0",
-        # Image models
-        "dalle-3": "dall-e-3",
+        # Gryphe
+        "mythomax-l2-13b": "MythoMax-L2-13b",
    }
    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases.get(model, cls.default_model)
+        else:
+            return cls.default_model
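+    # Example (illustrative): alias resolution and fallback.
+    #   Airforce.get_model("claude-3-haiku")  -> "claude-3-haiku-20240307"
+    #   Airforce.get_model("no-such-model")   -> "llama-3-70b-chat" (default_model)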
+
+    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
+        seed: int = None,
+        size: str = "1:1",
+        stream: bool = False,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
-
+
+        # If the model is an image model, use the image API
+        if model in cls.image_models:
+            async for result in cls._generate_image(model, messages, proxy, seed, size):
+                yield result
+        # If the model is a text model, use the text API
+        elif model in cls.text_models:
+            async for result in cls._generate_text(model, messages, proxy, stream):
+                yield result
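+        # e.g. model="flux"   -> routed to _generate_image
+        #      model="gpt-4o" -> routed to _generate_text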
+
    @classmethod
+    async def _generate_image(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        seed: int = None,
+        size: str = "1:1",
+        **kwargs
+    ) -> AsyncResult:
        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/json",
-            "origin": "https://api.airforce",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36",
-            "authorization": "Bearer null",
            "cache-control": "no-cache",
-            "pragma": "no-cache",
-            "priority": "u=1, i",
-            "referer": "https://llmplayground.net/",
-            "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "cross-site",
+            "origin": "https://llmplayground.net",
+            "user-agent": "Mozilla/5.0"
        }
-        if model in cls.image_models:
-            async for item in cls.generate_image(model, messages, headers, proxy, **kwargs):
-                yield item
-        else:
-            async for item in cls.generate_text(model, messages, headers, proxy, **kwargs):
-                yield item
+        if seed is None:
+            seed = random.randint(0, 100000)
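+        # The seed is forwarded as a query parameter below; re-sending the same
+        # seed and prompt is intended to reproduce an image (API behavior assumed).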
-    @classmethod
-    async def generate_text(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
-        async with ClientSession() as session:
-            data = {
-                "messages": [{"role": "user", "content": message['content']} for message in messages],
+        # Assume the first message is the prompt for the image
+        prompt = messages[0]['content']
+
+        async with ClientSession(headers=headers) as session:
+            params = {
                "model": model,
-                "max_tokens": kwargs.get('max_tokens', 4096),
-                "temperature": kwargs.get('temperature', 1),
-                "top_p": kwargs.get('top_p', 1),
-                "stream": True
+                "prompt": prompt,
+                "size": size,
+                "seed": seed
            }
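+            # Illustrative: the request below becomes e.g.
+            #   GET https://api.airforce/imagine2?model=flux&prompt=...&size=1:1&seed=42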
+            async with session.get(f"{cls.image_api_endpoint}", params=params, proxy=proxy) as response:
+                response.raise_for_status()
+                content_type = response.headers.get('Content-Type', '').lower()
-            try:
-                async with session.post(cls.text_api_endpoint, json=data, headers=headers, proxy=proxy) as response:
-                    response.raise_for_status()
-                    async for line in response.content:
-                        if line:
-                            line = line.decode('utf-8').strip()
-                            if line.startswith("data: "):
-                                if line == "data: [DONE]":
-                                    break
-                                try:
-                                    data = json.loads(line[6:])
-                                    if 'choices' in data and len(data['choices']) > 0:
-                                        delta = data['choices'][0].get('delta', {})
-                                        if 'content' in delta:
-                                            content = delta['content']
-                                            if "One message exceeds the 1000chars per message limit" in content:
-                                                raise ResponseStatusError(
-                                                    "Message too long",
-                                                    400,
-                                                    "Please try a shorter message."
-                                                )
-                                            yield content
-                                except json.JSONDecodeError:
-                                    continue
-            except ResponseStatusError as e:
-                raise e
-            except Exception as e:
-                raise ResponseStatusError(str(e), 500, "An unexpected error occurred")
+                if 'application/json' in content_type:
+                    async for chunk in response.content.iter_chunked(1024):
+                        if chunk:
+                            yield chunk.decode('utf-8')
+                elif 'image' in content_type:
+                    image_data = b""
+                    async for chunk in response.content.iter_chunked(1024):
+                        if chunk:
+                            image_data += chunk
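+                    # NOTE: image_data is collected but unused; the result is exposed as a
+                    # reconstructed URL below, with prompt interpolated unencoded (prompts
+                    # with spaces or special characters would need urllib.parse.quote).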
+                    image_url = f"{cls.image_api_endpoint}?model={model}&prompt={prompt}&size={size}&seed={seed}"
+                    alt_text = f"Generated image for prompt: {prompt}"
+                    yield ImageResponse(images=image_url, alt=alt_text)
    @classmethod
-    async def generate_image(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
-        prompt = messages[-1]['content'] if messages else ""
-        params = {
-            "prompt": prompt,
-            "size": kwargs.get("size", "1:1"),
-            "seed": kwargs.get("seed"),
-            "model": model
+    async def _generate_text(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        stream: bool = False,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "authorization": "Bearer missing api key",
+            "content-type": "application/json",
+            "user-agent": "Mozilla/5.0"
        }
-        params = {k: v for k, v in params.items() if v is not None}
-        try:
-            async with ClientSession(headers=headers) as session:
-                async with session.get(cls.image_api_endpoint, params=params, proxy=proxy) as response:
+        async with ClientSession(headers=headers) as session:
+            formatted_prompt = cls._format_messages(messages)
+            prompt_parts = split_long_message(formatted_prompt)
+            full_response = ""
+
+            for part in prompt_parts:
+                data = {
+                    "messages": [{"role": "user", "content": part}],
+                    "model": model,
+                    "max_tokens": 4096,
+                    "temperature": 1,
+                    "top_p": 1,
+                    "stream": stream
+                }
+                async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
                    response.raise_for_status()
-                    content = await response.read()
-
-                    if response.content_type.startswith('image/'):
-                        image_url = str(response.url)
-                        yield ImageResponse(image_url, prompt)
+                    part_response = ""
+                    if stream:
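+                        # Streamed responses arrive as OpenAI-style SSE lines, e.g.
+                        #   data: {"choices": [{"delta": {"content": "Hi"}}]}
+                        # terminated by "data: [DONE]".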
+                        async for line in response.content:
+                            if line:
+                                line = line.decode('utf-8').strip()
+                                if line.startswith("data: ") and line != "data: [DONE]":
+                                    json_data = json.loads(line[6:])
+                                    content = json_data['choices'][0]['delta'].get('content', '')
+                                    part_response += content
                    else:
-                        try:
-                            text = content.decode('utf-8', errors='ignore')
-                            raise ResponseStatusError("Image generation failed", response.status, text)
-                        except Exception as decode_error:
-                            raise ResponseStatusError("Decoding error", 500, str(decode_error))
-        except ClientResponseError as e:
-            raise ResponseStatusError(f"HTTP {e.status}", e.status, e.message)
-        except Exception as e:
-            raise ResponseStatusError("Unexpected error", 500, str(e))
+                        json_data = await response.json()
+                        content = json_data['choices'][0]['message']['content']
+                        part_response = content
+
+                    full_response += part_response
+            yield full_response
+
+    @classmethod
+    def _format_messages(cls, messages: Messages) -> str:
+        """Formats messages for text generation."""
+        return " ".join([msg['content'] for msg in messages])