From bb9132bcb42a5f720398b65c721cc77957555863 Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Mon, 9 Dec 2024 15:52:25 +0000
Subject: Updating provider documentation and small fixes in providers (#2469)
* refactor(g4f/Provider/Airforce.py): improve model handling and filtering
- Add hidden_models set to exclude specific models
- Add evil alias for uncensored model handling
- Extend filtering for model-specific response tokens
- Add response buffering for streamed content
- Update model fetching with error handling
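A minimal sketch of the filtering and buffering patterns described above; the helper names, endpoint handling, and stop token are illustrative assumptions, not Airforce's actual code:

```python
import requests

hidden_models = {"Flux-1.1-Pro"}            # excluded from the public model list
model_aliases = {"evil": "any-uncensored"}  # alias for the uncensored model

def fetch_models(url: str) -> list[str]:
    """Fetch model IDs, returning an empty list instead of raising on errors."""
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        models = [m["id"] for m in response.json().get("data", [])]
    except (requests.RequestException, ValueError, KeyError):
        return []
    return [m for m in models if m not in hidden_models]

def filter_stream(chunks, stop_token: str = "</s>"):
    """Buffer streamed chunks so a stop token split across chunks is still caught."""
    buffer = ""
    for chunk in chunks:
        buffer += chunk
        if stop_token in buffer:
            yield buffer.split(stop_token, 1)[0]
            return
        if len(buffer) > len(stop_token):
            # Hold back a tail the length of the token, in case a chunk ends mid-token
            yield buffer[:-len(stop_token)]
            buffer = buffer[-len(stop_token):]
    if buffer:
        yield buffer
```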
* refactor(g4f/Provider/Blackbox.py): improve caching and model handling
- Add caching system for validated values with file-based storage
- Rename 'flux' model to 'ImageGeneration' and update references
- Add temperature, top_p and max_tokens parameters to generator
- Simplify HTTP headers and remove redundant options
- Add model alias mapping for ImageGeneration
- Add file system utilities for cache management
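Roughly, the file-based cache works like this; the path and key name are illustrative (the real storage lives under g4f/Provider/.cache, per the gitignore change below):

```python
import json
from pathlib import Path
from typing import Optional

CACHE_FILE = Path(__file__).parent / ".cache" / "blackbox.json"

def load_cached_value() -> Optional[str]:
    """Return the cached validated value, or None on a miss or corrupt file."""
    if CACHE_FILE.exists():
        try:
            return json.loads(CACHE_FILE.read_text()).get("validated_value")
        except json.JSONDecodeError:
            return None
    return None

def save_cached_value(value: str) -> None:
    """Persist the validated value, creating the cache directory if needed."""
    CACHE_FILE.parent.mkdir(parents=True, exist_ok=True)
    CACHE_FILE.write_text(json.dumps({"validated_value": value}))
```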
* feat(g4f/Provider/RobocodersAPI.py): add caching and error handling
- Add file-based caching system for access tokens and sessions
- Add robust error handling with specific error messages
- Add automatic dialog continuation on resource limits
- Add HTML parsing with BeautifulSoup for token extraction
- Add debug logging for error tracking
- Add timeout configuration for API requests
* refactor(g4f/Provider/DarkAI.py): update DarkAI default model and aliases
- Change default model from llama-3-405b to llama-3-70b
- Remove llama-3-405b from supported models list
- Remove llama-3.1-405b from model aliases
* feat(g4f/Provider/Blackbox2.py): add image generation support
- Add image model 'flux' with dedicated API endpoint
- Refactor generator to support both text and image outputs
- Extract headers into reusable static method
- Add type hints for AsyncGenerator return type
- Split generation logic into _generate_text and _generate_image methods
- Add ImageResponse handling for image generation results
BREAKING CHANGE: create_async_generator now returns AsyncGenerator instead of AsyncResult
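Callers now iterate the generator and branch on the chunk type; a sketch of the new usage (the prompt is illustrative):

```python
import asyncio

from g4f.Provider import Blackbox2
from g4f.image import ImageResponse

async def main():
    generator = Blackbox2.create_async_generator(
        model="flux", messages=[{"role": "user", "content": "a red fox, watercolor"}]
    )
    async for chunk in generator:
        if isinstance(chunk, ImageResponse):
            print("image:", chunk)  # image generation result
        else:
            print(chunk, end="")    # streamed text

asyncio.run(main())
```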
* refactor(g4f/Provider/ChatGptEs.py): update ChatGptEs model configuration
- Update models list to include gpt-3.5-turbo
- Remove chatgpt-4o-latest from supported models
- Remove model_aliases mapping for gpt-4o
* feat(g4f/Provider/DeepInfraChat.py): add Accept-Language header support
- Add Accept-Language header for internationalization
- Maintain existing header configuration
- Improve request compatibility with language preferences
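The substance of the change is one extra entry in the request headers; the value below is a typical browser default, assumed here for illustration:

```python
headers = {
    "Accept-Language": "en-US,en;q=0.9",  # new: advertise a language preference
    "Content-Type": "application/json",
    # ...existing headers unchanged
}
```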
* refactor(g4f/Provider/needs_auth/Gemini.py): add ProviderModelMixin inheritance
- Add ProviderModelMixin to class inheritance
- Import ProviderModelMixin from base_provider
- Move BaseConversation import to base_provider imports
* refactor(g4f/Provider/Liaobots.py): update model details and aliases
- Add version suffix to o1 model IDs
- Update model aliases for o1-preview and o1-mini
- Standardize version format across model definitions
* refactor(g4f/Provider/PollinationsAI.py): enhance model support and generation
- Split generation logic into dedicated image/text methods
- Add additional text models including sur and claude
- Add width/height parameters for image generation
- Add model existence validation
- Add hasattr checks for model lists initialization
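The initialization guard looks roughly like this; the class name and placeholder list are illustrative, and the real HTTP fetch appears in the PollinationsAI diff below:

```python
class ModelsMixin:
    @classmethod
    def get_models(cls):
        # hasattr guard: the attribute may not exist on the class yet
        if not hasattr(cls, "image_models"):
            cls.image_models = []
        if not cls.image_models:
            cls.image_models = ["flux", "midjourney"]  # stands in for the fetched list
        return cls.image_models

    @classmethod
    def get_model(cls, model: str) -> str:
        # model existence validation: fall back to the first known model
        models = cls.get_models()
        return model if model in models else models[0]
```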
* chore(gitignore): add provider cache directory
- Add g4f/Provider/.cache to gitignore patterns
* refactor(g4f/Provider/ReplicateHome.py): update model configuration
- Update default model to gemma-2b-it
- Add default_image_model configuration
- Remove llava-13b from supported models
- Simplify request headers
* feat(g4f/models.py): expand provider and model support
- Add new providers DarkAI and PollinationsAI
- Add new models for Mistral, Flux and image generation
- Update provider lists for existing models
- Add P1 and Evil models with experimental providers
BREAKING CHANGE: Remove llava-13b model support
* refactor(Airforce): Update type hint for split_message return
- Change return type of 'split_message' from 'list[str]' to 'List[str]' for consistency with import.
- Maintain overall functionality and structure of the class.
- Ensure compatibility with type hinting standards in Python.
* refactor(g4f/Provider/Airforce.py): Update type hint for split_message return
- Change return type of 'split_message' from 'list[str]' to 'List[str]' for consistency with import.
- Maintain overall functionality and structure of the 'Airforce' class.
- Ensure compatibility with type hinting standards in Python.
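In isolation, the change keeps the annotation consistent with the module's typing import rather than mixing in the PEP 585 builtin form; the function body here is illustrative:

```python
from typing import List

def split_message(message: str, chunk_size: int = 1000) -> List[str]:
    """typing.List matches the module's existing import; the builtin list[str]
    also works on Python 3.9+, but mixing both styles is inconsistent."""
    return [message[i:i + chunk_size] for i in range(0, len(message), chunk_size)]
```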
* feat(g4f/Provider/RobocodersAPI.py): Add support for optional BeautifulSoup dependency
- Introduce a check for the BeautifulSoup library and handle its absence gracefully.
- Raise a MissingRequirementsError if BeautifulSoup is not installed, prompting the user to install it.
- Remove direct import of BeautifulSoup to avoid import errors when the library is missing.
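The optional-dependency pattern, mirroring the code in g4f/Provider/not_working/RobocodersAPI.py further down (the parse helper is a simplified illustration):

```python
from g4f.errors import MissingRequirementsError

try:
    from bs4 import BeautifulSoup
    HAS_BEAUTIFULSOUP = True
except ImportError:
    HAS_BEAUTIFULSOUP = False
    BeautifulSoup = None

def parse_token(html: str) -> str:
    # Fail at use time, not import time, when bs4 is missing
    if not HAS_BEAUTIFULSOUP:
        raise MissingRequirementsError('Install "beautifulsoup4" package | pip install -U beautifulsoup4')
    soup = BeautifulSoup(html, "html.parser")
    element = soup.find("pre", id="token")
    return element.text.strip() if element else ""
```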
* fix: Updating provider documentation and small fixes in providers
* Disabled the provider (RobocodersAPI)
* Fix: Conflicting file g4f/models.py
* Update g4f/models.py g4f/Provider/Airforce.py
* Update docs/providers-and-models.md g4f/models.py g4f/Provider/Airforce.py g4f/Provider/PollinationsAI.py
* Update docs/providers-and-models.md
* Update .gitignore
* Update g4f/models.py
* Update g4f/Provider/PollinationsAI.py
---------
Co-authored-by: kqlio67 <>
---
g4f/Provider/Airforce.py | 6 +-
g4f/Provider/AmigoChat.py | 2 -
g4f/Provider/Blackbox.py | 6 +-
g4f/Provider/ChatGptEs.py | 2 +-
g4f/Provider/DDG.py | 1 +
g4f/Provider/DeepInfraChat.py | 1 +
g4f/Provider/Flux.py | 5 +-
g4f/Provider/FreeGpt.py | 2 +
g4f/Provider/GizAI.py | 2 +-
g4f/Provider/Liaobots.py | 2 +-
g4f/Provider/MagickPen.py | 87 ----------
g4f/Provider/PerplexityLabs.py | 5 +-
g4f/Provider/PollinationsAI.py | 16 +-
g4f/Provider/Reka.py | 148 -----------------
g4f/Provider/RobocodersAPI.py | 238 --------------------------
g4f/Provider/RubiksAI.py | 3 +-
g4f/Provider/Upstage.py | 91 ----------
g4f/Provider/__init__.py | 10 +-
g4f/Provider/needs_auth/Gemini.py | 8 +
g4f/Provider/needs_auth/GeminiPro.py | 10 +-
g4f/Provider/needs_auth/GithubCopilot.py | 6 +-
g4f/Provider/needs_auth/HuggingChat.py | 12 +-
g4f/Provider/needs_auth/HuggingFace.py | 2 +-
g4f/Provider/needs_auth/HuggingFace2.py | 28 ----
g4f/Provider/needs_auth/HuggingFaceAPI.py | 28 ++++
g4f/Provider/needs_auth/Poe.py | 4 +-
g4f/Provider/needs_auth/Raycast.py | 2 -
g4f/Provider/needs_auth/Reka.py | 148 +++++++++++++++++
g4f/Provider/needs_auth/Theb.py | 5 +-
g4f/Provider/needs_auth/__init__.py | 3 +-
g4f/Provider/not_working/MagickPen.py | 87 ++++++++++
g4f/Provider/not_working/RobocodersAPI.py | 238 ++++++++++++++++++++++++++
g4f/Provider/not_working/Upstage.py | 91 ++++++++++
g4f/Provider/not_working/__init__.py | 5 +-
g4f/gui/client/index.html | 4 +-
g4f/models.py | 266 +++++++++++-------------------
36 files changed, 763 insertions(+), 811 deletions(-)
delete mode 100644 g4f/Provider/MagickPen.py
delete mode 100644 g4f/Provider/Reka.py
delete mode 100755 g4f/Provider/RobocodersAPI.py
delete mode 100644 g4f/Provider/Upstage.py
delete mode 100644 g4f/Provider/needs_auth/HuggingFace2.py
create mode 100644 g4f/Provider/needs_auth/HuggingFaceAPI.py
create mode 100644 g4f/Provider/needs_auth/Reka.py
create mode 100644 g4f/Provider/not_working/MagickPen.py
create mode 100755 g4f/Provider/not_working/RobocodersAPI.py
create mode 100644 g4f/Provider/not_working/Upstage.py
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 474c9f88..442ee9d4 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -42,22 +42,24 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
hidden_models = {"Flux-1.1-Pro"}
- additional_models_imagine = ["flux-1.1-pro", "dall-e-3"]
+ additional_models_imagine = ["flux-1.1-pro", "midjourney", "dall-e-3"]
model_aliases = {
# Alias mappings for models
+ "gpt-4": "gpt-4o",
"openchat-3.5": "openchat-3.5-0106",
"deepseek-coder": "deepseek-coder-6.7b-instruct",
"hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
"hermes-2-pro": "hermes-2-pro-mistral-7b",
"openhermes-2.5": "openhermes-2.5-mistral-7b",
"lfm-40b": "lfm-40b-moe",
- "discolm-german-7b": "discolm-german-7b-v1",
+ "german-7b": "discolm-german-7b-v1",
"llama-2-7b": "llama-2-7b-chat-int8",
"llama-3.1-70b": "llama-3.1-70b-turbo",
"neural-7b": "neural-chat-7b-v3-1",
"zephyr-7b": "zephyr-7b-beta",
"evil": "any-uncensored",
+ "sdxl": "stable-diffusion-xl-lightning",
"sdxl": "stable-diffusion-xl-base",
"flux-pro": "flux-1.1-pro",
"llama-3.1-8b": "llama-3.1-8b-chat"
diff --git a/g4f/Provider/AmigoChat.py b/g4f/Provider/AmigoChat.py
index 48dcfd74..bb732f24 100644
--- a/g4f/Provider/AmigoChat.py
+++ b/g4f/Provider/AmigoChat.py
@@ -108,7 +108,6 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
"mythomax-13b": "Gryphe/MythoMax-L2-13b",
"mixtral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
- "mistral-tiny": "mistralai/mistral-tiny",
"mistral-nemo": "mistralai/mistral-nemo",
"deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat",
@@ -127,7 +126,6 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
### image ###
- "flux-realism": "flux-realism",
"flux-dev": "flux/dev",
}
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 537a3ea8..fec0a8a9 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -98,12 +98,12 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
models = list(dict.fromkeys([default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))
model_aliases = {
- "gpt-4": "blackboxai",
+ ### chat ###
"gpt-4": "gpt-4o",
- "gpt-4o-mini": "gpt-4o",
- "gpt-3.5-turbo": "blackboxai",
"gemini-flash": "gemini-1.5-flash",
"claude-3.5-sonnet": "claude-sonnet-3.5",
+
+ ### image ###
"flux": "ImageGeneration",
}
diff --git a/g4f/Provider/ChatGptEs.py b/g4f/Provider/ChatGptEs.py
index ef8441a8..88c4a855 100644
--- a/g4f/Provider/ChatGptEs.py
+++ b/g4f/Provider/ChatGptEs.py
@@ -19,7 +19,7 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
supports_message_history = True
default_model = 'gpt-4o'
- models = ['gpt-3.5-turbo', 'gpt-4o', 'gpt-4o-mini']
+ models = ['gpt-4', 'gpt-4o', 'gpt-4o-mini']
@classmethod
def get_model(cls, model: str) -> str:
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
index 42c18dfe..070a7db2 100644
--- a/g4f/Provider/DDG.py
+++ b/g4f/Provider/DDG.py
@@ -30,6 +30,7 @@ class Conversation(BaseConversation):
self.model = model
class DDG(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "DuckDuckGo AI Chat"
url = "https://duckduckgo.com/aichat"
api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
working = True
diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py
index 87aeb790..6874b023 100644
--- a/g4f/Provider/DeepInfraChat.py
+++ b/g4f/Provider/DeepInfraChat.py
@@ -9,6 +9,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://deepinfra.com/chat"
api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
+
working = True
supports_stream = True
supports_system_message = True
diff --git a/g4f/Provider/Flux.py b/g4f/Provider/Flux.py
index 7a00e75a..b211ecef 100644
--- a/g4f/Provider/Flux.py
+++ b/g4f/Provider/Flux.py
@@ -8,13 +8,14 @@ from ..image import ImageResponse, ImagePreview
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class Flux(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Flux Provider"
+ label = "HuggingSpace (black-forest-labs-flux-1-dev)"
url = "https://black-forest-labs-flux-1-dev.hf.space"
api_endpoint = "/gradio_api/call/infer"
working = True
default_model = 'flux-dev'
models = [default_model]
image_models = [default_model]
+ model_aliases = {"flux-dev": "flux-1-dev"}
@classmethod
async def create_async_generator(
@@ -55,4 +56,4 @@ class Flux(AsyncGeneratorProvider, ProviderModelMixin):
yield ImagePreview(url, prompt)
else:
yield ImageResponse(url, prompt)
- break
\ No newline at end of file
+ break
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index b38ff428..88189a16 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -21,9 +21,11 @@ RATE_LIMIT_ERROR_MESSAGE = "当前地区当日额度已消耗完"
class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://freegptsnav.aifree.site"
+
working = True
supports_message_history = True
supports_system_message = True
+
default_model = 'gemini-pro'
@classmethod
diff --git a/g4f/Provider/GizAI.py b/g4f/Provider/GizAI.py
index f00b344e..be2fd295 100644
--- a/g4f/Provider/GizAI.py
+++ b/g4f/Provider/GizAI.py
@@ -10,6 +10,7 @@ from .helper import format_prompt
class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://app.giz.ai/assistant"
api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
+
working = True
supports_stream = False
supports_system_message = True
@@ -17,7 +18,6 @@ class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
default_model = 'chat-gemini-flash'
models = [default_model]
-
model_aliases = {"gemini-flash": "chat-gemini-flash",}
@classmethod
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index bf9e79d4..1e8131f8 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -143,9 +143,9 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
working = True
supports_message_history = True
supports_system_message = True
+
default_model = "gpt-4o-2024-08-06"
models = list(models.keys())
-
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-free",
"gpt-4o": "gpt-4o-2024-08-06",
diff --git a/g4f/Provider/MagickPen.py b/g4f/Provider/MagickPen.py
deleted file mode 100644
index 1d084a2f..00000000
--- a/g4f/Provider/MagickPen.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import hashlib
-import time
-import random
-import re
-import json
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://magickpen.com"
- api_endpoint = "https://api.magickpen.com/ask"
- working = False
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- default_model = 'gpt-4o-mini'
- models = ['gpt-4o-mini']
-
- @classmethod
- async def fetch_api_credentials(cls) -> tuple:
- url = "https://magickpen.com/_nuxt/bf709a9ce19f14e18116.js"
- async with ClientSession() as session:
- async with session.get(url) as response:
- text = await response.text()
-
- pattern = r'"X-API-Secret":"(\w+)"'
- match = re.search(pattern, text)
- X_API_SECRET = match.group(1) if match else None
-
- timestamp = str(int(time.time() * 1000))
- nonce = str(random.random())
-
- s = ["TGDBU9zCgM", timestamp, nonce]
- s.sort()
- signature_string = ''.join(s)
- signature = hashlib.md5(signature_string.encode()).hexdigest()
-
- pattern = r'secret:"(\w+)"'
- match = re.search(pattern, text)
- secret = match.group(1) if match else None
-
- if X_API_SECRET and timestamp and nonce and secret:
- return X_API_SECRET, signature, timestamp, nonce, secret
- else:
- raise Exception("Unable to extract all the necessary data from the JavaScript file.")
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
- X_API_SECRET, signature, timestamp, nonce, secret = await cls.fetch_api_credentials()
-
- headers = {
- 'accept': 'application/json, text/plain, */*',
- 'accept-language': 'en-US,en;q=0.9',
- 'content-type': 'application/json',
- 'nonce': nonce,
- 'origin': cls.url,
- 'referer': f"{cls.url}/",
- 'secret': secret,
- 'signature': signature,
- 'timestamp': timestamp,
- 'x-api-secret': X_API_SECRET,
- }
-
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- payload = {
- 'query': prompt,
- 'turnstileResponse': '',
- 'action': 'verify'
- }
- async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
- response.raise_for_status()
- async for chunk in response.content:
- if chunk:
- yield chunk.decode()
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index b3119cb6..3a6f0d39 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -29,6 +29,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
"sonar-online": "sonar-small-128k-online",
"sonar-chat": "llama-3.1-sonar-large-128k-chat",
"sonar-chat": "llama-3.1-sonar-small-128k-chat",
+ "llama-3.3-70b": "llama-3.3-70b-instruct",
"llama-3.1-8b": "llama-3.1-8b-instruct",
"llama-3.1-70b": "llama-3.1-70b-instruct",
"lfm-40b": "/models/LiquidCloud",
@@ -78,9 +79,9 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
assert(await ws.receive_str())
assert(await ws.receive_str() == "6")
message_data = {
- "version": "2.5",
+ "version": "2.13",
"source": "default",
- "model": cls.get_model(model),
+ "model": model,
"messages": messages
}
await ws.send_str("42" + json.dumps(["perplexity_labs", message_data]))
diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index 18349490..9520674a 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -13,7 +13,7 @@ from .needs_auth.OpenaiAPI import OpenaiAPI
from .helper import format_prompt
class PollinationsAI(OpenaiAPI):
- label = "Pollinations.AI"
+ label = "Pollinations AI"
url = "https://pollinations.ai"
working = True
@@ -22,28 +22,30 @@ class PollinationsAI(OpenaiAPI):
default_model = "openai"
- additional_models_image = ["unity", "midijourney", "rtist"]
+ additional_models_image = ["midjourney", "dall-e-3"]
additional_models_text = ["sur", "sur-mistral", "claude"]
model_aliases = {
"gpt-4o": "openai",
"mistral-nemo": "mistral",
"llama-3.1-70b": "llama", #
- "gpt-3.5-turbo": "searchgpt",
"gpt-4": "searchgpt",
- "gpt-3.5-turbo": "claude",
"gpt-4": "claude",
"qwen-2.5-coder-32b": "qwen-coder",
"claude-3.5-sonnet": "sur",
}
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
+ }
+
@classmethod
def get_models(cls):
if not hasattr(cls, 'image_models'):
cls.image_models = []
if not cls.image_models:
url = "https://image.pollinations.ai/models"
- response = requests.get(url)
+ response = requests.get(url, headers=cls.headers)
raise_for_status(response)
cls.image_models = response.json()
cls.image_models.extend(cls.additional_models_image)
@@ -51,7 +53,7 @@ class PollinationsAI(OpenaiAPI):
cls.models = []
if not cls.models:
url = "https://text.pollinations.ai/models"
- response = requests.get(url)
+ response = requests.get(url, headers=cls.headers)
raise_for_status(response)
cls.models = [model.get("name") for model in response.json()]
cls.models.extend(cls.image_models)
@@ -94,7 +96,7 @@ class PollinationsAI(OpenaiAPI):
@classmethod
async def _generate_text(cls, model: str, messages: Messages, api_base: str, api_key: str = None, proxy: str = None, **kwargs):
if api_key is None:
- async with ClientSession(connector=get_connector(proxy=proxy)) as session:
+ async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session:
prompt = format_prompt(messages)
async with session.get(f"https://text.pollinations.ai/{quote(prompt)}?model={quote(model)}") as response:
await raise_for_status(response)
diff --git a/g4f/Provider/Reka.py b/g4f/Provider/Reka.py
deleted file mode 100644
index 2306149e..00000000
--- a/g4f/Provider/Reka.py
+++ /dev/null
@@ -1,148 +0,0 @@
-from __future__ import annotations
-
-import os, requests, time, json
-from ..typing import CreateResult, Messages, ImageType
-from .base_provider import AbstractProvider
-from ..cookies import get_cookies
-from ..image import to_bytes
-
-class Reka(AbstractProvider):
- url = "https://chat.reka.ai/"
- working = True
- needs_auth = True
- supports_stream = True
- default_vision_model = "reka"
- cookies = {}
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool,
- proxy: str = None,
- api_key: str = None,
- image: ImageType = None,
- **kwargs
- ) -> CreateResult:
- cls.proxy = proxy
-
- if not api_key:
- cls.cookies = get_cookies("chat.reka.ai")
- if not cls.cookies:
- raise ValueError("No cookies found for chat.reka.ai")
- elif "appSession" not in cls.cookies:
- raise ValueError("No appSession found in cookies for chat.reka.ai, log in or provide bearer_auth")
- api_key = cls.get_access_token(cls)
-
- conversation = []
- for message in messages:
- conversation.append({
- "type": "human",
- "text": message["content"],
- })
-
- if image:
- image_url = cls.upload_image(cls, api_key, image)
- conversation[-1]["image_url"] = image_url
- conversation[-1]["media_type"] = "image"
-
- headers = {
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'authorization': f'Bearer {api_key}',
- 'cache-control': 'no-cache',
- 'content-type': 'application/json',
- 'origin': 'https://chat.reka.ai',
- 'pragma': 'no-cache',
- 'priority': 'u=1, i',
- 'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
- }
-
- json_data = {
- 'conversation_history': conversation,
- 'stream': True,
- 'use_search_engine': False,
- 'use_code_interpreter': False,
- 'model_name': 'reka-core',
- 'random_seed': int(time.time() * 1000),
- }
-
- tokens = ''
-
- response = requests.post('https://chat.reka.ai/api/chat',
- cookies=cls.cookies, headers=headers, json=json_data, proxies=cls.proxy, stream=True)
-
- for completion in response.iter_lines():
- if b'data' in completion:
- token_data = json.loads(completion.decode('utf-8')[5:])['text']
-
- yield (token_data.replace(tokens, ''))
-
- tokens = token_data
-
- def upload_image(cls, access_token, image: ImageType) -> str:
- boundary_token = os.urandom(8).hex()
-
- headers = {
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'cache-control': 'no-cache',
- 'authorization': f'Bearer {access_token}',
- 'content-type': f'multipart/form-data; boundary=----WebKitFormBoundary{boundary_token}',
- 'origin': 'https://chat.reka.ai',
- 'pragma': 'no-cache',
- 'priority': 'u=1, i',
- 'referer': 'https://chat.reka.ai/chat/hPReZExtDOPvUfF8vCPC',
- 'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
- }
-
- image_data = to_bytes(image)
-
- boundary = f'----WebKitFormBoundary{boundary_token}'
- data = f'--{boundary}\r\nContent-Disposition: form-data; name="image"; filename="image.png"\r\nContent-Type: image/png\r\n\r\n'
- data += image_data.decode('latin-1')
- data += f'\r\n--{boundary}--\r\n'
-
- response = requests.post('https://chat.reka.ai/api/upload-image',
- cookies=cls.cookies, headers=headers, proxies=cls.proxy, data=data.encode('latin-1'))
-
- return response.json()['media_url']
-
- def get_access_token(cls):
- headers = {
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'cache-control': 'no-cache',
- 'pragma': 'no-cache',
- 'priority': 'u=1, i',
- 'referer': 'https://chat.reka.ai/chat',
- 'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
- }
-
- try:
- response = requests.get('https://chat.reka.ai/bff/auth/access_token',
- cookies=cls.cookies, headers=headers, proxies=cls.proxy)
-
- return response.json()['accessToken']
-
- except Exception as e:
- raise ValueError(f"Failed to get access token: {e}, refresh your cookies / log in into chat.reka.ai")
\ No newline at end of file
diff --git a/g4f/Provider/RobocodersAPI.py b/g4f/Provider/RobocodersAPI.py
deleted file mode 100755
index 3a94e271..00000000
--- a/g4f/Provider/RobocodersAPI.py
+++ /dev/null
@@ -1,238 +0,0 @@
-from __future__ import annotations
-
-import json
-import aiohttp
-from pathlib import Path
-
-try:
- from bs4 import BeautifulSoup
- HAS_BEAUTIFULSOUP = True
-except ImportError:
- HAS_BEAUTIFULSOUP = False
- BeautifulSoup = None
-
-from aiohttp import ClientTimeout
-from ..errors import MissingRequirementsError
-from ..typing import AsyncResult, Messages
-from ..cookies import get_cookies_dir
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-from .. import debug
-
-
-class RobocodersAPI(AsyncGeneratorProvider, ProviderModelMixin):
- label = "API Robocoders AI"
- url = "https://api.robocoders.ai/docs"
- api_endpoint = "https://api.robocoders.ai/chat"
- working = True
- supports_message_history = True
- default_model = 'GeneralCodingAgent'
- agent = [default_model, "RepoAgent", "FrontEndAgent"]
- models = [*agent]
-
- CACHE_DIR = Path(get_cookies_dir())
- CACHE_FILE = CACHE_DIR / "robocoders.json"
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
-
- timeout = ClientTimeout(total=600)
-
- async with aiohttp.ClientSession(timeout=timeout) as session:
- # Load or create access token and session ID
- access_token, session_id = await cls._get_or_create_access_and_session(session)
- if not access_token or not session_id:
- raise Exception("Failed to initialize API interaction")
-
- headers = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {access_token}"
- }
-
- prompt = format_prompt(messages)
-
- data = {
- "sid": session_id,
- "prompt": prompt,
- "agent": model
- }
-
- async with session.post(cls.api_endpoint, headers=headers, json=data, proxy=proxy) as response:
- if response.status == 401: # Unauthorized, refresh token
- cls._clear_cached_data()
- raise Exception("Unauthorized: Invalid token, please retry.")
- elif response.status == 422:
- raise Exception("Validation Error: Invalid input.")
- elif response.status >= 500:
- raise Exception(f"Server Error: {response.status}")
- elif response.status != 200:
- raise Exception(f"Unexpected Error: {response.status}")
-
- async for line in response.content:
- if line:
- try:
- # Decode bytes into a string
- line_str = line.decode('utf-8')
- response_data = json.loads(line_str)
-
- # Get the message from the 'args.content' or 'message' field
- message = (response_data.get('args', {}).get('content') or
- response_data.get('message', ''))
-
- if message:
- yield message
-
- # Check for reaching the resource limit
- if (response_data.get('action') == 'message' and
- response_data.get('args', {}).get('wait_for_response')):
- # Automatically continue the dialog
- continue_data = {
- "sid": session_id,
- "prompt": "continue",
- "agent": model
- }
- async with session.post(
- cls.api_endpoint,
- headers=headers,
- json=continue_data,
- proxy=proxy
- ) as continue_response:
- if continue_response.status == 200:
- async for continue_line in continue_response.content:
- if continue_line:
- try:
- continue_line_str = continue_line.decode('utf-8')
- continue_data = json.loads(continue_line_str)
- continue_message = (
- continue_data.get('args', {}).get('content') or
- continue_data.get('message', '')
- )
- if continue_message:
- yield continue_message
- except json.JSONDecodeError:
- debug.log(f"Failed to decode continue JSON: {continue_line}")
- except Exception as e:
- debug.log(f"Error processing continue response: {e}")
-
- except json.JSONDecodeError:
- debug.log(f"Failed to decode JSON: {line}")
- except Exception as e:
- debug.log(f"Error processing response: {e}")
-
- @staticmethod
- async def _get_or_create_access_and_session(session: aiohttp.ClientSession):
- RobocodersAPI.CACHE_DIR.mkdir(exist_ok=True) # Ensure cache directory exists
-
- # Load data from cache
- if RobocodersAPI.CACHE_FILE.exists():
- with open(RobocodersAPI.CACHE_FILE, "r") as f:
- data = json.load(f)
- access_token = data.get("access_token")
- session_id = data.get("sid")
-
- # Validate loaded data
- if access_token and session_id:
- return access_token, session_id
-
- # If data not valid, create new access token and session ID
- access_token = await RobocodersAPI._fetch_and_cache_access_token(session)
- session_id = await RobocodersAPI._create_and_cache_session(session, access_token)
- return access_token, session_id
-
- @staticmethod
- async def _fetch_and_cache_access_token(session: aiohttp.ClientSession) -> str:
- if not HAS_BEAUTIFULSOUP:
- raise MissingRequirementsError('Install "beautifulsoup4" package | pip install -U beautifulsoup4')
- return token
-
- url_auth = 'https://api.robocoders.ai/auth'
- headers_auth = {
- 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
- }
-
- async with session.get(url_auth, headers=headers_auth) as response:
- if response.status == 200:
- html = await response.text()
- soup = BeautifulSoup(html, 'html.parser')
- token_element = soup.find('pre', id='token')
- if token_element:
- token = token_element.text.strip()
-
- # Cache the token
- RobocodersAPI._save_cached_data({"access_token": token})
- return token
- return None
-
- @staticmethod
- async def _create_and_cache_session(session: aiohttp.ClientSession, access_token: str) -> str:
- url_create_session = 'https://api.robocoders.ai/create-session'
- headers_create_session = {
- 'Authorization': f'Bearer {access_token}'
- }
-
- async with session.get(url_create_session, headers=headers_create_session) as response:
- if response.status == 200:
- data = await response.json()
- session_id = data.get('sid')
-
- # Cache session ID
- RobocodersAPI._update_cached_data({"sid": session_id})
- return session_id
- elif response.status == 401:
- RobocodersAPI._clear_cached_data()
- raise Exception("Unauthorized: Invalid token during session creation.")
- elif response.status == 422:
- raise Exception("Validation Error: Check input parameters.")
- return None
-
- @staticmethod
- def _save_cached_data(new_data: dict):
- """Save new data to cache file"""
- RobocodersAPI.CACHE_DIR.mkdir(exist_ok=True)
- RobocodersAPI.CACHE_FILE.touch(exist_ok=True)
- with open(RobocodersAPI.CACHE_FILE, "w") as f:
- json.dump(new_data, f)
-
- @staticmethod
- def _update_cached_data(updated_data: dict):
- """Update existing cache data with new values"""
- data = {}
- if RobocodersAPI.CACHE_FILE.exists():
- with open(RobocodersAPI.CACHE_FILE, "r") as f:
- try:
- data = json.load(f)
- except json.JSONDecodeError:
- # If cache file is corrupted, start with empty dict
- data = {}
-
- data.update(updated_data)
- with open(RobocodersAPI.CACHE_FILE, "w") as f:
- json.dump(data, f)
-
- @staticmethod
- def _clear_cached_data():
- """Remove cache file"""
- try:
- if RobocodersAPI.CACHE_FILE.exists():
- RobocodersAPI.CACHE_FILE.unlink()
- except Exception as e:
- debug.log(f"Error clearing cache: {e}")
-
- @staticmethod
- def _get_cached_data() -> dict:
- """Get all cached data"""
- if RobocodersAPI.CACHE_FILE.exists():
- try:
- with open(RobocodersAPI.CACHE_FILE, "r") as f:
- return json.load(f)
- except json.JSONDecodeError:
- return {}
- return {}
diff --git a/g4f/Provider/RubiksAI.py b/g4f/Provider/RubiksAI.py
index 816ea60c..86d61564 100644
--- a/g4f/Provider/RubiksAI.py
+++ b/g4f/Provider/RubiksAI.py
@@ -16,6 +16,7 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Rubiks AI"
url = "https://rubiks.ai"
api_endpoint = "https://rubiks.ai/search/api/"
+
working = True
supports_stream = True
supports_system_message = True
@@ -127,4 +128,4 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
yield content
if web_search and sources:
- yield Sources(sources)
\ No newline at end of file
+ yield Sources(sources)
diff --git a/g4f/Provider/Upstage.py b/g4f/Provider/Upstage.py
deleted file mode 100644
index f6683c45..00000000
--- a/g4f/Provider/Upstage.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import json
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class Upstage(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://console.upstage.ai/playground/chat"
- api_endpoint = "https://ap-northeast-2.apistage.ai/v1/web/demo/chat/completions"
- working = False
- default_model = 'solar-pro'
- models = [
- 'upstage/solar-1-mini-chat',
- 'upstage/solar-1-mini-chat-ja',
- 'solar-pro',
- ]
- model_aliases = {
- "solar-mini": "upstage/solar-1-mini-chat",
- "solar-mini": "upstage/solar-1-mini-chat-ja",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "dnt": "1",
- "origin": "https://console.upstage.ai",
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": "https://console.upstage.ai/",
- "sec-ch-ua": '"Not?A_Brand";v="99", "Chromium";v="130"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "cross-site",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
- }
-
- async with ClientSession(headers=headers) as session:
- data = {
- "stream": True,
- "messages": [{"role": "user", "content": format_prompt(messages)}],
- "model": model
- }
-
- async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
- response.raise_for_status()
-
- response_text = ""
-
- async for line in response.content:
- if line:
- line = line.decode('utf-8').strip()
-
- if line.startswith("data: ") and line != "data: [DONE]":
- try:
- data = json.loads(line[6:])
- content = data['choices'][0]['delta'].get('content', '')
- if content:
- response_text += content
- yield content
- except json.JSONDecodeError:
- continue
-
- if line == "data: [DONE]":
- break
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 56a5262f..04ff8396 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -22,25 +22,21 @@ from .Copilot import Copilot
from .DarkAI import DarkAI
from .DDG import DDG
from .DeepInfraChat import DeepInfraChat
+from .Flux import Flux
from .Free2GPT import Free2GPT
from .FreeGpt import FreeGpt
from .GizAI import GizAI
from .Liaobots import Liaobots
-from .MagickPen import MagickPen
+from .Mhystical import Mhystical
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
from .PollinationsAI import PollinationsAI
from .Prodia import Prodia
-from .Reka import Reka
from .ReplicateHome import ReplicateHome
-from .RobocodersAPI import RobocodersAPI
from .RubiksAI import RubiksAI
from .TeachAnything import TeachAnything
-from .Upstage import Upstage
from .You import You
-from .Mhystical import Mhystical
-from .Flux import Flux
import sys
@@ -61,4 +57,4 @@ __map__: dict[str, ProviderType] = dict([
])
class ProviderUtils:
- convert: dict[str, ProviderType] = __map__
\ No newline at end of file
+ convert: dict[str, ProviderType] = __map__
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index 9127708c..b55a604b 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -51,14 +51,22 @@ UPLOAD_IMAGE_HEADERS = {
}
class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Google Gemini"
url = "https://gemini.google.com"
+
needs_auth = True
working = True
+
default_model = 'gemini'
image_models = ["gemini"]
default_vision_model = "gemini"
models = ["gemini", "gemini-1.5-flash", "gemini-1.5-pro"]
+ model_aliases = {
+ "gemini-flash": "gemini-1.5-flash",
+ "gemini-pro": "gemini-1.5-pro",
+ }
synthesize_content_type = "audio/vnd.wav"
+
_cookies: Cookies = None
_snlm0e: str = None
_sid: str = None
diff --git a/g4f/Provider/needs_auth/GeminiPro.py b/g4f/Provider/needs_auth/GeminiPro.py
index a7f1e0aa..d9204b25 100644
--- a/g4f/Provider/needs_auth/GeminiPro.py
+++ b/g4f/Provider/needs_auth/GeminiPro.py
@@ -11,14 +11,20 @@ from ...errors import MissingAuthError
from ..helper import get_connector
class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Gemini API"
+ label = "Google Gemini API"
url = "https://ai.google.dev"
+
working = True
supports_message_history = True
needs_auth = True
+
default_model = "gemini-1.5-pro"
default_vision_model = default_model
models = [default_model, "gemini-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b"]
+ model_aliases = {
+ "gemini-flash": "gemini-1.5-flash",
+ "gemini-flash": "gemini-1.5-flash-8b",
+ }
@classmethod
async def create_async_generator(
@@ -108,4 +114,4 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
if candidate["finishReason"] == "STOP":
yield candidate["content"]["parts"][0]["text"]
else:
- yield candidate["finishReason"] + ' ' + candidate["safetyRatings"]
\ No newline at end of file
+ yield candidate["finishReason"] + ' ' + candidate["safetyRatings"]
diff --git a/g4f/Provider/needs_auth/GithubCopilot.py b/g4f/Provider/needs_auth/GithubCopilot.py
index 3eb66b5e..754c8d4e 100644
--- a/g4f/Provider/needs_auth/GithubCopilot.py
+++ b/g4f/Provider/needs_auth/GithubCopilot.py
@@ -16,10 +16,12 @@ class Conversation(BaseConversation):
self.conversation_id = conversation_id
class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://copilot.microsoft.com"
+ url = "https://github.com/copilot"
+
working = True
needs_auth = True
supports_stream = True
+
default_model = "gpt-4o"
models = [default_model, "o1-mini", "o1-preview", "claude-3.5-sonnet"]
@@ -90,4 +92,4 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
if line.startswith(b"data: "):
data = json.loads(line[6:])
if data.get("type") == "content":
- yield data.get("body")
\ No newline at end of file
+ yield data.get("body")
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 2f3dbb57..431273f6 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -24,16 +24,19 @@ class Conversation(BaseConversation):
class HuggingChat(AbstractProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"
+
working = True
supports_stream = True
needs_auth = True
+
default_model = "Qwen/Qwen2.5-72B-Instruct"
+ default_image_model = "black-forest-labs/FLUX.1-dev"
image_models = [
"black-forest-labs/FLUX.1-dev"
]
models = [
default_model,
- 'meta-llama/Meta-Llama-3.1-70B-Instruct',
+ 'meta-llama/Llama-3.3-70B-Instruct',
'CohereForAI/c4ai-command-r-plus-08-2024',
'Qwen/QwQ-32B-Preview',
'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
@@ -45,8 +48,9 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
*image_models
]
model_aliases = {
+ ### Chat ###
"qwen-2.5-72b": "Qwen/Qwen2.5-72B-Instruct",
- "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+ "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct",
"command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
"qwq-32b": "Qwen/QwQ-32B-Preview",
"nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
@@ -55,6 +59,8 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
"hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B",
"mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407",
"phi-3.5-mini": "microsoft/Phi-3.5-mini-instruct",
+
+ ### Image ###
"flux-dev": "black-forest-labs/FLUX.1-dev",
}
@@ -214,4 +220,4 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
return data[message_keys["id"]]
except (KeyError, IndexError, TypeError) as e:
- raise RuntimeError(f"Failed to extract message ID: {str(e)}")
\ No newline at end of file
+ raise RuntimeError(f"Failed to extract message ID: {str(e)}")
diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py
index 94530252..fd1da2a7 100644
--- a/g4f/Provider/needs_auth/HuggingFace.py
+++ b/g4f/Provider/needs_auth/HuggingFace.py
@@ -17,7 +17,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
working = True
supports_message_history = True
default_model = HuggingChat.default_model
- default_image_model = "black-forest-labs/FLUX.1-dev"
+ default_image_model = HuggingChat.default_image_model
models = [*HuggingChat.models, default_image_model]
image_models = [default_image_model]
model_aliases = HuggingChat.model_aliases
diff --git a/g4f/Provider/needs_auth/HuggingFace2.py b/g4f/Provider/needs_auth/HuggingFace2.py
deleted file mode 100644
index 0bde770b..00000000
--- a/g4f/Provider/needs_auth/HuggingFace2.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from __future__ import annotations
-
-from .OpenaiAPI import OpenaiAPI
-from .HuggingChat import HuggingChat
-from ...typing import AsyncResult, Messages
-
-class HuggingFace2(OpenaiAPI):
- label = "HuggingFace (Inference API)"
- url = "https://huggingface.co"
- working = True
- default_model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
- default_vision_model = default_model
- models = [
- *HuggingChat.models
- ]
-
- @classmethod
- def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- api_base: str = "https://api-inference.huggingface.co/v1",
- max_tokens: int = 500,
- **kwargs
- ) -> AsyncResult:
- return super().create_async_generator(
- model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs
- )
diff --git a/g4f/Provider/needs_auth/HuggingFaceAPI.py b/g4f/Provider/needs_auth/HuggingFaceAPI.py
new file mode 100644
index 00000000..a93ab3a6
--- /dev/null
+++ b/g4f/Provider/needs_auth/HuggingFaceAPI.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+from .OpenaiAPI import OpenaiAPI
+from .HuggingChat import HuggingChat
+from ...typing import AsyncResult, Messages
+
+class HuggingFaceAPI(OpenaiAPI):
+ label = "HuggingFace (Inference API)"
+ url = "https://api-inference.huggingface.co"
+ working = True
+ default_model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
+ default_vision_model = default_model
+ models = [
+ *HuggingChat.models
+ ]
+
+ @classmethod
+ def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ api_base: str = "https://api-inference.huggingface.co/v1",
+ max_tokens: int = 500,
+ **kwargs
+ ) -> AsyncResult:
+ return super().create_async_generator(
+ model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs
+ )
diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py
index 65fdbef9..46b998e8 100644
--- a/g4f/Provider/needs_auth/Poe.py
+++ b/g4f/Provider/needs_auth/Poe.py
@@ -24,8 +24,8 @@ class Poe(AbstractProvider):
url = "https://poe.com"
working = True
needs_auth = True
- supports_gpt_35_turbo = True
supports_stream = True
+
models = models.keys()
@classmethod
@@ -113,4 +113,4 @@ if(window._message && window._message != window._last_message) {
elif chunk != "":
break
else:
- time.sleep(0.1)
\ No newline at end of file
+ time.sleep(0.1)
diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py
index b8ec5a97..008fcad8 100644
--- a/g4f/Provider/needs_auth/Raycast.py
+++ b/g4f/Provider/needs_auth/Raycast.py
@@ -10,8 +10,6 @@ from ..base_provider import AbstractProvider
class Raycast(AbstractProvider):
url = "https://raycast.com"
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
supports_stream = True
needs_auth = True
working = True
diff --git a/g4f/Provider/needs_auth/Reka.py b/g4f/Provider/needs_auth/Reka.py
new file mode 100644
index 00000000..780ff31e
--- /dev/null
+++ b/g4f/Provider/needs_auth/Reka.py
@@ -0,0 +1,148 @@
+from __future__ import annotations
+
+import os, requests, time, json
+from ...typing import CreateResult, Messages, ImageType
+from ..base_provider import AbstractProvider
+from ...cookies import get_cookies
+from ...image import to_bytes
+
+class Reka(AbstractProvider):
+ url = "https://chat.reka.ai/"
+ working = True
+ needs_auth = True
+ supports_stream = True
+ default_vision_model = "reka"
+ cookies = {}
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ api_key: str = None,
+ image: ImageType = None,
+ **kwargs
+ ) -> CreateResult:
+ cls.proxy = proxy
+
+ if not api_key:
+ cls.cookies = get_cookies("chat.reka.ai")
+ if not cls.cookies:
+ raise ValueError("No cookies found for chat.reka.ai")
+ elif "appSession" not in cls.cookies:
+ raise ValueError("No appSession found in cookies for chat.reka.ai, log in or provide bearer_auth")
+ api_key = cls.get_access_token(cls)
+
+ conversation = []
+ for message in messages:
+ conversation.append({
+ "type": "human",
+ "text": message["content"],
+ })
+
+ if image:
+ image_url = cls.upload_image(cls, api_key, image)
+ conversation[-1]["image_url"] = image_url
+ conversation[-1]["media_type"] = "image"
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'authorization': f'Bearer {api_key}',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat.reka.ai',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+ }
+
+ json_data = {
+ 'conversation_history': conversation,
+ 'stream': True,
+ 'use_search_engine': False,
+ 'use_code_interpreter': False,
+ 'model_name': 'reka-core',
+ 'random_seed': int(time.time() * 1000),
+ }
+
+ tokens = ''
+
+ response = requests.post('https://chat.reka.ai/api/chat',
+ cookies=cls.cookies, headers=headers, json=json_data, proxies=cls.proxy, stream=True)
+
+ for completion in response.iter_lines():
+ if b'data' in completion:
+ token_data = json.loads(completion.decode('utf-8')[5:])['text']
+
+ yield (token_data.replace(tokens, ''))
+
+ tokens = token_data
+
+ def upload_image(cls, access_token, image: ImageType) -> str:
+ boundary_token = os.urandom(8).hex()
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'cache-control': 'no-cache',
+ 'authorization': f'Bearer {access_token}',
+ 'content-type': f'multipart/form-data; boundary=----WebKitFormBoundary{boundary_token}',
+ 'origin': 'https://chat.reka.ai',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://chat.reka.ai/chat/hPReZExtDOPvUfF8vCPC',
+ 'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+ }
+
+ image_data = to_bytes(image)
+
+ boundary = f'----WebKitFormBoundary{boundary_token}'
+ data = f'--{boundary}\r\nContent-Disposition: form-data; name="image"; filename="image.png"\r\nContent-Type: image/png\r\n\r\n'
+ data += image_data.decode('latin-1')
+ data += f'\r\n--{boundary}--\r\n'
+
+ response = requests.post('https://chat.reka.ai/api/upload-image',
+ cookies=cls.cookies, headers=headers, proxies=cls.proxy, data=data.encode('latin-1'))
+
+ return response.json()['media_url']
+
+ def get_access_token(cls):
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'cache-control': 'no-cache',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://chat.reka.ai/chat',
+ 'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+ }
+
+ try:
+ response = requests.get('https://chat.reka.ai/bff/auth/access_token',
+ cookies=cls.cookies, headers=headers, proxies=cls.proxy)
+
+ return response.json()['accessToken']
+
+ except Exception as e:
+ raise ValueError(f"Failed to get access token: {e}, refresh your cookies / log in into chat.reka.ai")
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py
index c7d7d58e..7d3de027 100644
--- a/g4f/Provider/needs_auth/Theb.py
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -35,9 +35,8 @@ class Theb(AbstractProvider):
label = "TheB.AI"
url = "https://beta.theb.ai"
working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
supports_stream = True
+
models = models.keys()
@classmethod
@@ -155,4 +154,4 @@ return '';
elif chunk != "":
break
else:
- time.sleep(0.1)
\ No newline at end of file
+ time.sleep(0.1)
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index c67dfb56..d79e7e3d 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -11,7 +11,7 @@ from .GithubCopilot import GithubCopilot
from .Groq import Groq
from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
-from .HuggingFace2 import HuggingFace2
+from .HuggingFaceAPI import HuggingFaceAPI
from .MetaAI import MetaAI
from .MetaAIAccount import MetaAIAccount
from .MicrosoftDesigner import MicrosoftDesigner
@@ -21,6 +21,7 @@ from .OpenaiChat import OpenaiChat
from .PerplexityApi import PerplexityApi
from .Poe import Poe
from .Raycast import Raycast
+from .Reka import Reka
from .Replicate import Replicate
from .Theb import Theb
from .ThebApi import ThebApi
diff --git a/g4f/Provider/not_working/MagickPen.py b/g4f/Provider/not_working/MagickPen.py
new file mode 100644
index 00000000..56d8e4c4
--- /dev/null
+++ b/g4f/Provider/not_working/MagickPen.py
@@ -0,0 +1,87 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import hashlib
+import time
+import random
+import re
+import json
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://magickpen.com"
+ api_endpoint = "https://api.magickpen.com/ask"
+ working = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-4o-mini'
+ models = ['gpt-4o-mini']
+
+ @classmethod
+ async def fetch_api_credentials(cls) -> tuple:
+ url = "https://magickpen.com/_nuxt/bf709a9ce19f14e18116.js"
+ async with ClientSession() as session:
+ async with session.get(url) as response:
+ text = await response.text()
+
+ pattern = r'"X-API-Secret":"(\w+)"'
+ match = re.search(pattern, text)
+ X_API_SECRET = match.group(1) if match else None
+
+ timestamp = str(int(time.time() * 1000))
+ nonce = str(random.random())
+
+ s = ["TGDBU9zCgM", timestamp, nonce]
+ s.sort()
+ signature_string = ''.join(s)
+ signature = hashlib.md5(signature_string.encode()).hexdigest()
+
+ pattern = r'secret:"(\w+)"'
+ match = re.search(pattern, text)
+ secret = match.group(1) if match else None
+
+ if X_API_SECRET and timestamp and nonce and secret:
+ return X_API_SECRET, signature, timestamp, nonce, secret
+ else:
+ raise Exception("Unable to extract all the necessary data from the JavaScript file.")
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ X_API_SECRET, signature, timestamp, nonce, secret = await cls.fetch_api_credentials()
+
+ headers = {
+ 'accept': 'application/json, text/plain, */*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'content-type': 'application/json',
+ 'nonce': nonce,
+ 'origin': cls.url,
+ 'referer': f"{cls.url}/",
+ 'secret': secret,
+ 'signature': signature,
+ 'timestamp': timestamp,
+ 'x-api-secret': X_API_SECRET,
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ payload = {
+ 'query': prompt,
+ 'turnstileResponse': '',
+ 'action': 'verify'
+ }
+ async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
diff --git a/g4f/Provider/not_working/RobocodersAPI.py b/g4f/Provider/not_working/RobocodersAPI.py
new file mode 100755
index 00000000..2716b704
--- /dev/null
+++ b/g4f/Provider/not_working/RobocodersAPI.py
@@ -0,0 +1,238 @@
+from __future__ import annotations
+
+import json
+import aiohttp
+from pathlib import Path
+
+try:
+ from bs4 import BeautifulSoup
+ HAS_BEAUTIFULSOUP = True
+except ImportError:
+ HAS_BEAUTIFULSOUP = False
+ BeautifulSoup = None
+
+from aiohttp import ClientTimeout
+from ...errors import MissingRequirementsError
+from ...typing import AsyncResult, Messages
+from ...cookies import get_cookies_dir
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+from ... import debug
+
+
+class RobocodersAPI(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "API Robocoders AI"
+ url = "https://api.robocoders.ai/docs"
+ api_endpoint = "https://api.robocoders.ai/chat"
+ working = False
+ supports_message_history = True
+ default_model = 'GeneralCodingAgent'
+ agent = [default_model, "RepoAgent", "FrontEndAgent"]
+ models = [*agent]
+
+ CACHE_DIR = Path(get_cookies_dir())
+ CACHE_FILE = CACHE_DIR / "robocoders.json"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+
+ timeout = ClientTimeout(total=600)
+
+ async with aiohttp.ClientSession(timeout=timeout) as session:
+ # Load or create access token and session ID
+ access_token, session_id = await cls._get_or_create_access_and_session(session)
+ if not access_token or not session_id:
+ raise Exception("Failed to initialize API interaction")
+
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {access_token}"
+ }
+
+ prompt = format_prompt(messages)
+
+ data = {
+ "sid": session_id,
+ "prompt": prompt,
+ "agent": model
+ }
+
+ async with session.post(cls.api_endpoint, headers=headers, json=data, proxy=proxy) as response:
+ if response.status == 401: # Unauthorized, refresh token
+ cls._clear_cached_data()
+ raise Exception("Unauthorized: Invalid token, please retry.")
+ elif response.status == 422:
+ raise Exception("Validation Error: Invalid input.")
+ elif response.status >= 500:
+ raise Exception(f"Server Error: {response.status}")
+ elif response.status != 200:
+ raise Exception(f"Unexpected Error: {response.status}")
+
+ async for line in response.content:
+ if line:
+ try:
+ # Decode bytes into a string
+ line_str = line.decode('utf-8')
+ response_data = json.loads(line_str)
+
+ # Get the message from the 'args.content' or 'message' field
+ message = (response_data.get('args', {}).get('content') or
+ response_data.get('message', ''))
+
+ if message:
+ yield message
+
+ # Check for reaching the resource limit
+ if (response_data.get('action') == 'message' and
+ response_data.get('args', {}).get('wait_for_response')):
+ # Automatically continue the dialog
+ continue_data = {
+ "sid": session_id,
+ "prompt": "continue",
+ "agent": model
+ }
+ async with session.post(
+ cls.api_endpoint,
+ headers=headers,
+ json=continue_data,
+ proxy=proxy
+ ) as continue_response:
+ if continue_response.status == 200:
+ async for continue_line in continue_response.content:
+ if continue_line:
+ try:
+ continue_line_str = continue_line.decode('utf-8')
+ continue_chunk = json.loads(continue_line_str)
+ continue_message = (
+ continue_chunk.get('args', {}).get('content') or
+ continue_chunk.get('message', '')
+ )
+ if continue_message:
+ yield continue_message
+ except json.JSONDecodeError:
+ debug.log(f"Failed to decode continue JSON: {continue_line}")
+ except Exception as e:
+ debug.log(f"Error processing continue response: {e}")
+
+ except json.JSONDecodeError:
+ debug.log(f"Failed to decode JSON: {line}")
+ except Exception as e:
+ debug.log(f"Error processing response: {e}")
+
+ @staticmethod
+ async def _get_or_create_access_and_session(session: aiohttp.ClientSession):
+ RobocodersAPI.CACHE_DIR.mkdir(exist_ok=True) # Ensure cache directory exists
+
+ # Load data from cache
+ if RobocodersAPI.CACHE_FILE.exists():
+ with open(RobocodersAPI.CACHE_FILE, "r") as f:
+ data = json.load(f)
+ access_token = data.get("access_token")
+ session_id = data.get("sid")
+
+ # Validate loaded data
+ if access_token and session_id:
+ return access_token, session_id
+
+ # If cached data is missing or invalid, create a new access token and session ID
+ access_token = await RobocodersAPI._fetch_and_cache_access_token(session)
+ session_id = await RobocodersAPI._create_and_cache_session(session, access_token)
+ return access_token, session_id
+
+ @staticmethod
+ async def _fetch_and_cache_access_token(session: aiohttp.ClientSession) -> str:
+ if not HAS_BEAUTIFULSOUP:
+ raise MissingRequirementsError('Install "beautifulsoup4" package | pip install -U beautifulsoup4')
+
+ url_auth = 'https://api.robocoders.ai/auth'
+ headers_auth = {
+ 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
+ }
+
+ async with session.get(url_auth, headers=headers_auth) as response:
+ if response.status == 200:
+ html = await response.text()
+ soup = BeautifulSoup(html, 'html.parser')
+ token_element = soup.find('pre', id='token')
+ if token_element:
+ token = token_element.text.strip()
+
+ # Cache the token
+ RobocodersAPI._save_cached_data({"access_token": token})
+ return token
+ return None
+
+ @staticmethod
+ async def _create_and_cache_session(session: aiohttp.ClientSession, access_token: str) -> str:
+ url_create_session = 'https://api.robocoders.ai/create-session'
+ headers_create_session = {
+ 'Authorization': f'Bearer {access_token}'
+ }
+
+ async with session.get(url_create_session, headers=headers_create_session) as response:
+ if response.status == 200:
+ data = await response.json()
+ session_id = data.get('sid')
+
+ # Cache session ID
+ RobocodersAPI._update_cached_data({"sid": session_id})
+ return session_id
+ elif response.status == 401:
+ RobocodersAPI._clear_cached_data()
+ raise Exception("Unauthorized: Invalid token during session creation.")
+ elif response.status == 422:
+ raise Exception("Validation Error: Check input parameters.")
+ return None
+
+ @staticmethod
+ def _save_cached_data(new_data: dict):
+ """Save new data to cache file"""
+ RobocodersAPI.CACHE_DIR.mkdir(exist_ok=True)
+ RobocodersAPI.CACHE_FILE.touch(exist_ok=True)
+ with open(RobocodersAPI.CACHE_FILE, "w") as f:
+ json.dump(new_data, f)
+
+ @staticmethod
+ def _update_cached_data(updated_data: dict):
+ """Update existing cache data with new values"""
+ data = {}
+ if RobocodersAPI.CACHE_FILE.exists():
+ with open(RobocodersAPI.CACHE_FILE, "r") as f:
+ try:
+ data = json.load(f)
+ except json.JSONDecodeError:
+ # If cache file is corrupted, start with empty dict
+ data = {}
+
+ data.update(updated_data)
+ with open(RobocodersAPI.CACHE_FILE, "w") as f:
+ json.dump(data, f)
+
+ @staticmethod
+ def _clear_cached_data():
+ """Remove cache file"""
+ try:
+ if RobocodersAPI.CACHE_FILE.exists():
+ RobocodersAPI.CACHE_FILE.unlink()
+ except Exception as e:
+ debug.log(f"Error clearing cache: {e}")
+
+ @staticmethod
+ def _get_cached_data() -> dict:
+ """Get all cached data"""
+ if RobocodersAPI.CACHE_FILE.exists():
+ try:
+ with open(RobocodersAPI.CACHE_FILE, "r") as f:
+ return json.load(f)
+ except json.JSONDecodeError:
+ return {}
+ return {}
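
For context, the cache helpers above boil down to a read-modify-write JSON store that tolerates a missing or corrupted file. The same pattern in isolation, as a minimal sketch with an illustrative file name and keys:

import json
from pathlib import Path

CACHE_FILE = Path("robocoders_cache.json")  # illustrative path

def update_cache(updates: dict) -> dict:
    # Read existing data, falling back to an empty dict on a missing or corrupt file
    data = {}
    if CACHE_FILE.exists():
        try:
            data = json.loads(CACHE_FILE.read_text())
        except json.JSONDecodeError:
            data = {}
    # Merge the new values and write the whole store back
    data.update(updates)
    CACHE_FILE.write_text(json.dumps(data))
    return data

update_cache({"access_token": "abc"})  # first write creates the file
update_cache({"sid": "123"})           # later writes preserve earlier keys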
diff --git a/g4f/Provider/not_working/Upstage.py b/g4f/Provider/not_working/Upstage.py
new file mode 100644
index 00000000..74355631
--- /dev/null
+++ b/g4f/Provider/not_working/Upstage.py
@@ -0,0 +1,91 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class Upstage(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://console.upstage.ai/playground/chat"
+ api_endpoint = "https://ap-northeast-2.apistage.ai/v1/web/demo/chat/completions"
+ working = False
+ default_model = 'solar-pro'
+ models = [
+ 'upstage/solar-1-mini-chat',
+ 'upstage/solar-1-mini-chat-ja',
+ 'solar-pro',
+ ]
+ model_aliases = {
+ "solar-mini": "upstage/solar-1-mini-chat",
+ "solar-mini": "upstage/solar-1-mini-chat-ja",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": "https://console.upstage.ai",
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": "https://console.upstage.ai/",
+ "sec-ch-ua": '"Not?A_Brand";v="99", "Chromium";v="130"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "cross-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
+ }
+
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "stream": True,
+ "messages": [{"role": "user", "content": format_prompt(messages)}],
+ "model": model
+ }
+
+ async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+
+ response_text = ""
+
+ async for line in response.content:
+ if line:
+ line = line.decode('utf-8').strip()
+
+ if line.startswith("data: ") and line != "data: [DONE]":
+ try:
+ data = json.loads(line[6:])
+ content = data['choices'][0]['delta'].get('content', '')
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ continue
+
+ if line == "data: [DONE]":
+ break
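
The streaming loop above follows the usual server-sent-events framing: each payload line starts with "data: " and the stream is terminated by "data: [DONE]". A minimal sketch of the same line parser, with illustrative sample lines:

from __future__ import annotations

import json

def parse_sse_line(raw: bytes) -> str | None:
    """Extract the delta content from one 'data: ...' SSE line, if any."""
    line = raw.decode("utf-8").strip()
    if not line.startswith("data: ") or line == "data: [DONE]":
        return None
    try:
        payload = json.loads(line[6:])
        return payload["choices"][0]["delta"].get("content") or None
    except (json.JSONDecodeError, KeyError, IndexError):
        return None

# Illustrative lines in the shape the loop above expects
assert parse_sse_line(b'data: {"choices": [{"delta": {"content": "Hi"}}]}') == "Hi"
assert parse_sse_line(b"data: [DONE]") is None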
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
index 69e38879..a58870c2 100644
--- a/g4f/Provider/not_working/__init__.py
+++ b/g4f/Provider/not_working/__init__.py
@@ -5,10 +5,13 @@ from .AiChats import AiChats
from .AIUncensored import AIUncensored
from .Aura import Aura
from .Chatgpt4o import Chatgpt4o
+from .Chatgpt4Online import Chatgpt4Online
from .ChatgptFree import ChatgptFree
from .FlowGpt import FlowGpt
from .FreeNetfly import FreeNetfly
from .GPROChat import GPROChat
from .Koala import Koala
+from .MagickPen import MagickPen
from .MyShell import MyShell
-from .Chatgpt4Online import Chatgpt4Online
+from .RobocodersAPI import RobocodersAPI
+from .Upstage import Upstage
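
With these exports in place, the not_working providers remain importable directly even though they are flagged working = False; an illustrative check:

from g4f.Provider.not_working import RobocodersAPI, Upstage

# Both classes are exported but marked as not working
print(RobocodersAPI.working, Upstage.working)  # False False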
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index 5410b46e..b5fcd280 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -284,7 +284,7 @@
-
+
@@ -302,4 +302,4 @@