From 29c13e26cd794208786a8a6cf421c264015c7e3a Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Thu, 25 Jul 2024 09:21:55 +0300
Subject: Comprehensive Update: New Providers, Model Enhancements, and
Functionality Improvements
---
g4f/Provider/Allyfy.py | 71 ++++++++++
g4f/Provider/ChatGot.py | 75 ++++++++++
g4f/Provider/Chatgpt4Online.py | 101 +++++++-------
g4f/Provider/GeminiProChat.py | 4 +-
g4f/Provider/HuggingChat.py | 3 +-
g4f/Provider/HuggingFace.py | 5 +-
g4f/Provider/Liaobots.py | 45 ++++--
g4f/Provider/PerplexityLabs.py | 15 +-
g4f/Provider/Pi.py | 3 +-
g4f/Provider/ReplicateHome.py | 48 ++++---
g4f/Provider/You.py | 20 +--
g4f/Provider/__init__.py | 2 +
g4f/Provider/needs_auth/Openai.py | 3 +-
g4f/Provider/needs_auth/OpenaiChat.py | 2 +-
g4f/models.py | 250 +++++++++++++++++++++++++++-------
15 files changed, 486 insertions(+), 161 deletions(-)
create mode 100644 g4f/Provider/Allyfy.py
create mode 100644 g4f/Provider/ChatGot.py
diff --git a/g4f/Provider/Allyfy.py b/g4f/Provider/Allyfy.py
new file mode 100644
index 00000000..8733b1ec
--- /dev/null
+++ b/g4f/Provider/Allyfy.py
@@ -0,0 +1,71 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+
+class Allyfy(AsyncGeneratorProvider):
+ url = "https://chatbot.allyfy.chat"
+ api_endpoint = "/api/v1/message/stream/super/chat"
+ working = True
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json;charset=utf-8",
+ "dnt": "1",
+ "origin": "https://www.allyfy.chat",
+ "priority": "u=1, i",
+ "referer": "https://www.allyfy.chat/",
+ "referrer": "https://www.allyfy.chat",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "messages": [{"content": prompt, "role": "user"}],
+ "content": prompt,
+ "baseInfo": {
+ "clientId": "q08kdrde1115003lyedfoir6af0yy531",
+ "pid": "38281",
+ "channelId": "100000",
+ "locale": "en-US",
+ "localZone": 180,
+ "packageName": "com.cch.allyfy.webh",
+ }
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ full_response = []
+ async for line in response.content:
+ line = line.decode().strip()
+ if line.startswith("data:"):
+ data_content = line[5:]
+ if data_content == "[DONE]":
+ break
+ try:
+ json_data = json.loads(data_content)
+ if "content" in json_data:
+ full_response.append(json_data["content"])
+ except json.JSONDecodeError:
+ continue
+ yield "".join(full_response)
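For reference, the new provider can be exercised directly through its async-generator interface. A minimal sketch follows (the prompt is hypothetical, and the model argument is effectively ignored since Allyfy only advertises gpt-3.5-turbo support); note the implementation above buffers the stream and yields the joined response once:

    import asyncio
    from g4f.Provider import Allyfy

    async def main() -> None:
        messages = [{"role": "user", "content": "Say hello"}]
        # create_async_generator is an async generator function, so it can be
        # iterated directly without awaiting it first.
        async for text in Allyfy.create_async_generator(model="gpt-3.5-turbo", messages=messages):
            print(text)

    asyncio.run(main())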
diff --git a/g4f/Provider/ChatGot.py b/g4f/Provider/ChatGot.py
new file mode 100644
index 00000000..55e8d0b6
--- /dev/null
+++ b/g4f/Provider/ChatGot.py
@@ -0,0 +1,75 @@
+from __future__ import annotations
+
+import time
+from hashlib import sha256
+
+from aiohttp import BaseConnector, ClientSession
+
+from ..errors import RateLimitError
+from ..requests import raise_for_status
+from ..requests.aiohttp import get_connector
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class ChatGot(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.chatgot.one/"
+ working = True
+ supports_message_history = True
+ default_model = 'gemini-pro'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ connector: BaseConnector = None,
+ **kwargs,
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.5",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "text/plain;charset=UTF-8",
+ "Referer": f"{cls.url}/",
+ "Origin": cls.url,
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Connection": "keep-alive",
+ "TE": "trailers",
+ }
+ async with ClientSession(
+ connector=get_connector(connector, proxy), headers=headers
+ ) as session:
+ timestamp = int(time.time() * 1e3)
+ data = {
+ "messages": [
+ {
+ "role": "model" if message["role"] == "assistant" else "user",
+ "parts": [{"text": message["content"]}],
+ }
+ for message in messages
+ ],
+ "time": timestamp,
+ "pass": None,
+ "sign": generate_signature(timestamp, messages[-1]["content"]),
+ }
+ async with session.post(
+ f"{cls.url}/api/generate", json=data, proxy=proxy
+ ) as response:
+ if response.status == 500:
+ if "Quota exceeded" in await response.text():
+ raise RateLimitError(
+ f"Response {response.status}: Rate limit reached"
+ )
+ await raise_for_status(response)
+ async for chunk in response.content.iter_any():
+ yield chunk.decode(errors="ignore")
+
+
+def generate_signature(timestamp: int, text: str, secret: str = ""):
+ message = f"{timestamp}:{text}:{secret}"
+ return sha256(message.encode()).hexdigest()
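The endpoint authenticates each request with a client-side SHA-256 digest over the millisecond timestamp, the last message text, and an empty secret. A standalone check mirroring generate_signature above (the message text is hypothetical):

    import time
    from hashlib import sha256

    ts = int(time.time() * 1e3)  # milliseconds, as in the provider
    text = "Hello"
    sign = sha256(f"{ts}:{text}:".encode()).hexdigest()
    print(sign)  # 64-character hex digest sent as the "sign" field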
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py
index ff9a2c8f..d55be65b 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -1,22 +1,18 @@
from __future__ import annotations
-import re
import json
from aiohttp import ClientSession
-from ..typing import Messages, AsyncResult
-from ..requests import get_args_from_browser
-from ..webdriver import WebDriver
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from .helper import format_prompt
+
class Chatgpt4Online(AsyncGeneratorProvider):
url = "https://chatgpt4online.org"
- supports_message_history = True
- supports_gpt_35_turbo = True
- working = True
- _wpnonce = None
- _context_id = None
+ api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
+ working = True
+ supports_gpt_4 = True
@classmethod
async def create_async_generator(
@@ -24,49 +20,52 @@ class Chatgpt4Online(AsyncGeneratorProvider):
model: str,
messages: Messages,
proxy: str = None,
- webdriver: WebDriver = None,
**kwargs
) -> AsyncResult:
- args = get_args_from_browser(f"{cls.url}/chat/", webdriver, proxy=proxy)
- async with ClientSession(**args) as session:
- if not cls._wpnonce:
- async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
- response.raise_for_status()
- response = await response.text()
- result = re.search(r'restNonce":"(.*?)"', response)
- if result:
- cls._wpnonce = result.group(1)
- else:
- raise RuntimeError("No nonce found")
- result = re.search(r'contextId":(.*?),', response)
- if result:
- cls._context_id = result.group(1)
- else:
- raise RuntimeError("No contextId found")
+ headers = {
+ "accept": "text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ "x-wp-nonce": "d9505e9877",
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
data = {
- "botId":"default",
- "customId":None,
- "session":"N/A",
- "chatId":get_random_string(11),
- "contextId":cls._context_id,
- "messages":messages[:-1],
- "newMessage":messages[-1]["content"],
- "newImageId":None,
- "stream":True
+ "botId": "default",
+ "newMessage": prompt,
+ "stream": True,
}
- async with session.post(
- f"{cls.url}/wp-json/mwai-ui/v1/chats/submit",
- json=data,
- proxy=proxy,
- headers={"x-wp-nonce": cls._wpnonce}
- ) as response:
+
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
- async for line in response.content:
- if line.startswith(b"data: "):
- line = json.loads(line[6:])
- if "type" not in line:
- raise RuntimeError(f"Response: {line}")
- elif line["type"] == "live":
- yield line["data"]
- elif line["type"] == "end":
- break
+ full_response = ""
+
+ async for chunk in response.content.iter_any():
+ if chunk:
+ try:
+ # Extract the JSON object from the chunk
+ for line in chunk.decode().splitlines():
+ if line.startswith("data: "):
+ json_data = json.loads(line[6:])
+ if json_data["type"] == "live":
+ full_response += json_data["data"]
+ elif json_data["type"] == "end":
+ final_data = json.loads(json_data["data"])
+ full_response = final_data["reply"]
+ break
+ except json.JSONDecodeError:
+ continue
+
+ yield full_response
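Note that this rewrite trades the old runtime nonce scraping for a hardcoded x-wp-nonce ("d9505e9877"), which will stop working once the site rotates it. If that happens, the scraping step can be restored along these lines (a sketch reusing the regex from the removed code, not part of the patch):

    import re
    import aiohttp

    async def fetch_wp_nonce(base_url: str = "https://chatgpt4online.org") -> str:
        # Same pattern the removed webdriver-based code searched for.
        async with aiohttp.ClientSession() as session:
            async with session.get(f"{base_url}/chat/") as response:
                response.raise_for_status()
                html = await response.text()
        match = re.search(r'restNonce":"(.*?)"', html)
        if match is None:
            raise RuntimeError("No nonce found")
        return match.group(1)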
diff --git a/g4f/Provider/GeminiProChat.py b/g4f/Provider/GeminiProChat.py
index c61e2ff3..208ca773 100644
--- a/g4f/Provider/GeminiProChat.py
+++ b/g4f/Provider/GeminiProChat.py
@@ -13,10 +13,10 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class GeminiProChat(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://www.chatgot.one/"
+ url = "https://gemini-pro.chat/"
working = True
supports_message_history = True
- default_model = ''
+ default_model = 'gemini-pro'
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index d480d13c..f7c6b581 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -13,8 +13,9 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
supports_stream = True
default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
models = [
+ 'meta-llama/Meta-Llama-3.1-70B-Instruct',
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
'CohereForAI/c4ai-command-r-plus',
- 'meta-llama/Meta-Llama-3-70B-Instruct',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
'01-ai/Yi-1.5-34B-Chat',
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
index a5e27ccf..6634aa75 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/HuggingFace.py
@@ -14,16 +14,17 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
working = True
needs_auth = True
supports_message_history = True
+ default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
models = [
+ 'meta-llama/Meta-Llama-3.1-70B-Instruct',
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
'CohereForAI/c4ai-command-r-plus',
- 'meta-llama/Meta-Llama-3-70B-Instruct',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
'01-ai/Yi-1.5-34B-Chat',
'mistralai/Mistral-7B-Instruct-v0.2',
'microsoft/Phi-3-mini-4k-instruct',
]
- default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 277d8ea2..0cb5edff 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -10,14 +10,23 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
- "gpt-3.5-turbo": {
- "id": "gpt-3.5-turbo",
- "name": "GPT-3.5-Turbo",
+ "gpt-4o-mini-free": {
+ "id": "gpt-4o-mini-free",
+ "name": "GPT-4o-Mini-Free",
"model": "ChatGPT",
"provider": "OpenAI",
- "maxLength": 48000,
- "tokenLimit": 14000,
- "context": "16K",
+ "maxLength": 31200,
+ "tokenLimit": 7800,
+ "context": "8K",
+ },
+ "gpt-4o-mini": {
+ "id": "gpt-4o-mini",
+ "name": "GPT-4o-Mini",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
},
"gpt-4o-free": {
"context": "8K",
@@ -91,6 +100,15 @@ models = {
"tokenLimit": 200000,
"context": "200K",
},
+ "claude-3-5-sonnet-20240620": {
+ "id": "claude-3-5-sonnet-20240620",
+ "name": "Claude-3.5-Sonnet",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
"claude-3-haiku-20240307": {
"id": "claude-3-haiku-20240307",
"name": "Claude-3-Haiku",
@@ -155,10 +173,21 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
- default_model = "gpt-3.5-turbo"
+ default_model = "gpt-4o"
models = list(models.keys())
model_aliases = {
- "claude-v2": "claude-2.0"
+ "gpt-4o-mini": "gpt-4o-mini-free",
+ "gpt-4o": "gpt-4o-free",
+ "claude-3-opus": "claude-3-opus-20240229",
+ "claude-3-opus": "claude-3-opus-20240229-aws",
+ "claude-3-opus": "claude-3-opus-20240229-gcp",
+ "claude-3-sonnet": "claude-3-sonnet-20240229",
+ "claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
+ "gemini-pro": "gemini-1.5-pro-latest",
+ "gemini-pro": "gemini-1.0-pro-latest",
+ "gemini-flash": "gemini-1.5-flash-latest",
}
_auth_code = ""
_cookie_jar = None
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index 4a2cc9e5..0a298e55 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -15,21 +15,8 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
working = True
default_model = "mixtral-8x7b-instruct"
models = [
- "llama-3-sonar-large-32k-online", "llama-3-sonar-small-32k-online", "llama-3-sonar-large-32k-chat", "llama-3-sonar-small-32k-chat",
- "dbrx-instruct", "claude-3-haiku-20240307", "llama-3-8b-instruct", "llama-3-70b-instruct", "codellama-70b-instruct", "mistral-7b-instruct",
- "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct", "mixtral-8x22b-instruct", "mistral-medium", "gemma-2b-it", "gemma-7b-it",
- "related"
+ "llama-3-sonar-large-32k-online", "llama-3-sonar-small-32k-online", "llama-3-sonar-large-32k-chat", "llama-3-sonar-small-32k-chat", "llama-3-8b-instruct", "llama-3-70b-instruct", "gemma-2-9b-it", "gemma-2-27b-it", "nemotron-4-340b-instruct", "mixtral-8x7b-instruct",
]
- model_aliases = {
- "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
- "mistralai/Mistral-7B-Instruct-v0.2": "mistral-7b-instruct",
- "mistralai/Mixtral-8x7B-Instruct-v0.1": "mixtral-8x7b-instruct",
- "codellama/CodeLlama-70b-Instruct-hf": "codellama-70b-instruct",
- "llava-v1.5-7b": "llava-v1.5-7b-wrapper",
- "databricks/dbrx-instruct": "dbrx-instruct",
- "meta-llama/Meta-Llama-3-70B-Instruct": "llama-3-70b-instruct",
- "meta-llama/Meta-Llama-3-8B-Instruct": "llama-3-8b-instruct"
- }
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py
index 5a1e9f0e..e03830f4 100644
--- a/g4f/Provider/Pi.py
+++ b/g4f/Provider/Pi.py
@@ -11,6 +11,7 @@ class Pi(AbstractProvider):
working = True
supports_stream = True
_session = None
+ default_model = "pi"
@classmethod
def create_completion(
@@ -65,4 +66,4 @@ class Pi(AbstractProvider):
yield json.loads(line.split(b'data: ')[1])
elif line.startswith(b'data: {"title":'):
yield json.loads(line.split(b'data: ')[1])
-
\ No newline at end of file
+
diff --git a/g4f/Provider/ReplicateHome.py b/g4f/Provider/ReplicateHome.py
index 48336831..e6c8d2d3 100644
--- a/g4f/Provider/ReplicateHome.py
+++ b/g4f/Provider/ReplicateHome.py
@@ -14,40 +14,46 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
parent = "Replicate"
working = True
- default_model = 'stability-ai/sdxl'
+ default_model = 'stability-ai/stable-diffusion-3'
models = [
- # image
- 'stability-ai/sdxl',
- 'ai-forever/kandinsky-2.2',
+ # Models for image generation
+ 'stability-ai/stable-diffusion-3',
+ 'bytedance/sdxl-lightning-4step',
+ 'playgroundai/playground-v2.5-1024px-aesthetic',
- # text
- 'meta/llama-2-70b-chat',
- 'mistralai/mistral-7b-instruct-v0.2'
+ # Models for text generation
+ 'meta/meta-llama-3-70b-instruct',
+ 'mistralai/mixtral-8x7b-instruct-v0.1',
+ 'google-deepmind/gemma-2b-it',
]
versions = {
- # image
- 'stability-ai/sdxl': [
- "39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
- "2b017d9b67edd2ee1401238df49d75da53c523f36e363881e057f5dc3ed3c5b2",
- "7762fd07cf82c948538e41f63f77d685e02b063e37e496e96eefd46c929f9bdc"
+ # Model versions for generating images
+ 'stability-ai/stable-diffusion-3': [
+ "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f"
],
- 'ai-forever/kandinsky-2.2': [
- "ad9d7879fbffa2874e1d909d1d37d9bc682889cc65b31f7bb00d2362619f194a"
+ 'bytedance/sdxl-lightning-4step': [
+ "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f"
+ ],
+ 'playgroundai/playground-v2.5-1024px-aesthetic': [
+ "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24"
],
-
- # Text
- 'meta/llama-2-70b-chat': [
- "dp-542693885b1777c98ef8c5a98f2005e7"
+
+ # Model versions for text generation
+ 'meta/meta-llama-3-70b-instruct': [
+ "dp-cf04fe09351e25db628e8b6181276547"
],
- 'mistralai/mistral-7b-instruct-v0.2': [
+ 'mistralai/mixtral-8x7b-instruct-v0.1': [
"dp-89e00f489d498885048e94f9809fbc76"
+ ],
+ 'google-deepmind/gemma-2b-it': [
+ "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626"
]
}
- image_models = {"stability-ai/sdxl", "ai-forever/kandinsky-2.2"}
- text_models = {"meta/llama-2-70b-chat", "mistralai/mistral-7b-instruct-v0.2"}
+ image_models = {"stability-ai/stable-diffusion-3", "bytedance/sdxl-lightning-4step", "playgroundai/playground-v2.5-1024px-aesthetic"}
+ text_models = {"meta/meta-llama-3-70b-instruct", "mistralai/mixtral-8x7b-instruct-v0.1", "google-deepmind/gemma-2b-it"}
@classmethod
async def create_async_generator(
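ReplicateHome now pins each model to an explicit version hash, which keeps requests reproducible when Replicate publishes new revisions. Resolving a version is a plain dict lookup (illustration only; how the hash is attached to the API call happens inside create_async_generator, which is truncated above):

    from g4f.Provider import ReplicateHome

    model = "stability-ai/stable-diffusion-3"
    version = ReplicateHome.versions[model][0]
    print(version[:8], model in ReplicateHome.image_models)  # 527d2a62 True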
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 162d6adb..cdf5f430 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -24,27 +24,27 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
image_models = ["dall-e"]
models = [
default_model,
+ "gpt-4o-mini",
"gpt-4o",
- "gpt-4",
"gpt-4-turbo",
- "claude-instant",
- "claude-2",
+ "gpt-4",
+ "claude-3.5-sonnet",
"claude-3-opus",
"claude-3-sonnet",
"claude-3-haiku",
- "gemini-pro",
+ "claude-2",
+ "llama-3.1-70b",
+ "llama-3",
+ "gemini-1-5-flash",
"gemini-1-5-pro",
+ "gemini-1-0-pro",
"databricks-dbrx-instruct",
"command-r",
"command-r-plus",
- "llama3",
- "zephyr",
+ "dolphin-2.5",
default_vision_model,
*image_models
]
- model_aliases = {
- "claude-v2": "claude-2",
- }
_cookies = None
_cookies_used = 0
_telemetry_ids = []
@@ -220,4 +220,4 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
'stytch_session_jwt': session["session_jwt"],
'ydc_stytch_session': session["session_token"],
'ydc_stytch_session_jwt': session["session_jwt"],
- }
\ No newline at end of file
+ }
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 56c01150..0bcab3f2 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -11,10 +11,12 @@ from .selenium import *
from .needs_auth import *
from .AI365VIP import AI365VIP
+from .Allyfy import Allyfy
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .Blackbox import Blackbox
+from .ChatGot import ChatGot
from .Chatgpt4o import Chatgpt4o
from .Chatgpt4Online import Chatgpt4Online
from .ChatgptFree import ChatgptFree
diff --git a/g4f/Provider/needs_auth/Openai.py b/g4f/Provider/needs_auth/Openai.py
index 9da6bad8..a0740c47 100644
--- a/g4f/Provider/needs_auth/Openai.py
+++ b/g4f/Provider/needs_auth/Openai.py
@@ -16,6 +16,7 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
needs_auth = True
supports_message_history = True
supports_system_message = True
+ default_model = ""
@classmethod
async def create_async_generator(
@@ -120,4 +121,4 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
if api_key is not None else {}
),
**({} if headers is None else headers)
- }
\ No newline at end of file
+ }
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 9321c24a..e581cf55 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -61,7 +61,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
default_model = None
default_vision_model = "gpt-4o"
- models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo", "gpt-4o", "auto"]
+ models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo", "gpt-4o", "gpt-4o-mini", "auto"]
model_aliases = {
"text-davinci-002-render-sha": "gpt-3.5-turbo",
"": "gpt-3.5-turbo",
diff --git a/g4f/models.py b/g4f/models.py
index e9016561..0c66fd66 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -5,9 +5,12 @@ from dataclasses import dataclass
from .Provider import IterListProvider, ProviderType
from .Provider import (
AI365VIP,
+ Allyfy,
Bing,
Blackbox,
+ ChatGot,
Chatgpt4o,
+ Chatgpt4Online,
ChatgptFree,
DDG,
DeepInfra,
@@ -84,6 +87,7 @@ gpt_35_long = Model(
DDG,
AI365VIP,
Pizzagpt,
+ Allyfy,
])
)
@@ -107,6 +111,7 @@ gpt_35_turbo = Model(
DDG,
AI365VIP,
Pizzagpt,
+ Allyfy,
])
)
@@ -133,7 +138,7 @@ gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = IterListProvider([
- Bing, Liaobots,
+ Bing, Liaobots, Chatgpt4Online
])
)
@@ -165,7 +170,15 @@ gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'openai',
best_provider = IterListProvider([
- You, Liaobots, Chatgpt4o, AI365VIP
+ You, Liaobots, Chatgpt4o, AI365VIP, OpenaiChat
+ ])
+)
+
+gpt_4o_mini = Model(
+ name = 'gpt-4o-mini',
+ base_provider = 'openai',
+ best_provider = IterListProvider([
+ Liaobots, OpenaiChat, You,
])
)
@@ -185,12 +198,6 @@ meta = Model(
best_provider = MetaAI
)
-llama_2_70b_chat = Model(
- name = "meta/llama-2-70b-chat",
- base_provider = "meta",
- best_provider = IterListProvider([ReplicateHome])
-)
-
llama3_8b_instruct = Model(
name = "meta-llama/Meta-Llama-3-8B-Instruct",
base_provider = "meta",
@@ -200,7 +207,19 @@ llama3_8b_instruct = Model(
llama3_70b_instruct = Model(
name = "meta-llama/Meta-Llama-3-70B-Instruct",
base_provider = "meta",
- best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate, HuggingChat, DDG])
+ best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate, DDG, ReplicateHome])
+)
+
+llama_3_1_70b_Instruct = Model(
+ name = "meta-llama/Meta-Llama-3.1-70B-Instruct",
+ base_provider = "meta",
+ best_provider = IterListProvider([HuggingChat, HuggingFace])
+)
+
+llama_3_1_405b_Instruct_FP8 = Model(
+ name = "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
+ base_provider = "meta",
+ best_provider = IterListProvider([HuggingChat, HuggingFace])
)
codellama_34b_instruct = Model(
@@ -220,13 +239,13 @@ codellama_70b_instruct = Model(
mixtral_8x7b = Model(
name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
base_provider = "huggingface",
- best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs, HuggingChat, DDG])
+ best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs, HuggingChat, DDG, ReplicateHome])
)
mistral_7b_v02 = Model(
name = "mistralai/Mistral-7B-Instruct-v0.2",
base_provider = "huggingface",
- best_provider = IterListProvider([DeepInfra, HuggingFace, HuggingChat, ReplicateHome])
+ best_provider = IterListProvider([DeepInfra, HuggingFace, HuggingChat])
)
@@ -265,10 +284,22 @@ gemini = Model(
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'Google',
- best_provider = IterListProvider([GeminiPro, You, GeminiProChat])
+ best_provider = IterListProvider([GeminiPro, You, ChatGot, GeminiProChat, Liaobots])
+)
+
+gemini_flash = Model(
+ name = 'gemini-flash',
+ base_provider = 'Google',
+ best_provider = IterListProvider([Liaobots])
)
# gemma
+gemma_2b_it = Model(
+ name = 'gemma-2b-it',
+ base_provider = 'Google',
+ best_provider = IterListProvider([ReplicateHome])
+)
+
gemma_2_9b_it = Model(
name = 'gemma-2-9b-it',
base_provider = 'Google',
@@ -283,28 +314,46 @@ gemma_2_27b_it = Model(
### Anthropic ###
-claude_v2 = Model(
- name = 'claude-v2',
- base_provider = 'anthropic',
- best_provider = IterListProvider([Vercel])
+claude_2 = Model(
+ name = 'claude-2',
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([You])
+)
+
+claude_2_0 = Model(
+ name = 'claude-2.0',
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([Liaobots])
+)
+
+claude_2_1 = Model(
+ name = 'claude-2.1',
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([Liaobots])
)
claude_3_opus = Model(
name = 'claude-3-opus',
- base_provider = 'anthropic',
- best_provider = You
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([You, Liaobots])
)
claude_3_sonnet = Model(
name = 'claude-3-sonnet',
- base_provider = 'anthropic',
- best_provider = You
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([You, Liaobots])
+)
+
+claude_3_5_sonnet = Model(
+ name = 'claude-3-5-sonnet',
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([Liaobots])
)
claude_3_haiku = Model(
name = 'claude-3-haiku',
- base_provider = 'anthropic',
- best_provider = IterListProvider([DDG, AI365VIP])
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([DDG, AI365VIP, Liaobots])
)
@@ -348,6 +397,58 @@ command_r_plus = Model(
)
+### iFlytek ###
+SparkDesk_v1_1 = Model(
+ name = 'SparkDesk-v1.1',
+ base_provider = 'iFlytek',
+ best_provider = IterListProvider([FreeChatgpt])
+)
+
+
+### DeepSeek ###
+deepseek_coder = Model(
+ name = 'deepseek-coder',
+ base_provider = 'DeepSeek',
+ best_provider = IterListProvider([FreeChatgpt])
+)
+
+deepseek_chat = Model(
+ name = 'deepseek-chat',
+ base_provider = 'DeepSeek',
+ best_provider = IterListProvider([FreeChatgpt])
+)
+
+
+### Qwen ###
+Qwen2_7B_Instruct = Model(
+ name = 'Qwen2-7B-Instruct',
+ base_provider = 'Qwen',
+ best_provider = IterListProvider([FreeChatgpt])
+)
+
+
+### Zhipu AI ###
+glm4_9B_chat = Model(
+ name = 'glm4-9B-chat',
+ base_provider = 'Zhipu AI',
+ best_provider = IterListProvider([FreeChatgpt])
+)
+
+chatglm3_6B = Model(
+ name = 'chatglm3-6B',
+ base_provider = 'Zhipu AI',
+ best_provider = IterListProvider([FreeChatgpt])
+)
+
+
+### 01-ai ###
+Yi_1_5_9B_Chat = Model(
+ name = 'Yi-1.5-9B-Chat',
+ base_provider = '01-ai',
+ best_provider = IterListProvider([FreeChatgpt])
+)
+
+
### Other ###
pi = Model(
name = 'pi',
@@ -364,14 +465,27 @@ pi = Model(
sdxl = Model(
name = 'stability-ai/sdxl',
base_provider = 'Stability AI',
- best_provider = IterListProvider([ReplicateHome, DeepInfraImage])
+ best_provider = IterListProvider([DeepInfraImage])
+)
+
+stable_diffusion_3 = Model(
+ name = 'stability-ai/stable-diffusion-3',
+ base_provider = 'Stability AI',
+ best_provider = IterListProvider([ReplicateHome])
+)
+
+sdxl_lightning_4step = Model(
+ name = 'bytedance/sdxl-lightning-4step',
+ base_provider = 'Stability AI',
+ best_provider = IterListProvider([ReplicateHome])
)
-### AI Forever ###
-kandinsky_2_2 = Model(
- name = 'ai-forever/kandinsky-2.2',
- base_provider = 'AI Forever',
+playground_v2_5_1024px_aesthetic = Model(
+ name = 'playgroundai/playground-v2.5-1024px-aesthetic',
+ base_provider = 'Stability AI',
best_provider = IterListProvider([ReplicateHome])
)
@@ -385,12 +499,12 @@ class ModelUtils:
"""
convert: dict[str, Model] = {
- ############
- ### Text ###
- ############
+ ############
+ ### Text ###
+ ############
- ### OpenAI ###
- ### GPT-3.5 / GPT-4 ###
+ ### OpenAI ###
+ ### GPT-3.5 / GPT-4 ###
# gpt-3.5
'gpt-3.5-turbo' : gpt_35_turbo,
'gpt-3.5-turbo-0613' : gpt_35_turbo_0613,
@@ -400,6 +514,7 @@ class ModelUtils:
# gpt-4
'gpt-4o' : gpt_4o,
+ 'gpt-4o-mini' : gpt_4o_mini,
'gpt-4' : gpt_4,
'gpt-4-0613' : gpt_4_0613,
'gpt-4-32k' : gpt_4_32k,
@@ -407,14 +522,16 @@ class ModelUtils:
'gpt-4-turbo' : gpt_4_turbo,
- ### Meta ###
+ ### Meta ###
"meta-ai": meta,
- 'llama-2-70b-chat': llama_2_70b_chat,
'llama3-8b': llama3_8b_instruct, # alias
'llama3-70b': llama3_70b_instruct, # alias
'llama3-8b-instruct' : llama3_8b_instruct,
'llama3-70b-instruct': llama3_70b_instruct,
+ 'llama-3.1-70b-Instruct': llama_3_1_70b_Instruct,
+ 'llama-3.1-405B-Instruct-FP8': llama_3_1_405b_Instruct_FP8,
+
'codellama-34b-instruct': codellama_34b_instruct,
'codellama-70b-instruct': codellama_70b_instruct,
@@ -426,31 +543,36 @@ class ModelUtils:
### NousResearch ###
- 'Nous-Hermes-2-Mixtral-8x7B-DPO': Nous_Hermes_2_Mixtral_8x7B_DPO,
+ 'Nous-Hermes-2-Mixtral-8x7B-DPO': Nous_Hermes_2_Mixtral_8x7B_DPO,
- ### 01-ai ###
- 'Yi-1.5-34B-Chat': Yi_1_5_34B_Chat,
+ ### 01-ai ###
+ 'Yi-1.5-34B-Chat': Yi_1_5_34B_Chat,
- ### Microsoft ###
- 'Phi-3-mini-4k-instruct': Phi_3_mini_4k_instruct,
+ ### Microsoft ###
+ 'Phi-3-mini-4k-instruct': Phi_3_mini_4k_instruct,
### Google ###
# gemini
'gemini': gemini,
'gemini-pro': gemini_pro,
+ 'gemini-flash': gemini_flash,
# gemma
+ 'gemma-2b-it': gemma_2b_it,
'gemma-2-9b-it': gemma_2_9b_it,
'gemma-2-27b-it': gemma_2_27b_it,
### Anthropic ###
- 'claude-v2': claude_v2,
+ 'claude-2': claude_2,
+ 'claude-2.0': claude_2_0,
+ 'claude-2.1': claude_2_1,
'claude-3-opus': claude_3_opus,
'claude-3-sonnet': claude_3_sonnet,
+ 'claude-3-5-sonnet': claude_3_5_sonnet,
'claude-3-haiku': claude_3_haiku,
@@ -462,11 +584,11 @@ class ModelUtils:
'nemotron-4-340b-instruct': nemotron_4_340b_instruct,
- ### Blackbox ###
- 'blackbox': blackbox,
+ ### Blackbox ###
+ 'blackbox': blackbox,
- ### CohereForAI ###
+ ### CohereForAI ###
'command-r+': command_r_plus,
@@ -474,24 +596,54 @@ class ModelUtils:
'dbrx-instruct': dbrx_instruct,
- ### GigaChat ###
+ ### GigaChat ###
'gigachat': gigachat,
+ ### iFlytek ###
+ 'SparkDesk-v1.1': SparkDesk_v1_1,
+
+
+ ### DeepSeek ###
+ 'deepseek-coder': deepseek_coder,
+ 'deepseek-chat': deepseek_chat,
+
+
+ ### ### Qwen ### ###
+ 'Qwen2-7B-Instruct': Qwen2_7B_Instruct,
+
+
+ ### Zhipu AI ###
+ 'glm4-9B-chat': glm4_9B_chat,
+ 'chatglm3-6B': chatglm3_6B,
+
+
+ ### 01-ai ###
+ 'Yi-1.5-9B-Chat': Yi_1_5_9B_Chat,
+
+
# Other
'pi': pi,
#############
- ### Image ###
- #############
+ ### Image ###
+ #############
- ### Stability AI ###
+ ### Stability AI ###
'sdxl': sdxl,
+ 'stable-diffusion-3': stable_diffusion_3,
+
+ ### ByteDance ###
+ 'sdxl-lightning-4step': sdxl_lightning_4step,
- ### AI Forever ###
- 'kandinsky-2.2': kandinsky_2_2,
+ ### ByteDance ###
+ 'sdxl-lightning-4step': sdxl_lightning_4step,
+
+ ### Playground ###
+ 'playground-v2.5-1024px-aesthetic': playground_v2_5_1024px_aesthetic,
+
}
_all_models = list(ModelUtils.convert.keys())
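After this commit the registry resolves the new names like any other alias; a quick lookup against ModelUtils.convert (assuming the package imports cleanly):

    from g4f.models import ModelUtils, _all_models

    model = ModelUtils.convert["gpt-4o-mini"]
    print(model.name, model.base_provider)  # gpt-4o-mini openai
    print("stable-diffusion-3" in _all_models)  # True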
--
cgit v1.2.3
From 32d471d0ed903977d055f51146c9323a0830c8e2 Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Sat, 27 Jul 2024 12:23:56 +0300
Subject: Update g4f/models.py and g4f/Provider/needs_auth/OpenaiChat.py
---
g4f/Provider/needs_auth/OpenaiChat.py | 5 +----
g4f/models.py | 2 --
2 files changed, 1 insertion(+), 6 deletions(-)
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index e581cf55..82462040 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -55,16 +55,13 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
label = "OpenAI ChatGPT"
url = "https://chatgpt.com"
working = True
- supports_gpt_35_turbo = True
supports_gpt_4 = True
supports_message_history = True
supports_system_message = True
default_model = None
default_vision_model = "gpt-4o"
- models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo", "gpt-4o", "gpt-4o-mini", "auto"]
+ models = [ "auto", "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-gizmo"]
model_aliases = {
- "text-davinci-002-render-sha": "gpt-3.5-turbo",
- "": "gpt-3.5-turbo",
"gpt-4-turbo-preview": "gpt-4",
"dall-e": "gpt-4",
}
diff --git a/g4f/models.py b/g4f/models.py
index 0c66fd66..162182bd 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -80,7 +80,6 @@ gpt_35_long = Model(
best_provider = IterListProvider([
FreeGpt,
You,
- OpenaiChat,
Koala,
ChatgptFree,
FreeChatgpt,
@@ -105,7 +104,6 @@ gpt_35_turbo = Model(
FreeGpt,
You,
Koala,
- OpenaiChat,
ChatgptFree,
FreeChatgpt,
DDG,
--
cgit v1.2.3
From 5141e6238c9178facbd06af9c1bef1050bfcbe3d Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Sat, 27 Jul 2024 16:20:20 +0300
Subject: Fixed error in g4f/models.py
---
g4f/models.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/g4f/models.py b/g4f/models.py
index 162182bd..cf1e93c3 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -136,7 +136,7 @@ gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = IterListProvider([
- Bing, Liaobots, Chatgpt4Online
+ Bing, Chatgpt4Online
])
)
@@ -161,7 +161,9 @@ gpt_4_32k_0613 = Model(
gpt_4_turbo = Model(
name = 'gpt-4-turbo',
base_provider = 'openai',
- best_provider = Bing
+ best_provider = IterListProvider([
+ Bing, Liaobots
+ ])
)
gpt_4o = Model(
--
cgit v1.2.3
From 7e5744677d41269c1912ed3f209f77d71e580c5f Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Sat, 27 Jul 2024 16:30:07 +0300
Subject: Fixed error in g4f/models.py
---
g4f/models.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/g4f/models.py b/g4f/models.py
index cf1e93c3..21079507 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -161,9 +161,7 @@ gpt_4_32k_0613 = Model(
gpt_4_turbo = Model(
name = 'gpt-4-turbo',
base_provider = 'openai',
- best_provider = IterListProvider([
- Bing, Liaobots
- ])
+ best_provider = Bing
)
gpt_4o = Model(
--
cgit v1.2.3
From d57f77c2e73b99199797b92e0fd6747019facf5b Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Sun, 28 Jul 2024 13:34:28 +0300
Subject: Fixed a bug in the Liaobots provider
---
g4f/Provider/Liaobots.py | 118 +++++-----------------------------
g4f/models.py | 163 ++++++++++++++++++++++++-----------------------
2 files changed, 97 insertions(+), 184 deletions(-)
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 0cb5edff..af90860d 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -57,115 +57,26 @@ models = {
},
"gpt-4-0613": {
"id": "gpt-4-0613",
- "name": "GPT-4-0613",
+ "name": "GPT-4",
"model": "ChatGPT",
"provider": "OpenAI",
- "maxLength": 32000,
- "tokenLimit": 7600,
- "context": "8K",
- },
- "claude-3-opus-20240229": {
- "id": "claude-3-opus-20240229",
- "name": "Claude-3-Opus",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-3-opus-20240229-aws": {
- "id": "claude-3-opus-20240229-aws",
- "name": "Claude-3-Opus-Aws",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-3-opus-100k-poe": {
- "id": "claude-3-opus-100k-poe",
- "name": "Claude-3-Opus-100k-Poe",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 400000,
- "tokenLimit": 99000,
- "context": "100K",
- },
- "claude-3-sonnet-20240229": {
- "id": "claude-3-sonnet-20240229",
- "name": "Claude-3-Sonnet",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-3-5-sonnet-20240620": {
- "id": "claude-3-5-sonnet-20240620",
- "name": "Claude-3.5-Sonnet",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-3-haiku-20240307": {
- "id": "claude-3-haiku-20240307",
- "name": "Claude-3-Haiku",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-2.1": {
- "id": "claude-2.1",
- "name": "Claude-2.1-200k",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-2.0": {
- "id": "claude-2.0",
- "name": "Claude-2.0-100k",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 400000,
- "tokenLimit": 100000,
- "context": "100K",
- },
- "gemini-1.0-pro-latest": {
- "id": "gemini-1.0-pro-latest",
- "name": "Gemini-Pro",
- "model": "Gemini",
- "provider": "Google",
- "maxLength": 120000,
- "tokenLimit": 30000,
- "context": "32K",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
},
- "gemini-1.5-flash-latest": {
- "id": "gemini-1.5-flash-latest",
- "name": "Gemini-1.5-Flash-1M",
- "model": "Gemini",
- "provider": "Google",
- "maxLength": 4000000,
- "tokenLimit": 1000000,
- "context": "1024K",
+ "gpt-4-turbo": {
+ "id": "gpt-4-turbo",
+ "name": "GPT-4-Turbo",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
},
- "gemini-1.5-pro-latest": {
- "id": "gemini-1.5-pro-latest",
- "name": "Gemini-1.5-Pro-1M",
- "model": "Gemini",
- "provider": "Google",
- "maxLength": 4000000,
- "tokenLimit": 1000000,
- "context": "1024K",
- }
}
+
class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://liaobots.site"
working = True
@@ -178,13 +89,14 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-free",
"gpt-4o": "gpt-4o-free",
+ "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
+ "gpt-4-": "gpt-4-0613",
"claude-3-opus": "claude-3-opus-20240229",
"claude-3-opus": "claude-3-opus-20240229-aws",
"claude-3-opus": "claude-3-opus-20240229-gcp",
"claude-3-sonnet": "claude-3-sonnet-20240229",
"claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
"claude-3-haiku": "claude-3-haiku-20240307",
- "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
"gemini-pro": "gemini-1.5-pro-latest",
"gemini-pro": "gemini-1.0-pro-latest",
"gemini-flash": "gemini-1.5-flash-latest",
diff --git a/g4f/models.py b/g4f/models.py
index 21079507..f837223a 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -161,9 +161,10 @@ gpt_4_32k_0613 = Model(
gpt_4_turbo = Model(
name = 'gpt-4-turbo',
base_provider = 'openai',
- best_provider = Bing
+ best_provider = IterListProvider([
+ Bing, Liaobots
+ ])
)
-
gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'openai',
@@ -497,50 +498,50 @@ class ModelUtils:
"""
convert: dict[str, Model] = {
- ############
- ### Text ###
- ############
+############
+### Text ###
+############
### OpenAI ###
### GPT-3.5 / GPT-4 ###
- # gpt-3.5
- 'gpt-3.5-turbo' : gpt_35_turbo,
- 'gpt-3.5-turbo-0613' : gpt_35_turbo_0613,
- 'gpt-3.5-turbo-16k' : gpt_35_turbo_16k,
- 'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
- 'gpt-3.5-long': gpt_35_long,
-
- # gpt-4
- 'gpt-4o' : gpt_4o,
- 'gpt-4o-mini' : gpt_4o_mini,
- 'gpt-4' : gpt_4,
- 'gpt-4-0613' : gpt_4_0613,
- 'gpt-4-32k' : gpt_4_32k,
- 'gpt-4-32k-0613' : gpt_4_32k_0613,
- 'gpt-4-turbo' : gpt_4_turbo,
+ # gpt-3.5
+ 'gpt-3.5-turbo' : gpt_35_turbo,
+ 'gpt-3.5-turbo-0613' : gpt_35_turbo_0613,
+ 'gpt-3.5-turbo-16k' : gpt_35_turbo_16k,
+ 'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
+ 'gpt-3.5-long': gpt_35_long,
+
+ # gpt-4
+ 'gpt-4o' : gpt_4o,
+ 'gpt-4o-mini' : gpt_4o_mini,
+ 'gpt-4' : gpt_4,
+ 'gpt-4-0613' : gpt_4_0613,
+ 'gpt-4-32k' : gpt_4_32k,
+ 'gpt-4-32k-0613' : gpt_4_32k_0613,
+ 'gpt-4-turbo' : gpt_4_turbo,
### Meta ###
- "meta-ai": meta,
+ "meta-ai": meta,
- 'llama3-8b': llama3_8b_instruct, # alias
- 'llama3-70b': llama3_70b_instruct, # alias
- 'llama3-8b-instruct' : llama3_8b_instruct,
- 'llama3-70b-instruct': llama3_70b_instruct,
- 'llama-3.1-70b-Instruct': llama_3_1_70b_Instruct,
- 'llama-3.1-405B-Instruct-FP8': llama_3_1_405b_Instruct_FP8,
+ 'llama3-8b': llama3_8b_instruct, # alias
+ 'llama3-70b': llama3_70b_instruct, # alias
+ 'llama3-8b-instruct' : llama3_8b_instruct,
+ 'llama3-70b-instruct': llama3_70b_instruct,
+ 'llama-3.1-70b-Instruct': llama_3_1_70b_Instruct,
+ 'llama-3.1-405B-Instruct-FP8': llama_3_1_405b_Instruct_FP8,
- 'codellama-34b-instruct': codellama_34b_instruct,
- 'codellama-70b-instruct': codellama_70b_instruct,
+ 'codellama-34b-instruct': codellama_34b_instruct,
+ 'codellama-70b-instruct': codellama_70b_instruct,
- ### Mistral (Opensource) ###
- 'mixtral-8x7b': mixtral_8x7b,
- 'mistral-7b-v02': mistral_7b_v02,
+ ### Mistral (Opensource) ###
+ 'mixtral-8x7b': mixtral_8x7b,
+ 'mistral-7b-v02': mistral_7b_v02,
- ### NousResearch ###
+ ### NousResearch ###
'Nous-Hermes-2-Mixtral-8x7B-DPO': Nous_Hermes_2_Mixtral_8x7B_DPO,
@@ -552,95 +553,95 @@ class ModelUtils:
'Phi-3-mini-4k-instruct': Phi_3_mini_4k_instruct,
- ### Google ###
- # gemini
- 'gemini': gemini,
- 'gemini-pro': gemini_pro,
- 'gemini-flash': gemini_flash,
-
- # gemma
- 'gemma-2b-it': gemma_2b_it,
- 'gemma-2-9b-it': gemma_2_9b_it,
- 'gemma-2-27b-it': gemma_2_27b_it,
+ ### Google ###
+ # gemini
+ 'gemini': gemini,
+ 'gemini-pro': gemini_pro,
+ 'gemini-flash': gemini_flash,
+
+ # gemma
+ 'gemma-2b-it': gemma_2b_it,
+ 'gemma-2-9b-it': gemma_2_9b_it,
+ 'gemma-2-27b-it': gemma_2_27b_it,
- ### Anthropic ###
- 'claude-2': claude_2,
- 'claude-2.0': claude_2_0,
- 'claude-2.1': claude_2_1,
- 'claude-3-opus': claude_3_opus,
- 'claude-3-sonnet': claude_3_sonnet,
- 'claude-3-5-sonnet': claude_3_5_sonnet,
- 'claude-3-haiku': claude_3_haiku,
+ ### Anthropic ###
+ 'claude-2': claude_2,
+ 'claude-2.0': claude_2_0,
+ 'claude-2.1': claude_2_1,
+ 'claude-3-opus': claude_3_opus,
+ 'claude-3-sonnet': claude_3_sonnet,
+ 'claude-3-5-sonnet': claude_3_5_sonnet,
+ 'claude-3-haiku': claude_3_haiku,
- ### Reka AI ###
- 'reka': reka_core,
+ ### Reka AI ###
+ 'reka': reka_core,
- ### NVIDIA ###
- 'nemotron-4-340b-instruct': nemotron_4_340b_instruct,
-
+ ### NVIDIA ###
+ 'nemotron-4-340b-instruct': nemotron_4_340b_instruct,
+
### Blackbox ###
'blackbox': blackbox,
### CohereForAI ###
- 'command-r+': command_r_plus,
+ 'command-r+': command_r_plus,
- ### Databricks ###
- 'dbrx-instruct': dbrx_instruct,
+ ### Databricks ###
+ 'dbrx-instruct': dbrx_instruct,
### GigaChat ###
- 'gigachat': gigachat,
+ 'gigachat': gigachat,
### iFlytek ###
- 'SparkDesk-v1.1': SparkDesk_v1_1,
+ 'SparkDesk-v1.1': SparkDesk_v1_1,
### DeepSeek ###
- 'deepseek-coder': deepseek_coder,
- 'deepseek-chat': deepseek_chat,
-
+ 'deepseek-coder': deepseek_coder,
+ 'deepseek-chat': deepseek_chat,
- ### ### Qwen ### ###
- 'Qwen2-7B-Instruct': Qwen2_7B_Instruct,
+ ### ### Qwen ### ###
+ 'Qwen2-7B-Instruct': Qwen2_7B_Instruct,
- ### Zhipu AI ###
- 'glm4-9B-chat': glm4_9B_chat,
- 'chatglm3-6B': chatglm3_6B,
+ ### Zhipu AI ###
+ 'glm4-9B-chat': glm4_9B_chat,
+ 'chatglm3-6B': chatglm3_6B,
- ### 01-ai ###
- 'Yi-1.5-9B-Chat': Yi_1_5_9B_Chat,
+ ### 01-ai ###
+ 'Yi-1.5-9B-Chat': Yi_1_5_9B_Chat,
- # Other
- 'pi': pi,
+ # Other
+ 'pi': pi,
+
- #############
- ### Image ###
- #############
+#############
+### Image ###
+#############
### Stability AI ###
- 'sdxl': sdxl,
- 'stable-diffusion-3': stable_diffusion_3,
+ 'sdxl': sdxl,
+ 'stable-diffusion-3': stable_diffusion_3,
### ByteDance ###
- 'sdxl-lightning-4step': sdxl_lightning_4step,
+ 'sdxl-lightning-4step': sdxl_lightning_4step,
### ByteDance ###
- 'sdxl-lightning-4step': sdxl_lightning_4step,
+ 'sdxl-lightning-4step': sdxl_lightning_4step,
### Playground ###
- 'playground-v2.5-1024px-aesthetic': playground_v2_5_1024px_aesthetic,
+ 'playground-v2.5-1024px-aesthetic': playground_v2_5_1024px_aesthetic,
}
--
cgit v1.2.3
From 15fdd2cb069f47f468f330d40f0d34c2cd18ebc8 Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Sun, 28 Jul 2024 19:24:48 +0300
Subject: Added Marsyoo provider with support for the gpt-4o model and updated
models.py
---
g4f/Provider/Marsyoo.py | 64 ++++++++++++++++++++++++++++++++++++++++++++++++
g4f/Provider/__init__.py | 1 +
g4f/models.py | 3 ++-
3 files changed, 67 insertions(+), 1 deletion(-)
create mode 100644 g4f/Provider/Marsyoo.py
diff --git a/g4f/Provider/Marsyoo.py b/g4f/Provider/Marsyoo.py
new file mode 100644
index 00000000..1c5fa9fd
--- /dev/null
+++ b/g4f/Provider/Marsyoo.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession, ClientResponseError
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Marsyoo(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://aiagent.marsyoo.com"
+ api_endpoint = "/api/chat-messages"
+ working = True
+ supports_gpt_4 = True
+ default_model = 'gpt-4o'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.9",
+ "Connection": "keep-alive",
+ "DNT": "1",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/chat",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ "authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI0MWNkOTE3MS1mNTg1LTRjMTktOTY0Ni01NzgxMTBjYWViNTciLCJzdWIiOiJXZWIgQVBJIFBhc3Nwb3J0IiwiYXBwX2lkIjoiNDFjZDkxNzEtZjU4NS00YzE5LTk2NDYtNTc4MTEwY2FlYjU3IiwiYXBwX2NvZGUiOiJMakhzdWJqNjhMTXZCT0JyIiwiZW5kX3VzZXJfaWQiOiI4YjE5YjY2Mi05M2E1LTRhYTktOGNjNS03MDhmNWE0YmQxNjEifQ.pOzdQ4wTrQjjRlEv1XY9TZitkW5KW1K-wbcUJAoBJ5I",
+ "content-type": "application/json",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": "Linux",
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "response_mode": "streaming",
+ "query": prompt,
+ "inputs": {},
+ }
+ try:
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line:
+ try:
+ json_data = json.loads(line.decode('utf-8').strip().removeprefix('data: '))
+ if json_data['event'] == 'message':
+ yield json_data['answer']
+ elif json_data['event'] == 'message_end':
+ return
+ except json.JSONDecodeError:
+ continue
+ except ClientResponseError as e:
+ yield f"Error: HTTP {e.status}: {e.message}"
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 0bcab3f2..c47ae823 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -37,6 +37,7 @@ from .Koala import Koala
from .Liaobots import Liaobots
from .Llama import Llama
from .Local import Local
+from .Marsyoo import Marsyoo
from .MetaAI import MetaAI
from .MetaAIAccount import MetaAIAccount
from .Ollama import Ollama
diff --git a/g4f/models.py b/g4f/models.py
index f837223a..a8d9317a 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -25,6 +25,7 @@ from .Provider import (
HuggingFace,
Koala,
Liaobots,
+ Marsyoo,
MetaAI,
OpenaiChat,
PerplexityLabs,
@@ -169,7 +170,7 @@ gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'openai',
best_provider = IterListProvider([
- You, Liaobots, Chatgpt4o, AI365VIP, OpenaiChat
+ You, Liaobots, Chatgpt4o, AI365VIP, OpenaiChat, Marsyoo
])
)
--
cgit v1.2.3
From 51faa61f236b90efcb865eda1eb07b04e7416fc6 Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Mon, 29 Jul 2024 01:37:16 +0300
Subject: Updates, improvements, and corrections to models.py and index.html
---
g4f/gui/client/index.html | 4 +-
g4f/models.py | 302 ++++++++++++++++++++--------------------------
2 files changed, 136 insertions(+), 170 deletions(-)
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index a2f883d9..1a660062 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -229,8 +229,8 @@
[HTML lines elided by the page renderer: two removed, two added]
diff --git a/g4f/models.py b/g4f/models.py
index a8d9317a..bcc50b9f 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -6,38 +6,38 @@ from .Provider import IterListProvider, ProviderType
from .Provider import (
AI365VIP,
Allyfy,
- Bing,
- Blackbox,
- ChatGot,
- Chatgpt4o,
- Chatgpt4Online,
- ChatgptFree,
- DDG,
- DeepInfra,
- DeepInfraImage,
- FreeChatgpt,
- FreeGpt,
- Gemini,
- GeminiPro,
- GeminiProChat,
- GigaChat,
- HuggingChat,
- HuggingFace,
- Koala,
- Liaobots,
- Marsyoo,
- MetaAI,
- OpenaiChat,
- PerplexityLabs,
- Pi,
- Pizzagpt,
- Reka,
- Replicate,
- ReplicateHome,
- Vercel,
- You,
+ Bing,
+ Blackbox,
+ ChatGot,
+ Chatgpt4o,
+ Chatgpt4Online,
+ ChatgptFree,
+ DDG,
+ DeepInfra,
+ DeepInfraImage,
+ FreeChatgpt,
+ FreeGpt,
+ Gemini,
+ GeminiPro,
+ GeminiProChat,
+ GigaChat,
+ HuggingChat,
+ HuggingFace,
+ Koala,
+ Liaobots,
+ Marsyoo,
+ MetaAI,
+ OpenaiChat,
+ PerplexityLabs,
+ Pi,
+ Pizzagpt,
+ Reka,
+ Replicate,
+ ReplicateHome,
+ You,
)
+
@dataclass(unsafe_hash=True)
class Model:
"""
@@ -198,40 +198,40 @@ meta = Model(
best_provider = MetaAI
)
-llama3_8b_instruct = Model(
+llama_3_8b_instruct = Model(
name = "meta-llama/Meta-Llama-3-8B-Instruct",
base_provider = "meta",
best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
)
-llama3_70b_instruct = Model(
+llama_3_70b_instruct = Model(
name = "meta-llama/Meta-Llama-3-70B-Instruct",
base_provider = "meta",
- best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate, DDG, ReplicateHome])
+ best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
)
-llama_3_1_70b_Instruct = Model(
- name = "meta-llama/Meta-Llama-3.1-70B-Instruct",
+llama3_70b_instruct = Model(
+ name = "meta/meta-llama-3-70b-instruct",
base_provider = "meta",
- best_provider = IterListProvider([HuggingChat, HuggingFace])
+ best_provider = IterListProvider([ReplicateHome])
)
-llama_3_1_405b_Instruct_FP8 = Model(
- name = "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
+llama_3_70b_chat_hf = Model(
+ name = "meta-llama/Llama-3-70b-chat-hf",
base_provider = "meta",
- best_provider = IterListProvider([HuggingChat, HuggingFace])
+ best_provider = IterListProvider([DDG])
)
-codellama_34b_instruct = Model(
- name = "codellama/CodeLlama-34b-Instruct-hf",
+llama_3_1_70b_Instruct = Model(
+ name = "meta-llama/Meta-Llama-3.1-70B-Instruct",
base_provider = "meta",
- best_provider = HuggingChat
+ best_provider = IterListProvider([HuggingChat, HuggingFace])
)
-codellama_70b_instruct = Model(
- name = "codellama/CodeLlama-70b-Instruct-hf",
+llama_3_1_405b_Instruct_FP8 = Model(
+ name = "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
base_provider = "meta",
- best_provider = IterListProvider([DeepInfra])
+ best_provider = IterListProvider([HuggingChat, HuggingFace])
)
@@ -499,150 +499,116 @@ class ModelUtils:
"""
convert: dict[str, Model] = {
-############
-### Text ###
-############
-
- ### OpenAI ###
- ### GPT-3.5 / GPT-4 ###
- # gpt-3.5
- 'gpt-3.5-turbo' : gpt_35_turbo,
- 'gpt-3.5-turbo-0613' : gpt_35_turbo_0613,
- 'gpt-3.5-turbo-16k' : gpt_35_turbo_16k,
- 'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
- 'gpt-3.5-long': gpt_35_long,
-
- # gpt-4
- 'gpt-4o' : gpt_4o,
- 'gpt-4o-mini' : gpt_4o_mini,
- 'gpt-4' : gpt_4,
- 'gpt-4-0613' : gpt_4_0613,
- 'gpt-4-32k' : gpt_4_32k,
- 'gpt-4-32k-0613' : gpt_4_32k_0613,
- 'gpt-4-turbo' : gpt_4_turbo,
-
-
- ### Meta ###
- "meta-ai": meta,
-
- 'llama3-8b': llama3_8b_instruct, # alias
- 'llama3-70b': llama3_70b_instruct, # alias
- 'llama3-8b-instruct' : llama3_8b_instruct,
- 'llama3-70b-instruct': llama3_70b_instruct,
- 'llama-3.1-70b-Instruct': llama_3_1_70b_Instruct,
- 'llama-3.1-405B-Instruct-FP8': llama_3_1_405b_Instruct_FP8,
+ ############
+ ### Text ###
+ ############
-
- 'codellama-34b-instruct': codellama_34b_instruct,
- 'codellama-70b-instruct': codellama_70b_instruct,
-
-
- ### Mistral (Opensource) ###
- 'mixtral-8x7b': mixtral_8x7b,
- 'mistral-7b-v02': mistral_7b_v02,
+ ### OpenAI ###
+ ### GPT-3.5 / GPT-4 ###
+ # gpt-3.5
+ 'gpt-3.5-turbo': gpt_35_turbo,
+ 'gpt-3.5-long': gpt_35_long,
+
+ # gpt-4
+ 'gpt-4o' : gpt_4o,
+ 'gpt-4o-mini' : gpt_4o_mini,
+ 'gpt-4' : gpt_4,
+ 'gpt-4-turbo' : gpt_4_turbo,
+ ### Meta ###
+ "meta-ai": meta,
- ### NousResearch ###
- 'Nous-Hermes-2-Mixtral-8x7B-DPO': Nous_Hermes_2_Mixtral_8x7B_DPO,
-
-
- ### 01-ai ###
- 'Yi-1.5-34B-Chat': Yi_1_5_34B_Chat,
-
-
- ### Microsoft ###
- 'Phi-3-mini-4k-instruct': Phi_3_mini_4k_instruct,
-
-
- ### Google ###
- # gemini
- 'gemini': gemini,
- 'gemini-pro': gemini_pro,
- 'gemini-flash': gemini_flash,
-
- # gemma
- 'gemma-2b-it': gemma_2b_it,
- 'gemma-2-9b-it': gemma_2_9b_it,
- 'gemma-2-27b-it': gemma_2_27b_it,
-
-
- ### Anthropic ###
- 'claude-2': claude_2,
- 'claude-2.0': claude_2_0,
- 'claude-2.1': claude_2_1,
- 'claude-3-opus': claude_3_opus,
- 'claude-3-sonnet': claude_3_sonnet,
- 'claude-3-5-sonnet': claude_3_5_sonnet,
- 'claude-3-haiku': claude_3_haiku,
-
-
- ### Reka AI ###
- 'reka': reka_core,
-
-
- ### NVIDIA ###
- 'nemotron-4-340b-instruct': nemotron_4_340b_instruct,
-
-
- ### Blackbox ###
- 'blackbox': blackbox,
-
-
- ### CohereForAI ###
- 'command-r+': command_r_plus,
+ 'llama-3-8b': llama_3_8b_instruct,
+ 'llama-3-70b': llama_3_70b_instruct,
+ 'llama-3-70b-chat': llama_3_70b_chat_hf,
+ 'llama-3-70b-instruct': llama3_70b_instruct,
+ 'llama-3.1-70b': llama_3_1_70b_Instruct,
+ 'llama-3.1-405b': llama_3_1_405b_Instruct_FP8,
+ ### Mistral (Opensource) ###
+ 'mixtral-8x7b': mixtral_8x7b,
+ 'mistral-7b-v02': mistral_7b_v02,
- ### Databricks ###
- 'dbrx-instruct': dbrx_instruct,
-
+ ### NousResearch ###
+ 'Nous-Hermes-2-Mixtral-8x7B-DPO': Nous_Hermes_2_Mixtral_8x7B_DPO,
- ### GigaChat ###
- 'gigachat': gigachat,
-
-
- ### iFlytek ###
- 'SparkDesk-v1.1': SparkDesk_v1_1,
+ ### 01-ai ###
+ 'Yi-1.5-34b': Yi_1_5_34B_Chat,
+ ### Microsoft ###
+ 'Phi-3-mini-4k': Phi_3_mini_4k_instruct,
+
+ ### Google ###
+ # gemini
+ 'gemini': gemini,
+ 'gemini-pro': gemini_pro,
+ 'gemini-flash': gemini_flash,
- ### DeepSeek ###
- 'deepseek-coder': deepseek_coder,
- 'deepseek-chat': deepseek_chat,
+ # gemma
+ 'gemma-2b': gemma_2b_it,
+ 'gemma-2-9b': gemma_2_9b_it,
+ 'gemma-2-27b': gemma_2_27b_it,
+
+ ### Anthropic ###
+ 'claude-2': claude_2,
+ 'claude-2.0': claude_2_0,
+ 'claude-2.1': claude_2_1,
+ 'claude-3-opus': claude_3_opus,
+ 'claude-3-sonnet': claude_3_sonnet,
+ 'claude-3-5-sonnet': claude_3_5_sonnet,
+ 'claude-3-haiku': claude_3_haiku,
+
+ ### Reka AI ###
+ 'reka': reka_core,
+
+ ### NVIDIA ###
+ 'nemotron-4-340b': nemotron_4_340b_instruct,
+ ### Blackbox ###
+ 'blackbox': blackbox,
- ### ### Qwen ### ###
- 'Qwen2-7B-Instruct': Qwen2_7B_Instruct,
+ ### CohereForAI ###
+ 'command-r+': command_r_plus,
+ ### Databricks ###
+ 'dbrx-instruct': dbrx_instruct,
+
+ ### GigaChat ###
+ 'gigachat': gigachat,
- ### Zhipu AI ###
- 'glm4-9B-chat': glm4_9B_chat,
- 'chatglm3-6B': chatglm3_6B,
+ ### iFlytek ###
+ 'SparkDesk-v1.1': SparkDesk_v1_1,
+ ### DeepSeek ###
+ 'deepseek-coder': deepseek_coder,
+ 'deepseek-chat': deepseek_chat,
- ### 01-ai ###
- 'Yi-1.5-9B-Chat': Yi_1_5_9B_Chat,
+ ### Qwen ###
+ 'Qwen2-7b': Qwen2_7B_Instruct,
+ ### Zhipu AI ###
+ 'glm4-9b': glm4_9B_chat,
+ 'chatglm3-6b': chatglm3_6B,
- # Other
- 'pi': pi,
-
+ ### 01-ai ###
+ 'Yi-1.5-9b': Yi_1_5_9B_Chat,
+ # Other
+ 'pi': pi,
-#############
-### Image ###
-#############
-
- ### Stability AI ###
- 'sdxl': sdxl,
- 'stable-diffusion-3': stable_diffusion_3,
+ #############
+ ### Image ###
+ #############
- ### ByteDance ###
- 'sdxl-lightning-4step': sdxl_lightning_4step,
+ ### Stability AI ###
+ 'sdxl': sdxl,
+ 'stable-diffusion-3': stable_diffusion_3,
- ### ByteDance ###
- 'sdxl-lightning-4step': sdxl_lightning_4step,
+ ### ByteDance ###
+ 'sdxl-lightning': sdxl_lightning_4step,
- ### Playground ###
- 'playground-v2.5-1024px-aesthetic': playground_v2_5_1024px_aesthetic,
+ ### Playground ###
+ 'playground-v2.5': playground_v2_5_1024px_aesthetic,
}
--
cgit v1.2.3
From e3fcb87dc7ad0e932a0164e016095e1528a09fb9 Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Mon, 29 Jul 2024 21:48:38 +0300
Subject: Adding a new LiteIcoding provider
---
g4f/Provider/LiteIcoding.py | 97 +++++++++++++++++++++++++++++++++++++++++
g4f/models.py | 102 ++++++++++++++++++++------------------------
2 files changed, 143 insertions(+), 56 deletions(-)
create mode 100644 g4f/Provider/LiteIcoding.py
diff --git a/g4f/Provider/LiteIcoding.py b/g4f/Provider/LiteIcoding.py
new file mode 100644
index 00000000..6aa407ca
--- /dev/null
+++ b/g4f/Provider/LiteIcoding.py
@@ -0,0 +1,97 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession, ClientResponseError
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://lite.icoding.ink"
+ api_endpoint = "/api/v1/gpt/message"
+ working = True
+ supports_gpt_4 = True
+ default_model = "gpt-4o"
+ models = [
+ 'gpt-4o',
+ 'gpt-4-turbo',
+ 'claude-3',
+ 'claude-3.5',
+ 'gemini-1.5',
+ ]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.9",
+ "Authorization": "Bearer null",
+ "Connection": "keep-alive",
+ "Content-Type": "application/json;charset=utf-8",
+ "DNT": "1",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "User-Agent": (
+ "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
+ "Chrome/126.0.0.0 Safari/537.36"
+ ),
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ }
+
+ data = {
+ "model": model,
+ "chatId": "-1",
+ "messages": [
+ {
+ "role": msg["role"],
+ "content": msg["content"],
+ "time": msg.get("time", ""),
+ "attachments": msg.get("attachments", []),
+ }
+ for msg in messages
+ ],
+ "plugins": [],
+ "systemPrompt": "",
+ "temperature": 0.5,
+ }
+
+ async with ClientSession(headers=headers) as session:
+ try:
+ async with session.post(
+ f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy
+ ) as response:
+ response.raise_for_status()
+ buffer = ""
+ full_response = ""
+ async for chunk in response.content.iter_any():
+ if chunk:
+ buffer += chunk.decode()
+ while "\n\n" in buffer:
+ part, buffer = buffer.split("\n\n", 1)
+ if part.startswith("data: "):
+ content = part[6:].strip()
+ if content and content != "[DONE]":
+ content = content.strip('"')
+ full_response += content
+
+ full_response = full_response.replace('" "', ' ')
+ yield full_response.strip()
+
+ except ClientResponseError as e:
+ raise RuntimeError(
+ f"ClientResponseError {e.status}: {e.message}, url={e.request_info.url}, data={data}"
+ ) from e
+
+ except Exception as e:
+ raise RuntimeError(f"Unexpected error: {str(e)}") from e
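
The provider above parses server-sent events by splitting the buffered stream on blank lines, accumulating the `data:` payloads, and yielding the joined text once at the end. A quick smoke test (a sketch, assuming the module is installed as g4f/Provider/LiteIcoding.py and the upstream endpoint is reachable):

    import asyncio
    from g4f.Provider.LiteIcoding import LiteIcoding

    async def main() -> None:
        messages = [{"role": "user", "content": "Say hello in one sentence."}]
        # create_async_generator is an async generator function, so it is
        # iterated directly rather than awaited.
        async for text in LiteIcoding.create_async_generator("gpt-4o", messages):
            print(text)

    asyncio.run(main())
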
diff --git a/g4f/models.py b/g4f/models.py
index bcc50b9f..c963115a 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -25,6 +25,7 @@ from .Provider import (
HuggingFace,
Koala,
Liaobots,
+ LiteIcoding,
Marsyoo,
MetaAI,
OpenaiChat,
@@ -114,24 +115,6 @@ gpt_35_turbo = Model(
])
)
-gpt_35_turbo_16k = Model(
- name = 'gpt-3.5-turbo-16k',
- base_provider = 'openai',
- best_provider = gpt_35_long.best_provider
-)
-
-gpt_35_turbo_16k_0613 = Model(
- name = 'gpt-3.5-turbo-16k-0613',
- base_provider = 'openai',
- best_provider = gpt_35_long.best_provider
-)
-
-gpt_35_turbo_0613 = Model(
- name = 'gpt-3.5-turbo-0613',
- base_provider = 'openai',
- best_provider = gpt_35_turbo.best_provider
-)
-
# gpt-4
gpt_4 = Model(
name = 'gpt-4',
@@ -141,36 +124,18 @@ gpt_4 = Model(
])
)
-gpt_4_0613 = Model(
- name = 'gpt-4-0613',
- base_provider = 'openai',
- best_provider = gpt_4.best_provider
-)
-
-gpt_4_32k = Model(
- name = 'gpt-4-32k',
- base_provider = 'openai',
- best_provider = gpt_4.best_provider
-)
-
-gpt_4_32k_0613 = Model(
- name = 'gpt-4-32k-0613',
- base_provider = 'openai',
- best_provider = gpt_4.best_provider
-)
-
gpt_4_turbo = Model(
name = 'gpt-4-turbo',
base_provider = 'openai',
best_provider = IterListProvider([
- Bing, Liaobots
+ Bing, Liaobots, LiteIcoding
])
)
gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'openai',
best_provider = IterListProvider([
- You, Liaobots, Chatgpt4o, AI365VIP, OpenaiChat, Marsyoo
+ You, Liaobots, Chatgpt4o, AI365VIP, OpenaiChat, Marsyoo, LiteIcoding
])
)
@@ -210,7 +175,7 @@ llama_3_70b_instruct = Model(
best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
)
-llama3_70b_instruct = Model(
+llama_3_70b_instruct = Model(
name = "meta/meta-llama-3-70b-instruct",
base_provider = "meta",
best_provider = IterListProvider([ReplicateHome])
@@ -222,13 +187,13 @@ llama_3_70b_chat_hf = Model(
best_provider = IterListProvider([DDG])
)
-llama_3_1_70b_Instruct = Model(
+llama_3_1_70b_instruct = Model(
name = "meta-llama/Meta-Llama-3.1-70B-Instruct",
base_provider = "meta",
best_provider = IterListProvider([HuggingChat, HuggingFace])
)
-llama_3_1_405b_Instruct_FP8 = Model(
+llama_3_1_405b_instruct_FP8 = Model(
name = "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
base_provider = "meta",
best_provider = IterListProvider([HuggingChat, HuggingFace])
@@ -258,7 +223,7 @@ Nous_Hermes_2_Mixtral_8x7B_DPO = Model(
### 01-ai ###
-Yi_1_5_34B_Chat = Model(
+Yi_1_5_34B_chat = Model(
name = "01-ai/Yi-1.5-34B-Chat",
base_provider = "01-ai",
best_provider = IterListProvider([HuggingFace, HuggingChat])
@@ -284,7 +249,7 @@ gemini = Model(
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'Google',
- best_provider = IterListProvider([GeminiPro, You, ChatGot, GeminiProChat, Liaobots])
+ best_provider = IterListProvider([GeminiPro, You, ChatGot, GeminiProChat, Liaobots, LiteIcoding])
)
gemini_flash = Model(
@@ -293,6 +258,12 @@ gemini_flash = Model(
best_provider = IterListProvider([Liaobots])
)
+gemini_1_5 = Model(
+ name = 'gemini-1.5',
+ base_provider = 'Google',
+ best_provider = IterListProvider([LiteIcoding])
+)
+
# gemma
gemma_2b_it = Model(
name = 'gemma-2b-it',
@@ -356,6 +327,18 @@ claude_3_haiku = Model(
best_provider = IterListProvider([DDG, AI365VIP, Liaobots])
)
+claude_3 = Model(
+ name = 'claude-3',
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([LiteIcoding])
+)
+
+claude_3_5 = Model(
+ name = 'claude-3.5',
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([LiteIcoding])
+)
+
### Reka AI ###
reka_core = Model(
@@ -420,7 +403,7 @@ deepseek_chat = Model(
### Qwen ###
-Qwen2_7B_Instruct = Model(
+Qwen2_7B_instruct = Model(
name = 'Qwen2-7B-Instruct',
base_provider = 'Qwen',
best_provider = IterListProvider([FreeChatgpt])
@@ -442,7 +425,7 @@ chatglm3_6B = Model(
### 01-ai ###
-Yi_1_5_9B_Chat = Model(
+Yi_1_5_9B_chat = Model(
name = 'Yi-1.5-9B-Chat',
base_provider = '01-ai',
best_provider = IterListProvider([FreeChatgpt])
@@ -518,12 +501,11 @@ class ModelUtils:
### Meta ###
"meta-ai": meta,
- 'llama-3-8b': llama_3_8b_instruct,
- 'llama-3-70b': llama_3_70b_instruct,
+ 'llama-3-8b-instruct': llama_3_8b_instruct,
+ 'llama-3-70b-instruct': llama_3_70b_instruct,
'llama-3-70b-chat': llama_3_70b_chat_hf,
- 'llama-3-70b-instruct': llama3_70b_instruct,
- 'llama-3.1-70b': llama_3_1_70b_Instruct,
- 'llama-3.1-405b': llama_3_1_405b_Instruct_FP8,
+ 'llama-3.1-70b-instruct': llama_3_1_70b_instruct,
+ 'llama-3.1-405b-instruct': llama_3_1_405b_instruct_FP8,
### Mistral (Opensource) ###
'mixtral-8x7b': mixtral_8x7b,
@@ -533,15 +516,16 @@ class ModelUtils:
'Nous-Hermes-2-Mixtral-8x7B-DPO': Nous_Hermes_2_Mixtral_8x7B_DPO,
### 01-ai ###
- 'Yi-1.5-34b': Yi_1_5_34B_Chat,
+ 'Yi-1.5-34b-chat': Yi_1_5_34B_chat,
### Microsoft ###
- 'Phi-3-mini-4k': Phi_3_mini_4k_instruct,
+ 'Phi-3-mini-4k-instruct': Phi_3_mini_4k_instruct,
### Google ###
# gemini
'gemini': gemini,
'gemini-pro': gemini_pro,
+ 'gemini-1.5': gemini_1_5,
'gemini-flash': gemini_flash,
# gemma
@@ -553,16 +537,22 @@ class ModelUtils:
'claude-2': claude_2,
'claude-2.0': claude_2_0,
'claude-2.1': claude_2_1,
+
'claude-3-opus': claude_3_opus,
'claude-3-sonnet': claude_3_sonnet,
'claude-3-5-sonnet': claude_3_5_sonnet,
'claude-3-haiku': claude_3_haiku,
+
+ 'claude-3': claude_3,
+ 'claude-3.5': claude_3_5,
+
### Reka AI ###
'reka': reka_core,
### NVIDIA ###
- 'nemotron-4-340b': nemotron_4_340b_instruct,
+ 'nemotron-4-340b-instruct': nemotron_4_340b_instruct,
### Blackbox ###
'blackbox': blackbox,
@@ -584,14 +574,14 @@ class ModelUtils:
'deepseek-chat': deepseek_chat,
### Qwen ###
- 'Qwen2-7b': Qwen2_7B_Instruct,
+ 'Qwen2-7b-instruct': Qwen2_7B_instruct,
### Zhipu AI ###
- 'glm4-9b': glm4_9B_chat,
+ 'glm4-9b-chat': glm4_9B_chat,
'chatglm3-6b': chatglm3_6B,
### 01-ai ###
- 'Yi-1.5-9b': Yi_1_5_9B_Chat,
+ 'Yi-1.5-9b-chat': Yi_1_5_9B_chat,
# Other
'pi': pi,
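
Once these aliases are registered, the LiteIcoding-backed models are reachable through the usual entry points, for example (a sketch, assuming the g4f client API of this period):

    from g4f.client import Client

    client = Client()
    # 'gemini-1.5' resolves through ModelUtils.convert to the Model whose
    # best_provider list contains LiteIcoding.
    response = client.chat.completions.create(
        model="gemini-1.5",
        messages=[{"role": "user", "content": "One-sentence summary of SSE?"}],
    )
    print(response.choices[0].message.content)
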
--
cgit v1.2.3
From 1a9cbedf562afa4ef2bed2e33641686e20a8a464 Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Mon, 29 Jul 2024 21:59:53 +0300
Subject: Update g4f/Provider/__init__.py
---
g4f/Provider/__init__.py | 1 +
1 file changed, 1 insertion(+)
(limited to 'g4f')
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index c47ae823..f7f15a8e 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -33,8 +33,9 @@ from .GigaChat import GigaChat
from .GptTalkRu import GptTalkRu
from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
from .Koala import Koala
from .Liaobots import Liaobots
+from .LiteIcoding import LiteIcoding
from .Llama import Llama
from .Local import Local
from .Marsyoo import Marsyoo
--
cgit v1.2.3
From 0204ffd2b8947404cd908b70e63e18607cc95e31 Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Tue, 30 Jul 2024 01:15:36 +0300
Subject: Adding a new FreeNetfly provider
---
g4f/Provider/FreeNetfly.py | 107 +++++++++++++++++++++++++++++++++++++++++++++
g4f/Provider/__init__.py | 1 +
g4f/models.py | 3 +-
3 files changed, 110 insertions(+), 1 deletion(-)
create mode 100644 g4f/Provider/FreeNetfly.py
(limited to 'g4f')
diff --git a/g4f/Provider/FreeNetfly.py b/g4f/Provider/FreeNetfly.py
new file mode 100644
index 00000000..624f33cf
--- /dev/null
+++ b/g4f/Provider/FreeNetfly.py
@@ -0,0 +1,107 @@
+from __future__ import annotations
+
+import json
+import asyncio
+from aiohttp import ClientSession, ClientTimeout, ClientError
+from typing import AsyncGenerator
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://free.netfly.top"
+ api_endpoint = "/api/openai/v1/chat/completions"
+ working = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'gpt-4',
+ ]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ }
+ data = {
+ "messages": messages,
+ "stream": True,
+ "model": model,
+ "temperature": 0.5,
+ "presence_penalty": 0,
+ "frequency_penalty": 0,
+ "top_p": 1
+ }
+
+ max_retries = 3
+ retry_delay = 1
+
+ for attempt in range(max_retries):
+ try:
+ async with ClientSession(headers=headers) as session:
+ timeout = ClientTimeout(total=60)
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy, timeout=timeout) as response:
+ response.raise_for_status()
+ async for chunk in cls._process_response(response):
+ yield chunk
+ return # If successful, exit the function
+ except (ClientError, asyncio.TimeoutError) as e:
+ if attempt == max_retries - 1:
+ raise # If all retries failed, raise the last exception
+ await asyncio.sleep(retry_delay)
+ retry_delay *= 2 # Exponential backoff
+
+ @classmethod
+ async def _process_response(cls, response) -> AsyncGenerator[str, None]:
+ buffer = ""
+ async for line in response.content:
+ buffer += line.decode('utf-8')
+ if buffer.endswith('\n\n'):
+ for subline in buffer.strip().split('\n'):
+ if subline.startswith('data: '):
+ if subline == 'data: [DONE]':
+ return
+ try:
+ data = json.loads(subline[6:])
+ content = data['choices'][0]['delta'].get('content')
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ print(f"Failed to parse JSON: {subline}")
+ except KeyError:
+ print(f"Unexpected JSON structure: {data}")
+ buffer = ""
+
+ # Process any remaining data in the buffer
+ if buffer:
+ for subline in buffer.strip().split('\n'):
+ if subline.startswith('data: ') and subline != 'data: [DONE]':
+ try:
+ data = json.loads(subline[6:])
+ content = data['choices'][0]['delta'].get('content')
+ if content:
+ yield content
+ except (json.JSONDecodeError, KeyError):
+ pass
+
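
The request loop above retries transient failures with exponential backoff, doubling the delay after each failed attempt. The same pattern in isolation (a self-contained sketch, not provider-specific):

    import asyncio

    async def with_backoff(make_request, max_retries: int = 3, base_delay: float = 1.0):
        # Retry an awaitable factory on transient errors, waiting 1s, 2s, 4s, ...
        delay = base_delay
        for attempt in range(max_retries):
            try:
                return await make_request()
            except (asyncio.TimeoutError, OSError):
                if attempt == max_retries - 1:
                    raise  # retries exhausted; surface the last error
                await asyncio.sleep(delay)
                delay *= 2
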
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index f7f15a8e..0c512060 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -27,6 +27,7 @@ from .DeepInfraImage import DeepInfraImage
from .FlowGpt import FlowGpt
from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
+from .FreeNetfly import FreeNetfly
from .GeminiPro import GeminiPro
from .GeminiProChat import GeminiProChat
from .GigaChat import GigaChat
diff --git a/g4f/models.py b/g4f/models.py
index c963115a..0ad87514 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -17,6 +17,7 @@ from .Provider import (
DeepInfraImage,
FreeChatgpt,
FreeGpt,
+ FreeNetfly,
Gemini,
GeminiPro,
GeminiProChat,
@@ -143,7 +144,7 @@ gpt_4o_mini = Model(
name = 'gpt-4o-mini',
base_provider = 'openai',
best_provider = IterListProvider([
- Liaobots, OpenaiChat, You,
+ Liaobots, OpenaiChat, You, FreeNetfly
])
)
--
cgit v1.2.3
From 93fa8960fead232b0bf9d44e0d2c5bcfbf5726f8 Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Thu, 1 Aug 2024 01:32:50 +0300
Subject: Added new providers: TeachAnything, MagickPenAsk, MagickPenChat
---
g4f/Provider/MagickPenAsk.py | 50 ++++++++++++++++++++++++++++++++++
g4f/Provider/MagickPenChat.py | 50 ++++++++++++++++++++++++++++++++++
g4f/Provider/TeachAnything.py | 62 +++++++++++++++++++++++++++++++++++++++++++
g4f/Provider/__init__.py | 3 +++
g4f/models.py | 10 ++++---
5 files changed, 171 insertions(+), 4 deletions(-)
create mode 100644 g4f/Provider/MagickPenAsk.py
create mode 100644 g4f/Provider/MagickPenChat.py
create mode 100644 g4f/Provider/TeachAnything.py
(limited to 'g4f')
diff --git a/g4f/Provider/MagickPenAsk.py b/g4f/Provider/MagickPenAsk.py
new file mode 100644
index 00000000..54058228
--- /dev/null
+++ b/g4f/Provider/MagickPenAsk.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class MagickPenAsk(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://api.magickpen.com"
+ api_endpoint = "/ask"
+ working = True
+ supports_gpt_4 = True
+ default_model = "gpt-4o"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/plain, */*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": "https://magickpen.com",
+ "priority": "u=1, i",
+ "referer": "https://magickpen.com/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "query": format_prompt(messages),
+ "plan": "Pay as you go"
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
diff --git a/g4f/Provider/MagickPenChat.py b/g4f/Provider/MagickPenChat.py
new file mode 100644
index 00000000..6c30028a
--- /dev/null
+++ b/g4f/Provider/MagickPenChat.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class MagickPenChat(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://api.magickpen.com"
+ api_endpoint = "/chat/free"
+ working = True
+ supports_gpt_4 = True
+ default_model = "gpt-4o-mini"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/plain, */*",
+ "accept-language": "en-US,en;q=0.9",
+ "access-control-allow-origin": "*",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": "https://magickpen.com",
+ "priority": "u=1, i",
+ "referer": "https://magickpen.com/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "history": [{"role": "user", "content": format_prompt(messages)}]
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
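
Unlike the SSE-based providers, both MagickPen endpoints stream plain text chunks, so consuming them is a bare decode loop (a sketch, assuming the classes ship as added above and the endpoints are live):

    import asyncio
    from g4f.Provider.MagickPenChat import MagickPenChat

    async def main() -> None:
        messages = [{"role": "user", "content": "Name three uses of aiohttp."}]
        async for chunk in MagickPenChat.create_async_generator("gpt-4o-mini", messages):
            print(chunk, end="", flush=True)

    asyncio.run(main())
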
diff --git a/g4f/Provider/TeachAnything.py b/g4f/Provider/TeachAnything.py
new file mode 100644
index 00000000..908dd56e
--- /dev/null
+++ b/g4f/Provider/TeachAnything.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+from typing import Any, Dict
+
+from aiohttp import ClientSession, ClientTimeout
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.teach-anything.com"
+ api_endpoint = "/api/generate"
+ working = True
+ default_model = "llama-3-70b-instruct"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str | None = None,
+ **kwargs: Any
+ ) -> AsyncResult:
+ headers = cls._get_headers()
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {"prompt": prompt}
+
+ timeout = ClientTimeout(total=60)
+
+ async with session.post(
+ f"{cls.url}{cls.api_endpoint}",
+ json=data,
+ proxy=proxy,
+ timeout=timeout
+ ) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ if chunk:
+ yield chunk.decode()
+
+ @staticmethod
+ def _get_headers() -> Dict[str, str]:
+ return {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": "https://www.teach-anything.com",
+ "priority": "u=1, i",
+ "referer": "https://www.teach-anything.com/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
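
TeachAnything declares a single model, so callers pass the declared default (a sketch under the same assumptions as above):

    import asyncio
    from g4f.Provider.TeachAnything import TeachAnything

    async def main() -> None:
        messages = [{"role": "user", "content": "Explain recursion in two sentences."}]
        # default_model is 'llama-3-70b-instruct', the only model declared.
        async for chunk in TeachAnything.create_async_generator(
            TeachAnything.default_model, messages
        ):
            print(chunk, end="")

    asyncio.run(main())
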
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 0c512060..8bbf71b3 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -40,6 +40,8 @@ from .Liaobots import Liaobots
from .LiteIcoding import LiteIcoding
from .Llama import Llama
from .Local import Local
+from .MagickPenAsk import MagickPenAsk
+from .MagickPenChat import MagickPenChat
from .Marsyoo import Marsyoo
from .MetaAI import MetaAI
from .MetaAIAccount import MetaAIAccount
@@ -50,6 +52,7 @@ from .Pizzagpt import Pizzagpt
from .Reka import Reka
from .Replicate import Replicate
from .ReplicateHome import ReplicateHome
+from .TeachAnything import TeachAnything
from .Vercel import Vercel
from .WhiteRabbitNeo import WhiteRabbitNeo
from .You import You
diff --git a/g4f/models.py b/g4f/models.py
index 0ad87514..b6b54792 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -27,6 +27,8 @@ from .Provider import (
Koala,
Liaobots,
LiteIcoding,
+ MagickPenAsk,
+ MagickPenChat,
Marsyoo,
MetaAI,
OpenaiChat,
@@ -36,6 +38,7 @@ from .Provider import (
Reka,
Replicate,
ReplicateHome,
+ TeachAnything,
You,
)
@@ -109,7 +112,6 @@ gpt_35_turbo = Model(
Koala,
ChatgptFree,
FreeChatgpt,
- DDG,
AI365VIP,
Pizzagpt,
Allyfy,
@@ -136,7 +138,7 @@ gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'openai',
best_provider = IterListProvider([
- You, Liaobots, Chatgpt4o, AI365VIP, OpenaiChat, Marsyoo, LiteIcoding
+ You, Liaobots, Chatgpt4o, AI365VIP, OpenaiChat, Marsyoo, LiteIcoding, MagickPenAsk,
])
)
@@ -144,7 +146,7 @@ gpt_4o_mini = Model(
name = 'gpt-4o-mini',
base_provider = 'openai',
best_provider = IterListProvider([
- Liaobots, OpenaiChat, You, FreeNetfly
+ DDG, Liaobots, OpenaiChat, You, FreeNetfly, MagickPenChat,
])
)
@@ -179,7 +181,7 @@ llama_3_70b_instruct = Model(
llama_3_70b_instruct = Model(
name = "meta/meta-llama-3-70b-instruct",
base_provider = "meta",
- best_provider = IterListProvider([ReplicateHome])
+ best_provider = IterListProvider([ReplicateHome, TeachAnything])
)
llama_3_70b_chat_hf = Model(
--
cgit v1.2.3