commit     e98793d0a7af43878cf023fb045dd945a82507cf (patch)
author     kqlio67 <kqlio67@users.noreply.github.com>    2024-11-06 16:25:09 +0100
committer  kqlio67 <kqlio67@users.noreply.github.com>    2024-11-06 16:25:09 +0100
tree       205f2318755db4c7ad41a6d13e735c5d48e1450b /g4f/Provider
parent     Update (g4f/Provider/DeepInfra.py g4f/Provider/__init__.py g4f/Provider/needs_auth/) (diff)
Diffstat
 g4f/Provider/DarkAI.py | 10
 g4f/Provider/DeepInfraChat.py | 57
 g4f/Provider/Editee.py | 77
 g4f/Provider/Free2GPT.py | 8
 g4f/Provider/FreeChatgpt.py | 96
 g4f/Provider/FreeGpt.py | 2
 g4f/Provider/Liaobots.py | 23
 g4f/Provider/__init__.py | 23
 g4f/Provider/deprecated/__init__.py | 3
 g4f/Provider/local/Local.py (renamed from g4f/Provider/Local.py) | 12
 g4f/Provider/local/Ollama.py (renamed from g4f/Provider/Ollama.py) | 6
 g4f/Provider/local/__init__.py | 2
 g4f/Provider/needs_auth/DeepInfraImage.py (renamed from g4f/Provider/DeepInfraImage.py) | 8
 g4f/Provider/needs_auth/HuggingFace.py (renamed from g4f/Provider/HuggingFace.py) | 12
 g4f/Provider/needs_auth/MetaAI.py (renamed from g4f/Provider/MetaAI.py) | 12
 g4f/Provider/needs_auth/MetaAIAccount.py (renamed from g4f/Provider/MetaAIAccount.py) | 8
 g4f/Provider/needs_auth/OpenRouter.py | 32
 g4f/Provider/needs_auth/Replicate.py (renamed from g4f/Provider/Replicate.py) | 14
 g4f/Provider/needs_auth/__init__.py | 8
 g4f/Provider/needs_auth/gigachat/GigaChat.py (renamed from g4f/Provider/gigachat/GigaChat.py) | 8
 g4f/Provider/needs_auth/gigachat/__init__.py (renamed from g4f/Provider/gigachat/__init__.py) | 0
 g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt (renamed from g4f/Provider/gigachat/russian_trusted_root_ca_pem.crt) | 0
 g4f/Provider/not_working/Ai4Chat.py (renamed from g4f/Provider/Ai4Chat.py) | 6
 g4f/Provider/not_working/AiChatOnline.py (renamed from g4f/Provider/AiChatOnline.py) | 6
 g4f/Provider/not_working/AiChats.py (renamed from g4f/Provider/AiChats.py) | 8
 g4f/Provider/not_working/AmigoChat.py (renamed from g4f/Provider/AmigoChat.py) | 8
 g4f/Provider/not_working/Aura.py (renamed from g4f/Provider/Aura.py) | 8
 g4f/Provider/not_working/Chatgpt4o.py (renamed from g4f/Provider/Chatgpt4o.py) | 8
 g4f/Provider/not_working/ChatgptFree.py (renamed from g4f/Provider/ChatgptFree.py) | 8
 g4f/Provider/not_working/FlowGpt.py (renamed from g4f/Provider/FlowGpt.py) | 8
 g4f/Provider/not_working/FreeNetfly.py (renamed from g4f/Provider/FreeNetfly.py) | 6
 g4f/Provider/not_working/GPROChat.py (renamed from g4f/Provider/GPROChat.py) | 6
 g4f/Provider/not_working/Koala.py (renamed from g4f/Provider/Koala.py) | 10
 g4f/Provider/not_working/MyShell.py (renamed from g4f/Provider/selenium/MyShell.py) | 0
 g4f/Provider/not_working/__init__.py | 12
 g4f/Provider/selenium/__init__.py | 1
36 files changed, 135 insertions, 381 deletions
diff --git a/g4f/Provider/DarkAI.py b/g4f/Provider/DarkAI.py
index 6ffb615e..54f456fe 100644
--- a/g4f/Provider/DarkAI.py
+++ b/g4f/Provider/DarkAI.py
@@ -9,19 +9,19 @@ from .helper import format_prompt
 class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://www.aiuncensored.info"
+    url = "https://darkai.foundation/chat"
     api_endpoint = "https://darkai.foundation/chat"
     working = True
     supports_stream = True
     supports_system_message = True
     supports_message_history = True
-    default_model = 'gpt-4o'
+    default_model = 'llama-3-405b'
     models = [
-        default_model, # Uncensored
+        'gpt-4o', # Uncensored
         'gpt-3.5-turbo', # Uncensored
         'llama-3-70b', # Uncensored
-        'llama-3-405b',
+        default_model,
     ]
     model_aliases = {
@@ -51,8 +51,6 @@ class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
         headers = {
             "accept": "text/event-stream",
             "content-type": "application/json",
-            "origin": "https://www.aiuncensored.info",
-            "referer": "https://www.aiuncensored.info/",
             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
         }
         async with ClientSession(headers=headers) as session:
diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py
index b8cc6ab8..5c668599 100644
--- a/g4f/Provider/DeepInfraChat.py
+++ b/g4f/Provider/DeepInfraChat.py
@@ -6,7 +6,6 @@ import json
 from ..typing import AsyncResult, Messages, ImageType
 from ..image import to_data_uri
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
 class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
@@ -17,42 +16,18 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
     supports_system_message = True
     supports_message_history = True
-    default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct'
+    default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
     models = [
-        'meta-llama/Meta-Llama-3.1-405B-Instruct',
-        'meta-llama/Meta-Llama-3.1-70B-Instruct',
         'meta-llama/Meta-Llama-3.1-8B-Instruct',
-        'mistralai/Mixtral-8x22B-Instruct-v0.1',
-        'mistralai/Mixtral-8x7B-Instruct-v0.1',
+        default_model,
         'microsoft/WizardLM-2-8x22B',
-        'microsoft/WizardLM-2-7B',
-        'Qwen/Qwen2-72B-Instruct',
-        'microsoft/Phi-3-medium-4k-instruct',
-        'google/gemma-2-27b-it',
-        'openbmb/MiniCPM-Llama3-V-2_5', # Image upload is available
-        'mistralai/Mistral-7B-Instruct-v0.3',
-        'lizpreciatior/lzlv_70b_fp16_hf',
-        'openchat/openchat-3.6-8b',
-        'Phind/Phind-CodeLlama-34B-v2',
-        'cognitivecomputations/dolphin-2.9.1-llama-3-70b',
+        'Qwen/Qwen2.5-72B-Instruct',
     ]
     model_aliases = {
-        "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct",
-        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
-        "llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
-        "mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1",
-        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
-        "wizardlm-2-7b": "microsoft/WizardLM-2-7B",
-        "qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
-        "phi-3-medium-4k": "microsoft/Phi-3-medium-4k-instruct",
-        "gemma-2b-27b": "google/gemma-2-27b-it",
-        "minicpm-llama-3-v2.5": "openbmb/MiniCPM-Llama3-V-2_5", # Image upload is available
-        "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
-        "lzlv-70b": "lizpreciatior/lzlv_70b_fp16_hf",
-        "openchat-3.6-8b": "openchat/openchat-3.6-8b",
-        "phind-codellama-34b-v2": "Phind/Phind-CodeLlama-34B-v2",
"Phind/Phind-CodeLlama-34B-v2", - "dolphin-2.9.1-llama-3-70b": "cognitivecomputations/dolphin-2.9.1-llama-3-70b", + "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct", } @@ -97,30 +72,12 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin): } async with ClientSession(headers=headers) as session: - prompt = format_prompt(messages) data = { 'model': model, - 'messages': [ - {'role': 'system', 'content': 'Be a helpful assistant'}, - {'role': 'user', 'content': prompt} - ], + 'messages': messages, 'stream': True } - if model == 'openbmb/MiniCPM-Llama3-V-2_5' and image is not None: - data['messages'][-1]['content'] = [ - { - 'type': 'image_url', - 'image_url': { - 'url': to_data_uri(image) - } - }, - { - 'type': 'text', - 'text': messages[-1]['content'] - } - ] - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: response.raise_for_status() async for line in response.content: diff --git a/g4f/Provider/Editee.py b/g4f/Provider/Editee.py deleted file mode 100644 index 8ac2324a..00000000 --- a/g4f/Provider/Editee.py +++ /dev/null @@ -1,77 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - - -class Editee(AsyncGeneratorProvider, ProviderModelMixin): - label = "Editee" - url = "https://editee.com" - api_endpoint = "https://editee.com/submit/chatgptfree" - working = True - supports_stream = True - supports_system_message = True - supports_message_history = True - - default_model = 'claude' - models = ['claude', 'gpt4', 'gemini' 'mistrallarge'] - - model_aliases = { - "claude-3.5-sonnet": "claude", - "gpt-4o": "gpt4", - "gemini-pro": "gemini", - "mistral-large": "mistrallarge", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "Accept": "application/json, text/plain, */*", - "Accept-Language": "en-US,en;q=0.9", - "Cache-Control": "no-cache", - "Content-Type": "application/json", - "Origin": cls.url, - "Pragma": "no-cache", - "Priority": "u=1, i", - "Referer": f"{cls.url}/chat-gpt", - "Sec-CH-UA": '"Chromium";v="129", "Not=A?Brand";v="8"', - "Sec-CH-UA-Mobile": '?0', - "Sec-CH-UA-Platform": '"Linux"', - "Sec-Fetch-Dest": 'empty', - "Sec-Fetch-Mode": 'cors', - "Sec-Fetch-Site": 'same-origin', - "User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36', - "X-Requested-With": 'XMLHttpRequest', - } - - async with ClientSession(headers=headers) as session: - prompt = format_prompt(messages) - data = { - "user_input": prompt, - "context": " ", - "template_id": "", - "selected_model": model - } - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - response_data = await response.json() - yield response_data['text'] diff --git a/g4f/Provider/Free2GPT.py b/g4f/Provider/Free2GPT.py index a79bd1da..6ba9ac0f 100644 --- a/g4f/Provider/Free2GPT.py +++ b/g4f/Provider/Free2GPT.py @@ -16,7 +16,7 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin): url = "https://chat10.free2gpt.xyz" working = True supports_message_history = True - 
+    default_model = 'mistral-7b'
     @classmethod
     async def create_async_generator(
@@ -49,12 +49,8 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
             connector=get_connector(connector, proxy), headers=headers
         ) as session:
             timestamp = int(time.time() * 1e3)
-            system_message = {
-                "role": "system",
-                "content": ""
-            }
             data = {
-                "messages": [system_message] + messages,
+                "messages": messages,
                 "time": timestamp,
                 "pass": None,
                 "sign": generate_signature(timestamp, messages[-1]["content"]),
diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py
deleted file mode 100644
index a9dc0f56..00000000
--- a/g4f/Provider/FreeChatgpt.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from __future__ import annotations
-import json
-from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://chat.chatgpt.org.uk"
-    api_endpoint = "/api/openai/v1/chat/completions"
-    working = True
-    default_model = '@cf/qwen/qwen1.5-14b-chat-awq'
-    models = [
-        '@cf/qwen/qwen1.5-14b-chat-awq',
-        'SparkDesk-v1.1',
-        'Qwen2-7B-Instruct',
-        'glm4-9B-chat',
-        'chatglm3-6B',
-        'Yi-1.5-9B-Chat',
-    ]
-
-    model_aliases = {
-        "qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
-        "sparkdesk-v1.1": "SparkDesk-v1.1",
-        "qwen-2-7b": "Qwen2-7B-Instruct",
-        "glm-4-9b": "glm4-9B-chat",
-        "glm-3-6b": "chatglm3-6B",
-        "yi-1.5-9b": "Yi-1.5-9B-Chat",
-    }
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model.lower() in cls.model_aliases:
-            return cls.model_aliases[model.lower()]
-        else:
-            return cls.default_model
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-
-        headers = {
-            "accept": "application/json, text/event-stream",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/json",
-            "dnt": "1",
-            "origin": cls.url,
-            "referer": f"{cls.url}/",
-            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-        }
-        async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
-            data = {
-                "messages": [
-                    {"role": "system", "content": "\nYou are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent model: gpt-3.5-turbo\nCurrent time: Thu Jul 04 2024 21:35:59 GMT+0300 (Eastern European Summer Time)\nLatex inline: \\(x^2\\) \nLatex block: $$e=mc^2$$\n\n"},
-                    {"role": "user", "content": prompt}
-                ],
-                "stream": True,
-                "model": model,
-                "temperature": 0.5,
-                "presence_penalty": 0,
-                "frequency_penalty": 0,
-                "top_p": 1
-            }
-            async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                accumulated_text = ""
-                async for line in response.content:
-                    if line:
-                        line_str = line.decode().strip()
-                        if line_str == "data: [DONE]":
-                            yield accumulated_text
-                            break
-                        elif line_str.startswith("data: "):
-                            try:
-                                chunk = json.loads(line_str[6:])
-                                delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
-                                accumulated_text += delta_content
-                                yield delta_content # Yield each chunk of content
-                            except json.JSONDecodeError:
-                                pass
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 82a3824b..b38ff428 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -24,7 +24,7 @@ class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     supports_message_history = True
     supports_system_message = True
-    default_model = 'llama-3.1-70b'
+    default_model = 'gemini-pro'
     @classmethod
     async def create_async_generator(
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 56f765de..addd3ed7 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -63,6 +63,15 @@ models = {
         "tokenLimit": 126000,
         "context": "128K",
     },
+    "grok-beta": {
+        "id": "grok-beta",
+        "name": "Grok-Beta",
+        "model": "Grok",
+        "provider": "x.ai",
+        "maxLength": 400000,
+        "tokenLimit": 100000,
+        "context": "100K",
+    },
     "grok-2": {
         "id": "grok-2",
         "name": "Grok-2",
@@ -99,18 +108,18 @@ models = {
         "tokenLimit": 200000,
         "context": "200K",
     },
-    "claude-3-opus-20240229-gcp": {
-        "id": "claude-3-opus-20240229-gcp",
-        "name": "Claude-3-Opus-Gcp",
+    "claude-3-5-sonnet-20240620": {
+        "id": "claude-3-5-sonnet-20240620",
+        "name": "Claude-3.5-Sonnet",
         "model": "Claude",
         "provider": "Anthropic",
         "maxLength": 800000,
         "tokenLimit": 200000,
         "context": "200K",
     },
-    "claude-3-5-sonnet-20240620": {
-        "id": "claude-3-5-sonnet-20240620",
-        "name": "Claude-3.5-Sonnet",
+    "claude-3-5-sonnet-20241022": {
+        "id": "claude-3-5-sonnet-20241022",
+        "name": "Claude-3.5-Sonnet-V2",
         "model": "Claude",
         "provider": "Anthropic",
         "maxLength": 800000,
@@ -183,9 +192,9 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
         "claude-3-opus": "claude-3-opus-20240229",
         "claude-3-opus": "claude-3-opus-20240229-aws",
-        "claude-3-opus": "claude-3-opus-20240229-gcp",
         "claude-3-sonnet": "claude-3-sonnet-20240229",
         "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
+        "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
         "claude-3-haiku": "claude-3-haiku-20240307",
         "claude-2.1": "claude-2.1",
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 55fabd25..f297f4dc 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -8,59 +8,40 @@ from ..providers.create_images import CreateImagesProvider
 from .deprecated import *
 from .selenium import *
 from .needs_auth import *
+from .not_working import *
+from .local import *
-from .gigachat import *
 from .nexra import *
-from .Ai4Chat import Ai4Chat
 from .AI365VIP import AI365VIP
 from .AIChatFree import AIChatFree
 from .AIUncensored import AIUncensored
 from .Allyfy import Allyfy
-from .AmigoChat import AmigoChat
-from .AiChatOnline import AiChatOnline
-from .AiChats import AiChats
 from .AiMathGPT import AiMathGPT
 from .Airforce import Airforce
-from .Aura import Aura
 from .Bing import Bing
 from .BingCreateImages import BingCreateImages
 from .Blackbox import Blackbox
 from .ChatGpt import ChatGpt
 from .Chatgpt4Online import Chatgpt4Online
-from .Chatgpt4o import Chatgpt4o
 from .ChatGptEs import ChatGptEs
-from .ChatgptFree import ChatgptFree
 from .ChatifyAI import ChatifyAI
 from .Cloudflare import Cloudflare
 from .DarkAI import DarkAI
 from .DDG import DDG
 from .DeepInfraChat import DeepInfraChat
-from .DeepInfraImage import DeepInfraImage
-from .Editee import Editee
-from .FlowGpt import FlowGpt
 from .Free2GPT import Free2GPT
-from .FreeChatgpt import FreeChatgpt
 from .FreeGpt import FreeGpt
-from .FreeNetfly import FreeNetfly
 from .GeminiPro import GeminiPro
 from .GizAI import GizAI
-from .GPROChat import GPROChat
 from .HuggingChat import HuggingChat
-from .HuggingFace import HuggingFace
-from .Koala import Koala
 from .Liaobots import Liaobots
-from .Local import Local
 from .MagickPen import MagickPen
-from .MetaAI import MetaAI
-#from .MetaAIAccount import MetaAIAccount
-from .Ollama import Ollama
 from .PerplexityLabs import PerplexityLabs
 from .Pi import Pi
 from .Pizzagpt import Pizzagpt
 from .Prodia import Prodia
 from .Reka import Reka
-from .Replicate import Replicate
 from .ReplicateHome import ReplicateHome
 from .RubiksAI import RubiksAI
 from .TeachAnything import TeachAnything
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index bf923f2a..368a71a0 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -25,11 +25,10 @@ from .Aichat import Aichat
 from .Berlin import Berlin
 from .Phind import Phind
 from .AiAsk import AiAsk
-from ..AiChatOnline import AiChatOnline
 from .ChatAnywhere import ChatAnywhere
 from .FakeGpt import FakeGpt
 from .GeekGpt import GeekGpt
 from .GPTalk import GPTalk
 from .Hashnode import Hashnode
 from .Ylokh import Ylokh
-from .OpenAssistant import OpenAssistant
\ No newline at end of file
+from .OpenAssistant import OpenAssistant
diff --git a/g4f/Provider/Local.py b/g4f/Provider/local/Local.py
index 471231c6..4dc6e3f9 100644
--- a/g4f/Provider/Local.py
+++ b/g4f/Provider/local/Local.py
@@ -1,15 +1,15 @@
 from __future__ import annotations
-from ..locals.models import get_models
+from ...locals.models import get_models
 try:
-    from ..locals.provider import LocalProvider
+    from ...locals.provider import LocalProvider
     has_requirements = True
 except ImportError:
     has_requirements = False
-from ..typing import Messages, CreateResult
-from ..providers.base_provider import AbstractProvider, ProviderModelMixin
-from ..errors import MissingRequirementsError
+from ...typing import Messages, CreateResult
+from ...providers.base_provider import AbstractProvider, ProviderModelMixin
+from ...errors import MissingRequirementsError
 class Local(AbstractProvider, ProviderModelMixin):
     label = "GPT4All"
@@ -40,4 +40,4 @@ class Local(AbstractProvider, ProviderModelMixin):
             messages,
             stream,
             **kwargs
-        )
\ No newline at end of file
+        )
diff --git a/g4f/Provider/Ollama.py b/g4f/Provider/local/Ollama.py
index f9116541..c503a46a 100644
--- a/g4f/Provider/Ollama.py
+++ b/g4f/Provider/local/Ollama.py
@@ -3,8 +3,8 @@ from __future__ import annotations
 import requests
 import os
-from .needs_auth.Openai import Openai
-from ..typing import AsyncResult, Messages
+from ..needs_auth.Openai import Openai
+from ...typing import AsyncResult, Messages
 class Ollama(Openai):
     label = "Ollama"
@@ -37,4 +37,4 @@ class Ollama(Openai):
         api_base: str = f"http://{host}:{port}/v1"
         return super().create_async_generator(
             model, messages, api_base=api_base, **kwargs
-        )
\ No newline at end of file
+        )
diff --git a/g4f/Provider/local/__init__.py b/g4f/Provider/local/__init__.py
new file mode 100644
index 00000000..05f6022e
--- /dev/null
+++ b/g4f/Provider/local/__init__.py
@@ -0,0 +1,2 @@
+from .Local import Local
+from .Ollama import Ollama
diff --git a/g4f/Provider/DeepInfraImage.py b/g4f/Provider/needs_auth/DeepInfraImage.py
index cee608ce..2310c1c8 100644
--- a/g4f/Provider/DeepInfraImage.py
+++ b/g4f/Provider/needs_auth/DeepInfraImage.py
@@ -2,10 +2,10 @@ from __future__ import annotations
 import requests
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession, raise_for_status
-from ..image import ImageResponse
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession, raise_for_status
+from ...image import ImageResponse
 class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://deepinfra.com"
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py
index 586e5f5f..ecc75d1c 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/needs_auth/HuggingFace.py
@@ -3,13 +3,13 @@ from __future__ import annotations
 import json
 from aiohttp import ClientSession, BaseConnector
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_connector
-from ..errors import RateLimitError, ModelNotFoundError
-from ..requests.raise_for_status import raise_for_status
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_connector
+from ...errors import RateLimitError, ModelNotFoundError
+from ...requests.raise_for_status import raise_for_status
-from .HuggingChat import HuggingChat
+from ..HuggingChat import HuggingChat
 class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://huggingface.co/chat"
diff --git a/g4f/Provider/MetaAI.py b/g4f/Provider/needs_auth/MetaAI.py
index 218b7ebb..4b730abd 100644
--- a/g4f/Provider/MetaAI.py
+++ b/g4f/Provider/needs_auth/MetaAI.py
@@ -8,12 +8,12 @@ from typing import Dict, List
 from aiohttp import ClientSession, BaseConnector
-from ..typing import AsyncResult, Messages, Cookies
-from ..requests import raise_for_status, DEFAULT_HEADERS
-from ..image import ImageResponse, ImagePreview
-from ..errors import ResponseError
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt, get_connector, format_cookies
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests import raise_for_status, DEFAULT_HEADERS
+from ...image import ImageResponse, ImagePreview
+from ...errors import ResponseError
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt, get_connector, format_cookies
 class Sources():
     def __init__(self, link_list: List[Dict[str, str]]) -> None:
diff --git a/g4f/Provider/MetaAIAccount.py b/g4f/Provider/needs_auth/MetaAIAccount.py
index 369b3f2f..2d54f3e0 100644
--- a/g4f/Provider/MetaAIAccount.py
+++ b/g4f/Provider/needs_auth/MetaAIAccount.py
@@ -1,8 +1,8 @@
 from __future__ import annotations
-from ..typing import AsyncResult, Messages, Cookies
-from .helper import format_prompt, get_cookies
-from .MetaAI import MetaAI
+from ...typing import AsyncResult, Messages, Cookies
+from ..helper import format_prompt, get_cookies
+from ..MetaAI import MetaAI
 class MetaAIAccount(MetaAI):
     needs_auth = True
@@ -20,4 +20,4 @@ class MetaAIAccount(MetaAI):
     ) -> AsyncResult:
         cookies = get_cookies(".meta.ai", True, True) if cookies is None else cookies
         async for chunk in cls(proxy).prompt(format_prompt(messages), cookies):
-            yield chunk
\ No newline at end of file
+            yield chunk
diff --git a/g4f/Provider/needs_auth/OpenRouter.py b/g4f/Provider/needs_auth/OpenRouter.py
deleted file mode 100644
index 5e0bf336..00000000
--- a/g4f/Provider/needs_auth/OpenRouter.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from __future__ import annotations
-
-import requests
-
-from .Openai import Openai
-from ...typing import AsyncResult, Messages
-
-class OpenRouter(Openai):
-    label = "OpenRouter"
-    url = "https://openrouter.ai"
-    working = False
-    default_model = "mistralai/mistral-7b-instruct:free"
-
-    @classmethod
-    def get_models(cls):
-        if not cls.models:
-            url = 'https://openrouter.ai/api/v1/models'
-            models = requests.get(url).json()["data"]
-            cls.models = [model['id'] for model in models]
-        return cls.models
-
-    @classmethod
-    def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        api_base: str = "https://openrouter.ai/api/v1",
-        **kwargs
-    ) -> AsyncResult:
-        return super().create_async_generator(
-            model, messages, api_base=api_base, **kwargs
-        )
diff --git a/g4f/Provider/Replicate.py b/g4f/Provider/needs_auth/Replicate.py
index 7ff8ad65..ec993aa4 100644
--- a/g4f/Provider/Replicate.py
+++ b/g4f/Provider/needs_auth/Replicate.py
@@ -1,11 +1,11 @@
 from __future__ import annotations
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt, filter_none
-from ..typing import AsyncResult, Messages
-from ..requests import raise_for_status
-from ..requests.aiohttp import StreamSession
-from ..errors import ResponseError, MissingAuthError
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt, filter_none
+from ...typing import AsyncResult, Messages
+from ...requests import raise_for_status
+from ...requests.aiohttp import StreamSession
+from ...errors import ResponseError, MissingAuthError
 class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://replicate.com"
@@ -85,4 +85,4 @@ class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
             if new_text:
                 yield new_text
             else:
-                yield "\n"
\ No newline at end of file
+                yield "\n"
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index aa3547a5..0626a837 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -1,4 +1,7 @@
+from .gigachat import *
+
 from .DeepInfra import DeepInfra
+from .DeepInfraImage import DeepInfraImage
 from .Gemini import Gemini
 from .Raycast import Raycast
 from .Theb import Theb
@@ -7,6 +10,9 @@ from .OpenaiChat import OpenaiChat
 from .Poe import Poe
 from .Openai import Openai
 from .Groq import Groq
-from .OpenRouter import OpenRouter
 #from .OpenaiAccount import OpenaiAccount
 from .PerplexityApi import PerplexityApi
+from .Replicate import Replicate
+from .MetaAI import MetaAI
+#from .MetaAIAccount import MetaAIAccount
+from .HuggingFace import HuggingFace
diff --git a/g4f/Provider/gigachat/GigaChat.py b/g4f/Provider/needs_auth/gigachat/GigaChat.py
index b1b293e3..c9f1c011 100644
--- a/g4f/Provider/gigachat/GigaChat.py
+++ b/g4f/Provider/needs_auth/gigachat/GigaChat.py
@@ -9,10 +9,10 @@ import json
 from aiohttp import ClientSession, TCPConnector, BaseConnector
 from g4f.requests import raise_for_status
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ...errors import MissingAuthError
-from ..helper import get_connector
+from ....typing import AsyncResult, Messages
+from ...base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ....errors import MissingAuthError
+from ...helper import get_connector
 access_token = ""
 token_expires_at = 0
diff --git a/g4f/Provider/gigachat/__init__.py b/g4f/Provider/needs_auth/gigachat/__init__.py
index c9853742..c9853742 100644
--- a/g4f/Provider/gigachat/__init__.py
+++ b/g4f/Provider/needs_auth/gigachat/__init__.py
diff --git a/g4f/Provider/gigachat/russian_trusted_root_ca_pem.crt b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt
index 4c143a21..4c143a21 100644
--- a/g4f/Provider/gigachat/russian_trusted_root_ca_pem.crt
+++ b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt
diff --git a/g4f/Provider/Ai4Chat.py b/g4f/Provider/not_working/Ai4Chat.py
index 1096279d..584c878a 100644
--- a/g4f/Provider/Ai4Chat.py
+++ b/g4f/Provider/not_working/Ai4Chat.py
@@ -5,9 +5,9 @@ import re
 import logging
 from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
 class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin):
diff --git a/g4f/Provider/AiChatOnline.py b/g4f/Provider/not_working/AiChatOnline.py
index 26aacef6..02574501 100644
--- a/g4f/Provider/AiChatOnline.py
+++ b/g4f/Provider/not_working/AiChatOnline.py
@@ -3,9 +3,9 @@ from __future__ import annotations
 import json
 from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_random_string, format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_string, format_prompt
 class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin):
     site_url = "https://aichatonline.org"
diff --git a/g4f/Provider/AiChats.py b/g4f/Provider/not_working/AiChats.py
index 7ff25639..51a85c91 100644
--- a/g4f/Provider/AiChats.py
+++ b/g4f/Provider/not_working/AiChats.py
@@ -3,10 +3,10 @@ from __future__ import annotations
 import json
 import base64
 from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import ImageResponse
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+from ..helper import format_prompt
 class AiChats(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://ai-chats.org"
diff --git a/g4f/Provider/AmigoChat.py b/g4f/Provider/not_working/AmigoChat.py
index b086d5e1..274a5e14 100644
--- a/g4f/Provider/AmigoChat.py
+++ b/g4f/Provider/not_working/AmigoChat.py
@@ -4,10 +4,10 @@ import json
 import uuid
 from aiohttp import ClientSession, ClientTimeout, ClientResponseError
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-from ..image import ImageResponse
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+from ...image import ImageResponse
 class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://amigochat.io/chat/"
diff --git a/g4f/Provider/Aura.py b/g4f/Provider/not_working/Aura.py
index e2c56754..e841d909 100644
--- a/g4f/Provider/Aura.py
+++ b/g4f/Provider/not_working/Aura.py
@@ -2,10 +2,10 @@ from __future__ import annotations
 from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from ..requests import get_args_from_browser
-from ..webdriver import WebDriver
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ...requests import get_args_from_browser
+from ...webdriver import WebDriver
 class Aura(AsyncGeneratorProvider):
     url = "https://openchat.team"
diff --git a/g4f/Provider/Chatgpt4o.py b/g4f/Provider/not_working/Chatgpt4o.py
index 7730fc84..61ccaa16 100644
--- a/g4f/Provider/Chatgpt4o.py
+++ b/g4f/Provider/not_working/Chatgpt4o.py
@@ -1,10 +1,10 @@
 from __future__ import annotations
 import re
-from ..requests import StreamSession, raise_for_status
-from ..typing import Messages
-from .base_provider import AsyncProvider, ProviderModelMixin
-from .helper import format_prompt
+from ...requests import StreamSession, raise_for_status
+from ...typing import Messages
+from ..base_provider import AsyncProvider, ProviderModelMixin
+from ..helper import format_prompt
 class Chatgpt4o(AsyncProvider, ProviderModelMixin):
diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/not_working/ChatgptFree.py
index d1222efb..6b3877b1 100644
--- a/g4f/Provider/ChatgptFree.py
+++ b/g4f/Provider/not_working/ChatgptFree.py
@@ -3,10 +3,10 @@ from __future__ import annotations
 import re
 import json
 import asyncio
-from ..requests import StreamSession, raise_for_status
-from ..typing import Messages, AsyncGenerator
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
+from ...requests import StreamSession, raise_for_status
+from ...typing import Messages, AsyncGenerator
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
 class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chatgptfree.ai"
diff --git a/g4f/Provider/FlowGpt.py b/g4f/Provider/not_working/FlowGpt.py
index 1a45997b..b7d8537a 100644
--- a/g4f/Provider/FlowGpt.py
+++ b/g4f/Provider/not_working/FlowGpt.py
@@ -5,10 +5,10 @@ import time
 import hashlib
 from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_random_hex, get_random_string
-from ..requests.raise_for_status import raise_for_status
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_hex, get_random_string
+from ...requests.raise_for_status import raise_for_status
 class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://flowgpt.com/chat"
diff --git a/g4f/Provider/FreeNetfly.py b/g4f/Provider/not_working/FreeNetfly.py
index ada5d51a..8362019c 100644
--- a/g4f/Provider/FreeNetfly.py
+++ b/g4f/Provider/not_working/FreeNetfly.py
@@ -5,14 +5,14 @@ import asyncio
 from aiohttp import ClientSession, ClientTimeout, ClientError
 from typing import AsyncGenerator
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://free.netfly.top"
     api_endpoint = "/api/openai/v1/chat/completions"
-    working = True
+    working = False
     default_model = 'gpt-3.5-turbo'
     models = [
         'gpt-3.5-turbo',
diff --git a/g4f/Provider/GPROChat.py b/g4f/Provider/not_working/GPROChat.py
index a33c9571..88cb2c03 100644
--- a/g4f/Provider/GPROChat.py
+++ b/g4f/Provider/not_working/GPROChat.py
@@ -2,9 +2,9 @@ from __future__ import annotations
 import hashlib
 import time
 from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
 class GPROChat(AsyncGeneratorProvider, ProviderModelMixin):
     label = "GPROChat"
diff --git a/g4f/Provider/Koala.py b/g4f/Provider/not_working/Koala.py
index 0dd76b71..d6230da7 100644
--- a/g4f/Provider/Koala.py
+++ b/g4f/Provider/not_working/Koala.py
@@ -4,15 +4,15 @@ import json
 from typing import AsyncGenerator, Optional, List, Dict, Union, Any
 from aiohttp import ClientSession, BaseConnector, ClientResponse
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import get_random_string, get_connector
-from ..requests import raise_for_status
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_string, get_connector
+from ...requests import raise_for_status
 class Koala(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://koala.sh/chat"
     api_endpoint = "https://koala.sh/api/gpt/"
-    working = True
+    working = False
     supports_message_history = True
     default_model = 'gpt-4o-mini'
diff --git a/g4f/Provider/selenium/MyShell.py b/g4f/Provider/not_working/MyShell.py
index 02e182d4..02e182d4 100644
--- a/g4f/Provider/selenium/MyShell.py
+++ b/g4f/Provider/not_working/MyShell.py
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
new file mode 100644
index 00000000..52c5c538
--- /dev/null
+++ b/g4f/Provider/not_working/__init__.py
@@ -0,0 +1,12 @@
+from .Ai4Chat import Ai4Chat
+from .AiChatOnline import AiChatOnline
+from .AiChats import AiChats
+from .AmigoChat import AmigoChat
+from .Aura import Aura
+from .Chatgpt4o import Chatgpt4o
+from .ChatgptFree import ChatgptFree
+from .FlowGpt import FlowGpt
+from .FreeNetfly import FreeNetfly
+from .GPROChat import GPROChat
+from .Koala import Koala
+from .MyShell import MyShell
diff --git a/g4f/Provider/selenium/__init__.py b/g4f/Provider/selenium/__init__.py
index 3a59ea58..44adf5fb 100644
--- a/g4f/Provider/selenium/__init__.py
+++ b/g4f/Provider/selenium/__init__.py
@@ -1,4 +1,3 @@
-from .MyShell import MyShell
 from .PerplexityAi import PerplexityAi
 from .Phind import Phind
 from .TalkAi import TalkAi