Diffstat (limited to 'g4f/Provider')
-rw-r--r-- | g4f/Provider/Aura.py | 21
-rw-r--r-- | g4f/Provider/Bing.py | 177
-rw-r--r-- | g4f/Provider/ChatForAi.py | 31
-rw-r--r-- | g4f/Provider/Chatgpt4Online.py | 2
-rw-r--r-- | g4f/Provider/ChatgptAi.py | 22
-rw-r--r-- | g4f/Provider/ChatgptFree.py | 10
-rw-r--r-- | g4f/Provider/ChatgptNext.py | 20
-rw-r--r-- | g4f/Provider/ChatgptX.py | 6
-rw-r--r-- | g4f/Provider/FlowGpt.py | 7
-rw-r--r-- | g4f/Provider/FreeChatgpt.py | 7
-rw-r--r-- | g4f/Provider/FreeGpt.py | 16
-rw-r--r-- | g4f/Provider/GeminiProChat.py | 15
-rw-r--r-- | g4f/Provider/GptTalkRu.py | 50
-rw-r--r-- | g4f/Provider/Koala.py | 26
-rw-r--r-- | g4f/Provider/Liaobots.py | 13
-rw-r--r-- | g4f/Provider/PerplexityLabs.py | 2
-rw-r--r-- | g4f/Provider/Pi.py | 21
-rw-r--r-- | g4f/Provider/Vercel.py | 22
-rw-r--r-- | g4f/Provider/You.py | 60
-rw-r--r-- | g4f/Provider/__init__.py | 70
-rw-r--r-- | g4f/Provider/bing/conversation.py | 39
-rw-r--r-- | g4f/Provider/deprecated/AiAsk.py (renamed from g4f/Provider/AiAsk.py) | 4
-rw-r--r-- | g4f/Provider/deprecated/AiChatOnline.py (renamed from g4f/Provider/AiChatOnline.py) | 6
-rw-r--r-- | g4f/Provider/deprecated/ChatAnywhere.py (renamed from g4f/Provider/ChatAnywhere.py) | 4
-rw-r--r-- | g4f/Provider/deprecated/FakeGpt.py (renamed from g4f/Provider/FakeGpt.py) | 6
-rw-r--r-- | g4f/Provider/deprecated/GPTalk.py (renamed from g4f/Provider/GPTalk.py) | 8
-rw-r--r-- | g4f/Provider/deprecated/GeekGpt.py (renamed from g4f/Provider/GeekGpt.py) | 4
-rw-r--r-- | g4f/Provider/deprecated/Hashnode.py (renamed from g4f/Provider/Hashnode.py) | 6
-rw-r--r-- | g4f/Provider/deprecated/Ylokh.py (renamed from g4f/Provider/Ylokh.py) | 6
-rw-r--r-- | g4f/Provider/deprecated/__init__.py | 10
-rw-r--r-- | g4f/Provider/helper.py | 3
-rw-r--r-- | g4f/Provider/not_working/AItianhu.py (renamed from g4f/Provider/AItianhu.py) | 6
-rw-r--r-- | g4f/Provider/not_working/Bestim.py (renamed from g4f/Provider/Bestim.py) | 112
-rw-r--r-- | g4f/Provider/not_working/ChatBase.py (renamed from g4f/Provider/ChatBase.py) | 6
-rw-r--r-- | g4f/Provider/not_working/ChatgptDemo.py (renamed from g4f/Provider/ChatgptDemo.py) | 50
-rw-r--r-- | g4f/Provider/not_working/ChatgptDemoAi.py (renamed from g4f/Provider/ChatgptDemoAi.py) | 7
-rw-r--r-- | g4f/Provider/not_working/ChatgptLogin.py (renamed from g4f/Provider/ChatgptLogin.py) | 8
-rw-r--r-- | g4f/Provider/not_working/Chatxyz.py (renamed from g4f/Provider/Chatxyz.py) | 4
-rw-r--r-- | g4f/Provider/not_working/Gpt6.py (renamed from g4f/Provider/Gpt6.py) | 9
-rw-r--r-- | g4f/Provider/not_working/GptChatly.py (renamed from g4f/Provider/GptChatly.py) | 8
-rw-r--r-- | g4f/Provider/not_working/GptForLove.py (renamed from g4f/Provider/GptForLove.py) | 10
-rw-r--r-- | g4f/Provider/not_working/GptGo.py (renamed from g4f/Provider/GptGo.py) | 10
-rw-r--r-- | g4f/Provider/not_working/GptGod.py (renamed from g4f/Provider/GptGod.py) | 8
-rw-r--r-- | g4f/Provider/not_working/OnlineGpt.py (renamed from g4f/Provider/OnlineGpt.py) | 9
-rw-r--r-- | g4f/Provider/not_working/__init__.py | 14
45 files changed, 518 insertions, 437 deletions
diff --git a/g4f/Provider/Aura.py b/g4f/Provider/Aura.py
index d8f3471c..877b7fef 100644
--- a/g4f/Provider/Aura.py
+++ b/g4f/Provider/Aura.py
@@ -4,6 +4,8 @@ from aiohttp import ClientSession
 
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
+from ..requests import get_args_from_browser
+from ..webdriver import WebDriver
 
 class Aura(AsyncGeneratorProvider):
     url = "https://openchat.team"
@@ -15,24 +17,11 @@ class Aura(AsyncGeneratorProvider):
         model: str,
         messages: Messages,
         proxy: str = None,
+        webdriver: WebDriver = None,
         **kwargs
     ) -> AsyncResult:
-        headers = {
-            "Accept": "*/*",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
-            "Content-Type": "application/json",
-            "Origin": f"{cls.url}",
-            "Referer": f"{cls.url}/",
-            "Sec-Ch-Ua": '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
-            "Sec-Ch-Ua-Mobile": "?0",
-            "Sec-Ch-Ua-Platform": '"Linux"',
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
-        }
-        async with ClientSession(headers=headers) as session:
+        args = get_args_from_browser(cls.url, webdriver, proxy)
+        async with ClientSession(**args) as session:
             new_messages = []
             system_message = []
             for message in messages:
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index 5bc89479..77178686 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -2,13 +2,12 @@ from __future__ import annotations
 
 import random
 import json
-import os
 import uuid
 import time
 from urllib import parse
 from aiohttp import ClientSession, ClientTimeout, BaseConnector
 
-from ..typing import AsyncResult, Messages, ImageType
+from ..typing import AsyncResult, Messages, ImageType, Cookies
 from ..image import ImageResponse, ImageRequest
 from .base_provider import AsyncGeneratorProvider
 from .helper import get_connector
@@ -39,7 +38,7 @@ class Bing(AsyncGeneratorProvider):
         messages: Messages,
         proxy: str = None,
         timeout: int = 900,
-        cookies: dict = None,
+        cookies: Cookies = None,
         connector: BaseConnector = None,
         tone: str = Tones.balanced,
         image: ImageType = None,
@@ -65,7 +64,7 @@ else:
             prompt = messages[-1]["content"]
             context = create_context(messages[:-1])
-        
+        cookies = {**get_default_cookies(), **cookies} if cookies else get_default_cookies()
         gpt4_turbo = True if model.startswith("gpt-4-turbo") else False
 
@@ -79,32 +78,88 @@ def create_context(messages: Messages) -> str:
     :param messages: A list of message dictionaries.
     :return: A string representing the context created from the messages.
     """
-    return "".join(
-        f"[{message['role']}]" + ("(#message)" if message['role'] != "system" else "(#additional_instructions)") + f"\n{message['content']}\n\n"
+    return "\n\n".join(
+        f"[{message['role']}]" + ("(#message)" if message['role'] != "system" else "(#additional_instructions)") + f"\n{message['content']}"
         for message in messages
     )
 
+def get_ip_address() -> str:
+    return f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
+
 class Defaults:
     """
     Default settings and configurations for the Bing provider.
     """
     delimiter = "\x1e"
-    ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
 
     # List of allowed message types for Bing responses
     allowedMessageTypes = [
-        "ActionRequest", "Chat", "Context", "Progress", "SemanticSerp",
-        "GenerateContentQuery", "SearchQuery", "RenderCardRequest"
+        "ActionRequest","Chat",
+        "ConfirmationCard", "Context",
+        "InternalSearchQuery", #"InternalSearchResult",
+        "Disengaged", #"InternalLoaderMessage",
+        "Progress", "RenderCardRequest",
+        "RenderContentRequest", "AdsQuery",
+        "SemanticSerp", "GenerateContentQuery",
+        "SearchQuery", "GeneratedCode",
+        "InternalTasksMessage"
     ]
 
-    sliceIds = [
-        'abv2', 'srdicton', 'convcssclick', 'stylewv2', 'contctxp2tf',
-        '802fluxv1pc_a', '806log2sphs0', '727savemem', '277teditgnds0', '207hlthgrds0'
-    ]
+    sliceIds = {
+        "Balanced": [
+            "supllmnfe","archnewtf",
+            "stpstream", "stpsig", "vnextvoicecf", "scmcbase", "cmcpupsalltf", "sydtransctrl",
+            "thdnsrch", "220dcl1s0", "0215wcrwips0", "0305hrthrots0", "0130gpt4t",
+            "bingfc", "0225unsticky1", "0228scss0",
+            "defquerycf", "defcontrol", "3022tphpv"
+        ],
+        "Creative": [
+            "bgstream", "fltltst2c",
+            "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl",
+            "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t",
+            "bingfccf", "0225unsticky1", "0228scss0",
+            "3022tpvs0"
+        ],
+        "Precise": [
+            "bgstream", "fltltst2c",
+            "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl",
+            "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t",
+            "bingfccf", "0225unsticky1", "0228scss0",
+            "defquerycf", "3022tpvs0"
+        ],
+    }
+
+    optionsSets = {
+        "Balanced": [
+            "nlu_direct_response_filter", "deepleo",
+            "disable_emoji_spoken_text", "responsible_ai_policy_235",
+            "enablemm", "dv3sugg", "autosave",
+            "iyxapbing", "iycapbing",
+            "galileo", "saharagenconv5", "gldcl1p",
+            "gpt4tmncnp"
+        ],
+        "Creative": [
+            "nlu_direct_response_filter", "deepleo",
+            "disable_emoji_spoken_text", "responsible_ai_policy_235",
+            "enablemm", "dv3sugg",
+            "iyxapbing", "iycapbing",
+            "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3",
+            "gpt4tmncnp"
+        ],
+        "Precise": [
+            "nlu_direct_response_filter", "deepleo",
+            "disable_emoji_spoken_text", "responsible_ai_policy_235",
+            "enablemm", "dv3sugg",
+            "iyxapbing", "iycapbing",
+            "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot",
+            "clgalileo", "gencontentv3"
+        ],
+    }
 
     # Default location settings
     location = {
         "locale": "en-US", "market": "en-US", "region": "US",
+        "location":"lat:34.0536909;long:-118.242766;re=1000m;",
         "locationHints": [{
             "country": "United States", "state": "California", "city": "Los Angeles",
             "timezoneoffset": 8, "countryConfidence": 8,
@@ -134,17 +189,8 @@ class Defaults:
         'upgrade-insecure-requests': '1',
         'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
         'x-edge-shopping-flag': '1',
-        'x-forwarded-for': ip_address,
+        'x-forwarded-for': get_ip_address(),
     }
-
-    optionsSets = [
-        'nlu_direct_response_filter', 'deepleo', 'disable_emoji_spoken_text',
-        'responsible_ai_policy_235', 'enablemm', 'iyxapbing', 'iycapbing',
-        'gencontentv3', 'fluxsrtrunc', 'fluxtrunc', 'fluxv1', 'rai278',
-        'replaceurl', 'eredirecturl', 'nojbfedge', "fluxcopilot", "nojbf",
-        "dgencontentv3", "nointernalsugg", "disable_telemetry", "machine_affinity",
-        "streamf", "codeint", "langdtwb", "fdwtlst", "fluxprod", "deuct3"
-    ]
 
 def get_default_cookies():
     return {
@@ -156,11 +202,6 @@ def get_default_cookies():
         'SRCHHPGUSR'    : f'HV={int(time.time())}',
     }
 
-class ConversationStyleOptionSets():
-    CREATIVE = ["h3imaginative", "clgalileo", "gencontentv3"]
-    BALANCED = ["galileo", "gldcl1p"]
-    PRECISE = ["h3precise", "clgalileo"]
-
 def format_message(msg: dict) -> str:
     """
     Formats a message dictionary into a JSON string with a delimiter.
@@ -191,18 +232,8 @@ def create_message(
     :param gpt4_turbo: Flag to enable GPT-4 Turbo.
     :return: A formatted string message for the Bing API.
     """
-    options_sets = Defaults.optionsSets.copy()
-    # Append tone-specific options
-    if tone == Tones.creative:
-        options_sets.extend(ConversationStyleOptionSets.CREATIVE)
-    elif tone == Tones.precise:
-        options_sets.extend(ConversationStyleOptionSets.PRECISE)
-    elif tone == Tones.balanced:
-        options_sets.extend(ConversationStyleOptionSets.BALANCED)
-    else:
-        options_sets.append("harmonyv3")
-
-    # Additional configurations based on parameters
+    options_sets = []
     if not web_search:
         options_sets.append("nosearchall")
     if gpt4_turbo:
@@ -210,34 +241,38 @@
     request_id = str(uuid.uuid4())
     struct = {
-        'arguments': [{
-            'source': 'cib',
-            'optionsSets': options_sets,
-            'allowedMessageTypes': Defaults.allowedMessageTypes,
-            'sliceIds': Defaults.sliceIds,
-            'traceId': os.urandom(16).hex(),
-            'isStartOfSession': True,
-            'requestId': request_id,
-            'message': {
-                **Defaults.location,
-                'author': 'user',
-                'inputMethod': 'Keyboard',
-                'text': prompt,
-                'messageType': 'Chat',
-                'requestId': request_id,
-                'messageId': request_id
-            },
+        "arguments":[{
+            "source": "cib",
+            "optionsSets": [*Defaults.optionsSets[tone], *options_sets],
+            "allowedMessageTypes": Defaults.allowedMessageTypes,
+            "sliceIds": Defaults.sliceIds[tone],
             "verbosity": "verbose",
             "scenario": "SERP",
             "plugins": [{"id": "c310c353-b9f0-4d76-ab0d-1dd5e979cf68", "category": 1}] if web_search else [],
-            'tone': tone,
-            'spokenTextMode': 'None',
-            'conversationId': conversation.conversationId,
-            'participant': {'id': conversation.clientId},
+            "traceId": str(uuid.uuid4()),
+            "conversationHistoryOptionsSets": ["autosave","savemem","uprofupd","uprofgen"],
+            "gptId": "copilot",
+            "isStartOfSession": True,
+            "requestId": request_id,
+            "message":{
+                **Defaults.location,
+                "userIpAddress": get_ip_address(),
+                "timestamp": "2024-03-11T22:40:36+01:00",
+                "author": "user",
+                "inputMethod": "Keyboard",
+                "text": prompt,
+                "messageType": "Chat",
+                "requestId": request_id,
+                "messageId": request_id
+            },
+            "tone": tone,
+            "spokenTextMode": "None",
+            "conversationId": conversation.conversationId,
+            "participant": {"id": conversation.clientId}
         }],
-        'invocationId': '1',
-        'target': 'chat',
-        'type': 4
+        "invocationId": "0",
+        "target": "chat",
+        "type": 4
     }
 
     if image_request and image_request.get('imageUrl') and image_request.get('originalImageUrl'):
@@ -283,14 +318,13 @@
     """
     headers = Defaults.headers
    if cookies:
-        headers["Cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items())
-
+        headers["cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items())
     async with ClientSession(
-        timeout=ClientTimeout(total=timeout), headers=headers, connector=connector
+        headers=headers, cookies=cookies,
+        timeout=ClientTimeout(total=timeout), connector=connector
     ) as session:
         conversation = await create_conversation(session)
         image_request = await upload_image(session, image, tone) if image else None
-
        try:
            async with session.ws_connect(
                'wss://sydney.bing.com/sydney/ChatHub',
@@ -298,12 +332,13 @@ async def stream_generate(
                params={'sec_access_token': conversation.conversationSignature}
            ) as wss:
                await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
+                await wss.send_str(format_message({"type": 6}))
                await wss.receive(timeout=timeout)
                await wss.send_str(create_message(conversation, prompt, tone, context, image_request, web_search, gpt4_turbo))
-
                response_txt = ''
                returned_text = ''
                final = False
+                message_id = None
                while not final:
                    msg = await wss.receive(timeout=timeout)
                    if not msg.data:
@@ -315,13 +350,17 @@ async def stream_generate(
                    response = json.loads(obj)
                    if response and response.get('type') == 1 and response['arguments'][0].get('messages'):
                        message = response['arguments'][0]['messages'][0]
+                        # Reset memory, if we have a new message
+                        if message_id is not None and message_id != message["messageId"]:
+                            returned_text = ''
+                        message_id = message["messageId"]
                        image_response = None
                        if (message['contentOrigin'] != 'Apology'):
                            if 'adaptiveCards' in message:
                                card = message['adaptiveCards'][0]['body'][0]
                                if "text" in card:
                                    response_txt = card.get('text')
-                                if message.get('messageType'):
+                                if message.get('messageType') and "inlines" in card:
                                    inline_txt = card['inlines'][0].get('text')
                                    response_txt += inline_txt + '\n'
                            elif message.get('contentType') == "IMAGE":
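Note on the reworked create_context: it now joins message blocks with blank-line separators instead of appending trailing newlines to each block. A minimal sketch of the resulting prompt text after this change (message contents invented for illustration):

    messages = [
        {"role": "system", "content": "Answer briefly."},
        {"role": "user", "content": "Hi"},
    ]
    # create_context(messages) now returns:
    # [system](#additional_instructions)
    # Answer briefly.
    #
    # [user](#message)
    # Hi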
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py
index afab034b..5aa728a1 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/ChatForAi.py
@@ -2,15 +2,17 @@ from __future__ import annotations
 
 import time
 import hashlib
+import uuid
 
 from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider
+from ..errors import RateLimitError
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
-
-class ChatForAi(AsyncGeneratorProvider):
+class ChatForAi(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chatforai.store"
     working = True
+    default_model = "gpt-3.5-turbo"
     supports_message_history = True
     supports_gpt_35_turbo = True
 
@@ -21,36 +23,39 @@ class ChatForAi(AsyncGeneratorProvider):
         messages: Messages,
         proxy: str = None,
         timeout: int = 120,
+        temperature: float = 0.7,
+        top_p: float = 1,
         **kwargs
     ) -> AsyncResult:
+        model = cls.get_model(model)
         headers = {
             "Content-Type": "text/plain;charset=UTF-8",
             "Origin": cls.url,
             "Referer": f"{cls.url}/?r=b",
         }
-        async with StreamSession(impersonate="chrome107", headers=headers, proxies={"https": proxy}, timeout=timeout) as session:
-            prompt = messages[-1]["content"]
+        async with StreamSession(impersonate="chrome", headers=headers, proxies={"https": proxy}, timeout=timeout) as session:
             timestamp = int(time.time() * 1e3)
-            conversation_id = f"id_{timestamp-123}"
+            conversation_id = str(uuid.uuid4())
             data = {
                 "conversationId": conversation_id,
                 "conversationType": "chat_continuous",
                 "botId": "chat_continuous",
                 "globalSettings":{
                     "baseUrl": "https://api.openai.com",
-                    "model": model if model else "gpt-3.5-turbo",
+                    "model": model,
                     "messageHistorySize": 5,
-                    "temperature": 0.7,
-                    "top_p": 1,
+                    "temperature": temperature,
+                    "top_p": top_p,
                     **kwargs
                 },
-                "botSettings": {},
-                "prompt": prompt,
+                "prompt": "",
                 "messages": messages,
                 "timestamp": timestamp,
-                "sign": generate_signature(timestamp, prompt, conversation_id)
+                "sign": generate_signature(timestamp, "", conversation_id)
             }
             async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
+                if response.status == 429:
+                    raise RateLimitError("Rate limit reached")
                 response.raise_for_status()
                 async for chunk in response.iter_content():
                     if b"https://chatforai.store" in chunk:
@@ -59,5 +64,5 @@ class ChatForAi(AsyncGeneratorProvider):
 
 def generate_signature(timestamp: int, message: str, id: str):
-    buffer = f"{timestamp}:{id}:{message}:7YN8z6d6"
+    buffer = f"{id}:{timestamp}:{message}:h496Jd6b"
     return hashlib.sha256(buffer.encode()).hexdigest()
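The ChatForAi request signature changed both its field order and its secret: it is now a SHA-256 over "{id}:{timestamp}:{message}:h496Jd6b", and the message slot is always empty because the full history travels in "messages". A standalone sketch of computing the sign (values illustrative):

    import hashlib
    import time
    import uuid

    conversation_id = str(uuid.uuid4())       # was f"id_{timestamp-123}" before this change
    timestamp = int(time.time() * 1e3)
    buffer = f"{conversation_id}:{timestamp}::h496Jd6b"   # empty message slot
    sign = hashlib.sha256(buffer.encode()).hexdigest()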
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py
index e923a8b1..169c936d 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -13,7 +13,7 @@ class Chatgpt4Online(AsyncGeneratorProvider):
     url = "https://chatgpt4online.org"
     supports_message_history = True
     supports_gpt_35_turbo = True
-    working = False
+    working = True
 
     _wpnonce = None
     _context_id = None
diff --git a/g4f/Provider/ChatgptAi.py b/g4f/Provider/ChatgptAi.py
index a38aea5e..d15140d7 100644
--- a/g4f/Provider/ChatgptAi.py
+++ b/g4f/Provider/ChatgptAi.py
@@ -4,14 +4,16 @@ import re, html, json, string, random
 from aiohttp import ClientSession
 
 from ..typing import Messages, AsyncResult
+from ..errors import RateLimitError
 from .base_provider import AsyncGeneratorProvider
-
+from .helper import get_random_string
 
 class ChatgptAi(AsyncGeneratorProvider):
     url = "https://chatgpt.ai"
-    working = False
+    working = True
     supports_message_history = True
-    supports_gpt_35_turbo = True
+    supports_system_message = True,
+    supports_gpt_4 = True,
     _system = None
 
     @classmethod
@@ -45,7 +47,6 @@ class ChatgptAi(AsyncGeneratorProvider):
             async with session.get(cls.url, proxy=proxy) as response:
                 response.raise_for_status()
                 text = await response.text()
-
         result = re.search(r"data-system='(.*?)'", text)
         if result :
             cls._system = json.loads(html.unescape(result.group(1)))
@@ -56,14 +57,15 @@
                 "botId": cls._system["botId"],
                 "customId": cls._system["customId"],
                 "session": cls._system["sessionId"],
-                "chatId": "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=11)),
+                "chatId": get_random_string(),
                 "contextId": cls._system["contextId"],
-                "messages": messages,
+                "messages": messages[:-1],
                 "newMessage": messages[-1]["content"],
-                "stream": True
+                "newFileId": None,
+                "stream":True
             }
             async with session.post(
-                f"{cls.url}/wp-json/mwai-ui/v1/chats/submit",
+                "https://chatgate.ai/wp-json/mwai-ui/v1/chats/submit",
                 proxy=proxy,
                 json=data,
                 headers={"X-Wp-Nonce": cls._system["restNonce"]}
@@ -76,6 +78,10 @@
                             assert "type" in line
                         except:
                             raise RuntimeError(f"Broken line: {line.decode()}")
+                        if line["type"] == "error":
+                            if "https://chatgate.ai/login" in line["data"]:
+                                raise RateLimitError("Rate limit reached")
+                            raise RuntimeError(line["data"])
                         if line["type"] == "live":
                             yield line["data"]
                         elif line["type"] == "end":
diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py
index b9b25447..b345b48a 100644
--- a/g4f/Provider/ChatgptFree.py
+++ b/g4f/Provider/ChatgptFree.py
@@ -5,8 +5,7 @@ import re
 from ..requests import StreamSession
 from ..typing import Messages
 from .base_provider import AsyncProvider
-from .helper import format_prompt, get_cookies
-
+from .helper import format_prompt
 
 class ChatgptFree(AsyncProvider):
     url = "https://chatgptfree.ai"
@@ -25,12 +24,6 @@
         cookies: dict = None,
         **kwargs
     ) -> str:
-
-        if not cookies:
-            cookies = get_cookies('chatgptfree.ai')
-        if not cookies:
-            raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://chatgptfree.ai on chrome]")
-
         headers = {
             'authority': 'chatgptfree.ai',
             'accept': '*/*',
@@ -82,6 +75,5 @@
             "bot_id": "0"
         }
         async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
-
             response.raise_for_status()
             return (await response.json())["data"]
\ No newline at end of file
diff --git a/g4f/Provider/ChatgptNext.py b/g4f/Provider/ChatgptNext.py
index 1ae37bd5..2d6f7487 100644
--- a/g4f/Provider/ChatgptNext.py
+++ b/g4f/Provider/ChatgptNext.py
@@ -4,13 +4,14 @@ import json
 from aiohttp import ClientSession
 
 from ..typing import AsyncResult, Messages
-from ..providers.base_provider import AsyncGeneratorProvider
-
+from .base_provider import AsyncGeneratorProvider
 
 class ChatgptNext(AsyncGeneratorProvider):
     url = "https://www.chatgpt-free.cc"
     working = True
     supports_gpt_35_turbo = True
+    supports_message_history = True
+    supports_system_message = True
 
     @classmethod
     async def create_async_generator(
@@ -18,6 +19,11 @@ class ChatgptNext(AsyncGeneratorProvider):
         model: str,
         messages: Messages,
         proxy: str = None,
+        max_tokens: int = None,
+        temperature: float = 0.7,
+        top_p: float = 1,
+        presence_penalty: float = 0,
+        frequency_penalty: float = 0,
         **kwargs
     ) -> AsyncResult:
         if not model:
@@ -43,11 +49,11 @@
                 "messages": messages,
                 "stream": True,
                 "model": model,
-                "temperature": 0.5,
-                "presence_penalty": 0,
-                "frequency_penalty": 0,
-                "top_p": 1,
-                **kwargs
+                "temperature": temperature,
+                "presence_penalty": presence_penalty,
+                "frequency_penalty": frequency_penalty,
+                "top_p": top_p,
+                "max_tokens": max_tokens,
             }
             async with session.post(f"https://chat.fstha.com/api/openai/v1/chat/completions", json=data, proxy=proxy) as response:
                 response.raise_for_status()
diff --git a/g4f/Provider/ChatgptX.py b/g4f/Provider/ChatgptX.py
index c8b9375a..9be0d89b 100644
--- a/g4f/Provider/ChatgptX.py
+++ b/g4f/Provider/ChatgptX.py
@@ -7,12 +7,12 @@ from aiohttp import ClientSession
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt
-
+from ..errors import RateLimitError
 
 class ChatgptX(AsyncGeneratorProvider):
     url = "https://chatgptx.de"
     supports_gpt_35_turbo = True
-    working = False
+    working = True
 
     @classmethod
     async def create_async_generator(
@@ -73,6 +73,8 @@
             async with session.post(f'{cls.url}/sendchat', data=data, headers=headers, proxy=proxy) as response:
                 response.raise_for_status()
                 chat = await response.json()
+                if "messages" in chat and "Anfragelimit" in chat["messages"]:
+                    raise RateLimitError("Rate limit reached")
                 if "response" not in chat or not chat["response"]:
                     raise RuntimeError(f'Response: {chat}')
                 headers = {
kwargs.get("temperature", 0.7), + "temperature": temperature, "promptId": f"model-{model}", "documentIds": [], "chatFileDocumentIds": [], @@ -67,6 +70,8 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin): "generateAudio": False } async with session.post("https://backend-k8s.flowgpt.com/v2/chat-anonymous", json=data, proxy=proxy) as response: + if response.status == 429: + raise RateLimitError("Rate limit reached") response.raise_for_status() async for chunk in response.content: if chunk.strip(): diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py index 8981ef79..c20c85d2 100644 --- a/g4f/Provider/FreeChatgpt.py +++ b/g4f/Provider/FreeChatgpt.py @@ -1,7 +1,7 @@ from __future__ import annotations -import json, random -from aiohttp import ClientSession +import json +from aiohttp import ClientSession, ClientTimeout from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider, ProviderModelMixin @@ -18,6 +18,7 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin): model: str, messages: Messages, proxy: str = None, + timeout: int = 120, **kwargs ) -> AsyncResult: headers = { @@ -33,7 +34,7 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin): "Sec-Fetch-Site": "same-origin", "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", } - async with ClientSession(headers=headers) as session: + async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session: data = { "messages": messages, "stream": True, diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py index 15232c8d..9c210f0b 100644 --- a/g4f/Provider/FreeGpt.py +++ b/g4f/Provider/FreeGpt.py @@ -5,15 +5,18 @@ import time, hashlib, random from ..typing import AsyncResult, Messages from ..requests import StreamSession from .base_provider import AsyncGeneratorProvider +from ..errors import RateLimitError domains = [ - 'https://s.aifree.site' + "https://s.aifree.site", + "https://v.aifree.site/" ] class FreeGpt(AsyncGeneratorProvider): - url = "https://freegpts1.aifree.site/" - working = False + url = "https://freegptsnav.aifree.site" + working = True supports_message_history = True + supports_system_message = True supports_gpt_35_turbo = True @classmethod @@ -38,15 +41,14 @@ class FreeGpt(AsyncGeneratorProvider): "pass": None, "sign": generate_signature(timestamp, prompt) } - url = random.choice(domains) - async with session.post(f"{url}/api/generate", json=data) as response: + domain = random.choice(domains) + async with session.post(f"{domain}/api/generate", json=data) as response: response.raise_for_status() async for chunk in response.iter_content(): chunk = chunk.decode() if chunk == "当前地区当日额度已消耗完": - raise RuntimeError("Rate limit reached") + raise RateLimitError("Rate limit reached") yield chunk - def generate_signature(timestamp: int, message: str, secret: str = ""): data = f"{timestamp}:{message}:{secret}" diff --git a/g4f/Provider/GeminiProChat.py b/g4f/Provider/GeminiProChat.py index 8b8fc5dc..9c2d1fb2 100644 --- a/g4f/Provider/GeminiProChat.py +++ b/g4f/Provider/GeminiProChat.py @@ -2,15 +2,18 @@ from __future__ import annotations import time from hashlib import sha256 -from aiohttp import ClientSession +from aiohttp import ClientSession, BaseConnector from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider - +from ..errors import RateLimitError +from ..requests import raise_for_status +from ..requests.aiohttp import 
diff --git a/g4f/Provider/GeminiProChat.py b/g4f/Provider/GeminiProChat.py
index 8b8fc5dc..9c2d1fb2 100644
--- a/g4f/Provider/GeminiProChat.py
+++ b/g4f/Provider/GeminiProChat.py
@@ -2,15 +2,18 @@ from __future__ import annotations
 
 import time
 from hashlib import sha256
-from aiohttp import ClientSession
+from aiohttp import ClientSession, BaseConnector
 
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
-
+from ..errors import RateLimitError
+from ..requests import raise_for_status
+from ..requests.aiohttp import get_connector
 
 class GeminiProChat(AsyncGeneratorProvider):
     url = "https://gemini-chatbot-sigma.vercel.app"
     working = True
+    supports_message_history = True
 
     @classmethod
     async def create_async_generator(
@@ -18,6 +21,7 @@ class GeminiProChat(AsyncGeneratorProvider):
         model: str,
         messages: Messages,
         proxy: str = None,
+        connector: BaseConnector = None,
         **kwargs
     ) -> AsyncResult:
         headers = {
@@ -34,7 +38,7 @@ class GeminiProChat(AsyncGeneratorProvider):
             "Connection": "keep-alive",
             "TE": "trailers",
         }
-        async with ClientSession(headers=headers) as session:
+        async with ClientSession(connector=get_connector(connector, proxy), headers=headers) as session:
             timestamp = int(time.time() * 1e3)
             data = {
                 "messages":[{
@@ -46,7 +50,10 @@ class GeminiProChat(AsyncGeneratorProvider):
                 "sign": generate_signature(timestamp, messages[-1]["content"]),
             }
             async with session.post(f"{cls.url}/api/generate", json=data, proxy=proxy) as response:
-                response.raise_for_status()
+                if response.status == 500:
+                    if "Quota exceeded" in await response.text():
+                        raise RateLimitError(f"Response {response.status}: Rate limit reached")
+                await raise_for_status(response)
                 async for chunk in response.content.iter_any():
                     yield chunk.decode()
diff --git a/g4f/Provider/GptTalkRu.py b/g4f/Provider/GptTalkRu.py
index 16d69f3c..e8c2ffa2 100644
--- a/g4f/Provider/GptTalkRu.py
+++ b/g4f/Provider/GptTalkRu.py
@@ -1,10 +1,13 @@
 from __future__ import annotations
 
-from aiohttp import ClientSession
+from aiohttp import ClientSession, BaseConnector
 
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
-
+from .helper import get_random_string, get_connector
+from ..requests import raise_for_status, get_args_from_browser, WebDriver
+from ..webdriver import has_seleniumwire
+from ..errors import MissingRequirementsError
 
 class GptTalkRu(AsyncGeneratorProvider):
     url = "https://gpttalk.ru"
@@ -17,33 +20,40 @@ class GptTalkRu(AsyncGeneratorProvider):
         model: str,
         messages: Messages,
         proxy: str = None,
+        connector: BaseConnector = None,
+        webdriver: WebDriver = None,
         **kwargs
     ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
-        headers = {
-            "Accept": "application/json, text/plain, */*",
-            "Accept-Language": "en-US",
-            "Connection": "keep-alive",
-            "Content-Type": "application/json",
-            "Origin": "https://gpttalk.ru",
-            "Referer": "https://gpttalk.ru/",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
-            "sec-ch-ua": '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-        }
-        async with ClientSession(headers=headers) as session:
+        if not has_seleniumwire:
+            raise MissingRequirementsError('Install "selenium-wire" package')
+        args = get_args_from_browser(f"{cls.url}", webdriver)
+        args["headers"]["accept"] = "application/json, text/plain, */*"
+        async with ClientSession(connector=get_connector(connector, proxy), **args) as session:
+            async with session.get("https://gpttalk.ru/getToken") as response:
+                await raise_for_status(response)
+                public_key = (await response.json())["response"]["key"]["publicKey"]
+            random_string = get_random_string(8)
             data = {
                 "model": model,
                 "modelType": 1,
                 "prompt": messages,
                 "responseType": "stream",
+                "security": {
+                    "randomMessage": random_string,
+                    "shifrText": encrypt(public_key, random_string)
+                }
             }
             async with session.post(f"{cls.url}/gpt2", json=data, proxy=proxy) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 async for chunk in response.content.iter_any():
-                    yield chunk.decode()
\ No newline at end of file
+                    yield chunk.decode()
+
+def encrypt(public_key: str, value: str) -> str:
+    from Crypto.Cipher import PKCS1_v1_5
+    from Crypto.PublicKey import RSA
+    import base64
+    rsa_key = RSA.importKey(public_key)
+    cipher = PKCS1_v1_5.new(rsa_key)
+    return base64.b64encode(cipher.encrypt(value.encode())).decode()
\ No newline at end of file
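The new GptTalkRu handshake downloads an RSA public key from /getToken and echoes back a random string together with its PKCS#1 v1.5 encryption. A sketch of the same encryption step with PyCryptodome, generating a throwaway key locally instead of fetching one from the service:

    import base64
    from Crypto.PublicKey import RSA
    from Crypto.Cipher import PKCS1_v1_5

    def encrypt(public_key: str, value: str) -> str:
        rsa_key = RSA.importKey(public_key)
        cipher = PKCS1_v1_5.new(rsa_key)
        return base64.b64encode(cipher.encrypt(value.encode())).decode()

    key = RSA.generate(2048)              # stand-in for the key served by /getToken
    token = encrypt(key.publickey().export_key().decode(), "abcdefgh")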
diff --git a/g4f/Provider/Koala.py b/g4f/Provider/Koala.py
index 0e3ba13d..849bcdbe 100644
--- a/g4f/Provider/Koala.py
+++ b/g4f/Provider/Koala.py
@@ -1,17 +1,18 @@
 from __future__ import annotations
 
 import json
-from aiohttp import ClientSession
+from aiohttp import ClientSession, BaseConnector
 
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from .helper import get_random_string, get_connector
+from ..requests import raise_for_status
 
 class Koala(AsyncGeneratorProvider):
     url = "https://koala.sh"
+    working = True
     supports_gpt_35_turbo = True
     supports_message_history = True
-    working = True
 
     @classmethod
     async def create_async_generator(
@@ -19,35 +20,36 @@ class Koala(AsyncGeneratorProvider):
         model: str,
         messages: Messages,
         proxy: str = None,
+        connector: BaseConnector = None,
         **kwargs
     ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
         headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
             "Accept": "text/event-stream",
             "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
             "Accept-Encoding": "gzip, deflate, br",
             "Referer": f"{cls.url}/chat",
-            "Content-Type": "application/json",
             "Flag-Real-Time-Data": "false",
             "Visitor-ID": get_random_string(20),
             "Origin": cls.url,
             "Alt-Used": "koala.sh",
-            "Connection": "keep-alive",
             "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-origin",
-            "Pragma": "no-cache",
-            "Cache-Control": "no-cache",
             "TE": "trailers",
         }
-        async with ClientSession(headers=headers) as session:
+        async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
+            input = messages[-1]["content"]
+            system_messages = [message["content"] for message in messages if message["role"] == "system"]
+            if system_messages:
+                input += " ".join(system_messages)
             data = {
-                "input": messages[-1]["content"],
+                "input": input,
                 "inputHistory": [
                     message["content"]
-                    for message in messages
+                    for message in messages[:-1]
                     if message["role"] == "user"
                 ],
                 "outputHistory": [
@@ -58,7 +60,7 @@ class Koala(AsyncGeneratorProvider):
                 "model": model,
             }
             async with session.post(f"{cls.url}/api/gpt/", json=data, proxy=proxy) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 async for chunk in response.content:
                     if chunk.startswith(b"data: "):
                         yield json.loads(chunk[6:])
\ No newline at end of file
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 92154d7d..b5e7cbe7 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -7,7 +7,7 @@ from aiohttp import ClientSession, BaseConnector
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_connector
-from ..errors import RateLimitError
+from ..requests import raise_for_status
 
 models = {
     "gpt-4": {
@@ -76,6 +76,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://liaobots.site"
     working = True
     supports_message_history = True
+    supports_system_message = True
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
     default_model = "gpt-3.5-turbo"
@@ -116,19 +117,17 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
                     data={"token": "abcdefghijklmnopqrst"},
                     verify_ssl=False
                 ) as response:
-                    response.raise_for_status()
+                    await raise_for_status(response)
                 async with session.post(
                     "https://liaobots.work/api/user",
                     proxy=proxy,
                     json={"authcode": ""},
                     verify_ssl=False
                 ) as response:
-                    if response.status == 401:
-                        raise RateLimitError("Rate limit reached. Use a other provider or ip address")
-                    response.raise_for_status()
+                    await raise_for_status(response)
                     cls._auth_code = (await response.json(content_type=None))["authCode"]
                     cls._cookie_jar = session.cookie_jar
-
+
             data = {
                 "conversationId": str(uuid.uuid4()),
                 "model": models[cls.get_model(model)],
@@ -143,7 +142,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
                 headers={"x-auth-code": cls._auth_code},
                 verify_ssl=False
             ) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 async for chunk in response.content.iter_any():
                     if b"<html coupert-item=" in chunk:
                         raise RuntimeError("Invalid session")
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index de2d1b71..6c80efee 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -14,7 +14,7 @@ WS_URL = "wss://labs-api.perplexity.ai/socket.io/"
 class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://labs.perplexity.ai"
     working = True
-    default_model = "sonar-medium-online"
+    default_model = "mixtral-8x7b-instruct"
     models = [
         "sonar-small-online", "sonar-medium-online", "sonar-small-chat", "sonar-medium-chat", "mistral-7b-instruct",
         "codellama-70b-instruct", "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct",
diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py
index 2f7dc436..5a1e9f0e 100644
--- a/g4f/Provider/Pi.py
+++ b/g4f/Provider/Pi.py
@@ -4,12 +4,13 @@ import json
 
 from ..typing import CreateResult, Messages
 from .base_provider import AbstractProvider, format_prompt
-from ..requests import Session, get_session_from_browser
+from ..requests import Session, get_session_from_browser, raise_for_status
 
 class Pi(AbstractProvider):
     url = "https://pi.ai/talk"
     working = True
     supports_stream = True
+    _session = None
 
     @classmethod
     def create_completion(
@@ -17,20 +18,19 @@ class Pi(AbstractProvider):
         model: str,
         messages: Messages,
         stream: bool,
-        session: Session = None,
         proxy: str = None,
         timeout: int = 180,
         conversation_id: str = None,
         **kwargs
     ) -> CreateResult:
-        if not session:
-            session = get_session_from_browser(url=cls.url, proxy=proxy, timeout=timeout)
+        if cls._session is None:
+            cls._session = get_session_from_browser(url=cls.url, proxy=proxy, timeout=timeout)
         if not conversation_id:
-            conversation_id = cls.start_conversation(session)
+            conversation_id = cls.start_conversation(cls._session)
             prompt = format_prompt(messages)
         else:
             prompt = messages[-1]["content"]
-        answer = cls.ask(session, prompt, conversation_id)
+        answer = cls.ask(cls._session, prompt, conversation_id)
         for line in answer:
             if "text" in line:
                 yield line["text"]
@@ -41,8 +41,7 @@ class Pi(AbstractProvider):
             'accept': 'application/json',
             'x-api-version': '3'
         })
-        if 'Just a moment' in response.text:
-            raise RuntimeError('Error: Cloudflare detected')
+        raise_for_status(response)
         return response.json()['conversations'][0]['sid']
 
     def get_chat_history(session: Session, conversation_id: str):
@@ -50,8 +49,7 @@ class Pi(AbstractProvider):
             'conversation': conversation_id,
         }
         response = session.get('https://pi.ai/api/chat/history', params=params)
-        if 'Just a moment' in response.text:
-            raise RuntimeError('Error: Cloudflare detected')
+        raise_for_status(response)
         return response.json()
 
     def ask(session: Session, prompt: str, conversation_id: str):
@@ -61,9 +59,8 @@ class Pi(AbstractProvider):
             'mode': 'BASE',
         }
         response = session.post('https://pi.ai/api/chat', json=json_data, stream=True)
+        raise_for_status(response)
         for line in response.iter_lines():
-            if b'Just a moment' in line:
-                raise RuntimeError('Error: Cloudflare detected')
             if line.startswith(b'data: {"text":'):
                 yield json.loads(line.split(b'data: ')[1])
             elif line.startswith(b'data: {"title":'):
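Pi now keeps one browser-derived session on the class instead of spawning a browser per request. Reduced to its core, the caching pattern looks roughly like this (the wrapper class here is hypothetical; the import path matches the diff):

    from g4f.requests import Session, get_session_from_browser

    class CachedSession:
        _session: Session = None

        @classmethod
        def get(cls, proxy: str = None, timeout: int = 180) -> Session:
            if cls._session is None:
                # expensive step: launches a browser once to collect Cloudflare cookies
                cls._session = get_session_from_browser(url="https://pi.ai/talk", proxy=proxy, timeout=timeout)
            return cls._session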
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index e10aa232..f8faeeaf 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -8,17 +8,18 @@ try:
 except ImportError:
     has_requirements = False
 
-from ..typing import Messages, TypedDict, CreateResult, Any
+from ..typing import Messages, CreateResult
 from .base_provider import AbstractProvider
-from ..errors import MissingRequirementsError
+from ..requests import raise_for_status
+from ..errors import MissingRequirementsError, RateLimitError, ResponseStatusError
 
 class Vercel(AbstractProvider):
     url = 'https://chat.vercel.ai'
     working = True
-    supports_message_history = True
+    supports_message_history = True
+    supports_system_message = True
     supports_gpt_35_turbo = True
     supports_stream = True
-    supports_gpt_4 = False
 
     @staticmethod
     def create_completion(
@@ -26,6 +27,7 @@ class Vercel(AbstractProvider):
         messages: Messages,
         stream: bool,
         proxy: str = None,
+        max_retries: int = 6,
         **kwargs
     ) -> CreateResult:
         if not has_requirements:
@@ -54,19 +56,17 @@ class Vercel(AbstractProvider):
            'messages': messages,
            'id'      : f'{os.urandom(3).hex()}a',
        }
-
-        max_retries = kwargs.get('max_retries', 6)
+        response = None
        for _ in range(max_retries):
            response = requests.post('https://chat.vercel.ai/api/chat',
                                     headers=headers, json=json_data, stream=True, proxies={"https": proxy})
-            try:
-                response.raise_for_status()
-            except:
+            if not response.ok:
                continue
            for token in response.iter_content(chunk_size=None):
                yield token.decode()
            break
-
+        raise_for_status(response)
+
 def get_anti_bot_token() -> str:
     headers = {
         'authority': 'sdk.vercel.ai',
@@ -92,7 +92,7 @@ def get_anti_bot_token() -> str:
     js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
         return (%s)(%s)''' % (raw_data['c'], raw_data['a'])
-
+    
     sec_list = [execjs.compile(js_script).call('')[0], [], "sentinel"]
 
     raw_token = json.dumps({'r': sec_list, 't': raw_data['t']},
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 1fdaf06d..85b60452 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -4,13 +4,14 @@ import re
 import json
 import base64
 import uuid
-from aiohttp import ClientSession, FormData, BaseConnector
+from asyncio import get_running_loop
+from aiohttp import ClientSession, FormData, BaseConnector, CookieJar
 
 from ..typing import AsyncResult, Messages, ImageType, Cookies
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..providers.helper import get_connector, format_prompt
+from .helper import format_prompt, get_connector
 from ..image import to_bytes, ImageResponse
-from ..requests.defaults import DEFAULT_HEADERS
+from ..requests import WebDriver, raise_for_status, get_args_from_browser
 
 class You(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://you.com"
@@ -32,6 +33,8 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
     model_aliases = {
         "claude-v2": "claude-2"
     }
+    _args: dict = None
+    _cookie_jar: CookieJar = None
     _cookies = None
     _cookies_used = 0
 
@@ -43,25 +46,34 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
         image: ImageType = None,
         image_name: str = None,
         connector: BaseConnector = None,
+        webdriver: WebDriver = None,
         proxy: str = None,
         chat_mode: str = "default",
         **kwargs,
     ) -> AsyncResult:
+        if cls._args is None:
+            cls._args = get_args_from_browser(cls.url, webdriver, proxy)
+            cls._cookie_jar = CookieJar(loop=get_running_loop())
+        else:
+            if "cookies" in cls._args:
+                del cls._args["cookies"]
+            cls._cookie_jar._loop = get_running_loop()
+        if image is not None:
+            chat_mode = "agent"
+        elif not model or model == cls.default_model:
+            chat_mode = "default"
+        elif model.startswith("dall-e"):
+            chat_mode = "create"
+        else:
+            chat_mode = "custom"
+            model = cls.get_model(model)
         async with ClientSession(
             connector=get_connector(connector, proxy),
-            headers=DEFAULT_HEADERS
-        ) as client:
-            if image is not None:
-                chat_mode = "agent"
-            elif not model or model == cls.default_model:
-                chat_mode = "default"
-            elif model.startswith("dall-e"):
-                chat_mode = "create"
-            else:
-                chat_mode = "custom"
-                model = cls.get_model(model)
-            cookies = await cls.get_cookies(client) if chat_mode != "default" else None
-            upload = json.dumps([await cls.upload_file(client, cookies, to_bytes(image), image_name)]) if image else ""
+            cookie_jar=cls._cookie_jar,
+            **cls._args
+        ) as session:
+            cookies = await cls.get_cookies(session) if chat_mode != "default" else None
+            upload = json.dumps([await cls.upload_file(session, cookies, to_bytes(image), image_name)]) if image else ""
             #questions = [message["content"] for message in messages if message["role"] == "user"]
             # chat = [
             #     {"question": questions[idx-1], "answer": message["content"]}
@@ -70,8 +82,8 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
             #     and idx < len(questions)
             # ]
             headers = {
-                "Accept": "text/event-stream",
-                "Referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
+                "accept": "text/event-stream",
+                "referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
             }
             data = {
                 "userFiles": upload,
@@ -86,14 +98,14 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
             }
             if chat_mode == "custom":
                 params["selectedAIModel"] = model.replace("-", "_")
-            async with (client.post if chat_mode == "default" else client.get)(
+            async with (session.post if chat_mode == "default" else session.get)(
                 f"{cls.url}/api/streamingSearch",
                 data=data,
                 params=params,
                 headers=headers,
                 cookies=cookies
             ) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 async for line in response.content:
                     if line.startswith(b'event: '):
                         event = line[7:-1].decode()
@@ -115,7 +127,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
                 f"{cls.url}/api/get_nonce",
                 cookies=cookies,
             ) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 upload_nonce = await response.text()
             data = FormData()
             data.add_field('file', file, filename=filename)
@@ -127,8 +139,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
                 },
                 cookies=cookies
             ) as response:
-                if not response.ok:
-                    raise RuntimeError(f"Response: {await response.text()}")
+                await raise_for_status(response)
                 result = await response.json()
             result["user_filename"] = filename
             result["size"] = len(file)
@@ -177,8 +188,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
                     "session_duration_minutes": 129600
                 }
             ) as response:
-                if not response.ok:
-                    raise RuntimeError(f"Response: {await response.text()}")
+                await raise_for_status(response)
                 session = (await response.json())["data"]
             return {
                 "stytch_session": session["session_token"],
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 462fc249..8db3c0d4 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -6,56 +6,36 @@ from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
 from ..providers.create_images import CreateImagesProvider
 
 from .deprecated import *
+from .not_working import *
 from .selenium import *
 from .needs_auth import *
 from .unfinished import *
 
-from .AiAsk import AiAsk
-from .AiChatOnline import AiChatOnline
-from .AItianhu import AItianhu
-from .Aura import Aura
-from .Bestim import Bestim
-from .Bing import Bing
+from .Aura import Aura
+from .Bing import Bing
 from .BingCreateImages import BingCreateImages
-from .ChatAnywhere import ChatAnywhere
-from .ChatBase import ChatBase
-from .ChatForAi import ChatForAi
-from .Chatgpt4Online import Chatgpt4Online
-from .ChatgptAi import ChatgptAi
-from .ChatgptDemo import ChatgptDemo
-from .ChatgptDemoAi import ChatgptDemoAi
-from .ChatgptFree import ChatgptFree
-from .ChatgptLogin import ChatgptLogin
-from .ChatgptNext import ChatgptNext
-from .ChatgptX import ChatgptX
-from .Chatxyz import Chatxyz
-from .DeepInfra import DeepInfra
-from .FakeGpt import FakeGpt
-from .FlowGpt import FlowGpt
-from .FreeChatgpt import FreeChatgpt
-from .FreeGpt import FreeGpt
-from .GeekGpt import GeekGpt
-from .GeminiPro import GeminiPro
-from .GeminiProChat import GeminiProChat
-from .Gpt6 import Gpt6
-from .GPTalk import GPTalk
-from .GptChatly import GptChatly
-from .GptForLove import GptForLove
-from .GptGo import GptGo
-from .GptGod import GptGod
-from .GptTalkRu import GptTalkRu
-from .Hashnode import Hashnode
-from .HuggingChat import HuggingChat
-from .HuggingFace import HuggingFace
-from .Koala import Koala
-from .Liaobots import Liaobots
-from .Llama2 import Llama2
-from .OnlineGpt import OnlineGpt
-from .PerplexityLabs import PerplexityLabs
-from .Pi import Pi
-from .Vercel import Vercel
-from .Ylokh import Ylokh
-from .You import You
+from .ChatForAi import ChatForAi
+from .Chatgpt4Online import Chatgpt4Online
+from .ChatgptAi import ChatgptAi
+from .ChatgptFree import ChatgptFree
+from .ChatgptNext import ChatgptNext
+from .ChatgptX import ChatgptX
+from .DeepInfra import DeepInfra
+from .FlowGpt import FlowGpt
+from .FreeChatgpt import FreeChatgpt
+from .FreeGpt import FreeGpt
+from .GeminiPro import GeminiPro
+from .GeminiProChat import GeminiProChat
+from .GptTalkRu import GptTalkRu
+from .HuggingChat import HuggingChat
+from .HuggingFace import HuggingFace
+from .Koala import Koala
+from .Liaobots import Liaobots
+from .Llama2 import Llama2
+from .PerplexityLabs import PerplexityLabs
+from .Pi import Pi
+from .Vercel import Vercel
+from .You import You
 
 import sys
diff --git a/g4f/Provider/bing/conversation.py b/g4f/Provider/bing/conversation.py
index 4af9e5fe..03f17ee7 100644
--- a/g4f/Provider/bing/conversation.py
+++ b/g4f/Provider/bing/conversation.py
@@ -2,6 +2,8 @@ from __future__ import annotations
 
 import uuid
 from aiohttp import ClientSession
+from ...errors import ResponseStatusError
+from ...requests import raise_for_status
 
 class Conversation:
     """
@@ -32,8 +34,11 @@ async def create_conversation(session: ClientSession, proxy: str = None) -> Conv
        Conversation: An instance representing the created conversation.
     """
     url = 'https://www.bing.com/search?toncp=0&FORM=hpcodx&q=Bing+AI&showconv=1&cc=en'
-    async with session.get(url, proxy=proxy) as response:
-        response.raise_for_status()
+    headers = {
+        "cookie": "; ".join(f"{c.key}={c.value}" for c in session.cookie_jar)
+    }
+    async with session.get(url, headers=headers) as response:
+        await raise_for_status(response)
     headers = {
         "accept": "application/json",
         "sec-fetch-dest": "empty",
@@ -41,25 +46,21 @@ async def create_conversation(session: ClientSession, proxy: str = None) -> Conv
         "sec-fetch-site": "same-origin",
         "x-ms-client-request-id": str(uuid.uuid4()),
         "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.12.3 OS/Windows",
-        "referer": url,
-        "Cookie": "; ".join(f"{c.key}={c.value}" for c in session.cookie_jar)
+        "referer": "https://www.bing.com/search?toncp=0&FORM=hpcodx&q=Bing+AI&showconv=1&cc=en",
+        "cookie": "; ".join(f"{c.key}={c.value}" for c in session.cookie_jar)
     }
-    for k, v in headers.items():
-        session.headers[k] = v
-    url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1'
+    url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1634.0-service-contracts"
     async with session.get(url, headers=headers, proxy=proxy) as response:
-        try:
-            data = await response.json()
-        except:
-            raise RuntimeError(f"Response: {await response.text()}")
-
-        conversationId = data.get('conversationId')
-        clientId = data.get('clientId')
-        conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
-
-        if not conversationId or not clientId or not conversationSignature:
-            raise Exception('Failed to create conversation.')
-        return Conversation(conversationId, clientId, conversationSignature)
+        if response.status == 404:
+            raise ResponseStatusError(f"Response {response.status}: Can't create a new chat")
+        await raise_for_status(response)
+        data = await response.json()
+        conversationId = data.get('conversationId')
+        clientId = data.get('clientId')
+        conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
+        if not conversationId or not clientId or not conversationSignature:
+            raise Exception('Failed to create conversation.')
+        return Conversation(conversationId, clientId, conversationSignature)
 
 async def list_conversations(session: ClientSession) -> list:
     """
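create_conversation now forwards the session's stored cookies explicitly, serialized in the standard "name=value; name=value" header form. A minimal sketch of that join against an aiohttp cookie jar, matching the expression used in the diff:

    from aiohttp import ClientSession

    def cookie_header(session: ClientSession) -> str:
        # iterating the jar yields cookie morsels with .key/.value
        return "; ".join(f"{c.key}={c.value}" for c in session.cookie_jar)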
diff --git a/g4f/Provider/AiAsk.py b/g4f/Provider/deprecated/AiAsk.py
index 094ef076..6ea5f3e0 100644
--- a/g4f/Provider/AiAsk.py
+++ b/g4f/Provider/deprecated/AiAsk.py
@@ -1,8 +1,8 @@
 from __future__ import annotations
 
 from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
 
 class AiAsk(AsyncGeneratorProvider):
     url = "https://e.aiask.me"
a/g4f/Provider/AiChatOnline.py b/g4f/Provider/deprecated/AiChatOnline.py index cc3b5b8e..e690f28e 100644 --- a/g4f/Provider/AiChatOnline.py +++ b/g4f/Provider/deprecated/AiChatOnline.py @@ -3,9 +3,9 @@ from __future__ import annotations import json from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider -from .helper import get_random_string +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider +from ..helper import get_random_string class AiChatOnline(AsyncGeneratorProvider): url = "https://aichatonline.org" diff --git a/g4f/Provider/ChatAnywhere.py b/g4f/Provider/deprecated/ChatAnywhere.py index 5f5f15de..d035eaf0 100644 --- a/g4f/Provider/ChatAnywhere.py +++ b/g4f/Provider/deprecated/ChatAnywhere.py @@ -2,8 +2,8 @@ from __future__ import annotations from aiohttp import ClientSession, ClientTimeout -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider class ChatAnywhere(AsyncGeneratorProvider): diff --git a/g4f/Provider/FakeGpt.py b/g4f/Provider/deprecated/FakeGpt.py index ee14abf4..99b6bb1a 100644 --- a/g4f/Provider/FakeGpt.py +++ b/g4f/Provider/deprecated/FakeGpt.py @@ -3,9 +3,9 @@ from __future__ import annotations import uuid, time, random, json from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider -from .helper import format_prompt, get_random_string +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider +from ..helper import format_prompt, get_random_string class FakeGpt(AsyncGeneratorProvider): diff --git a/g4f/Provider/GPTalk.py b/g4f/Provider/deprecated/GPTalk.py index 5749ff2e..5b36d37b 100644 --- a/g4f/Provider/GPTalk.py +++ b/g4f/Provider/deprecated/GPTalk.py @@ -3,14 +3,14 @@ from __future__ import annotations import secrets, time, json from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider -from .helper import format_prompt +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider +from ..helper import format_prompt class GPTalk(AsyncGeneratorProvider): url = "https://gptalk.net" - working = True + working = False supports_gpt_35_turbo = True _auth = None used_times = 0 diff --git a/g4f/Provider/GeekGpt.py b/g4f/Provider/deprecated/GeekGpt.py index f1dea9b1..7a460083 100644 --- a/g4f/Provider/GeekGpt.py +++ b/g4f/Provider/deprecated/GeekGpt.py @@ -1,8 +1,8 @@ from __future__ import annotations import requests, json -from .base_provider import AbstractProvider -from ..typing import CreateResult, Messages +from ..base_provider import AbstractProvider +from ...typing import CreateResult, Messages from json import dumps diff --git a/g4f/Provider/Hashnode.py b/g4f/Provider/deprecated/Hashnode.py index 7a0c2903..c2c0ffb7 100644 --- a/g4f/Provider/Hashnode.py +++ b/g4f/Provider/deprecated/Hashnode.py @@ -2,9 +2,9 @@ from __future__ import annotations from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider -from .helper import get_random_hex +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider +from ..helper import get_random_hex class SearchTypes(): quick = "quick" diff --git 
a/g4f/Provider/Ylokh.py b/g4f/Provider/deprecated/Ylokh.py index 11fe497f..dbff4602 100644 --- a/g4f/Provider/Ylokh.py +++ b/g4f/Provider/deprecated/Ylokh.py @@ -2,9 +2,9 @@ from __future__ import annotations import json -from ..requests import StreamSession -from .base_provider import AsyncGeneratorProvider -from ..typing import AsyncResult, Messages +from ...requests import StreamSession +from ..base_provider import AsyncGeneratorProvider +from ...typing import AsyncResult, Messages class Ylokh(AsyncGeneratorProvider): url = "https://chat.ylokh.xyz" diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py index 8ec5f2fc..f6b4a1d9 100644 --- a/g4f/Provider/deprecated/__init__.py +++ b/g4f/Provider/deprecated/__init__.py @@ -23,4 +23,12 @@ from .Opchatgpts import Opchatgpts from .Yqcloud import Yqcloud from .Aichat import Aichat from .Berlin import Berlin -from .Phind import Phind
\ No newline at end of file
+from .Phind import Phind
+from .AiAsk import AiAsk
+from .AiChatOnline import AiChatOnline
+from .ChatAnywhere import ChatAnywhere
+from .FakeGpt import FakeGpt
+from .GeekGpt import GeekGpt
+from .GPTalk import GPTalk
+from .Hashnode import Hashnode
+from .Ylokh import Ylokh
\ No newline at end of file
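With the eight re-exports appended to deprecated/__init__.py, each relocated provider stays importable under its new package path. A usage sketch, assuming this revision of g4f is installed (note GPTalk is flipped to working = False in the same change):

from g4f.Provider.deprecated import GPTalk

print(GPTalk.url)      # https://gptalk.net
print(GPTalk.working)  # False at this revision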
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index da5b99f6..338e0966 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -1,2 +1,3 @@
 from ..providers.helper import *
-from ..cookies import get_cookies
\ No newline at end of file
+from ..cookies import get_cookies
+from ..requests.aiohttp import get_connector
\ No newline at end of file
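helper.py stays a thin re-export shim, so provider code written as `from .helper import get_connector` keeps resolving even though the implementation now lives in g4f.requests.aiohttp. A sketch of what the shim guarantees (assumes this revision of g4f is importable):

from g4f.Provider.helper import get_connector as via_shim
from g4f.requests.aiohttp import get_connector as direct

# The shim merely forwards the name; both paths yield the same function object.
assert via_shim is direct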
diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/not_working/AItianhu.py
index 34187694..501b334e 100644
--- a/g4f/Provider/AItianhu.py
+++ b/g4f/Provider/not_working/AItianhu.py
@@ -2,9 +2,9 @@ from __future__ import annotations
 
 import json
 
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
 
 
 class AItianhu(AsyncGeneratorProvider):
diff --git a/g4f/Provider/Bestim.py b/g4f/Provider/not_working/Bestim.py
index 323bd713..94a4d32b 100644
--- a/g4f/Provider/Bestim.py
+++ b/g4f/Provider/not_working/Bestim.py
@@ -1,56 +1,56 @@
-from __future__ import annotations
-
-from ..typing import Messages
-from .base_provider import BaseProvider, CreateResult
-from ..requests import get_session_from_browser
-from uuid import uuid4
-
-class Bestim(BaseProvider):
-    url = "https://chatgpt.bestim.org"
-    supports_gpt_35_turbo = True
-    supports_message_history = True
-    working = False
-    supports_stream = True
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool,
-        proxy: str = None,
-        **kwargs
-    ) -> CreateResult:
-        session = get_session_from_browser(cls.url, proxy=proxy)
-        headers = {
-            'Accept': 'application/json, text/event-stream',
-        }
-        data = {
-            "messagesHistory": [{
-                "id": str(uuid4()),
-                "content": m["content"],
-                "from": "you" if m["role"] == "user" else "bot"
-            } for m in messages],
-            "type": "chat",
-        }
-        response = session.post(
-            url="https://chatgpt.bestim.org/chat/send2/",
-            json=data,
-            headers=headers,
-            stream=True
-        )
-        response.raise_for_status()
-        for line in response.iter_lines():
-            if not line.startswith(b"event: trylimit"):
-                yield line.decode().removeprefix("data: ")
-
-
-
-
-
-
-
-
-
-
-
+from __future__ import annotations
+
+from ...typing import Messages
+from ..base_provider import BaseProvider, CreateResult
+from ...requests import get_session_from_browser
+from uuid import uuid4
+
+class Bestim(BaseProvider):
+    url = "https://chatgpt.bestim.org"
+    working = False
+    supports_gpt_35_turbo = True
+    supports_message_history = True
+    supports_stream = True
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        proxy: str = None,
+        **kwargs
+    ) -> CreateResult:
+        session = get_session_from_browser(cls.url, proxy=proxy)
+        headers = {
+            'Accept': 'application/json, text/event-stream',
+        }
+        data = {
+            "messagesHistory": [{
+                "id": str(uuid4()),
+                "content": m["content"],
+                "from": "you" if m["role"] == "user" else "bot"
+            } for m in messages],
+            "type": "chat",
+        }
+        response = session.post(
+            url="https://chatgpt.bestim.org/chat/send2/",
+            json=data,
+            headers=headers,
+            stream=True
+        )
+        response.raise_for_status()
+        for line in response.iter_lines():
+            if not line.startswith(b"event: trylimit"):
+                yield line.decode().removeprefix("data: ")
+
+
+
+
+
+
+
+
+
+
+
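Bestim's generator filters a server-sent-event stream: frames tagged `event: trylimit` are dropped, and the `data: ` prefix is stripped from everything else. The same filter in isolation, over made-up bytes standing in for response.iter_lines() output (removeprefix needs Python 3.9+):

frames = [b"data: Hello", b"event: trylimit", b"data:  world"]

chunks = [
    line.decode().removeprefix("data: ")
    for line in frames
    if not line.startswith(b"event: trylimit")
]
print("".join(chunks))  # Hello world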
diff --git a/g4f/Provider/ChatBase.py b/g4f/Provider/not_working/ChatBase.py
index 996ca39a..ef1c8f99 100644
--- a/g4f/Provider/ChatBase.py
+++ b/g4f/Provider/not_working/ChatBase.py
@@ -2,15 +2,15 @@ from __future__ import annotations
 
 from aiohttp import ClientSession
 
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
 
 
 class ChatBase(AsyncGeneratorProvider):
     url = "https://www.chatbase.co"
+    working = False
     supports_gpt_35_turbo = True
     supports_message_history = True
-    working = True
     jailbreak = True
     list_incorrect_responses = ["support@chatbase", "about Chatbase"]
diff --git a/g4f/Provider/ChatgptDemo.py b/g4f/Provider/not_working/ChatgptDemo.py
index 666b5753..593a2d29 100644
--- a/g4f/Provider/ChatgptDemo.py
+++ b/g4f/Provider/not_working/ChatgptDemo.py
@@ -1,16 +1,17 @@
 from __future__ import annotations
 
-import time, json, re
+import time, json, re, asyncio
 from aiohttp import ClientSession
 
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ...errors import RateLimitError
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
 
 class ChatgptDemo(AsyncGeneratorProvider):
-    url = "https://chat.chatgptdemo.net"
-    supports_gpt_35_turbo = True
+    url = "https://chatgptdemo.info/chat"
     working = False
+    supports_gpt_35_turbo = True
 
     @classmethod
     async def create_async_generator(
@@ -21,10 +22,10 @@ class ChatgptDemo(AsyncGeneratorProvider):
         **kwargs
     ) -> AsyncResult:
         headers = {
-            "authority": "chat.chatgptdemo.net",
-            "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US",
-            "origin": "https://chat.chatgptdemo.net",
-            "referer": "https://chat.chatgptdemo.net/",
+            "authority": "chatgptdemo.info",
+            "accept-language": "en-US",
+            "origin": "https://chatgptdemo.info",
+            "referer": "https://chatgptdemo.info/chat/",
             "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
             "sec-ch-ua-mobile": "?0",
             "sec-ch-ua-platform": '"Linux"',
@@ -36,28 +37,29 @@ class ChatgptDemo(AsyncGeneratorProvider):
         async with ClientSession(headers=headers) as session:
            async with session.get(f"{cls.url}/", proxy=proxy) as response:
                response.raise_for_status()
-                response = await response.text()
-
-                result = re.search(
-                    r'<div id="USERID" style="display: none">(.*?)<\/div>',
-                    response,
-                )
-
-                if result:
-                    user_id = result.group(1)
-                else:
-                    raise RuntimeError("No user id found")
-            async with session.post(f"{cls.url}/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
+                text = await response.text()
+                result = re.search(
+                    r'<div id="USERID" style="display: none">(.*?)<\/div>',
+                    text,
+                )
+                if result:
+                    user_id = result.group(1)
+                else:
+                    raise RuntimeError("No user id found")
+            async with session.post(f"https://chatgptdemo.info/chat/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
                 response.raise_for_status()
                 chat_id = (await response.json())["id_"]
                 if not chat_id:
                     raise RuntimeError("Could not create new chat")
+                await asyncio.sleep(10)
             data = {
                 "question": format_prompt(messages),
                 "chat_id": chat_id,
-                "timestamp": int(time.time()*1000),
+                "timestamp": int((time.time())*1e3),
             }
-            async with session.post(f"{cls.url}/chat_api_stream", json=data, proxy=proxy) as response:
+            async with session.post(f"https://chatgptdemo.info/chat/chat_api_stream", json=data, proxy=proxy) as response:
+                if response.status == 429:
+                    raise RateLimitError("Rate limit reached")
                 response.raise_for_status()
                 async for line in response.content:
                     if line.startswith(b"data: "):
diff --git a/g4f/Provider/ChatgptDemoAi.py b/g4f/Provider/not_working/ChatgptDemoAi.py
index a8c98b65..6cdd0c7a 100644
--- a/g4f/Provider/ChatgptDemoAi.py
+++ b/g4f/Provider/not_working/ChatgptDemoAi.py
@@ -3,9 +3,9 @@ from __future__ import annotations
 import json
 from aiohttp import ClientSession
 
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string
 
 class ChatgptDemoAi(AsyncGeneratorProvider):
     url = "https://chat.chatgptdemo.ai"
@@ -49,6 +49,7 @@ class ChatgptDemoAi(AsyncGeneratorProvider):
         async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
             response.raise_for_status()
             async for chunk in response.content:
+                response.raise_for_status()
                 if chunk.startswith(b"data: "):
                     data = json.loads(chunk[6:])
                     if data["type"] == "live":
diff --git a/g4f/Provider/ChatgptLogin.py b/g4f/Provider/not_working/ChatgptLogin.py
index 037e0a6e..6e9d57c4 100644
--- a/g4f/Provider/ChatgptLogin.py
+++ b/g4f/Provider/not_working/ChatgptLogin.py
@@ -5,15 +5,15 @@ import time
 import json
 from aiohttp import ClientSession
 
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
 
 
 class ChatgptLogin(AsyncGeneratorProvider):
     url = "https://chatgptlogin.ai"
-    supports_gpt_35_turbo = True
     working = False
+    supports_gpt_35_turbo = True
     _user_id = None
 
     @classmethod
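The reworked ChatgptDemo maps HTTP 429 to g4f's RateLimitError before the generic raise_for_status() check, so callers can distinguish throttling from other failures. The pattern on its own (the URL is illustrative, and RateLimitError is redefined here to keep the sketch self-contained):

import asyncio
from aiohttp import ClientSession

class RateLimitError(Exception):
    """Stand-in for g4f.errors.RateLimitError."""

async def post_checked(url: str, payload: dict) -> str:
    async with ClientSession() as session:
        async with session.post(url, json=payload) as response:
            if response.status == 429:
                # Specific error first; raise_for_status() would mask it.
                raise RateLimitError("Rate limit reached")
            response.raise_for_status()
            return await response.text()

# asyncio.run(post_checked("https://example.com/chat_api_stream", {"question": "hi"}))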
diff --git a/g4f/Provider/Chatxyz.py b/g4f/Provider/not_working/Chatxyz.py
index dd1216aa..a1b3638e 100644
--- a/g4f/Provider/Chatxyz.py
+++ b/g4f/Provider/not_working/Chatxyz.py
@@ -3,8 +3,8 @@ from __future__ import annotations
 import json
 from aiohttp import ClientSession
 
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
 
 class Chatxyz(AsyncGeneratorProvider):
     url = "https://chat.3211000.xyz"
diff --git a/g4f/Provider/Gpt6.py b/g4f/Provider/not_working/Gpt6.py
index b8a294e2..0c1bdcc5 100644
--- a/g4f/Provider/Gpt6.py
+++ b/g4f/Provider/not_working/Gpt6.py
@@ -3,14 +3,12 @@ from __future__ import annotations
 import json
 from aiohttp import ClientSession
 
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
-
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
 
 class Gpt6(AsyncGeneratorProvider):
     url = "https://gpt6.ai"
-    working = True
+    working = False
     supports_gpt_35_turbo = True
 
     @classmethod
@@ -45,6 +43,7 @@ class Gpt6(AsyncGeneratorProvider):
         async with session.post(f"https://seahorse-app-d29hu.ondigitalocean.app/api/v1/query", json=data, proxy=proxy) as response:
             response.raise_for_status()
             async for line in response.content:
+                print(line)
                 if line.startswith(b"data: [DONE]"):
                     break
                 elif line.startswith(b"data: "):
diff --git a/g4f/Provider/GptChatly.py b/g4f/Provider/not_working/GptChatly.py
index 9fb739a8..a1e3dd74 100644
--- a/g4f/Provider/GptChatly.py
+++ b/g4f/Provider/not_working/GptChatly.py
@@ -1,13 +1,13 @@
 from __future__ import annotations
 
-from ..requests import Session, get_session_from_browser
-from ..typing import Messages
-from .base_provider import AsyncProvider
+from ...requests import Session, get_session_from_browser
+from ...typing import Messages
+from ..base_provider import AsyncProvider
 
 
 class GptChatly(AsyncProvider):
     url = "https://gptchatly.com"
-    working = True
+    working = False
     supports_message_history = True
     supports_gpt_35_turbo = True
 
diff --git a/g4f/Provider/GptForLove.py b/g4f/Provider/not_working/GptForLove.py
index cc82da21..4c578227 100644
--- a/g4f/Provider/GptForLove.py
+++ b/g4f/Provider/not_working/GptForLove.py
@@ -9,14 +9,14 @@ try:
 except ImportError:
     has_requirements = False
 
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
-from ..errors import MissingRequirementsError
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+from ...errors import MissingRequirementsError
 
 class GptForLove(AsyncGeneratorProvider):
     url = "https://ai18.gptforlove.com"
-    working = True
+    working = False
     supports_gpt_35_turbo = True
 
     @classmethod
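GptForLove's hunk shows the optional-dependency idiom used here: the import is attempted once at module load, and failure is deferred until the provider is actually used. A self-contained sketch; the guarded module is not visible in the hunk, so `execjs` is an assumption, and MissingRequirementsError is redefined to keep the sketch standalone:

try:
    import execjs  # assumed guarded dependency; the real name is elided in the hunk
    has_requirements = True
except ImportError:
    has_requirements = False

class MissingRequirementsError(Exception):
    """Stand-in for g4f.errors.MissingRequirementsError."""

def require_js_runtime() -> None:
    # Deferred failure: importing the provider module stays cheap;
    # only calling into it raises when the dependency is missing.
    if not has_requirements:
        raise MissingRequirementsError('Install the "PyExecJS" package')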
diff --git a/g4f/Provider/GptGo.py b/g4f/Provider/not_working/GptGo.py
index 538bb7b6..363aabea 100644
--- a/g4f/Provider/GptGo.py
+++ b/g4f/Provider/not_working/GptGo.py
@@ -4,14 +4,14 @@ from aiohttp import ClientSession
 import json
 import base64
 
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, format_prompt
 
 
 class GptGo(AsyncGeneratorProvider):
     url = "https://gptgo.ai"
+    working = False
     supports_gpt_35_turbo = True
-    working = True
 
     @classmethod
     async def create_async_generator(
@@ -44,6 +44,8 @@ class GptGo(AsyncGeneratorProvider):
         ) as response:
             response.raise_for_status()
             token = await response.text();
+            if token == "error token":
+                raise RuntimeError(f"Response: {token}")
             token = base64.b64decode(token[10:-20]).decode()
 
         async with session.get(
@@ -57,6 +59,8 @@ class GptGo(AsyncGeneratorProvider):
                     break
                 if line.startswith(b"data: "):
                     line = json.loads(line[6:])
+                    if "choices" not in line:
+                        raise RuntimeError(f"Response: {line}")
                     content = line["choices"][0]["delta"].get("content")
                     if content and content != "\n#GPTGO ":
                         yield content
diff --git a/g4f/Provider/GptGod.py b/g4f/Provider/not_working/GptGod.py
index 08d9269e..46b40645 100644
--- a/g4f/Provider/GptGod.py
+++ b/g4f/Provider/not_working/GptGod.py
@@ -4,14 +4,14 @@ import secrets
 import json
 from aiohttp import ClientSession
 
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
 
 class GptGod(AsyncGeneratorProvider):
     url = "https://gptgod.site"
-    supports_gpt_35_turbo = True
     working = False
+    supports_gpt_35_turbo = True
 
     @classmethod
     async def create_async_generator(
diff --git a/g4f/Provider/OnlineGpt.py b/g4f/Provider/not_working/OnlineGpt.py
index 9f0d11c4..f4f3a846 100644
--- a/g4f/Provider/OnlineGpt.py
+++ b/g4f/Provider/not_working/OnlineGpt.py
@@ -3,14 +3,13 @@ from __future__ import annotations
 import json
 from aiohttp import ClientSession
 
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
-
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string
 
 class OnlineGpt(AsyncGeneratorProvider):
     url = "https://onlinegpt.org"
-    working = True
+    working = False
     supports_gpt_35_turbo = True
     supports_message_history = False
 
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
new file mode 100644
index 00000000..4778c968
--- /dev/null
+++ b/g4f/Provider/not_working/__init__.py
@@ -0,0 +1,14 @@
+
+from .AItianhu import AItianhu
+from .Bestim import Bestim
+from .ChatBase import ChatBase
+from .ChatgptDemo import ChatgptDemo
+from .ChatgptDemoAi import ChatgptDemoAi
+from .ChatgptLogin import ChatgptLogin
+from .Chatxyz import Chatxyz
+from .Gpt6 import Gpt6
+from .GptChatly import GptChatly
+from .GptForLove import GptForLove
+from .GptGo import GptGo
+from .GptGod import GptGod
+from .OnlineGpt import OnlineGpt
\ No newline at end of file
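The new not_working/__init__.py mirrors the deprecated aggregator, and every class it collects now carries working = False, the flag callers should consult before picking a provider. A sketch of that check, assuming this revision of g4f is installed:

from g4f.Provider import not_working

disabled = sorted(
    name
    for name, cls in vars(not_working).items()
    if isinstance(cls, type) and getattr(cls, "working", True) is False
)
print(disabled)  # e.g. ['AItianhu', 'Bestim', 'ChatBase', ...]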