From a3af9fac3ee152399ba031e2124149fdcf47bc33 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Sun, 22 Oct 2023 01:22:25 +0200 Subject: Add FakeGpt Provider Update providers in models --- g4f/Provider/FakeGpt.py | 94 ++++++++++++++++++++++++++++++++++++++++++++++++ g4f/Provider/__init__.py | 3 ++ g4f/models.py | 26 ++++++-------- 3 files changed, 108 insertions(+), 15 deletions(-) create mode 100644 g4f/Provider/FakeGpt.py (limited to 'g4f') diff --git a/g4f/Provider/FakeGpt.py b/g4f/Provider/FakeGpt.py new file mode 100644 index 00000000..43298a4c --- /dev/null +++ b/g4f/Provider/FakeGpt.py @@ -0,0 +1,94 @@ +from __future__ import annotations + +import uuid, time, random, string, json +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider +from .helper import format_prompt + + +class FakeGpt(AsyncGeneratorProvider): + url = "https://chat-shared2.zhile.io" + supports_gpt_35_turbo = True + working = True + _access_token = None + _cookie_jar = None + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + headers = { + "Accept-Language": "en-US", + "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36", + "Referer": "https://chat-shared2.zhile.io/?v=2", + "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"', + "sec-ch-ua-platform": '"Linux"', + "sec-ch-ua-mobile": "?0", + } + async with ClientSession(headers=headers, cookie_jar=cls._cookie_jar) as session: + if not cls._access_token: + async with session.get(f"{cls.url}/api/loads", params={"t": int(time.time())}, proxy=proxy) as response: + response.raise_for_status() + list = (await response.json())["loads"] + token_ids = [t["token_id"] for t in list if t["count"] == 0] + data = { + "token_key": random.choice(token_ids), + "session_password": random_string() + } + async with session.post(f"{cls.url}/auth/login", data=data, proxy=proxy) as response: + response.raise_for_status() + async with session.get(f"{cls.url}/api/auth/session", proxy=proxy) as response: + response.raise_for_status() + cls._access_token = (await response.json())["accessToken"] + cls._cookie_jar = session.cookie_jar + headers = { + "Content-Type": "application/json", + "Accept": "text/event-stream", + "X-Authorization": f"Bearer {cls._access_token}", + } + prompt = format_prompt(messages) + data = { + "action": "next", + "messages": [ + { + "id": str(uuid.uuid4()), + "author": {"role": "user"}, + "content": {"content_type": "text", "parts": [prompt]}, + "metadata": {}, + } + ], + "parent_message_id": str(uuid.uuid4()), + "model": "text-davinci-002-render-sha", + "plugin_ids": [], + "timezone_offset_min": -120, + "suggestions": [], + "history_and_training_disabled": True, + "arkose_token": "", + "force_paragen": False, + } + last_message = "" + async with session.post(f"{cls.url}/api/conversation", json=data, headers=headers, proxy=proxy) as response: + async for line in response.content: + if line.startswith(b"data: "): + line = line[6:] + if line == b"[DONE]": + break + try: + line = json.loads(line) + if line["message"]["metadata"]["message_type"] == "next": + new_message = line["message"]["content"]["parts"][0] + yield new_message[len(last_message):] + last_message = new_message + except: + continue + if not last_message: + raise RuntimeError("No valid response") + +def random_string(length: int = 10): + return 
''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length)) \ No newline at end of file diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index 1dfa6a8d..a465b428 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -17,6 +17,7 @@ from .ChatgptFree import ChatgptFree from .ChatgptLogin import ChatgptLogin from .ChatgptX import ChatgptX from .Cromicle import Cromicle +from .FakeGpt import FakeGpt from .FreeGpt import FreeGpt from .GPTalk import GPTalk from .GptChatly import GptChatly @@ -73,6 +74,7 @@ class ProviderUtils: 'Equing': Equing, 'FastGpt': FastGpt, 'Forefront': Forefront, + 'FakeGpt': FakeGpt, 'FreeGpt': FreeGpt, 'GPTalk': GPTalk, 'GptChatly': GptChatly, @@ -143,6 +145,7 @@ __all__ = [ 'DfeHub', 'EasyChat', 'Forefront', + 'FakeGpt', 'FreeGpt', 'GPTalk', 'GptChatly', diff --git a/g4f/models.py b/g4f/models.py index 0ebe7395..22a04ffb 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -4,19 +4,18 @@ from .typing import Union from .Provider import BaseProvider, RetryProvider from .Provider import ( ChatgptLogin, - ChatgptDemo, ChatgptDuo, GptForLove, - Opchatgpts, ChatgptAi, GptChatly, Liaobots, ChatgptX, + ChatBase, Yqcloud, GeekGpt, + FakeGpt, Myshell, FreeGpt, - Cromicle, NoowAi, Vercel, Aichat, @@ -30,9 +29,6 @@ from .Provider import ( Bing, You, H2o, - - ChatForAi, - ChatBase ) @dataclass(unsafe_hash=True) @@ -50,9 +46,8 @@ default = Model( base_provider = "", best_provider = RetryProvider([ Bing, # Not fully GPT 3 or 4 - Yqcloud, # Answers short questions in chinese - ChatgptDuo, # Include search results - Aibn, Aichat, ChatgptAi, ChatgptLogin, FreeGpt, GptGo, Myshell, Ylokh, GeekGpt + AiAsk, Aichat, ChatgptAi, FreeGpt, GptGo, GeekGpt, + Phind, You ]) ) @@ -61,9 +56,10 @@ gpt_35_long = Model( name = 'gpt-3.5-turbo', base_provider = 'openai', best_provider = RetryProvider([ - AiAsk, Aichat, ChatgptDemo, FreeGpt, Liaobots, You, - GPTalk, ChatgptLogin, GptChatly, GptForLove, Opchatgpts, - NoowAi, GeekGpt, Phind + AiAsk, Aichat, FreeGpt, You, + GptChatly, GptForLove, + NoowAi, GeekGpt, Phind, + FakeGpt ]) ) @@ -72,8 +68,8 @@ gpt_35_turbo = Model( name = 'gpt-3.5-turbo', base_provider = 'openai', best_provider=RetryProvider([ - ChatgptX, ChatgptDemo, GptGo, You, - NoowAi, GPTalk, GptForLove, Phind, ChatBase, Cromicle + ChatgptX, GptGo, You, + NoowAi, GPTalk, GptForLove, Phind, ChatBase ]) ) @@ -81,7 +77,7 @@ gpt_4 = Model( name = 'gpt-4', base_provider = 'openai', best_provider = RetryProvider([ - Bing, GeekGpt, Liaobots, Phind + Bing, GeekGpt, Phind ]) ) -- cgit v1.2.3 From 13e89d6ab9e3016741fdcbd03cc4996faec54f9a Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Sun, 22 Oct 2023 08:57:31 +0200 Subject: Fix MyShell Provider --- g4f/Provider/FakeGpt.py | 4 +- g4f/Provider/Geekgpt.py | 13 +-- g4f/Provider/Liaobots.py | 2 +- g4f/Provider/MyShell.py | 89 +++++++++++++++ g4f/Provider/Myshell.py | 219 ------------------------------------ g4f/Provider/Vercel.py | 8 +- g4f/Provider/Yqcloud.py | 11 +- g4f/Provider/__init__.py | 4 +- g4f/Provider/deprecated/Myshell.py | 219 ++++++++++++++++++++++++++++++++++++ g4f/Provider/deprecated/__init__.py | 3 +- g4f/models.py | 7 -- 11 files changed, 332 insertions(+), 247 deletions(-) create mode 100644 g4f/Provider/MyShell.py delete mode 100644 g4f/Provider/Myshell.py create mode 100644 g4f/Provider/deprecated/Myshell.py (limited to 'g4f') diff --git a/g4f/Provider/FakeGpt.py b/g4f/Provider/FakeGpt.py index 43298a4c..5bce1280 100644 --- a/g4f/Provider/FakeGpt.py +++ 
b/g4f/Provider/FakeGpt.py @@ -83,8 +83,8 @@ class FakeGpt(AsyncGeneratorProvider): line = json.loads(line) if line["message"]["metadata"]["message_type"] == "next": new_message = line["message"]["content"]["parts"][0] - yield new_message[len(last_message):] - last_message = new_message + yield new_message[len(last_message):] + last_message = new_message except: continue if not last_message: diff --git a/g4f/Provider/Geekgpt.py b/g4f/Provider/Geekgpt.py index 1a82757c..3bbc0e75 100644 --- a/g4f/Provider/Geekgpt.py +++ b/g4f/Provider/Geekgpt.py @@ -21,12 +21,12 @@ class GeekGpt(BaseProvider): json_data = { 'messages': messages, - 'model': model, - 'temperature': kwargs.get('temperature', 0.9), - 'presence_penalty': kwargs.get('presence_penalty', 0), - 'top_p': kwargs.get('top_p', 1), - 'frequency_penalty': kwargs.get('frequency_penalty', 0), - 'stream': True + 'model': model, + 'temperature': kwargs.get('temperature', 0.9), + 'presence_penalty': kwargs.get('presence_penalty', 0), + 'top_p': kwargs.get('top_p', 1), + 'frequency_penalty': kwargs.get('frequency_penalty', 0), + 'stream': True } data = dumps(json_data, separators=(',', ':')) @@ -61,7 +61,6 @@ class GeekGpt(BaseProvider): try: content = json.loads(json_data)["choices"][0]["delta"].get("content") - except Exception as e: raise RuntimeError(f'error | {e} :', json_data) diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py index 72731728..740be856 100644 --- a/g4f/Provider/Liaobots.py +++ b/g4f/Provider/Liaobots.py @@ -30,7 +30,7 @@ models = { class Liaobots(AsyncGeneratorProvider): url = "https://liaobots.site" - working = False + working = True supports_gpt_35_turbo = True supports_gpt_4 = True _auth_code = None diff --git a/g4f/Provider/MyShell.py b/g4f/Provider/MyShell.py new file mode 100644 index 00000000..fefd08f4 --- /dev/null +++ b/g4f/Provider/MyShell.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +import time, random, json + +from ..requests import StreamSession +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider +from .helper import format_prompt + +class MyShell(AsyncGeneratorProvider): + url = "https://api.myshell.ai/v1/bot/chat/send_message" + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + timeout: int = 120, + **kwargs + ) -> AsyncResult: + user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36" + headers = { + "User-Agent": user_agent, + "Myshell-Service-Name": "organics-api", + "Visitor-Id": generate_visitor_id(user_agent) + } + async with StreamSession( + impersonate="chrome107", + proxies={"https": proxy}, + timeout=timeout, + headers=headers + ) as session: + prompt = format_prompt(messages) + data = { + "botId": "1", + "conversation_scenario": 3, + "message": prompt, + "messageType": 1 + } + async with session.post(cls.url, json=data) as response: + response.raise_for_status() + event = None + async for line in response.iter_lines(): + if line.startswith(b"event: "): + event = line[7:] + elif event == b"MESSAGE_REPLY_SSE_ELEMENT_EVENT_NAME_TEXT": + if line.startswith(b"data: "): + yield json.loads(line[6:])["content"] + if event == b"MESSAGE_REPLY_SSE_ELEMENT_EVENT_NAME_TEXT_STREAM_PUSH_FINISHED": + break + + +def xor_hash(B: str): + r = [] + i = 0 + + def o(e, t): + o_val = 0 + for i in range(len(t)): + o_val |= r[i] << (8 * i) + return e ^ o_val + + for e in range(len(B)): + t = ord(B[e]) + r.insert(0, 255 & t) + + if 
len(r) >= 4: + i = o(i, r) + r = [] + + if len(r) > 0: + i = o(i, r) + + return hex(i)[2:] + +def performance() -> str: + t = int(time.time() * 1000) + e = 0 + while t == int(time.time() * 1000): + e += 1 + return hex(t)[2:] + hex(e)[2:] + +def generate_visitor_id(user_agent: str) -> str: + f = performance() + r = hex(int(random.random() * (16**16)))[2:-2] + d = xor_hash(user_agent) + e = hex(1080 * 1920)[2:] + return f"{f}-{r}-{d}-{e}-{f}" \ No newline at end of file diff --git a/g4f/Provider/Myshell.py b/g4f/Provider/Myshell.py deleted file mode 100644 index 096545f9..00000000 --- a/g4f/Provider/Myshell.py +++ /dev/null @@ -1,219 +0,0 @@ -# not using WS anymore - -from __future__ import annotations - -import json, uuid, hashlib, time, random - -from aiohttp import ClientSession -from aiohttp.http import WSMsgType -import asyncio - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, format_prompt - - -models = { - "samantha": "1e3be7fe89e94a809408b1154a2ee3e1", - "gpt-3.5-turbo": "8077335db7cd47e29f7de486612cc7fd", - "gpt-4": "01c8de4fbfc548df903712b0922a4e01", -} - - -class Myshell(AsyncGeneratorProvider): - url = "https://app.myshell.ai/chat" - working = False - supports_gpt_35_turbo = True - supports_gpt_4 = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - timeout: int = 90, - **kwargs - ) -> AsyncResult: - if not model: - bot_id = models["samantha"] - elif model in models: - bot_id = models[model] - else: - raise ValueError(f"Model are not supported: {model}") - - user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36' - visitor_id = generate_visitor_id(user_agent) - - async with ClientSession( - headers={'User-Agent': user_agent} - ) as session: - async with session.ws_connect( - "wss://api.myshell.ai/ws/?EIO=4&transport=websocket", - autoping=False, - timeout=timeout, - proxy=proxy - ) as wss: - # Send and receive hello message - await wss.receive_str() - message = json.dumps({"token": None, "visitorId": visitor_id}) - await wss.send_str(f"40/chat,{message}") - await wss.receive_str() - - # Fix "need_verify_captcha" issue - await asyncio.sleep(5) - - # Create chat message - text = format_prompt(messages) - chat_data = json.dumps(["text_chat",{ - "reqId": str(uuid.uuid4()), - "botUid": bot_id, - "sourceFrom": "myshellWebsite", - "text": text, - **generate_signature(text) - }]) - - # Send chat message - chat_start = "42/chat," - chat_message = f"{chat_start}{chat_data}" - await wss.send_str(chat_message) - - # Receive messages - async for message in wss: - if message.type != WSMsgType.TEXT: - continue - # Ping back - if message.data == "2": - await wss.send_str("3") - continue - # Is not chat message - if not message.data.startswith(chat_start): - continue - data_type, data = json.loads(message.data[len(chat_start):]) - if data_type == "text_stream": - if data["data"]["text"]: - yield data["data"]["text"] - elif data["data"]["isFinal"]: - break - elif data_type in ("message_replied", "need_verify_captcha"): - raise RuntimeError(f"Received unexpected message: {data_type}") - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" - - -def generate_timestamp() -> str: - return str( - int( - 
str(int(time.time() * 1000))[:-1] - + str( - sum( - 2 * int(digit) - if idx % 2 == 0 - else 3 * int(digit) - for idx, digit in enumerate(str(int(time.time() * 1000))[:-1]) - ) - % 10 - ) - ) - ) - -def generate_signature(text: str): - timestamp = generate_timestamp() - version = 'v1.0.0' - secret = '8@VXGK3kKHr!u2gA' - data = f"{version}#{text}#{timestamp}#{secret}" - signature = hashlib.md5(data.encode()).hexdigest() - signature = signature[::-1] - return { - "signature": signature, - "timestamp": timestamp, - "version": version - } - -def xor_hash(B: str): - r = [] - i = 0 - - def o(e, t): - o_val = 0 - for i in range(len(t)): - o_val |= r[i] << (8 * i) - return e ^ o_val - - for e in range(len(B)): - t = ord(B[e]) - r.insert(0, 255 & t) - - if len(r) >= 4: - i = o(i, r) - r = [] - - if len(r) > 0: - i = o(i, r) - - return hex(i)[2:] - -def performance() -> str: - t = int(time.time() * 1000) - e = 0 - while t == int(time.time() * 1000): - e += 1 - return hex(t)[2:] + hex(e)[2:] - -def generate_visitor_id(user_agent: str) -> str: - f = performance() - r = hex(int(random.random() * (16**16)))[2:-2] - d = xor_hash(user_agent) - e = hex(1080 * 1920)[2:] - return f"{f}-{r}-{d}-{e}-{f}" - - - -# update -# from g4f.requests import StreamSession - -# async def main(): -# headers = { -# 'authority': 'api.myshell.ai', -# 'accept': 'application/json', -# 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', -# 'content-type': 'application/json', -# 'myshell-service-name': 'organics-api', -# 'origin': 'https://app.myshell.ai', -# 'referer': 'https://app.myshell.ai/', -# 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"', -# 'sec-ch-ua-mobile': '?0', -# 'sec-ch-ua-platform': '"macOS"', -# 'sec-fetch-dest': 'empty', -# 'sec-fetch-mode': 'cors', -# 'sec-fetch-site': 'same-site', -# 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36', -# 'visitor-id': '18ae8fe5d916d3-0213f29594b17f-18525634-157188-18ae8fe5d916d3', -# } - -# json_data = { -# 'conversation_scenario': 3, -# 'botId': '4738', -# 'message': 'hi', -# 'messageType': 1, -# } - -# async with StreamSession(headers=headers, impersonate="chrome110") as session: -# async with session.post(f'https://api.myshell.ai/v1/bot/chat/send_message', -# json=json_data) as response: - -# response.raise_for_status() -# async for chunk in response.iter_content(): -# print(chunk.decode("utf-8")) - -# import asyncio -# asyncio.run(main()) \ No newline at end of file diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py index 10130320..7c3b8c55 100644 --- a/g4f/Provider/Vercel.py +++ b/g4f/Provider/Vercel.py @@ -4,7 +4,6 @@ import json, base64, requests, execjs, random, uuid from ..typing import Messages, TypedDict, CreateResult, Any from .base_provider import BaseProvider -from abc import abstractmethod from ..debug import logging @@ -15,12 +14,13 @@ class Vercel(BaseProvider): supports_stream = True @staticmethod - @abstractmethod def create_completion( model: str, messages: Messages, stream: bool, - proxy: str = None, **kwargs) -> CreateResult: + proxy: str = None, + **kwargs + ) -> CreateResult: if not model: model = "gpt-3.5-turbo" @@ -65,7 +65,7 @@ class Vercel(BaseProvider): headers=headers, json=json_data, stream=True, proxies={"https": proxy}) try: response.raise_for_status() - except Exception: + except: continue for token in response.iter_content(chunk_size=None): yield token.decode() diff --git 
a/g4f/Provider/Yqcloud.py b/g4f/Provider/Yqcloud.py index d6ce21a9..b567f3a0 100644 --- a/g4f/Provider/Yqcloud.py +++ b/g4f/Provider/Yqcloud.py @@ -1,7 +1,7 @@ from __future__ import annotations import random -from aiohttp import ClientSession +from ..requests import StreamSession from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider, format_prompt @@ -19,13 +19,13 @@ class Yqcloud(AsyncGeneratorProvider): proxy: str = None, **kwargs, ) -> AsyncResult: - async with ClientSession( - headers=_create_header() + async with StreamSession( + headers=_create_header(), proxies={"https": proxy} ) as session: payload = _create_payload(messages, **kwargs) - async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response: + async with session.post("https://api.aichatos.cloud/api/generateStream", json=payload) as response: response.raise_for_status() - async for chunk in response.content.iter_any(): + async for chunk in response.iter_content(): if chunk: chunk = chunk.decode() if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk: @@ -38,6 +38,7 @@ def _create_header(): "accept" : "application/json, text/plain, */*", "content-type" : "application/json", "origin" : "https://chat9.yqcloud.top", + "referer" : "https://chat9.yqcloud.top/" } diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index a465b428..97351120 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -26,7 +26,7 @@ from .GptGo import GptGo from .GptGod import GptGod from .Liaobots import Liaobots from .Llama2 import Llama2 -from .Myshell import Myshell +from .MyShell import MyShell from .NoowAi import NoowAi from .Opchatgpts import Opchatgpts from .Phind import Phind @@ -90,6 +90,7 @@ class ProviderUtils: 'Lockchat': Lockchat, 'MikuChat': MikuChat, 'Myshell': Myshell, + 'MyShell': MyShell, 'NoowAi': NoowAi, 'Opchatgpts': Opchatgpts, 'OpenAssistant': OpenAssistant, @@ -159,6 +160,7 @@ __all__ = [ 'Llama2', 'Lockchat', 'Myshell', + 'MyShell', 'NoowAi', 'Opchatgpts', 'Raycast', diff --git a/g4f/Provider/deprecated/Myshell.py b/g4f/Provider/deprecated/Myshell.py new file mode 100644 index 00000000..b1aa2b2d --- /dev/null +++ b/g4f/Provider/deprecated/Myshell.py @@ -0,0 +1,219 @@ +# not using WS anymore + +from __future__ import annotations + +import json, uuid, hashlib, time, random + +from aiohttp import ClientSession +from aiohttp.http import WSMsgType +import asyncio + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, format_prompt + + +models = { + "samantha": "1e3be7fe89e94a809408b1154a2ee3e1", + "gpt-3.5-turbo": "8077335db7cd47e29f7de486612cc7fd", + "gpt-4": "01c8de4fbfc548df903712b0922a4e01", +} + + +class Myshell(AsyncGeneratorProvider): + url = "https://app.myshell.ai/chat" + working = False + supports_gpt_35_turbo = True + supports_gpt_4 = True + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + timeout: int = 90, + **kwargs + ) -> AsyncResult: + if not model: + bot_id = models["samantha"] + elif model in models: + bot_id = models[model] + else: + raise ValueError(f"Model are not supported: {model}") + + user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36' + visitor_id = generate_visitor_id(user_agent) + + async with ClientSession( + headers={'User-Agent': user_agent} + ) as session: + async with session.ws_connect( + 
"wss://api.myshell.ai/ws/?EIO=4&transport=websocket", + autoping=False, + timeout=timeout, + proxy=proxy + ) as wss: + # Send and receive hello message + await wss.receive_str() + message = json.dumps({"token": None, "visitorId": visitor_id}) + await wss.send_str(f"40/chat,{message}") + await wss.receive_str() + + # Fix "need_verify_captcha" issue + await asyncio.sleep(5) + + # Create chat message + text = format_prompt(messages) + chat_data = json.dumps(["text_chat",{ + "reqId": str(uuid.uuid4()), + "botUid": bot_id, + "sourceFrom": "myshellWebsite", + "text": text, + **generate_signature(text) + }]) + + # Send chat message + chat_start = "42/chat," + chat_message = f"{chat_start}{chat_data}" + await wss.send_str(chat_message) + + # Receive messages + async for message in wss: + if message.type != WSMsgType.TEXT: + continue + # Ping back + if message.data == "2": + await wss.send_str("3") + continue + # Is not chat message + if not message.data.startswith(chat_start): + continue + data_type, data = json.loads(message.data[len(chat_start):]) + if data_type == "text_stream": + if data["data"]["text"]: + yield data["data"]["text"] + elif data["data"]["isFinal"]: + break + elif data_type in ("message_replied", "need_verify_captcha"): + raise RuntimeError(f"Received unexpected message: {data_type}") + + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" + + +def generate_timestamp() -> str: + return str( + int( + str(int(time.time() * 1000))[:-1] + + str( + sum( + 2 * int(digit) + if idx % 2 == 0 + else 3 * int(digit) + for idx, digit in enumerate(str(int(time.time() * 1000))[:-1]) + ) + % 10 + ) + ) + ) + +def generate_signature(text: str): + timestamp = generate_timestamp() + version = 'v1.0.0' + secret = '8@VXGK3kKHr!u2gA' + data = f"{version}#{text}#{timestamp}#{secret}" + signature = hashlib.md5(data.encode()).hexdigest() + signature = signature[::-1] + return { + "signature": signature, + "timestamp": timestamp, + "version": version + } + +def xor_hash(B: str): + r = [] + i = 0 + + def o(e, t): + o_val = 0 + for i in range(len(t)): + o_val |= r[i] << (8 * i) + return e ^ o_val + + for e in range(len(B)): + t = ord(B[e]) + r.insert(0, 255 & t) + + if len(r) >= 4: + i = o(i, r) + r = [] + + if len(r) > 0: + i = o(i, r) + + return hex(i)[2:] + +def performance() -> str: + t = int(time.time() * 1000) + e = 0 + while t == int(time.time() * 1000): + e += 1 + return hex(t)[2:] + hex(e)[2:] + +def generate_visitor_id(user_agent: str) -> str: + f = performance() + r = hex(int(random.random() * (16**16)))[2:-2] + d = xor_hash(user_agent) + e = hex(1080 * 1920)[2:] + return f"{f}-{r}-{d}-{e}-{f}" + + + +# update +# from g4f.requests import StreamSession + +# async def main(): +# headers = { +# 'authority': 'api.myshell.ai', +# 'accept': 'application/json', +# 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', +# 'content-type': 'application/json', +# 'myshell-service-name': 'organics-api', +# 'origin': 'https://app.myshell.ai', +# 'referer': 'https://app.myshell.ai/', +# 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"', +# 'sec-ch-ua-mobile': '?0', +# 'sec-ch-ua-platform': '"macOS"', +# 'sec-fetch-dest': 'empty', +# 'sec-fetch-mode': 'cors', +# 'sec-fetch-site': 'same-site', +# 'user-agent': 'Mozilla/5.0 (Macintosh; 
Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36', +# 'visitor-id': '18ae8fe5d916d3-0213f29594b17f-18525634-157188-18ae8fe5d916d3', +# } + +# json_data = { +# 'conversation_scenario': 3, +# 'botId': '4738', +# 'message': 'hi', +# 'messageType': 1, +# } + +# async with StreamSession(headers=headers, impersonate="chrome110") as session: +# async with session.post(f'https://api.myshell.ai/v1/bot/chat/send_message', +# json=json_data) as response: + +# response.raise_for_status() +# async for chunk in response.iter_content(): +# print(chunk.decode("utf-8")) + +# import asyncio +# asyncio.run(main()) \ No newline at end of file diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py index db48c3fb..f8e35b37 100644 --- a/g4f/Provider/deprecated/__init__.py +++ b/g4f/Provider/deprecated/__init__.py @@ -12,4 +12,5 @@ from .V50 import V50 from .FastGpt import FastGpt from .Aivvm import Aivvm from .Vitalentum import Vitalentum -from .H2o import H2o \ No newline at end of file +from .H2o import H2o +from .Myshell import Myshell \ No newline at end of file diff --git a/g4f/models.py b/g4f/models.py index 22a04ffb..7eee917a 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -3,18 +3,13 @@ from dataclasses import dataclass from .typing import Union from .Provider import BaseProvider, RetryProvider from .Provider import ( - ChatgptLogin, - ChatgptDuo, GptForLove, ChatgptAi, GptChatly, - Liaobots, ChatgptX, ChatBase, - Yqcloud, GeekGpt, FakeGpt, - Myshell, FreeGpt, NoowAi, Vercel, @@ -23,9 +18,7 @@ from .Provider import ( AiAsk, GptGo, Phind, - Ylokh, Bard, - Aibn, Bing, You, H2o, -- cgit v1.2.3 From 4225a39a4987e28ad41df02ee67e468def7a7061 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Sun, 22 Oct 2023 09:04:14 +0200 Subject: Enable Liaobots and ChatForAi again --- g4f/Provider/ChatForAi.py | 2 +- g4f/Provider/MyShell.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'g4f') diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py index 0ccc8444..718affeb 100644 --- a/g4f/Provider/ChatForAi.py +++ b/g4f/Provider/ChatForAi.py @@ -10,7 +10,7 @@ from .base_provider import AsyncGeneratorProvider class ChatForAi(AsyncGeneratorProvider): url = "https://chatforai.store" - working = False + working = True supports_gpt_35_turbo = True @classmethod diff --git a/g4f/Provider/MyShell.py b/g4f/Provider/MyShell.py index fefd08f4..fe8604bb 100644 --- a/g4f/Provider/MyShell.py +++ b/g4f/Provider/MyShell.py @@ -8,7 +8,7 @@ from .base_provider import AsyncGeneratorProvider from .helper import format_prompt class MyShell(AsyncGeneratorProvider): - url = "https://api.myshell.ai/v1/bot/chat/send_message" + url = "https://app.myshell.ai/chat" @classmethod async def create_async_generator( @@ -38,7 +38,7 @@ class MyShell(AsyncGeneratorProvider): "message": prompt, "messageType": 1 } - async with session.post(cls.url, json=data) as response: + async with session.post("https://api.myshell.ai/v1/bot/chat/send_message", json=data) as response: response.raise_for_status() event = None async for line in response.iter_lines(): -- cgit v1.2.3 From 63cda8d779f9aaccbdac7cea39f496eca44a96ad Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Sun, 22 Oct 2023 14:22:33 +0200 Subject: Fix increase timeout Add Hashnode Provider Fix Yqcloud Provider --- g4f/Provider/Hashnode.py | 79 ++++++++++++++++++++++++++++++++++++++ g4f/Provider/Phind.py | 4 +- g4f/Provider/Yqcloud.py | 5 ++- g4f/Provider/__init__.py | 3 ++ 
g4f/Provider/deprecated/Myshell.py | 44 +-------------------- g4f/Provider/retry_provider.py | 13 +++---- 6 files changed, 94 insertions(+), 54 deletions(-) create mode 100644 g4f/Provider/Hashnode.py (limited to 'g4f') diff --git a/g4f/Provider/Hashnode.py b/g4f/Provider/Hashnode.py new file mode 100644 index 00000000..7f308d7e --- /dev/null +++ b/g4f/Provider/Hashnode.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +import secrets +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider + +class SearchTypes(): + quick = "quick" + code = "code" + websearch = "websearch" + +class Hashnode(AsyncGeneratorProvider): + url = "https://hashnode.com" + supports_gpt_35_turbo = True + working = True + _sources = [] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + search_type: str = SearchTypes.websearch, + proxy: str = None, + **kwargs + ) -> AsyncResult: + headers = { + "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0", + "Accept": "*/*", + "Accept-Language": "de,en-US;q=0.7,en;q=0.3", + "Accept-Encoding": "gzip, deflate, br", + "Referer": f"{cls.url}/rix", + "Content-Type": "application/json", + "Origin": cls.url, + "Connection": "keep-alive", + "Sec-Fetch-Dest": "empty", + "Sec-Fetch-Mode": "cors", + "Sec-Fetch-Site": "same-origin", + "Pragma": "no-cache", + "Cache-Control": "no-cache", + "TE": "trailers", + } + async with ClientSession(headers=headers) as session: + prompt = messages[-1]["content"] + cls._sources = [] + if search_type == "websearch": + async with session.post( + f"{cls.url}/api/ai/rix/search", + json={"prompt": prompt}, + proxy=proxy, + ) as response: + response.raise_for_status() + cls._sources = (await response.json())["result"] + data = { + "chatId": secrets.token_hex(16).zfill(32), + "history": messages, + "prompt": prompt, + "searchType": search_type, + "urlToScan": None, + "searchResults": cls._sources, + } + async with session.post( + f"{cls.url}/api/ai/rix/completion", + json=data, + proxy=proxy, + ) as response: + response.raise_for_status() + async for chunk in response.content.iter_any(): + if chunk: + yield chunk.decode() + + @classmethod + def get_sources(cls) -> list: + return [{ + "title": source["name"], + "url": source["url"] + } for source in cls._sources] \ No newline at end of file diff --git a/g4f/Provider/Phind.py b/g4f/Provider/Phind.py index d7c6f7c7..0e698cba 100644 --- a/g4f/Provider/Phind.py +++ b/g4f/Provider/Phind.py @@ -1,6 +1,6 @@ from __future__ import annotations -import random +import random, string from datetime import datetime from ..typing import AsyncResult, Messages @@ -22,7 +22,7 @@ class Phind(AsyncGeneratorProvider): timeout: int = 120, **kwargs ) -> AsyncResult: - chars = 'abcdefghijklmnopqrstuvwxyz0123456789' + chars = string.ascii_lowercase + string.digits user_id = ''.join(random.choice(chars) for _ in range(24)) data = { "question": format_prompt(messages), diff --git a/g4f/Provider/Yqcloud.py b/g4f/Provider/Yqcloud.py index b567f3a0..2829c5bf 100644 --- a/g4f/Provider/Yqcloud.py +++ b/g4f/Provider/Yqcloud.py @@ -9,7 +9,7 @@ from .base_provider import AsyncGeneratorProvider, format_prompt class Yqcloud(AsyncGeneratorProvider): url = "https://chat9.yqcloud.top/" - working = False + working = True supports_gpt_35_turbo = True @staticmethod @@ -17,10 +17,11 @@ class Yqcloud(AsyncGeneratorProvider): model: str, messages: Messages, proxy: str = None, + timeout: 
int = 120, **kwargs, ) -> AsyncResult: async with StreamSession( - headers=_create_header(), proxies={"https": proxy} + headers=_create_header(), proxies={"https": proxy}, timeout=timeout ) as session: payload = _create_payload(messages, **kwargs) async with session.post("https://api.aichatos.cloud/api/generateStream", json=payload) as response: diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index 97351120..653b6026 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -24,6 +24,7 @@ from .GptChatly import GptChatly from .GptForLove import GptForLove from .GptGo import GptGo from .GptGod import GptGod +from .Hashnode import Hashnode from .Liaobots import Liaobots from .Llama2 import Llama2 from .MyShell import MyShell @@ -82,6 +83,7 @@ class ProviderUtils: 'GptForLove': GptForLove, 'GptGo': GptGo, 'GptGod': GptGod, + 'Hashnode': Hashnode, 'H2o': H2o, 'HuggingChat': HuggingChat, 'Komo': Komo, @@ -154,6 +156,7 @@ __all__ = [ 'GetGpt', 'GptGo', 'GptGod', + 'Hashnode', 'H2o', 'HuggingChat', 'Liaobots', diff --git a/g4f/Provider/deprecated/Myshell.py b/g4f/Provider/deprecated/Myshell.py index b1aa2b2d..85731325 100644 --- a/g4f/Provider/deprecated/Myshell.py +++ b/g4f/Provider/deprecated/Myshell.py @@ -174,46 +174,4 @@ def generate_visitor_id(user_agent: str) -> str: r = hex(int(random.random() * (16**16)))[2:-2] d = xor_hash(user_agent) e = hex(1080 * 1920)[2:] - return f"{f}-{r}-{d}-{e}-{f}" - - - -# update -# from g4f.requests import StreamSession - -# async def main(): -# headers = { -# 'authority': 'api.myshell.ai', -# 'accept': 'application/json', -# 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', -# 'content-type': 'application/json', -# 'myshell-service-name': 'organics-api', -# 'origin': 'https://app.myshell.ai', -# 'referer': 'https://app.myshell.ai/', -# 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"', -# 'sec-ch-ua-mobile': '?0', -# 'sec-ch-ua-platform': '"macOS"', -# 'sec-fetch-dest': 'empty', -# 'sec-fetch-mode': 'cors', -# 'sec-fetch-site': 'same-site', -# 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36', -# 'visitor-id': '18ae8fe5d916d3-0213f29594b17f-18525634-157188-18ae8fe5d916d3', -# } - -# json_data = { -# 'conversation_scenario': 3, -# 'botId': '4738', -# 'message': 'hi', -# 'messageType': 1, -# } - -# async with StreamSession(headers=headers, impersonate="chrome110") as session: -# async with session.post(f'https://api.myshell.ai/v1/bot/chat/send_message', -# json=json_data) as response: - -# response.raise_for_status() -# async for chunk in response.iter_content(): -# print(chunk.decode("utf-8")) - -# import asyncio -# asyncio.run(main()) \ No newline at end of file + return f"{f}-{r}-{d}-{e}-{f}" \ No newline at end of file diff --git a/g4f/Provider/retry_provider.py b/g4f/Provider/retry_provider.py index ee342315..39d61c35 100644 --- a/g4f/Provider/retry_provider.py +++ b/g4f/Provider/retry_provider.py @@ -71,11 +71,10 @@ class RetryProvider(AsyncProvider): self.exceptions: Dict[str, Exception] = {} for provider in providers: try: - return await asyncio.wait_for(provider.create_async(model, messages, **kwargs), timeout=60) - except asyncio.TimeoutError as e: - self.exceptions[provider.__name__] = e - if self.logging: - print(f"{provider.__name__}: TimeoutError: {e}") + return await asyncio.wait_for( + provider.create_async(model, messages, **kwargs), + 
timeout=kwargs.get("timeout", 60) + ) except Exception as e: self.exceptions[provider.__name__] = e if self.logging: @@ -85,8 +84,8 @@ class RetryProvider(AsyncProvider): def raise_exceptions(self) -> None: if self.exceptions: - raise RuntimeError("\n".join(["All providers failed:"] + [ + raise RuntimeError("\n".join(["RetryProvider failed:"] + [ f"{p}: {self.exceptions[p].__class__.__name__}: {self.exceptions[p]}" for p in self.exceptions ])) - raise RuntimeError("No provider found") \ No newline at end of file + raise RuntimeError("RetryProvider: No provider found") \ No newline at end of file -- cgit v1.2.3 From 78f93bb737bc72c7a47dabe71adbc5b0349c8072 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Sun, 22 Oct 2023 15:15:43 +0200 Subject: Add rate limit error messages --- g4f/Provider/FreeGpt.py | 5 ++++- g4f/Provider/NoowAi.py | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'g4f') diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py index 6638b67f..a6d31644 100644 --- a/g4f/Provider/FreeGpt.py +++ b/g4f/Provider/FreeGpt.py @@ -41,7 +41,10 @@ class FreeGpt(AsyncGeneratorProvider): async with session.post(f"{url}/api/generate", json=data) as response: response.raise_for_status() async for chunk in response.iter_content(): - yield chunk.decode() + chunk = chunk.decode() + if chunk == "当前地区当日额度已消耗完": + raise RuntimeError("Rate limit reached") + yield chunk @classmethod @property diff --git a/g4f/Provider/NoowAi.py b/g4f/Provider/NoowAi.py index 93748258..9dc26d35 100644 --- a/g4f/Provider/NoowAi.py +++ b/g4f/Provider/NoowAi.py @@ -61,6 +61,8 @@ class NoowAi(AsyncGeneratorProvider): yield line["data"] elif line["type"] == "end": break + elif line["type"] == "error": + raise RuntimeError(line["data"]) def random_string(length: int = 10): return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length)) \ No newline at end of file -- cgit v1.2.3 From c400d02024e4b3a85bcdc530b4dcb7a1e59fdac4 Mon Sep 17 00:00:00 2001 From: Luneye <73485421+Luneye@users.noreply.github.com> Date: Sun, 22 Oct 2023 15:59:56 +0200 Subject: Major Update for Bing - Supports latest bundle version and image analysis Here it is, a much-needed update to this service which offers numerous functionalities that the old code was unable to deliver to us. As you may know, ChatGPT Plus subscribers now have the opportunity to request image analysis directly from GPT within the chat bar. Bing has also integrated this feature into its chatbot. With this new code, you can now provide an image using a data URI, with all the following supported extensions: jpg, jpeg, png, and gif! **What is a data URI and how can I provide an image to Bing?** Just to clarify, a data URI is a method for encoding data directly into a URI (Uniform Resource Identifier). It is typically used for embedding small data objects like images, text, or other resources within web pages or documents. Data URIs are widely used in web applications. To provide an image from your desktop and retrieve it as a data URI, you can use this code: [GitHub link](https://gist.github.com/jsocol/1089733). 
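For Python users who don't want to leave the language, a minimal sketch along these lines should produce the same kind of data URI from a local file (`to_data_uri` is only an illustrative helper, not part of g4f):

```python
import base64
import mimetypes

def to_data_uri(path: str) -> str:
    # Guess the MIME type from the file extension; Bing accepts jpeg, png and gif.
    mime, _ = mimetypes.guess_type(path)
    if mime not in ("image/jpeg", "image/png", "image/gif"):
        raise ValueError(f"Unsupported or unknown image type: {mime}")
    with open(path, "rb") as file:
        encoded = base64.b64encode(file.read()).decode()
    return f"data:{mime};base64,{encoded}"
```

The returned string can be passed directly to the `image` argument in the snippet below.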
Now, here is a code snippet you can use to provide images to Bing:

```python
import g4f

provider = g4f.Provider.Bing
user_message = [{"role": "user", "content": "Hi, describe this image."}]

response = g4f.ChatCompletion.create(
    model = g4f.models.gpt_4,
    provider = provider,  # the Bing provider assigned above
    messages = user_message,
    stream = True,
    image = "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEASABIAAD/4RiSRXhpZgAASUkqAAg..."  # insert your full data URI image here
)

for message in response:
    print(message, flush=True, end='')
```

If you don't want to analyze an image, simply omit the image parameter.

Regarding the implementation: the image is preprocessed inside Bing.py, which can be resource-intensive for a server-side deployment. When you use the Bing chatbot in your web browser, the image is preprocessed on your computer before being sent to the server; this preprocessing includes tasks like image rotation and compression. Although the current implementation works, it would be more efficient to delegate image preprocessing to the client, as Bing's own web client does. I will try to provide JavaScript code for that at a later time.

As noted, this feature is still in beta, and the way the code is written, Bing can sometimes mess up its answers. Bing does not really stream its responses the way other providers do. Instead of sending each new segment on its own, every update repeats everything sent so far:

"Hi,"
"Hi, this"
"Hi, this is"
"Hi, this is Bing."

So, to simulate a normal streaming response, other contributors made the code compare each update with the previous one and yield only the newly added text. However, this method ignores something Bing does: it runs its responses through a markdown detector that searches for links while the AI is answering. If it finds a link, it removes it from the body and waits until the AI finishes its answer to put all the found links at the very end. So if the AI is in the middle of writing a link reference and completes it on the next iteration, the reference disappears from the answer and only reappears at the end. Example:

"Here is your link reference ["
"Here is your link reference [^"
"Here is your link reference [^1"
"Here is your link reference [^1^"

At that point the response can get stuck, because the markdown detector has removed the link reference from the next update and holds it back until the AI is finished. For this reason, I am working on an update to anticipate the markdown detector.

So please, if you notice any bugs with this new implementation, I would greatly appreciate it if you could report them on the issue tab of this repo. Thanks in advance, and I hope these explanations were clear to you!
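For reference, the delta-yielding idea described above boils down to roughly the following sketch (`stream_deltas` and its argument are illustrative names; the actual logic lives in `stream_generate` in Bing.py):

```python
from typing import AsyncIterator

async def stream_deltas(cumulative: AsyncIterator[str]) -> AsyncIterator[str]:
    # Bing resends the whole answer on every update, so only emit the part not seen yet.
    returned_text = ""
    async for response_txt in cumulative:
        if response_txt.startswith(returned_text):
            yield response_txt[len(returned_text):]
            returned_text = response_txt
```

The markdown-detector problem arises exactly because an update can shrink: once a half-written link reference is removed, the new text no longer starts with the previously returned text, so nothing further is yielded and the stream appears stuck.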
--- g4f/Provider/Bing.py | 264 +++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 222 insertions(+), 42 deletions(-) (limited to 'g4f') diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py index f1b50f7c..91e9dc2c 100644 --- a/g4f/Provider/Bing.py +++ b/g4f/Provider/Bing.py @@ -1,10 +1,16 @@ from __future__ import annotations +import string import random import json import os +import re +import io +import base64 +import numpy as np import uuid import urllib.parse +from PIL import Image from aiohttp import ClientSession, ClientTimeout from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider @@ -35,6 +41,7 @@ class Bing(AsyncGeneratorProvider): proxy: str = None, cookies: dict = None, tone: str = Tones.creative, + image: str = None, **kwargs ) -> AsyncResult: if len(messages) < 2: @@ -46,7 +53,7 @@ class Bing(AsyncGeneratorProvider): if not cookies or "SRCHD" not in cookies: cookies = default_cookies - return stream_generate(prompt, tone, context, proxy, cookies) + return stream_generate(prompt, tone, image, context, proxy, cookies) def create_context(messages: Messages): context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages) @@ -54,14 +61,14 @@ def create_context(messages: Messages): return context class Conversation(): - def __init__(self, conversationId: str, clientId: str, conversationSignature: str) -> None: + def __init__(self, conversationId: str, clientId: str, conversationSignature: str, imageInfo: dict=None) -> None: self.conversationId = conversationId self.clientId = clientId self.conversationSignature = conversationSignature + self.imageInfo = imageInfo -async def create_conversation(session: ClientSession, proxy: str = None) -> Conversation: - url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1150.3' - +async def create_conversation(session: ClientSession, tone: str, image: str = None, proxy: str = None) -> Conversation: + url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1199.4' async with await session.get(url, proxy=proxy) as response: data = await response.json() @@ -71,8 +78,65 @@ async def create_conversation(session: ClientSession, proxy: str = None) -> Conv if not conversationId or not clientId or not conversationSignature: raise Exception('Failed to create conversation.') - - return Conversation(conversationId, clientId, conversationSignature) + conversation = Conversation(conversationId, clientId, conversationSignature, None) + if isinstance(image,str): + try: + config = { + "visualSearch": { + "maxImagePixels": 360000, + "imageCompressionRate": 0.7, + "enableFaceBlurDebug": 0, + } + } + is_data_uri_an_image(image) + img_binary_data = extract_data_uri(image) + is_accepted_format(img_binary_data) + img = Image.open(io.BytesIO(img_binary_data)) + width, height = img.size + max_image_pixels = config['visualSearch']['maxImagePixels'] + compression_rate = config['visualSearch']['imageCompressionRate'] + + if max_image_pixels / (width * height) < 1: + new_width = int(width * np.sqrt(max_image_pixels / (width * height))) + new_height = int(height * np.sqrt(max_image_pixels / (width * height))) + else: + new_width = width + new_height = height + try: + orientation = get_orientation(img) + except Exception: + orientation = None + new_img = process_image(orientation, img, new_width, new_height) + new_img_binary_data = compress_image_to_base64(new_img, compression_rate) + data, boundary = 
build_image_upload_api_payload(new_img_binary_data, conversation, tone) + headers = session.headers.copy() + headers["content-type"] = 'multipart/form-data; boundary=' + boundary + headers["referer"] = 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx' + headers["origin"] = 'https://www.bing.com' + async with await session.post("https://www.bing.com/images/kblob", data=data, headers=headers, proxy=proxy) as image_upload_response: + if image_upload_response.status == 200: + image_info = await image_upload_response.json() + result = {} + if image_info.get('blobId'): + result['bcid'] = image_info.get('blobId', "") + result['blurredBcid'] = image_info.get('processedBlobId', "") + if result['blurredBcid'] != "": + result["imageUrl"] = "https://www.bing.com/images/blob?bcid=" + result['blurredBcid'] + elif result['bcid'] != "": + result["imageUrl"] = "https://www.bing.com/images/blob?bcid=" + result['bcid'] + if config['visualSearch']["enableFaceBlurDebug"]: + result['originalImageUrl'] = "https://www.bing.com/images/blob?bcid=" + result['blurredBcid'] + else: + result['originalImageUrl'] = "https://www.bing.com/images/blob?bcid=" + result['bcid'] + conversation.imageInfo = result + else: + raise Exception("Failed to parse image info.") + else: + raise Exception("Failed to upload image.") + + except Exception as e: + print(f"An error happened while trying to send image: {str(e)}") + return conversation async def list_conversations(session: ClientSession) -> list: url = "https://www.bing.com/turing/conversation/chats" @@ -98,37 +162,47 @@ class Defaults: ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}" allowedMessageTypes = [ + "ActionRequest", "Chat", + "Context", "Disengaged", + "Progress", "AdsQuery", "SemanticSerp", "GenerateContentQuery", "SearchQuery", - "ActionRequest", - "Context", - "Progress", - "AdsQuery", - "SemanticSerp", + # The following message types should not be added so that it does not flood with + # useless messages (such as "Analyzing images" or "Searching the web") while it's retrieving the AI response + # "InternalSearchQuery", + # "InternalSearchResult", + # Not entirely certain about these two, but these parameters may be used for real-time markdown rendering. + # Keeping them could potentially complicate the retrieval of the messages because link references written while + # the AI is responding would then be moved to the very end of its message. 
+ # "RenderCardRequest", + # "RenderContentRequest" ] sliceIds = [ - "winmuid3tf", - "osbsdusgreccf", - "ttstmout", - "crchatrev", - "winlongmsgtf", - "ctrlworkpay", - "norespwtf", - "tempcacheread", - "temptacache", - "505scss0", - "508jbcars0", - "515enbotdets0", - "5082tsports", - "515vaoprvs", - "424dagslnv1s0", - "kcimgattcf", - "427startpms0", + "wrapuxslimt5", + "wrapalgo", + "wraptopalgo", + "st14", + "arankr1_1_9_9", + "0731ziv2s0", + "voiceall", + "1015onstblg", + "vsspec", + "cacdiscf", + "909ajcopus0", + "scpbfmob", + "rwt1", + "cacmuidarb", + "sappdlpt", + "917fluxv14", + "delaygc", + "remsaconn3p", + "splitcss3p", + "sydconfigoptt" ] location = { @@ -173,27 +247,128 @@ class Defaults: } optionsSets = [ - 'saharasugg', - 'enablenewsfc', - 'clgalileo', - 'gencontentv3', "nlu_direct_response_filter", "deepleo", "disable_emoji_spoken_text", "responsible_ai_policy_235", "enablemm", - "h3precise" - "dtappid", - "cricinfo", - "cricinfov2", "dv3sugg", - "nojbfedge" + "iyxapbing", + "iycapbing", + "h3imaginative", + "clgalileo", + "gencontentv3", + "fluxv14", + "eredirecturl" ] def format_message(msg: dict) -> str: return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter +def build_image_upload_api_payload(image_bin: str, conversation: Conversation, tone: str): + payload = { + 'invokedSkills': ["ImageById"], + 'subscriptionId': "Bing.Chat.Multimodal", + 'invokedSkillsRequestData': { + 'enableFaceBlur': True + }, + 'convoData': { + 'convoid': "", + 'convotone': tone + } + } + knowledge_request = { + 'imageInfo': {}, + 'knowledgeRequest': payload + } + boundary="----WebKitFormBoundary" + ''.join(random.choices(string.ascii_letters + string.digits, k=16)) + data = '--' + boundary + '\r\nContent-Disposition: form-data; name="knowledgeRequest"\r\n\r\n' + json.dumps(knowledge_request,ensure_ascii=False) + "\r\n--" + boundary + '\r\nContent-Disposition: form-data; name="imageBase64"\r\n\r\n' + image_bin + "\r\n--" + boundary + "--\r\n" + return data, boundary + +def is_data_uri_an_image(data_uri): + try: + # Check if the data URI starts with 'data:image' and contains an image format (e.g., jpeg, png, gif) + if not re.match(r'data:image/(\w+);base64,', data_uri): + raise ValueError("Invalid data URI image.") + # Extract the image format from the data URI + image_format = re.match(r'data:image/(\w+);base64,', data_uri).group(1) + # Check if the image format is one of the allowed formats (jpg, jpeg, png, gif) + if image_format.lower() not in ['jpeg', 'jpg', 'png', 'gif']: + raise ValueError("Invalid image format (from mime file type).") + except Exception as e: + raise e + +def is_accepted_format(binary_data): + try: + check = False + if binary_data.startswith(b'\xFF\xD8\xFF'): + check = True # It's a JPEG image + elif binary_data.startswith(b'\x89PNG\r\n\x1a\n'): + check = True # It's a PNG image + elif binary_data.startswith(b'GIF87a') or binary_data.startswith(b'GIF89a'): + check = True # It's a GIF image + elif binary_data.startswith(b'\x89JFIF') or binary_data.startswith(b'JFIF\x00'): + check = True # It's a JPEG image + elif binary_data.startswith(b'\xFF\xD8'): + check = True # It's a JPEG image + elif binary_data.startswith(b'RIFF') and binary_data[8:12] == b'WEBP': + check = True # It's a WebP image + # else we raise ValueError + if not check: + raise ValueError("Invalid image format (from magic code).") + except Exception as e: + raise e + +def extract_data_uri(data_uri): + try: + data = data_uri.split(",")[1] + data = base64.b64decode(data) + return data + except Exception as 
e: + raise e + +def get_orientation(data: bytes): + try: + if data[0:2] != b'\xFF\xD8': + raise Exception('NotJpeg') + with Image.open(data) as img: + exif_data = img._getexif() + if exif_data is not None: + orientation = exif_data.get(274) # 274 corresponds to the orientation tag in EXIF + if orientation is not None: + return orientation + except Exception: + pass + +def process_image(orientation, img, new_width, new_height): + try: + # Initialize the canvas + new_img = Image.new("RGB", (new_width, new_height), color="#FFFFFF") + if orientation: + if orientation > 4: + img = img.transpose(Image.FLIP_LEFT_RIGHT) + if orientation == 3 or orientation == 4: + img = img.transpose(Image.ROTATE_180) + if orientation == 5 or orientation == 6: + img = img.transpose(Image.ROTATE_270) + if orientation == 7 or orientation == 8: + img = img.transpose(Image.ROTATE_90) + new_img.paste(img, (0, 0)) + return new_img + except Exception as e: + raise e + +def compress_image_to_base64(img, compression_rate): + try: + output_buffer = io.BytesIO() + img.save(output_buffer, format="JPEG", quality=int(compression_rate * 100)) + base64_image = base64.b64encode(output_buffer.getvalue()).decode('utf-8') + return base64_image + except Exception as e: + raise e + def create_message(conversation: Conversation, prompt: str, tone: str, context: str=None) -> str: + request_id = str(uuid.uuid4()) struct = { 'arguments': [ @@ -213,6 +388,7 @@ def create_message(conversation: Conversation, prompt: str, tone: str, context: 'requestId': request_id, 'messageId': request_id, }, + "scenario": "SERP", 'tone': tone, 'spokenTextMode': 'None', 'conversationId': conversation.conversationId, @@ -225,7 +401,11 @@ def create_message(conversation: Conversation, prompt: str, tone: str, context: 'target': 'chat', 'type': 4 } - + if conversation.imageInfo != None and "imageUrl" in conversation.imageInfo and "originalImageUrl" in conversation.imageInfo: + struct['arguments'][0]['message']['originalImageUrl'] = conversation.imageInfo['originalImageUrl'] + struct['arguments'][0]['message']['imageUrl'] = conversation.imageInfo['imageUrl'] + struct['arguments'][0]['experienceType'] = None + struct['arguments'][0]['attachedFileInfo'] = {"fileName": None, "fileType": None} if context: struct['arguments'][0]['previousMessages'] = [{ "author": "user", @@ -239,6 +419,7 @@ def create_message(conversation: Conversation, prompt: str, tone: str, context: async def stream_generate( prompt: str, tone: str, + image: str = None, context: str = None, proxy: str = None, cookies: dict = None @@ -248,7 +429,7 @@ async def stream_generate( cookies=cookies, headers=Defaults.headers, ) as session: - conversation = await create_conversation(session, proxy) + conversation = await create_conversation(session, tone, image, proxy) try: async with session.ws_connect( f'wss://sydney.bing.com/sydney/ChatHub', @@ -264,7 +445,6 @@ async def stream_generate( response_txt = '' returned_text = '' final = False - while not final: msg = await wss.receive(timeout=900) objects = msg.data.split(Defaults.delimiter) @@ -299,4 +479,4 @@ async def stream_generate( raise Exception(f"{result['value']}: {result['message']}") return finally: - await delete_conversation(session, conversation, proxy) \ No newline at end of file + await delete_conversation(session, conversation, proxy) -- cgit v1.2.3 From fc15181110680354364daad131f05ceb7caa8068 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Sun, 22 Oct 2023 17:13:13 +0200 Subject: Fix ChatgptAi Provider --- g4f/Provider/ChatgptAi.py | 72 
+++++++++++++++++++++++++---------------------- g4f/Provider/Geekgpt.py | 14 +++++---- 2 files changed, 47 insertions(+), 39 deletions(-) (limited to 'g4f') diff --git a/g4f/Provider/ChatgptAi.py b/g4f/Provider/ChatgptAi.py index cf45909c..d5fd5bff 100644 --- a/g4f/Provider/ChatgptAi.py +++ b/g4f/Provider/ChatgptAi.py @@ -1,36 +1,34 @@ from __future__ import annotations -import re +import re, html, json, string, random from aiohttp import ClientSession -from ..typing import Messages -from .base_provider import AsyncProvider, format_prompt +from ..typing import Messages, AsyncResult +from .base_provider import AsyncGeneratorProvider -class ChatgptAi(AsyncProvider): - url: str = "https://chatgpt.ai/" +class ChatgptAi(AsyncGeneratorProvider): + url: str = "https://chatgpt.ai" working = True supports_gpt_35_turbo = True - _nonce = None - _post_id = None - _bot_id = None + _system = None @classmethod - async def create_async( + async def create_async_generator( cls, model: str, messages: Messages, proxy: str = None, **kwargs - ) -> str: + ) -> AsyncResult: headers = { "authority" : "chatgpt.ai", "accept" : "*/*", - "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", + "accept-language" : "en-US", "cache-control" : "no-cache", - "origin" : "https://chatgpt.ai", + "origin" : cls.url, "pragma" : "no-cache", - "referer" : cls.url, + "referer" : f"{cls.url}/", "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', "sec-ch-ua-mobile" : "?0", "sec-ch-ua-platform" : '"Windows"', @@ -42,34 +40,40 @@ class ChatgptAi(AsyncProvider): async with ClientSession( headers=headers ) as session: - if not cls._nonce: + if not cls._system: async with session.get(cls.url, proxy=proxy) as response: response.raise_for_status() text = await response.text() - result = re.search(r'data-nonce="(.*?)"', text) + result = re.search(r"data-system='(.*?)'", text) if result: - cls._nonce = result.group(1) - result = re.search(r'data-post-id="(.*?)"', text) - if result: - cls._post_id = result.group(1) - result = re.search(r'data-bot-id="(.*?)"', text) - if result: - cls._bot_id = result.group(1) - if not cls._nonce or not cls._post_id or not cls._bot_id: - raise RuntimeError("Nonce, post-id or bot-id not found") - + cls._system = json.loads(html.unescape(result.group(1))) + if not cls._system: + raise RuntimeError("System args not found") + data = { - "_wpnonce": cls._nonce, - "post_id": cls._post_id, - "url": "https://chatgpt.ai", - "action": "wpaicg_chat_shortcode_message", - "message": format_prompt(messages), - "bot_id": cls._bot_id + "botId": cls._system["botId"], + "customId": cls._system["customId"], + "session": cls._system["sessionId"], + "chatId": "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=11)), + "contextId": cls._system["contextId"], + "messages": messages, + "newMessage": messages[-1]["content"], + "stream": True } async with session.post( - "https://chatgpt.ai/wp-admin/admin-ajax.php", + f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", proxy=proxy, - data=data + json=data ) as response: response.raise_for_status() - return (await response.json())["data"] \ No newline at end of file + async for line in response.content: + if line.startswith(b"data: "): + try: + line = json.loads(line[6:]) + assert "type" in line + except: + raise RuntimeError(f"Broken line: {line.decode()}") + if line["type"] == "live": + yield line["data"] + elif line["type"] == "end": + break \ No newline at end of file diff --git 
a/g4f/Provider/Geekgpt.py b/g4f/Provider/Geekgpt.py index 3bbc0e75..3c577cf8 100644 --- a/g4f/Provider/Geekgpt.py +++ b/g4f/Provider/Geekgpt.py @@ -14,11 +14,15 @@ class GeekGpt(BaseProvider): supports_gpt_4 = True @classmethod - def create_completion(cls, - model: str, - messages: Messages, - stream: bool, **kwargs) -> CreateResult: - + def create_completion( + cls, + model: str, + messages: Messages, + stream: bool, + **kwargs + ) -> CreateResult: + if not model: + model = "gpt-3.5-turbo" json_data = { 'messages': messages, 'model': model, -- cgit v1.2.3 From ff88afa1da962aba02bc66c42796426f2649f884 Mon Sep 17 00:00:00 2001 From: abc <98614666+xtekky@users.noreply.github.com> Date: Sun, 22 Oct 2023 18:53:27 +0100 Subject: ~ | g4f `v-0.1.7.4` --- g4f/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'g4f') diff --git a/g4f/__init__.py b/g4f/__init__.py index 20f71e0d..0f7e4459 100644 --- a/g4f/__init__.py +++ b/g4f/__init__.py @@ -5,7 +5,7 @@ from .Provider import BaseProvider, RetryProvider from .typing import Messages, CreateResult, Union, List from .debug import logging -version = '0.1.7.3' +version = '0.1.7.4' version_check = True def check_pypi_version() -> None: -- cgit v1.2.3 From 3ae90b57edf5ebd2b2f7dc8ad43f0f560edded84 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Sun, 22 Oct 2023 20:01:14 +0200 Subject: Improve get_cookies helper --- g4f/Provider/ChatgptFree.py | 16 +++++++++++----- g4f/Provider/helper.py | 23 ++++++++++++++--------- 2 files changed, 25 insertions(+), 14 deletions(-) (limited to 'g4f') diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py index 8b7d04c4..806ff7cc 100644 --- a/g4f/Provider/ChatgptFree.py +++ b/g4f/Provider/ChatgptFree.py @@ -14,7 +14,7 @@ from .helper import format_prompt, get_cookies class ChatgptFree(AsyncProvider): url = "https://chatgptfree.ai" supports_gpt_35_turbo = True - working = True + working = False _post_id = None _nonce = None @@ -24,6 +24,7 @@ class ChatgptFree(AsyncProvider): model: str, messages: Messages, proxy: str = None, + timeout: int = 120, cookies: dict = None, **kwargs ) -> str: @@ -45,14 +46,19 @@ class ChatgptFree(AsyncProvider): 'sec-fetch-dest': 'empty', 'sec-fetch-mode': 'cors', 'sec-fetch-site': 'same-origin', - 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36', } - async with StreamSession(headers=headers, - impersonate="chrome107", proxies={"https": proxy}, timeout=10) as session: + async with StreamSession( + headers=headers, + cookies=cookies, + impersonate="chrome107", + proxies={"https": proxy}, + timeout=timeout + ) as session: if not cls._nonce: - async with session.get(f"{cls.url}/", cookies=cookies) as response: + async with session.get(f"{cls.url}/") as response: response.raise_for_status() response = await response.text() diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py index 096293b2..20500777 100644 --- a/g4f/Provider/helper.py +++ b/g4f/Provider/helper.py @@ -3,10 +3,11 @@ from __future__ import annotations import sys import asyncio import webbrowser -import http.cookiejar from os import path from asyncio import AbstractEventLoop +from platformdirs import user_config_dir + from ..typing import Dict, Messages from browser_cookie3 import chrome, chromium, opera, opera_gx, brave, edge, vivaldi, firefox, BrowserCookieError @@ -72,16 +73,22 @@ 
def init_cookies(): # Load cookies for a domain from all supported browsers. # Cache the results in the "_cookies" variable. def get_cookies(domain_name=''): - cj = http.cookiejar.CookieJar() - for cookie_fn in [chrome, chromium, opera, opera_gx, brave, edge, vivaldi, firefox]: + def g4f(domain_name): + user_data_dir = user_config_dir("g4f") + cookie_file = path.join(user_data_dir, "Default", "Cookies") + if not path.exists(cookie_file): + return [] + return chrome(cookie_file, domain_name) + cookie_jar = [] + for cookie_fn in [g4f, chrome, chromium, opera, opera_gx, brave, edge, vivaldi, firefox]: try: - for cookie in cookie_fn(domain_name=domain_name): - cj.set_cookie(cookie) + cookie_jar = cookie_fn(domain_name=domain_name) + if len(cookie_jar) > 0: + break except BrowserCookieError: pass - _cookies[domain_name] = {cookie.name: cookie.value for cookie in cj} - + _cookies[domain_name] = {cookie.name: cookie.value for cookie in cookie_jar} return _cookies[domain_name] @@ -100,10 +107,8 @@ def format_prompt(messages: Messages, add_special_tokens=False) -> str: def get_browser(user_data_dir: str = None): from undetected_chromedriver import Chrome - from platformdirs import user_config_dir if not user_data_dir: user_data_dir = user_config_dir("g4f") - user_data_dir = path.join(user_data_dir, "Default") return Chrome(user_data_dir=user_data_dir) \ No newline at end of file -- cgit v1.2.3 From 598255fa26e39d57e51fb41957a381a9241cb032 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Sun, 22 Oct 2023 23:53:18 +0200 Subject: Debug logging support Async browse access token --- g4f/Provider/AItianhuSpace.py | 4 +-- g4f/Provider/helper.py | 19 ++++++++------ g4f/Provider/needs_auth/OpenaiChat.py | 47 ++++++++++++++++++++--------------- g4f/Provider/retry_provider.py | 8 +++--- g4f/__init__.py | 5 ++-- 5 files changed, 46 insertions(+), 37 deletions(-) (limited to 'g4f') diff --git a/g4f/Provider/AItianhuSpace.py b/g4f/Provider/AItianhuSpace.py index 51297bcd..46856060 100644 --- a/g4f/Provider/AItianhuSpace.py +++ b/g4f/Provider/AItianhuSpace.py @@ -1,7 +1,7 @@ from __future__ import annotations import random, json -from ..debug import logging +from .. import debug from ..typing import AsyncResult, Messages from ..requests import StreamSession from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies @@ -36,7 +36,7 @@ class AItianhuSpace(AsyncGeneratorProvider): rand = ''.join(random.choice(chars) for _ in range(6)) domain = f"{rand}.{domains[model]}" - if logging: + if debug.logging: print(f"AItianhuSpace | using domain: {domain}") if not cookies: diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py index 20500777..1b00ace5 100644 --- a/g4f/Provider/helper.py +++ b/g4f/Provider/helper.py @@ -10,7 +10,7 @@ from platformdirs import user_config_dir from ..typing import Dict, Messages from browser_cookie3 import chrome, chromium, opera, opera_gx, brave, edge, vivaldi, firefox, BrowserCookieError - +from .. import debug # Change event loop policy on windows if sys.platform == 'win32': @@ -45,7 +45,6 @@ def get_event_loop() -> AbstractEventLoop: ) def init_cookies(): - urls = [ 'https://chat-gpt.org', 'https://www.aitianhu.com', @@ -73,22 +72,26 @@ def init_cookies(): # Load cookies for a domain from all supported browsers. # Cache the results in the "_cookies" variable. 
def get_cookies(domain_name=''): + if domain_name in _cookies: + return _cookies[domain_name] def g4f(domain_name): user_data_dir = user_config_dir("g4f") cookie_file = path.join(user_data_dir, "Default", "Cookies") if not path.exists(cookie_file): return [] return chrome(cookie_file, domain_name) - cookie_jar = [] + cookies = {} for cookie_fn in [g4f, chrome, chromium, opera, opera_gx, brave, edge, vivaldi, firefox]: try: cookie_jar = cookie_fn(domain_name=domain_name) - if len(cookie_jar) > 0: - break - except BrowserCookieError: + if len(cookie_jar) and debug.logging: + print(f"Read cookies from {cookie_fn.__name__} for {domain_name}") + for cookie in cookie_jar: + if cookie.name not in cookies: + cookies[cookie.name] = cookie.value + except BrowserCookieError as e: pass - - _cookies[domain_name] = {cookie.name: cookie.value for cookie in cookie_jar} + _cookies[domain_name] = cookies return _cookies[domain_name] diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py index b4b4a670..14619958 100644 --- a/g4f/Provider/needs_auth/OpenaiChat.py +++ b/g4f/Provider/needs_auth/OpenaiChat.py @@ -3,7 +3,7 @@ from __future__ import annotations import uuid, json, time from ..base_provider import AsyncGeneratorProvider -from ..helper import get_browser, get_cookies, format_prompt +from ..helper import get_browser, get_cookies, format_prompt, get_event_loop from ...typing import AsyncResult, Messages from ...requests import StreamSession @@ -73,26 +73,33 @@ class OpenaiChat(AsyncGeneratorProvider): last_message = new_message @classmethod - def browse_access_token(cls) -> str: - try: - from selenium.webdriver.common.by import By - from selenium.webdriver.support.ui import WebDriverWait - from selenium.webdriver.support import expected_conditions as EC + async def browse_access_token(cls) -> str: + def browse() -> str: + try: + from selenium.webdriver.common.by import By + from selenium.webdriver.support.ui import WebDriverWait + from selenium.webdriver.support import expected_conditions as EC - driver = get_browser() - except ImportError: - return + driver = get_browser() + except ImportError: + return - driver.get(f"{cls.url}/") - try: - WebDriverWait(driver, 1200).until( - EC.presence_of_element_located((By.ID, "prompt-textarea")) - ) - javascript = "return (await (await fetch('/api/auth/session')).json())['accessToken']" - return driver.execute_script(javascript) - finally: - time.sleep(1) - driver.quit() + driver.get(f"{cls.url}/") + try: + WebDriverWait(driver, 1200).until( + EC.presence_of_element_located((By.ID, "prompt-textarea")) + ) + javascript = "return (await (await fetch('/api/auth/session')).json())['accessToken']" + return driver.execute_script(javascript) + finally: + driver.close() + time.sleep(0.1) + driver.quit() + loop = get_event_loop() + return await loop.run_in_executor( + None, + browse + ) @classmethod async def fetch_access_token(cls, cookies: dict, proxies: dict = None) -> str: @@ -110,7 +117,7 @@ class OpenaiChat(AsyncGeneratorProvider): if cookies: cls._access_token = await cls.fetch_access_token(cookies, proxies) if not cls._access_token: - cls._access_token = cls.browse_access_token() + cls._access_token = await cls.browse_access_token() if not cls._access_token: raise RuntimeError("Read access token failed") return cls._access_token diff --git a/g4f/Provider/retry_provider.py b/g4f/Provider/retry_provider.py index 39d61c35..5979e15b 100644 --- a/g4f/Provider/retry_provider.py +++ b/g4f/Provider/retry_provider.py @@ -5,13 +5,13 @@ 
import random from typing import List, Type, Dict from ..typing import CreateResult, Messages from .base_provider import BaseProvider, AsyncProvider +from .. import debug class RetryProvider(AsyncProvider): __name__: str = "RetryProvider" working: bool = True supports_stream: bool = True - logging: bool = False def __init__( self, @@ -39,7 +39,7 @@ class RetryProvider(AsyncProvider): started: bool = False for provider in providers: try: - if self.logging: + if debug.logging: print(f"Using {provider.__name__} provider") for token in provider.create_completion(model, messages, stream, **kwargs): @@ -51,7 +51,7 @@ class RetryProvider(AsyncProvider): except Exception as e: self.exceptions[provider.__name__] = e - if self.logging: + if debug.logging: print(f"{provider.__name__}: {e.__class__.__name__}: {e}") if started: raise e @@ -77,7 +77,7 @@ class RetryProvider(AsyncProvider): ) except Exception as e: self.exceptions[provider.__name__] = e - if self.logging: + if debug.logging: print(f"{provider.__name__}: {e.__class__.__name__}: {e}") self.raise_exceptions() diff --git a/g4f/__init__.py b/g4f/__init__.py index 20f71e0d..ef8b1cc1 100644 --- a/g4f/__init__.py +++ b/g4f/__init__.py @@ -3,7 +3,7 @@ from requests import get from .models import Model, ModelUtils, _all_models from .Provider import BaseProvider, RetryProvider from .typing import Messages, CreateResult, Union, List -from .debug import logging +from . import debug version = '0.1.7.3' version_check = True @@ -46,8 +46,7 @@ def get_model_and_provider(model : Union[Model, str], if not provider.supports_stream and stream: raise ValueError(f'{provider.__name__} does not support "stream" argument') - if logging: - RetryProvider.logging = True + if debug.logging: print(f'Using {provider.__name__} provider') return model, provider -- cgit v1.2.3 From b1dbf66587234397172388961371b2c298510865 Mon Sep 17 00:00:00 2001 From: abc <98614666+xtekky@users.noreply.github.com> Date: Sun, 22 Oct 2023 23:00:31 +0100 Subject: =?UTF-8?q?~=C2=A0|=20g4f=20`v-0.1.7.5`?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- g4f/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'g4f') diff --git a/g4f/__init__.py b/g4f/__init__.py index 813c2364..fff92f4a 100644 --- a/g4f/__init__.py +++ b/g4f/__init__.py @@ -3,9 +3,9 @@ from requests import get from .models import Model, ModelUtils, _all_models from .Provider import BaseProvider, RetryProvider from .typing import Messages, CreateResult, Union, List -from . import debug +from . import debug -version = '0.1.7.4' +version = '0.1.7.5' version_check = True def check_pypi_version() -> None: -- cgit v1.2.3
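
Note on the EXIF handling added to the Bing provider earlier in this log: get_orientation reads tag 274 (Orientation) from the JPEG payload and process_image undoes it with a series of transposes before pasting the image onto a white canvas. Below is a standalone sketch of the same idea, assuming Pillow is available; the helper name is illustrative, and unlike the patch it wraps the raw bytes in io.BytesIO, since Image.open expects a path or file-like object rather than a bytes payload.

    import io
    from typing import Optional
    from PIL import Image

    def read_orientation(data: bytes) -> Optional[int]:
        # 0xFFD8 is the JPEG SOI marker; non-JPEG payloads carry no EXIF orientation here
        if data[:2] != b"\xFF\xD8":
            return None
        with Image.open(io.BytesIO(data)) as img:
            exif = img._getexif()  # None when the image has no EXIF block
            return exif.get(274) if exif else None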
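
Note on the reworked get_cookies helper: results are now cached per domain, a dedicated "g4f" Chrome profile (under the platformdirs config dir) is consulted before the installed browsers, and cookies from all sources are merged without overwriting earlier hits. A minimal usage sketch follows; the domain is only an example.

    from g4f.Provider.helper import get_cookies

    # The first call walks the g4f profile and the installed browsers and caches
    # the merged result in the module-level _cookies dict; later calls for the
    # same domain return the cached mapping of cookie name -> value.
    cookies = get_cookies(".bing.com")
    print(f"loaded {len(cookies)} cookies for .bing.com")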
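
Note on the logging change and the 0.1.7.5 bump: the per-provider RetryProvider.logging flag is removed and providers now read the module-level g4f.debug.logging flag, so a single assignment enables the diagnostic prints everywhere. A minimal sketch of driving the reworked ChatgptAi provider with logging on; ChatCompletion.create is the package's public entry point and is not part of these diffs, so treat the exact call as an assumption.

    import g4f
    from g4f.Provider import ChatgptAi

    g4f.debug.logging = True  # read by RetryProvider, AItianhuSpace and get_model_and_provider

    # ChatgptAi is now an AsyncGeneratorProvider, so streamed chunks arrive as they
    # are parsed from the provider's "data:" lines.
    response = g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        provider=ChatgptAi,
        messages=[{"role": "user", "content": "Say hello"}],
        stream=True,
    )
    for chunk in response:
        print(chunk, end="", flush=True)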