From 86e36efe6bbae10286767b44c6a79913e5199de1 Mon Sep 17 00:00:00 2001
From: H Lohaus
Date: Sat, 28 Dec 2024 16:50:08 +0100
Subject: Add Path and PathLike support when uploading images (#2514)

* Add Path and PathLike support when uploading images

Improve raise_for_status in special cases
Move ImageResponse to the providers.response module
Improve the OpenaiChat and OpenaiAccount providers
Add Sources for web_search in OpenaiChat
Add JsonConversation for importing and exporting conversations to JS
Add the RequestLogin response type
Add TitleGeneration support in OpenaiChat and the GUI

* Improve Docker Container Guide in README.md

* Add tool calls API support, add search tool support
---
 g4f/Provider/Copilot.py                  |   9 +-
 g4f/Provider/Mhystical.py                |  58 +---
 g4f/Provider/PollinationsAI.py           |   2 +-
 g4f/Provider/bing/conversation.py        |  93 ------
 g4f/Provider/bing/upload_image.py        | 150 ---------
 g4f/Provider/deprecated/Bing.py          | 523 -------------------------------
 g4f/Provider/deprecated/__init__.py      |   3 +-
 g4f/Provider/needs_auth/Gemini.py        |  27 +-
 g4f/Provider/needs_auth/HuggingChat.py   |  24 +-
 g4f/Provider/needs_auth/HuggingFace.py   |   7 +-
 g4f/Provider/needs_auth/OpenaiAPI.py     |  16 +-
 g4f/Provider/needs_auth/OpenaiAccount.py |   2 +-
 g4f/Provider/needs_auth/OpenaiChat.py    | 160 ++++++----
 13 files changed, 167 insertions(+), 907 deletions(-)
 delete mode 100644 g4f/Provider/bing/conversation.py
 delete mode 100644 g4f/Provider/bing/upload_image.py
 delete mode 100644 g4f/Provider/deprecated/Bing.py

(limited to 'g4f/Provider')

diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
index c5618add..7999c0f4 100644
--- a/g4f/Provider/Copilot.py
+++ b/g4f/Provider/Copilot.py
@@ -18,18 +18,19 @@ try:
 except ImportError:
     has_nodriver = False
 
-from .base_provider import AbstractProvider, ProviderModelMixin, BaseConversation
+from .base_provider import AbstractProvider, ProviderModelMixin
 from .helper import format_prompt_max_length
+from .openai.har_file import get_headers, get_har_files
 from ..typing import CreateResult, Messages, ImagesType
 from ..errors import MissingRequirementsError, NoValidHarFileError
 from ..requests.raise_for_status import raise_for_status
+from ..providers.response import JsonConversation, RequestLogin
 from ..providers.asyncio import get_running_loop
-from .openai.har_file import get_headers, get_har_files
 from ..requests import get_nodriver
 from ..image import ImageResponse, to_bytes, is_accepted_format
 from .. import debug
 
-class Conversation(BaseConversation):
+class Conversation(JsonConversation):
     conversation_id: str
 
     def __init__(self, conversation_id: str):
@@ -80,7 +81,7 @@ class Copilot(AbstractProvider, ProviderModelMixin):
         if has_nodriver:
             login_url = os.environ.get("G4F_LOGIN_URL")
             if login_url:
-                yield f"[Login to {cls.label}]({login_url})\n\n"
+                yield RequestLogin(cls.label, login_url)
             get_running_loop(check_nested=True)
             cls._access_token, cls._cookies = asyncio.run(get_access_token_and_cookies(cls.url, proxy))
         else:
diff --git a/g4f/Provider/Mhystical.py b/g4f/Provider/Mhystical.py
index 14412c07..380da18d 100644
--- a/g4f/Provider/Mhystical.py
+++ b/g4f/Provider/Mhystical.py
@@ -1,12 +1,7 @@
 from __future__ import annotations
 
-import json
-import logging
-from aiohttp import ClientSession
 from ..typing import AsyncResult, Messages
-from ..requests.raise_for_status import raise_for_status
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
+from .needs_auth.OpenaiAPI import OpenaiAPI
 
 """
     Mhystical.cc
@@ -19,39 +14,31 @@ from .helper import format_prompt
 """
 
-logger = logging.getLogger(__name__)
-
-class Mhystical(AsyncGeneratorProvider, ProviderModelMixin):
+class Mhystical(OpenaiAPI):
     url = "https://api.mhystical.cc"
     api_endpoint = "https://api.mhystical.cc/v1/completions"
     working = True
+    needs_auth = False
     supports_stream = False # Set to False, as streaming is not specified in ChatifyAI
     supports_system_message = False
-    supports_message_history = True
 
     default_model = 'gpt-4'
     models = [default_model]
-    model_aliases = {}
 
     @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases.get(model, cls.default_model)
-        else:
-            return cls.default_model
+    def get_model(cls, model: str, **kwargs) -> str:
+        cls.last_model = cls.default_model
+        return cls.default_model
 
     @classmethod
-    async def create_async_generator(
+    def create_async_generator(
        cls,
        model: str,
        messages: Messages,
-        proxy: str = None,
+        stream: bool = False,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
-
        headers = {
            "x-api-key": "mhystical",
            "Content-Type": "application/json",
@@ -61,24 +48,11 @@ class Mhystical(AsyncGeneratorProvider, ProviderModelMixin):
            "referer": f"{cls.url}/",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
        }
-
-        async with ClientSession(headers=headers) as session:
-            data = {
-                "model": model,
-                "messages": [{"role": "user", "content": format_prompt(messages)}]
-            }
-            async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
-                await raise_for_status(response)
-                response_text = await response.text()
-                filtered_response = cls.filter_response(response_text)
-                yield filtered_response
-
-    @staticmethod
-    def filter_response(response_text: str) -> str:
-        try:
-            json_response = json.loads(response_text)
-            message_content = json_response["choices"][0]["message"]["content"]
-            return message_content
-        except (KeyError, IndexError, json.JSONDecodeError) as e:
-            logger.error("Error parsing response: %s", e)
-            return "Error: Failed to parse response from API."
+        return super().create_async_generator(
+            model=model,
+            messages=messages,
+            stream=cls.supports_stream,
+            api_endpoint=cls.api_endpoint,
+            headers=headers,
+            **kwargs
+        )
\ No newline at end of file
diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index 72efd088..dece4c39 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -72,7 +72,7 @@ class PollinationsAI(OpenaiAPI):
         **kwargs
     ) -> AsyncResult:
         model = cls.get_model(model)
-        if model in cls.image_models:
+        if cls.get_models() and model in cls.image_models:
             async for response in cls._generate_image(model, messages, prompt, proxy, seed, width, height):
                 yield response
         elif model in cls.models:
diff --git a/g4f/Provider/bing/conversation.py b/g4f/Provider/bing/conversation.py
deleted file mode 100644
index 43bcbb4d..00000000
--- a/g4f/Provider/bing/conversation.py
+++ /dev/null
@@ -1,93 +0,0 @@
-from __future__ import annotations
-
-from ...requests import StreamSession, raise_for_status
-from ...errors import RateLimitError
-from ...providers.response import BaseConversation
-
-class Conversation(BaseConversation):
-    """
-    Represents a conversation with specific attributes.
-    """
-    def __init__(self, conversationId: str, clientId: str, conversationSignature: str) -> None:
-        """
-        Initialize a new conversation instance.
-
-        Args:
-            conversationId (str): Unique identifier for the conversation.
-            clientId (str): Client identifier.
-            conversationSignature (str): Signature for the conversation.
-        """
-        self.conversationId = conversationId
-        self.clientId = clientId
-        self.conversationSignature = conversationSignature
-
-async def create_conversation(session: StreamSession, headers: dict, tone: str) -> Conversation:
-    """
-    Create a new conversation asynchronously.
-
-    Args:
-        session (ClientSession): An instance of aiohttp's ClientSession.
-        proxy (str, optional): Proxy URL. Defaults to None.
-
-    Returns:
-        Conversation: An instance representing the created conversation.
-    """
-    if tone == "Copilot":
-        url = "https://copilot.microsoft.com/turing/conversation/create?bundleVersion=1.1809.0"
-    else:
-        url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1809.0"
-    async with session.get(url, headers=headers) as response:
-        if response.status == 404:
-            raise RateLimitError("Response 404: Do less requests and reuse conversations")
-        await raise_for_status(response, "Failed to create conversation")
-        data = await response.json()
-        if not data:
-            raise RuntimeError('Empty response: Failed to create conversation')
-        conversationId = data.get('conversationId')
-        clientId = data.get('clientId')
-        conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
-        if not conversationId or not clientId or not conversationSignature:
-            raise RuntimeError('Empty fields: Failed to create conversation')
-        return Conversation(conversationId, clientId, conversationSignature)
-
-async def list_conversations(session: StreamSession) -> list:
-    """
-    List all conversations asynchronously.
-
-    Args:
-        session (ClientSession): An instance of aiohttp's ClientSession.
-
-    Returns:
-        list: A list of conversations.
-    """
-    url = "https://www.bing.com/turing/conversation/chats"
-    async with session.get(url) as response:
-        response = await response.json()
-        return response["chats"]
-
-async def delete_conversation(session: StreamSession, conversation: Conversation, headers: dict) -> bool:
-    """
-    Delete a conversation asynchronously.
- - Args: - session (ClientSession): An instance of aiohttp's ClientSession. - conversation (Conversation): The conversation to delete. - proxy (str, optional): Proxy URL. Defaults to None. - - Returns: - bool: True if deletion was successful, False otherwise. - """ - url = "https://sydney.bing.com/sydney/DeleteSingleConversation" - json = { - "conversationId": conversation.conversationId, - "conversationSignature": conversation.conversationSignature, - "participant": {"id": conversation.clientId}, - "source": "cib", - "optionsSets": ["autosave"] - } - try: - async with session.post(url, json=json, headers=headers) as response: - response = await response.json() - return response["result"]["value"] == "Success" - except: - return False diff --git a/g4f/Provider/bing/upload_image.py b/g4f/Provider/bing/upload_image.py deleted file mode 100644 index c517e493..00000000 --- a/g4f/Provider/bing/upload_image.py +++ /dev/null @@ -1,150 +0,0 @@ -""" -Module to handle image uploading and processing for Bing AI integrations. -""" -from __future__ import annotations - -import json -import math -from aiohttp import ClientSession, FormData - -from ...typing import ImageType, Tuple -from ...image import to_image, process_image, to_base64_jpg, ImageRequest, Image -from ...requests import raise_for_status - -IMAGE_CONFIG = { - "maxImagePixels": 360000, - "imageCompressionRate": 0.7, - "enableFaceBlurDebug": False, -} - -async def upload_image( - session: ClientSession, - image_data: ImageType, - tone: str, - headers: dict -) -> ImageRequest: - """ - Uploads an image to Bing's AI service and returns the image response. - - Args: - session (ClientSession): The active session. - image_data (bytes): The image data to be uploaded. - tone (str): The tone of the conversation. - proxy (str, optional): Proxy if any. Defaults to None. - - Raises: - RuntimeError: If the image upload fails. - - Returns: - ImageRequest: The response from the image upload. - """ - image = to_image(image_data) - new_width, new_height = calculate_new_dimensions(image) - image = process_image(image, new_width, new_height) - img_binary_data = to_base64_jpg(image, IMAGE_CONFIG['imageCompressionRate']) - - data = build_image_upload_payload(img_binary_data, tone) - - async with session.post("https://www.bing.com/images/kblob", data=data, headers=prepare_headers(headers)) as response: - await raise_for_status(response, "Failed to upload image") - return parse_image_response(await response.json()) - -def calculate_new_dimensions(image: Image) -> Tuple[int, int]: - """ - Calculates the new dimensions for the image based on the maximum allowed pixels. - - Args: - image (Image): The PIL Image object. - - Returns: - Tuple[int, int]: The new width and height for the image. - """ - width, height = image.size - max_image_pixels = IMAGE_CONFIG['maxImagePixels'] - if max_image_pixels / (width * height) < 1: - scale_factor = math.sqrt(max_image_pixels / (width * height)) - return int(width * scale_factor), int(height * scale_factor) - return width, height - -def build_image_upload_payload(image_bin: str, tone: str) -> FormData: - """ - Builds the payload for image uploading. - - Args: - image_bin (str): Base64 encoded image binary data. - tone (str): The tone of the conversation. - - Returns: - Tuple[str, str]: The data and boundary for the payload. 
- """ - data = FormData() - knowledge_request = json.dumps(build_knowledge_request(tone), ensure_ascii=False) - data.add_field('knowledgeRequest', knowledge_request, content_type="application/json") - data.add_field('imageBase64', image_bin) - return data - -def build_knowledge_request(tone: str) -> dict: - """ - Builds the knowledge request payload. - - Args: - tone (str): The tone of the conversation. - - Returns: - dict: The knowledge request payload. - """ - return { - "imageInfo": {}, - "knowledgeRequest": { - 'invokedSkills': ["ImageById"], - 'subscriptionId': "Bing.Chat.Multimodal", - 'invokedSkillsRequestData': { - 'enableFaceBlur': True - }, - 'convoData': { - 'convoid': "", - 'convotone': tone - } - } - } - -def prepare_headers(headers: dict) -> dict: - """ - Prepares the headers for the image upload request. - - Args: - session (ClientSession): The active session. - boundary (str): The boundary string for the multipart/form-data. - - Returns: - dict: The headers for the request. - """ - headers["Referer"] = 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx' - headers["Origin"] = 'https://www.bing.com' - return headers - -def parse_image_response(response: dict) -> ImageRequest: - """ - Parses the response from the image upload. - - Args: - response (dict): The response dictionary. - - Raises: - RuntimeError: If parsing the image info fails. - - Returns: - ImageRequest: The parsed image response. - """ - if not response.get('blobId'): - raise RuntimeError("Failed to parse image info.") - - result = {'bcid': response.get('blobId', ""), 'blurredBcid': response.get('processedBlobId', "")} - result["imageUrl"] = f"https://www.bing.com/images/blob?bcid={result['blurredBcid'] or result['bcid']}" - - result['originalImageUrl'] = ( - f"https://www.bing.com/images/blob?bcid={result['blurredBcid']}" - if IMAGE_CONFIG["enableFaceBlurDebug"] else - f"https://www.bing.com/images/blob?bcid={result['bcid']}" - ) - return ImageRequest(result) \ No newline at end of file diff --git a/g4f/Provider/deprecated/Bing.py b/g4f/Provider/deprecated/Bing.py deleted file mode 100644 index 49bce146..00000000 --- a/g4f/Provider/deprecated/Bing.py +++ /dev/null @@ -1,523 +0,0 @@ -from __future__ import annotations - -import random -import json -import uuid -import time -import asyncio -from urllib import parse -from datetime import datetime, date - -from ...typing import AsyncResult, Messages, ImageType, Cookies -from ...image import ImageRequest -from ...errors import ResponseError, ResponseStatusError, RateLimitError -from ...requests import DEFAULT_HEADERS -from ...requests.aiohttp import StreamSession -from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..helper import get_random_hex -from ..bing.upload_image import upload_image -from ..bing.conversation import Conversation, create_conversation, delete_conversation -from ..needs_auth.BingCreateImages import BingCreateImages -from ... import debug - -class Tones: - """ - Defines the different tone options for the Bing provider. - """ - creative = "Creative" - balanced = "Balanced" - precise = "Precise" - copilot = "Copilot" - -class Bing(AsyncGeneratorProvider, ProviderModelMixin): - """ - Bing provider for generating responses using the Bing API. 
- """ - label = "Microsoft Copilot in Bing" - url = "https://bing.com/chat" - working = False - supports_message_history = True - default_model = "Balanced" - default_vision_model = "gpt-4-vision" - models = [getattr(Tones, key) for key in Tones.__dict__ if not key.startswith("__")] - - @classmethod - def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - timeout: int = 900, - api_key: str = None, - cookies: Cookies = None, - tone: str = None, - image: ImageType = None, - web_search: bool = False, - context: str = None, - **kwargs - ) -> AsyncResult: - """ - Creates an asynchronous generator for producing responses from Bing. - - :param model: The model to use. - :param messages: Messages to process. - :param proxy: Proxy to use for requests. - :param timeout: Timeout for requests. - :param cookies: Cookies for the session. - :param tone: The tone of the response. - :param image: The image type to be used. - :param web_search: Flag to enable or disable web search. - :return: An asynchronous result object. - """ - prompt = messages[-1]["content"] - if context is None: - context = create_context(messages[:-1]) if len(messages) > 1 else None - if tone is None: - tone = tone if model.startswith("gpt-4") else model - tone = cls.get_model("" if tone is None else tone) - gpt4_turbo = True if model.startswith("gpt-4-turbo") else False - - return stream_generate( - prompt, tone, image, context, cookies, api_key, - proxy, web_search, gpt4_turbo, timeout, - **kwargs - ) - -def create_context(messages: Messages) -> str: - """ - Creates a context string from a list of messages. - - :param messages: A list of message dictionaries. - :return: A string representing the context created from the messages. - """ - return "".join( - f"[{message['role']}]" + ("(#message)" - if message['role'] != "system" - else "(#additional_instructions)") + f"\n{message['content']}" - for message in messages - ) + "\n\n" - -def get_ip_address() -> str: - return f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}" - -def get_default_cookies(): - #muid = get_random_hex().upper() - sid = get_random_hex().upper() - guid = get_random_hex().upper() - isodate = date.today().isoformat() - timestamp = int(time.time()) - zdate = "0001-01-01T00:00:00.0000000" - return { - "_C_Auth": "", - #"MUID": muid, - #"MUIDB": muid, - "_EDGE_S": f"F=1&SID={sid}", - "_EDGE_V": "1", - "SRCHD": "AF=hpcodx", - "SRCHUID": f"V=2&GUID={guid}&dmnchg=1", - "_RwBf": ( - f"r=0&ilt=1&ihpd=0&ispd=0&rc=3&rb=0&gb=0&rg=200&pc=0&mtu=0&rbb=0&g=0&cid=" - f"&clo=0&v=1&l={isodate}&lft={zdate}&aof=0&ard={zdate}" - f"&rwdbt={zdate}&rwflt={zdate}&o=2&p=&c=&t=0&s={zdate}" - f"&ts={isodate}&rwred=0&wls=&wlb=" - "&wle=&ccp=&cpt=&lka=0&lkt=0&aad=0&TH=" - ), - '_Rwho': f'u=d&ts={isodate}', - "_SS": f"SID={sid}&R=3&RB=0&GB=0&RG=200&RP=0", - "SRCHUSR": f"DOB={date.today().strftime('%Y%m%d')}&T={timestamp}", - "SRCHHPGUSR": f"HV={int(time.time())}", - "BCP": "AD=1&AL=1&SM=1", - "ipv6": f"hit={timestamp}", - '_C_ETH' : '1', - } - -async def create_headers(cookies: Cookies = None, api_key: str = None) -> dict: - if cookies is None: - # import nodriver as uc - # browser = await uc.start(headless=False) - # page = await browser.get(Defaults.home) - # await asyncio.sleep(10) - # cookies = {} - # for c in await page.browser.cookies.get_all(): - # if c.domain.endswith(".bing.com"): - # cookies[c.name] = c.value - # user_agent = await page.evaluate("window.navigator.userAgent") - # await page.close() - cookies = 
get_default_cookies() - if api_key is not None: - cookies["_U"] = api_key - headers = Defaults.headers.copy() - headers["cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items()) - return headers - -class Defaults: - """ - Default settings and configurations for the Bing provider. - """ - delimiter = "\x1e" - - # List of allowed message types for Bing responses - allowedMessageTypes = [ - "ActionRequest","Chat", - "ConfirmationCard", "Context", - "InternalSearchQuery", #"InternalSearchResult", - #"Disengaged", "InternalLoaderMessage", - "Progress", "RenderCardRequest", - "RenderContentRequest", "AdsQuery", - "SemanticSerp", "GenerateContentQuery", - "SearchQuery", "GeneratedCode", - "InternalTasksMessage" - ] - - sliceIds = { - "balanced": [ - "supllmnfe","archnewtf", - "stpstream", "stpsig", "vnextvoicecf", "scmcbase", "cmcpupsalltf", "sydtransctrl", - "thdnsrch", "220dcl1s0", "0215wcrwips0", "0305hrthrots0", "0130gpt4t", - "bingfc", "0225unsticky1", "0228scss0", - "defquerycf", "defcontrol", "3022tphpv" - ], - "creative": [ - "bgstream", "fltltst2c", - "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl", - "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t", - "bingfccf", "0225unsticky1", "0228scss0", - "3022tpvs0" - ], - "precise": [ - "bgstream", "fltltst2c", - "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl", - "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t", - "bingfccf", "0225unsticky1", "0228scss0", - "defquerycf", "3022tpvs0" - ], - "copilot": [] - } - - optionsSets = { - "balanced": { - "default": [ - "nlu_direct_response_filter", "deepleo", - "disable_emoji_spoken_text", "responsible_ai_policy_235", - "enablemm", "dv3sugg", "autosave", - "iyxapbing", "iycapbing", - "galileo", "saharagenconv5", "gldcl1p", - "gpt4tmncnp" - ], - "nosearch": [ - "nlu_direct_response_filter", "deepleo", - "disable_emoji_spoken_text", "responsible_ai_policy_235", - "enablemm", "dv3sugg", "autosave", - "iyxapbing", "iycapbing", - "galileo", "sunoupsell", "base64filter", "uprv4p1upd", - "hourthrot", "noctprf", "gndlogcf", "nosearchall" - ] - }, - "creative": { - "default": [ - "nlu_direct_response_filter", "deepleo", - "disable_emoji_spoken_text", "responsible_ai_policy_235", - "enablemm", "dv3sugg", - "iyxapbing", "iycapbing", - "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3", - "gpt4tmncnp" - ], - "nosearch": [ - "nlu_direct_response_filter", "deepleo", - "disable_emoji_spoken_text", "responsible_ai_policy_235", - "enablemm", "dv3sugg", "autosave", - "iyxapbing", "iycapbing", - "h3imaginative", "sunoupsell", "base64filter", "uprv4p1upd", - "hourthrot", "noctprf", "gndlogcf", "nosearchall", - "clgalileo", "nocache", "up4rp14bstcst" - ] - }, - "precise": { - "default": [ - "nlu_direct_response_filter", "deepleo", - "disable_emoji_spoken_text", "responsible_ai_policy_235", - "enablemm", "dv3sugg", - "iyxapbing", "iycapbing", - "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot", - "clgalileo", "gencontentv3" - ], - "nosearch": [ - "nlu_direct_response_filter", "deepleo", - "disable_emoji_spoken_text", "responsible_ai_policy_235", - "enablemm", "dv3sugg", "autosave", - "iyxapbing", "iycapbing", - "h3precise", "sunoupsell", "base64filter", "uprv4p1upd", - "hourthrot", "noctprf", "gndlogcf", "nosearchall", - "clgalileo", "nocache", "up4rp14bstcst" - ] - }, - "copilot": [ - "nlu_direct_response_filter", "deepleo", - "disable_emoji_spoken_text", "responsible_ai_policy_235", - 
"enablemm", "dv3sugg", - "iyxapbing", "iycapbing", - "h3precise", "clgalileo", "gencontentv3", "prjupy" - ], - } - - # Default location settings - location = { - "locale": "en-US", "market": "en-US", "region": "US", - "location":"lat:34.0536909;long:-118.242766;re=1000m;", - "locationHints": [{ - "country": "United States", "state": "California", "city": "Los Angeles", - "timezoneoffset": 8, "countryConfidence": 8, - "Center": {"Latitude": 34.0536909, "Longitude": -118.242766}, - "RegionType": 2, "SourceType": 1 - }], - } - - # Default headers for requests - home = "https://www.bing.com/chat?q=Microsoft+Copilot&FORM=hpcodx" - headers = { - **DEFAULT_HEADERS, - "accept": "application/json", - "referer": home, - "x-ms-client-request-id": str(uuid.uuid4()), - "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.15.1 OS/Windows", - } - -def format_message(msg: dict) -> str: - """ - Formats a message dictionary into a JSON string with a delimiter. - - :param msg: The message dictionary to format. - :return: A formatted string representation of the message. - """ - return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter - -def create_message( - conversation: Conversation, - prompt: str, - tone: str, - context: str = None, - image_request: ImageRequest = None, - web_search: bool = False, - gpt4_turbo: bool = False, - new_conversation: bool = True -) -> str: - """ - Creates a message for the Bing API with specified parameters. - - :param conversation: The current conversation object. - :param prompt: The user's input prompt. - :param tone: The desired tone for the response. - :param context: Additional context for the prompt. - :param image_request: The image request with the url. - :param web_search: Flag to enable web search. - :param gpt4_turbo: Flag to enable GPT-4 Turbo. - :return: A formatted string message for the Bing API. 
- """ - - options_sets = Defaults.optionsSets[tone.lower()] - if not web_search and "nosearch" in options_sets: - options_sets = options_sets["nosearch"] - elif "default" in options_sets: - options_sets = options_sets["default"] - options_sets = options_sets.copy() - if gpt4_turbo: - options_sets.append("dlgpt4t") - - request_id = str(uuid.uuid4()) - struct = { - "arguments":[{ - "source": "cib", - "optionsSets": options_sets, - "allowedMessageTypes": Defaults.allowedMessageTypes, - "sliceIds": Defaults.sliceIds[tone.lower()], - "verbosity": "verbose", - "scenario": "CopilotMicrosoftCom" if tone == Tones.copilot else "SERP", - "plugins": [{"id": "c310c353-b9f0-4d76-ab0d-1dd5e979cf68", "category": 1}] if web_search else [], - "traceId": get_random_hex(40), - "conversationHistoryOptionsSets": ["autosave","savemem","uprofupd","uprofgen"], - "gptId": "copilot", - "isStartOfSession": new_conversation, - "requestId": request_id, - "message":{ - **Defaults.location, - "userIpAddress": get_ip_address(), - "timestamp": datetime.now().isoformat(), - "author": "user", - "inputMethod": "Keyboard", - "text": prompt, - "messageType": "Chat", - "requestId": request_id, - "messageId": request_id - }, - "tone": "Balanced" if tone == Tones.copilot else tone, - "spokenTextMode": "None", - "conversationId": conversation.conversationId, - "participant": {"id": conversation.clientId} - }], - "invocationId": "0", - "target": "chat", - "type": 4 - } - - if image_request and image_request.get('imageUrl') and image_request.get('originalImageUrl'): - struct['arguments'][0]['message']['originalImageUrl'] = image_request.get('originalImageUrl') - struct['arguments'][0]['message']['imageUrl'] = image_request.get('imageUrl') - struct['arguments'][0]['experienceType'] = None - struct['arguments'][0]['attachedFileInfo'] = {"fileName": None, "fileType": None} - - if context: - struct['arguments'][0]['previousMessages'] = [{ - "author": "user", - "description": context, - "contextType": "ClientApp", - "messageType": "Context", - "messageId": "discover-web--page-ping-mriduna-----" - }] - - return format_message(struct) - -async def stream_generate( - prompt: str, - tone: str, - image: ImageType = None, - context: str = None, - cookies: dict = None, - api_key: str = None, - proxy: str = None, - web_search: bool = False, - gpt4_turbo: bool = False, - timeout: int = 900, - conversation: Conversation = None, - return_conversation: bool = False, - raise_apology: bool = False, - max_retries: int = None, - sleep_retry: int = 15, - **kwargs -): - """ - Asynchronously streams generated responses from the Bing API. - - :param prompt: The user's input prompt. - :param tone: The desired tone for the response. - :param image: The image type involved in the response. - :param context: Additional context for the prompt. - :param cookies: Cookies for the session. - :param web_search: Flag to enable web search. - :param gpt4_turbo: Flag to enable GPT-4 Turbo. - :param timeout: Timeout for the request. - :return: An asynchronous generator yielding responses. 
- """ - headers = await create_headers(cookies, api_key) - new_conversation = conversation is None - max_retries = (5 if new_conversation else 0) if max_retries is None else max_retries - first = True - while first or conversation is None: - async with StreamSession(timeout=timeout, proxy=proxy) as session: - first = False - do_read = True - try: - if conversation is None: - conversation = await create_conversation(session, headers, tone) - if return_conversation: - yield conversation - except (ResponseStatusError, RateLimitError) as e: - max_retries -= 1 - if max_retries < 1: - raise e - if debug.logging: - print(f"Bing: Retry: {e}") - headers = await create_headers() - await asyncio.sleep(sleep_retry) - continue - - image_request = await upload_image( - session, - image, - "Balanced" if tone == Tones.copilot else tone, - headers - ) if image else None - async with session.ws_connect( - 'wss://s.copilot.microsoft.com/sydney/ChatHub' - if tone == "Copilot" else - 'wss://sydney.bing.com/sydney/ChatHub', - autoping=False, - params={'sec_access_token': conversation.conversationSignature}, - headers=headers - ) as wss: - await wss.send_str(format_message({'protocol': 'json', 'version': 1})) - await wss.send_str(format_message({"type": 6})) - await wss.receive_str() - await wss.send_str(create_message( - conversation, prompt, tone, - context if new_conversation else None, - image_request, web_search, gpt4_turbo, - new_conversation - )) - response_txt = '' - returned_text = '' - message_id = None - while do_read: - try: - msg = await wss.receive_str() - except TypeError: - continue - objects = msg.split(Defaults.delimiter) - for obj in objects: - if not obj: - continue - try: - response = json.loads(obj) - except ValueError: - continue - if response and response.get('type') == 1 and response['arguments'][0].get('messages'): - message = response['arguments'][0]['messages'][0] - if message_id is not None and message_id != message["messageId"]: - returned_text = '' - message_id = message["messageId"] - image_response = None - if (raise_apology and message['contentOrigin'] == 'Apology'): - raise ResponseError("Apology Response Error") - if 'adaptiveCards' in message: - card = message['adaptiveCards'][0]['body'][0] - if "text" in card: - response_txt = card.get('text') - if message.get('messageType') and "inlines" in card: - inline_txt = card['inlines'][0].get('text') - response_txt += f"{inline_txt}\n" - elif message.get('contentType') == "IMAGE": - prompt = message.get('text') - try: - image_client = BingCreateImages(cookies, proxy, api_key) - image_response = await image_client.create_async(prompt) - except Exception as e: - if debug.logging: - print(f"Bing: Failed to create images: {e}") - image_response = f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}" - if response_txt.startswith(returned_text): - new = response_txt[len(returned_text):] - if new not in ("", "\n"): - yield new - returned_text = response_txt - if image_response is not None: - yield image_response - elif response.get('type') == 2: - result = response['item']['result'] - do_read = False - if result.get('error'): - max_retries -= 1 - if max_retries < 1: - if result["value"] == "CaptchaChallenge": - raise RateLimitError(f"{result['value']}: Use other cookies or/and ip address") - else: - raise RuntimeError(f"{result['value']}: {result['message']}") - if debug.logging: - print(f"Bing: Retry: {result['value']}: {result['message']}") - headers = await create_headers() - conversation = None - await 
asyncio.sleep(sleep_retry) - break - elif response.get('type') == 3: - do_read = False - break - if conversation is not None: - await delete_conversation(session, conversation, headers) diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py index afb636e8..a8c55f21 100644 --- a/g4f/Provider/deprecated/__init__.py +++ b/g4f/Provider/deprecated/__init__.py @@ -31,5 +31,4 @@ from .GeekGpt import GeekGpt from .GPTalk import GPTalk from .Hashnode import Hashnode from .Ylokh import Ylokh -from .OpenAssistant import OpenAssistant -from .Bing import Bing \ No newline at end of file +from .OpenAssistant import OpenAssistant \ No newline at end of file diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py index 62cc5f8a..1b5f17d6 100644 --- a/g4f/Provider/needs_auth/Gemini.py +++ b/g4f/Provider/needs_auth/Gemini.py @@ -17,8 +17,9 @@ except ImportError: from ... import debug from ...typing import Messages, Cookies, ImagesType, AsyncResult, AsyncIterator -from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation, SynthesizeData +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin from ..helper import format_prompt, get_cookies +from ...providers.response import JsonConversation, SynthesizeData, RequestLogin from ...requests.raise_for_status import raise_for_status from ...requests.aiohttp import get_connector from ...requests import get_nodriver @@ -81,7 +82,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin): browser = await get_nodriver(proxy=proxy, user_data_dir="gemini") login_url = os.environ.get("G4F_LOGIN_URL") if login_url: - yield f"[Login to {cls.label}]({login_url})\n\n" + yield RequestLogin(cls.label, login_url) page = await browser.get(f"{cls.url}/app") await page.select("div.ql-editor.textarea", 240) cookies = {} @@ -305,37 +306,37 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin): if sid_match: cls._sid = sid_match.group(1) -class Conversation(BaseConversation): +class Conversation(JsonConversation): def __init__(self, - conversation_id: str = "", - response_id: str = "", - choice_id: str = "" + conversation_id: str, + response_id: str, + choice_id: str ) -> None: self.conversation_id = conversation_id self.response_id = response_id self.choice_id = choice_id -async def iter_filter_base64(response_iter: AsyncIterator[bytes]) -> AsyncIterator[bytes]: +async def iter_filter_base64(chunks: AsyncIterator[bytes]) -> AsyncIterator[bytes]: search_for = b'[["wrb.fr","XqA3Ic","[\\"' end_with = b'\\' is_started = False - async for chunk in response_iter: + async for chunk in chunks: if is_started: if end_with in chunk: - yield chunk.split(end_with, 1).pop(0) + yield chunk.split(end_with, 1, maxsplit=1).pop(0) break else: yield chunk elif search_for in chunk: is_started = True - yield chunk.split(search_for, 1).pop() + yield chunk.split(search_for, 1, maxsplit=1).pop() else: - raise RuntimeError(f"Response: {chunk}") + raise ValueError(f"Response: {chunk}") -async def iter_base64_decode(response_iter: AsyncIterator[bytes]) -> AsyncIterator[bytes]: +async def iter_base64_decode(chunks: AsyncIterator[bytes]) -> AsyncIterator[bytes]: buffer = b"" rest = 0 - async for chunk in response_iter: + async for chunk in chunks: chunk = buffer + chunk rest = len(chunk) % 4 buffer = chunk[-rest:] diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py index 431273f6..c261595b 100644 --- a/g4f/Provider/needs_auth/HuggingChat.py +++ 
b/g4f/Provider/needs_auth/HuggingChat.py @@ -8,19 +8,19 @@ try: except ImportError: has_curl_cffi = False +from ..base_provider import ProviderModelMixin, AbstractProvider +from ..helper import format_prompt from ...typing import CreateResult, Messages, Cookies from ...errors import MissingRequirementsError from ...requests.raise_for_status import raise_for_status +from ...providers.response import JsonConversation, ImageResponse, Sources from ...cookies import get_cookies -from ...image import ImageResponse -from ..base_provider import ProviderModelMixin, AbstractProvider, BaseConversation -from ..helper import format_prompt from ... import debug -class Conversation(BaseConversation): +class Conversation(JsonConversation): def __init__(self, conversation_id: str, message_id: str): - self.conversation_id = conversation_id - self.message_id = message_id + self.conversation_id: str = conversation_id + self.message_id: str = message_id class HuggingChat(AbstractProvider, ProviderModelMixin): url = "https://huggingface.co/chat" @@ -152,33 +152,35 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): raise_for_status(response) full_response = "" + sources = None for line in response.iter_lines(): if not line: continue try: line = json.loads(line) except json.JSONDecodeError as e: - print(f"Failed to decode JSON: {line}, error: {e}") + debug.log(f"Failed to decode JSON: {line}, error: {e}") continue - if "type" not in line: raise RuntimeError(f"Response: {line}") - elif line["type"] == "stream": token = line["token"].replace('\u0000', '') full_response += token if stream: yield token - elif line["type"] == "finalAnswer": break elif line["type"] == "file": url = f"https://huggingface.co/chat/conversation/{conversation.conversation_id}/output/{line['sha']}" yield ImageResponse(url, alt=messages[-1]["content"], options={"cookies": cookies}) + elif line["type"] == "webSearch" and "sources" in line: + sources = Sources(line["sources"]) - full_response = full_response.replace('<|im_end|', '').replace('\u0000', '').strip() + full_response = full_response.replace('<|im_end|', '').strip() if not stream: yield full_response + if sources is not None: + yield sources @classmethod def create_conversation(cls, session: Session, model: str): diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py index 6887ac4d..19f33fd0 100644 --- a/g4f/Provider/needs_auth/HuggingFace.py +++ b/g4f/Provider/needs_auth/HuggingFace.py @@ -48,6 +48,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin): max_new_tokens: int = 1024, temperature: float = 0.7, prompt: str = None, + extra_data: dict = {}, **kwargs ) -> AsyncResult: try: @@ -73,16 +74,16 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin): if api_key is not None: headers["Authorization"] = f"Bearer {api_key}" payload = None - if model in cls.image_models: + if cls.get_models() and model in cls.image_models: stream = False prompt = messages[-1]["content"] if prompt is None else prompt - payload = {"inputs": prompt, "parameters": {"seed": random.randint(0, 2**32)}} + payload = {"inputs": prompt, "parameters": {"seed": random.randint(0, 2**32), **extra_data}} else: params = { "return_full_text": False, "max_new_tokens": max_new_tokens, "temperature": temperature, - **kwargs + **extra_data } async with StreamSession( headers=headers, diff --git a/g4f/Provider/needs_auth/OpenaiAPI.py b/g4f/Provider/needs_auth/OpenaiAPI.py index 37b7cf62..6471895e 100644 --- a/g4f/Provider/needs_auth/OpenaiAPI.py +++ 
b/g4f/Provider/needs_auth/OpenaiAPI.py @@ -4,9 +4,10 @@ import json import requests from ..helper import filter_none -from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, FinishReason +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin from ...typing import Union, Optional, AsyncResult, Messages, ImagesType from ...requests import StreamSession, raise_for_status +from ...providers.response import FinishReason, ToolCalls, Usage from ...errors import MissingAuthError, ResponseError from ...image import to_data_uri from ... import debug @@ -50,6 +51,7 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin): timeout: int = 120, images: ImagesType = None, api_key: str = None, + api_endpoint: str = None, api_base: str = None, temperature: float = None, max_tokens: int = None, @@ -58,6 +60,7 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin): stream: bool = False, headers: dict = None, impersonate: str = None, + tools: Optional[list] = None, extra_data: dict = {}, **kwargs ) -> AsyncResult: @@ -92,16 +95,23 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin): top_p=top_p, stop=stop, stream=stream, + tools=tools, **extra_data ) - async with session.post(f"{api_base.rstrip('/')}/chat/completions", json=data) as response: + if api_endpoint is None: + api_endpoint = f"{api_base.rstrip('/')}/chat/completions" + async with session.post(api_endpoint, json=data) as response: await raise_for_status(response) if not stream: data = await response.json() cls.raise_error(data) choice = data["choices"][0] - if "content" in choice["message"]: + if "content" in choice["message"] and choice["message"]["content"]: yield choice["message"]["content"].strip() + elif "tool_calls" in choice["message"]: + yield ToolCalls(choice["message"]["tool_calls"]) + if "usage" in data: + yield Usage(**data["usage"]) finish = cls.read_finish_reason(choice) if finish is not None: yield finish diff --git a/g4f/Provider/needs_auth/OpenaiAccount.py b/g4f/Provider/needs_auth/OpenaiAccount.py index e7ed2982..5e6c9449 100644 --- a/g4f/Provider/needs_auth/OpenaiAccount.py +++ b/g4f/Provider/needs_auth/OpenaiAccount.py @@ -8,5 +8,5 @@ class OpenaiAccount(OpenaiChat): image_models = ["dall-e-3", "gpt-4", "gpt-4o"] default_vision_model = "gpt-4o" default_image_model = "dall-e-3" - models = [*OpenaiChat.fallback_models, default_image_model] + fallback_models = [*OpenaiChat.fallback_models, default_image_model] model_aliases = {default_image_model: default_vision_model} \ No newline at end of file diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py index a0c73006..9c869fef 100644 --- a/g4f/Provider/needs_auth/OpenaiChat.py +++ b/g4f/Provider/needs_auth/OpenaiChat.py @@ -9,24 +9,23 @@ import base64 import time import requests import random -from typing import AsyncIterator +from typing import AsyncIterator, Iterator, Optional, Generator, Dict, List from copy import copy try: import nodriver - from nodriver.cdp.network import get_response_body has_nodriver = True except ImportError: has_nodriver = False from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ...typing import AsyncResult, Messages, Cookies, ImagesType, AsyncIterator +from ...typing import AsyncResult, Messages, Cookies, ImagesType from ...requests.raise_for_status import raise_for_status -from ...requests import StreamSession +from ...requests import StreamSession, Session from ...requests import get_nodriver from ...image import ImageResponse, 
ImageRequest, to_image, to_bytes, is_accepted_format from ...errors import MissingAuthError, NoValidHarFileError -from ...providers.response import BaseConversation, FinishReason, SynthesizeData +from ...providers.response import JsonConversation, FinishReason, SynthesizeData, Sources, TitleGeneration, RequestLogin, quote_url from ..helper import format_cookies from ..openai.har_file import get_request_config from ..openai.har_file import RequestConfig, arkReq, arkose_url, start_url, conversation_url, backend_url, backend_anon_url @@ -106,15 +105,30 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): _expires: int = None @classmethod - def get_models(cls): + def get_models(cls, proxy: str = None, timeout: int = 180) -> List[str]: if not cls.models: - try: - response = requests.get(f"{cls.url}/backend-anon/models") - response.raise_for_status() - data = response.json() - cls.models = [model.get("slug") for model in data.get("models")] - except Exception: - cls.models = cls.fallback_models + # try: + # headers = { + # **(cls.get_default_headers() if cls._headers is None else cls._headers), + # "accept": "application/json", + # } + # with Session( + # proxy=proxy, + # impersonate="chrome", + # timeout=timeout, + # headers=headers + # ) as session: + # response = session.get( + # f"{cls.url}/backend-anon/models" + # if cls._api_key is None else + # f"{cls.url}/backend-api/models" + # ) + # raise_for_status(response) + # data = response.json() + # cls.models = [model.get("slug") for model in data.get("models")] + # except Exception as e: + # debug.log(f"OpenaiChat: Failed to get models: {type(e).__name__}: {e}") + cls.models = cls.fallback_models return cls.models @classmethod @@ -199,13 +213,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): """ # Create a message object with the user role and the content messages = [{ + "id": str(uuid.uuid4()), "author": {"role": message["role"]}, "content": {"content_type": "text", "parts": [message["content"]]}, - "id": str(uuid.uuid4()), - "create_time": int(time.time()), - "metadata": {"serialization_metadata": {"custom_symbol_offsets": []}, "system_hints": system_hints}, + "metadata": {"serialization_metadata": {"custom_symbol_offsets": []}, **({"system_hints": system_hints} if system_hints else {})}, + "create_time": time.time(), } for message in messages] - # Check if there is an image response if image_requests: # Change content in last user message @@ -236,24 +249,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): @classmethod async def get_generated_image(cls, session: StreamSession, headers: dict, element: dict, prompt: str = None) -> ImageResponse: - """ - Retrieves the image response based on the message content. - - This method processes the message content to extract image information and retrieves the - corresponding image from the backend API. It then returns an ImageResponse object containing - the image URL and the prompt used to generate the image. - - Args: - session (StreamSession): The StreamSession object used for making HTTP requests. - headers (dict): HTTP headers to be used for the request. - line (dict): A dictionary representing the line of response that contains image information. - - Returns: - ImageResponse: An object containing the image URL and the prompt, or None if no image is found. - - Raises: - RuntimeError: If there'san error in downloading the image, including issues with the HTTP request or response. 
- """ try: prompt = element["metadata"]["dalle"]["prompt"] file_id = element["asset_pointer"].split("file-service://", 1)[1] @@ -347,6 +342,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): if cls._api_key is None: auto_continue = False conversation.finish_reason = None + sources = Sources([]) while conversation.finish_reason is None: async with session.post( f"{cls.url}/backend-anon/sentinel/chat-requirements" @@ -387,11 +383,11 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): )] data = { "action": action, - "messages": None, "parent_message_id": conversation.message_id, "model": model, "timezone_offset_min":-60, "timezone":"Europe/Berlin", + "suggestions":[], "history_and_training_disabled": history_disabled and not auto_continue and not return_conversation or not cls.needs_auth, "conversation_mode":{"kind":"primary_assistant","plugin_ids":None}, "force_paragen":False, @@ -433,17 +429,40 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): headers=headers ) as response: cls._update_request_args(session) - if response.status == 403 and max_retries > 0: + if response.status in (403, 404) and max_retries > 0: max_retries -= 1 debug.log(f"Retry: Error {response.status}: {await response.text()}") + conversation.conversation_id = None await asyncio.sleep(5) continue await raise_for_status(response) - if return_conversation: - yield conversation + buffer = u"" async for line in response.iter_lines(): - async for chunk in cls.iter_messages_line(session, line, conversation): - yield chunk + async for chunk in cls.iter_messages_line(session, line, conversation, sources): + if isinstance(chunk, str): + chunk = chunk.replace("\ue203", "").replace("\ue204", "").replace("\ue206", "") + buffer += chunk + if buffer.find(u"\ue200") != -1: + if buffer.find(u"\ue201") != -1: + buffer = buffer.replace("\ue200", "").replace("\ue202", "\n").replace("\ue201", "") + buffer = buffer.replace("navlist\n", "#### ") + def replacer(match): + link = None + if len(sources.list) > int(match.group(1)): + link = sources.list[int(match.group(1))]["url"] + return f"[[{int(match.group(1))+1}]]({link})" + return f" [{int(match.group(1))+1}]" + buffer = re.sub(r'(?:cite\nturn0search|cite\nturn0news|turn0news)(\d+)', replacer, buffer) + else: + continue + yield buffer + buffer = "" + else: + yield chunk + if sources.list: + yield sources + if return_conversation: + yield conversation if not history_disabled and cls._api_key is not None: yield SynthesizeData(cls.__name__, { "conversation_id": conversation.conversation_id, @@ -459,7 +478,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): yield FinishReason(conversation.finish_reason) @classmethod - async def iter_messages_line(cls, session: StreamSession, line: bytes, fields: Conversation) -> AsyncIterator: + async def iter_messages_line(cls, session: StreamSession, line: bytes, fields: Conversation, sources: Sources) -> AsyncIterator: if not line.startswith(b"data: "): return elif line.startswith(b"data: [DONE]"): @@ -470,15 +489,26 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): line = json.loads(line[6:]) except: return - if isinstance(line, dict) and "v" in line: + if not isinstance(line, dict): + return + if "type" in line: + if line["type"] == "title_generation": + yield TitleGeneration(line["title"]) + if "v" in line: v = line.get("v") if isinstance(v, str) and fields.is_recipient: if "p" not in line or line.get("p") == "/message/content/parts/0": yield v - elif isinstance(v, list) and 
fields.is_recipient: + elif isinstance(v, list): for m in v: - if m.get("p") == "/message/content/parts/0": + if m.get("p") == "/message/content/parts/0" and fields.is_recipient: yield m.get("v") + elif m.get("p") == "/message/metadata/search_result_groups": + for entry in [p.get("entries") for p in m.get("v")]: + for link in entry: + sources.add_source(link) + elif re.match(r"^/message/metadata/content_references/\d+$", m.get("p")): + sources.add_source(m.get("v")) elif m.get("p") == "/message/metadata": fields.finish_reason = m.get("v", {}).get("finish_details", {}).get("type") break @@ -529,14 +559,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): try: await get_request_config(proxy) cls._create_request_args(RequestConfig.cookies, RequestConfig.headers) - if RequestConfig.access_token is not None: - cls._set_api_key(RequestConfig.access_token) + if RequestConfig.access_token is not None or cls.needs_auth: + if not cls._set_api_key(RequestConfig.access_token): + raise NoValidHarFileError(f"Access token is not valid: {RequestConfig.access_token}") except NoValidHarFileError: if has_nodriver: if cls._api_key is None: login_url = os.environ.get("G4F_LOGIN_URL") if login_url: - yield f"[Login to {cls.label}]({login_url})\n\n" + yield RequestLogin(cls.label, login_url) await cls.nodriver_auth(proxy) else: raise @@ -563,7 +594,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): arkBx=None, arkHeader=event.request.headers, arkBody=event.request.post_data, - userAgent=event.request.headers.get("user-agent") + userAgent=event.request.headers.get("User-Agent") ) await page.send(nodriver.cdp.network.enable()) page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request) @@ -585,14 +616,13 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): break await asyncio.sleep(1) RequestConfig.data_build = await page.evaluate("document.documentElement.getAttribute('data-build')") - for c in await page.send(get_cookies([cls.url])): - RequestConfig.cookies[c["name"]] = c["value"] + RequestConfig.cookies = await page.send(get_cookies([cls.url])) await page.close() cls._create_request_args(RequestConfig.cookies, RequestConfig.headers, user_agent=user_agent) cls._set_api_key(cls._api_key) @staticmethod - def get_default_headers() -> dict: + def get_default_headers() -> Dict[str, str]: return { **DEFAULT_HEADERS, "content-type": "application/json", @@ -609,22 +639,30 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): @classmethod def _update_request_args(cls, session: StreamSession): for c in session.cookie_jar if hasattr(session, "cookie_jar") else session.cookies.jar: - cls._cookies[c.key if hasattr(c, "key") else c.name] = c.value + cls._cookies[getattr(c, "key", getattr(c, "name", ""))] = c.value cls._update_cookie_header() @classmethod def _set_api_key(cls, api_key: str): - cls._api_key = api_key - cls._expires = int(time.time()) + 60 * 60 * 4 if api_key: - cls._headers["authorization"] = f"Bearer {api_key}" + exp = api_key.split(".")[1] + exp = (exp + "=" * (4 - len(exp) % 4)).encode() + cls._expires = json.loads(base64.b64decode(exp)).get("exp") + debug.log(f"OpenaiChat: API key expires at\n {cls._expires} we have:\n {time.time()}") + if time.time() > cls._expires: + debug.log(f"OpenaiChat: API key is expired") + else: + cls._api_key = api_key + cls._headers["authorization"] = f"Bearer {api_key}" + return True + return False @classmethod def _update_cookie_header(cls): if cls._cookies: cls._headers["cookie"] = format_cookies(cls._cookies) 
-class Conversation(BaseConversation):
+class Conversation(JsonConversation):
     """
     Class to encapsulate response fields.
     """
@@ -633,10 +671,10 @@ class Conversation(BaseConversation):
         self.message_id = message_id
         self.finish_reason = finish_reason
         self.is_recipient = False
-    
+
 def get_cookies(
-        urls: list[str] = None
-    ):
+    urls: Optional[Iterator[str]] = None
+) -> Generator[Dict, Dict, Dict[str, str]]:
     params = {}
     if urls is not None:
         params['urls'] = [i for i in urls]
@@ -645,4 +683,4 @@ def get_cookies(
         'params': params,
     }
     json = yield cmd_dict
-    return json['cookies']
\ No newline at end of file
+    return {c["name"]: c["value"] for c in json['cookies']} if 'cookies' in json else {}
\ No newline at end of file
--
cgit v1.2.3
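
The tool-calls and Path-upload features named in the commit message can be exercised from user code roughly as follows. This is a minimal sketch written against the signatures visible in this patch, not code shipped with the commit: the model name, the placeholder API key, and the get_weather tool are illustrative assumptions.

# Minimal usage sketch (not part of the patch): exercising the new `tools`
# parameter and the ToolCalls/Usage response types added to OpenaiAPI.
import asyncio

from g4f.Provider.needs_auth.OpenaiAPI import OpenaiAPI
from g4f.providers.response import FinishReason, ToolCalls, Usage

# Hypothetical tool definition, for illustration only.
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

async def main() -> None:
    # `tools` is forwarded into the request body; with stream=False (the
    # default) the generator yields text, then ToolCalls/Usage/FinishReason.
    async for chunk in OpenaiAPI.create_async_generator(
        model="gpt-4o-mini",                  # assumed model name
        messages=[{"role": "user", "content": "What is the weather in Berlin?"}],
        api_key="sk-...",                     # placeholder, supply a real key
        # Images now also accept pathlib.Path / os.PathLike entries, e.g.
        # images=[(Path("photo.jpg"), "photo.jpg")],
        tools=tools,
    ):
        if isinstance(chunk, ToolCalls):
            print("tool calls:", chunk)       # model requested a tool invocation
        elif isinstance(chunk, Usage):
            print("usage:", chunk)            # token accounting from the API
        elif isinstance(chunk, FinishReason):
            print("finish reason:", chunk.reason)
        else:
            print(chunk, end="")              # plain text content

asyncio.run(main())

Yielding ToolCalls and Usage as typed chunks keeps the provider's single async-generator interface intact while still surfacing the structured parts of the API response.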