From e98793d0a7af43878cf023fb045dd945a82507cf Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Wed, 6 Nov 2024 17:25:09 +0200
Subject: Update (g4f/models.py g4f/Provider/ docs/providers-and-models.md)

---
 g4f/Provider/needs_auth/DeepInfraImage.py    |  80 +++++++
 g4f/Provider/needs_auth/HuggingFace.py       | 104 +++++++++
 g4f/Provider/needs_auth/MetaAI.py            | 238 +++++++++++++++++++++
 g4f/Provider/needs_auth/MetaAIAccount.py     |  23 ++
 g4f/Provider/needs_auth/OpenRouter.py        |  32 ---
 g4f/Provider/needs_auth/Replicate.py         |  88 ++++++++
 g4f/Provider/needs_auth/__init__.py          |   8 +-
 g4f/Provider/needs_auth/gigachat/GigaChat.py |  92 ++++++++
 g4f/Provider/needs_auth/gigachat/__init__.py |   2 +
 .../gigachat/russian_trusted_root_ca_pem.crt |  33 +++
 10 files changed, 667 insertions(+), 33 deletions(-)
 create mode 100644 g4f/Provider/needs_auth/DeepInfraImage.py
 create mode 100644 g4f/Provider/needs_auth/HuggingFace.py
 create mode 100644 g4f/Provider/needs_auth/MetaAI.py
 create mode 100644 g4f/Provider/needs_auth/MetaAIAccount.py
 delete mode 100644 g4f/Provider/needs_auth/OpenRouter.py
 create mode 100644 g4f/Provider/needs_auth/Replicate.py
 create mode 100644 g4f/Provider/needs_auth/gigachat/GigaChat.py
 create mode 100644 g4f/Provider/needs_auth/gigachat/__init__.py
 create mode 100644 g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt

diff --git a/g4f/Provider/needs_auth/DeepInfraImage.py b/g4f/Provider/needs_auth/DeepInfraImage.py
new file mode 100644
index 00000000..2310c1c8
--- /dev/null
+++ b/g4f/Provider/needs_auth/DeepInfraImage.py
@@ -0,0 +1,80 @@
+from __future__ import annotations + +import requests + +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...typing import AsyncResult, Messages +from ...requests import StreamSession, raise_for_status +from ...image import ImageResponse + +class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://deepinfra.com" + parent = "DeepInfra" + working = True + needs_auth = True + default_model = '' + image_models = [default_model] + + @classmethod + def get_models(cls): + if not cls.models: + url = 'https://api.deepinfra.com/models/featured' + models = requests.get(url).json() + cls.models = [model['model_name'] for model in models if model["reported_type"] == "text-to-image"] + cls.image_models = cls.models + return cls.models + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + **kwargs + ) -> AsyncResult: + yield await cls.create_async(messages[-1]["content"], model, **kwargs) + + @classmethod + async def create_async( + cls, + prompt: str, + model: str, + api_key: str = None, + api_base: str = "https://api.deepinfra.com/v1/inference", + proxy: str = None, + timeout: int = 180, + extra_data: dict = {}, + **kwargs + ) -> ImageResponse: + headers = { + 'Accept-Encoding': 'gzip, deflate, br', + 'Accept-Language': 'en-US', + 'Connection': 'keep-alive', + 'Origin': 'https://deepinfra.com', + 'Referer': 'https://deepinfra.com/', + 'Sec-Fetch-Dest': 'empty', + 'Sec-Fetch-Mode': 'cors', + 'Sec-Fetch-Site': 'same-site', + 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36', + 'X-Deepinfra-Source': 'web-embed', + 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"macOS"', + } + if api_key is not None: + headers["Authorization"] = f"Bearer {api_key}" + async with 
StreamSession( + proxies={"all": proxy}, + headers=headers, + timeout=timeout + ) as session: + model = cls.get_model(model) + data = {"prompt": prompt, **extra_data} + data = {"input": data} if model == cls.default_model else data + async with session.post(f"{api_base.rstrip('/')}/{model}", json=data) as response: + await raise_for_status(response) + data = await response.json() + images = data["output"] if "output" in data else data["images"] + if not images: + raise RuntimeError(f"Response: {data}") + images = images[0] if len(images) == 1 else images + return ImageResponse(images, prompt) diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py new file mode 100644 index 00000000..ecc75d1c --- /dev/null +++ b/g4f/Provider/needs_auth/HuggingFace.py @@ -0,0 +1,104 @@ +from __future__ import annotations + +import json +from aiohttp import ClientSession, BaseConnector + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import get_connector +from ...errors import RateLimitError, ModelNotFoundError +from ...requests.raise_for_status import raise_for_status + +from ..HuggingChat import HuggingChat + +class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://huggingface.co/chat" + working = True + needs_auth = True + supports_message_history = True + default_model = HuggingChat.default_model + models = HuggingChat.models + model_aliases = HuggingChat.model_aliases + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + stream: bool = True, + proxy: str = None, + connector: BaseConnector = None, + api_base: str = "https://api-inference.huggingface.co", + api_key: str = None, + max_new_tokens: int = 1024, + temperature: float = 0.7, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + headers = { + 'accept': '*/*', + 'accept-language': 'en', + 'cache-control': 'no-cache', + 'origin': 'https://huggingface.co', + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'referer': 'https://huggingface.co/chat/', + 'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"macOS"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-origin', + 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36', + } + if api_key is not None: + headers["Authorization"] = f"Bearer {api_key}" + + params = { + "return_full_text": False, + "max_new_tokens": max_new_tokens, + "temperature": temperature, + **kwargs + } + payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream} + + async with ClientSession( + headers=headers, + connector=get_connector(connector, proxy) + ) as session: + async with session.post(f"{api_base.rstrip('/')}/models/{model}", json=payload) as response: + if response.status == 404: + raise ModelNotFoundError(f"Model is not supported: {model}") + await raise_for_status(response) + if stream: + first = True + async for line in response.content: + if line.startswith(b"data:"): + data = json.loads(line[5:]) + if not data["token"]["special"]: + chunk = data["token"]["text"] + if first: + first = False + chunk 
= chunk.lstrip() + yield chunk + else: + yield (await response.json())[0]["generated_text"].strip() + +def format_prompt(messages: Messages) -> str: + system_messages = [message["content"] for message in messages if message["role"] == "system"] + question = " ".join([messages[-1]["content"], *system_messages]) + history = "".join([ + f"[INST]{messages[idx-1]['content']} [/INST] {message['content']}" + for idx, message in enumerate(messages) + if message["role"] == "assistant" + ]) + return f"{history}[INST] {question} [/INST]" diff --git a/g4f/Provider/needs_auth/MetaAI.py b/g4f/Provider/needs_auth/MetaAI.py new file mode 100644 index 00000000..4b730abd --- /dev/null +++ b/g4f/Provider/needs_auth/MetaAI.py @@ -0,0 +1,238 @@ +from __future__ import annotations + +import json +import uuid +import random +import time +from typing import Dict, List + +from aiohttp import ClientSession, BaseConnector + +from ...typing import AsyncResult, Messages, Cookies +from ...requests import raise_for_status, DEFAULT_HEADERS +from ...image import ImageResponse, ImagePreview +from ...errors import ResponseError +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt, get_connector, format_cookies + +class Sources(): + def __init__(self, link_list: List[Dict[str, str]]) -> None: + self.list = link_list + + def __str__(self) -> str: + return "\n\n" + ("\n".join([f"[{link['title']}]({link['link']})" for link in self.list])) + +class AbraGeoBlockedError(Exception): + pass + +class MetaAI(AsyncGeneratorProvider, ProviderModelMixin): + label = "Meta AI" + url = "https://www.meta.ai" + working = True + default_model = '' + + def __init__(self, proxy: str = None, connector: BaseConnector = None): + self.session = ClientSession(connector=get_connector(connector, proxy), headers=DEFAULT_HEADERS) + self.cookies: Cookies = None + self.access_token: str = None + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + async for chunk in cls(proxy).prompt(format_prompt(messages)): + yield chunk + + async def update_access_token(self, birthday: str = "1999-01-01"): + url = "https://www.meta.ai/api/graphql/" + payload = { + "lsd": self.lsd, + "fb_api_caller_class": "RelayModern", + "fb_api_req_friendly_name": "useAbraAcceptTOSForTempUserMutation", + "variables": json.dumps({ + "dob": birthday, + "icebreaker_type": "TEXT", + "__relay_internal__pv__WebPixelRatiorelayprovider": 1, + }), + "doc_id": "7604648749596940", + } + headers = { + "x-fb-friendly-name": "useAbraAcceptTOSForTempUserMutation", + "x-fb-lsd": self.lsd, + "x-asbd-id": "129477", + "alt-used": "www.meta.ai", + "sec-fetch-site": "same-origin" + } + async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response: + await raise_for_status(response, "Fetch access_token failed") + auth_json = await response.json(content_type=None) + self.access_token = auth_json["data"]["xab_abra_accept_terms_of_service"]["new_temp_user_auth"]["access_token"] + + async def prompt(self, message: str, cookies: Cookies = None) -> AsyncResult: + if self.cookies is None: + await self.update_cookies(cookies) + if cookies is not None: + self.access_token = None + if self.access_token is None and cookies is None: + await self.update_access_token() + + if self.access_token is None: + url = "https://www.meta.ai/api/graphql/" + payload = {"lsd": self.lsd, 'fb_dtsg': self.dtsg} + headers = {'x-fb-lsd': self.lsd} + 
else: + url = "https://graph.meta.ai/graphql?locale=user" + payload = {"access_token": self.access_token} + headers = {} + headers = { + 'content-type': 'application/x-www-form-urlencoded', + 'cookie': format_cookies(self.cookies), + 'origin': 'https://www.meta.ai', + 'referer': 'https://www.meta.ai/', + 'x-asbd-id': '129477', + 'x-fb-friendly-name': 'useAbraSendMessageMutation', + **headers + } + payload = { + **payload, + 'fb_api_caller_class': 'RelayModern', + 'fb_api_req_friendly_name': 'useAbraSendMessageMutation', + "variables": json.dumps({ + "message": {"sensitive_string_value": message}, + "externalConversationId": str(uuid.uuid4()), + "offlineThreadingId": generate_offline_threading_id(), + "suggestedPromptIndex": None, + "flashVideoRecapInput": {"images": []}, + "flashPreviewInput": None, + "promptPrefix": None, + "entrypoint": "ABRA__CHAT__TEXT", + "icebreaker_type": "TEXT", + "__relay_internal__pv__AbraDebugDevOnlyrelayprovider": False, + "__relay_internal__pv__WebPixelRatiorelayprovider": 1, + }), + 'server_timestamps': 'true', + 'doc_id': '7783822248314888' + } + async with self.session.post(url, headers=headers, data=payload) as response: + await raise_for_status(response, "Fetch response failed") + last_snippet_len = 0 + fetch_id = None + async for line in response.content: + if b"
<h1>Something Went Wrong</h1>
" in line: + raise ResponseError("Response: Something Went Wrong") + try: + json_line = json.loads(line) + except json.JSONDecodeError: + continue + bot_response_message = json_line.get("data", {}).get("node", {}).get("bot_response_message", {}) + streaming_state = bot_response_message.get("streaming_state") + fetch_id = bot_response_message.get("fetch_id") or fetch_id + if streaming_state in ("STREAMING", "OVERALL_DONE"): + imagine_card = bot_response_message.get("imagine_card") + if imagine_card is not None: + imagine_session = imagine_card.get("session") + if imagine_session is not None: + imagine_medias = imagine_session.get("media_sets", {}).pop().get("imagine_media") + if imagine_medias is not None: + image_class = ImageResponse if streaming_state == "OVERALL_DONE" else ImagePreview + yield image_class([media["uri"] for media in imagine_medias], imagine_medias[0]["prompt"]) + snippet = bot_response_message["snippet"] + new_snippet_len = len(snippet) + if new_snippet_len > last_snippet_len: + yield snippet[last_snippet_len:] + last_snippet_len = new_snippet_len + #if last_streamed_response is None: + # if attempts > 3: + # raise Exception("MetaAI is having issues and was not able to respond (Server Error)") + # access_token = await self.get_access_token() + # return await self.prompt(message=message, attempts=attempts + 1) + if fetch_id is not None: + sources = await self.fetch_sources(fetch_id) + if sources is not None: + yield sources + + async def update_cookies(self, cookies: Cookies = None): + async with self.session.get("https://www.meta.ai/", cookies=cookies) as response: + await raise_for_status(response, "Fetch home failed") + text = await response.text() + if "AbraGeoBlockedError" in text: + raise AbraGeoBlockedError("Meta AI isn't available yet in your country") + if cookies is None: + cookies = { + "_js_datr": self.extract_value(text, "_js_datr"), + "abra_csrf": self.extract_value(text, "abra_csrf"), + "datr": self.extract_value(text, "datr"), + } + self.lsd = self.extract_value(text, start_str='"LSD",[],{"token":"', end_str='"}') + self.dtsg = self.extract_value(text, start_str='"DTSGInitialData",[],{"token":"', end_str='"}') + self.cookies = cookies + + async def fetch_sources(self, fetch_id: str) -> Sources: + if self.access_token is None: + url = "https://www.meta.ai/api/graphql/" + payload = {"lsd": self.lsd, 'fb_dtsg': self.dtsg} + headers = {'x-fb-lsd': self.lsd} + else: + url = "https://graph.meta.ai/graphql?locale=user" + payload = {"access_token": self.access_token} + headers = {} + payload = { + **payload, + "fb_api_caller_class": "RelayModern", + "fb_api_req_friendly_name": "AbraSearchPluginDialogQuery", + "variables": json.dumps({"abraMessageFetchID": fetch_id}), + "server_timestamps": "true", + "doc_id": "6946734308765963", + } + headers = { + "authority": "graph.meta.ai", + "x-fb-friendly-name": "AbraSearchPluginDialogQuery", + **headers + } + async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response: + await raise_for_status(response, "Fetch sources failed") + text = await response.text() + if "
<h1>Something Went Wrong</h1>
" in text: + raise ResponseError("Response: Something Went Wrong") + try: + response_json = json.loads(text) + message = response_json["data"]["message"] + if message is not None: + searchResults = message["searchResults"] + if searchResults is not None: + return Sources(searchResults["references"]) + except (KeyError, TypeError, json.JSONDecodeError): + raise RuntimeError(f"Response: {text}") + + @staticmethod + def extract_value(text: str, key: str = None, start_str = None, end_str = '",') -> str: + if start_str is None: + start_str = f'{key}":{{"value":"' + start = text.find(start_str) + if start >= 0: + start+= len(start_str) + end = text.find(end_str, start) + if end >= 0: + return text[start:end] + +def generate_offline_threading_id() -> str: + """ + Generates an offline threading ID. + + Returns: + str: The generated offline threading ID. + """ + # Generate a random 64-bit integer + random_value = random.getrandbits(64) + + # Get the current timestamp in milliseconds + timestamp = int(time.time() * 1000) + + # Combine timestamp and random value + threading_id = (timestamp << 22) | (random_value & ((1 << 22) - 1)) + + return str(threading_id) diff --git a/g4f/Provider/needs_auth/MetaAIAccount.py b/g4f/Provider/needs_auth/MetaAIAccount.py new file mode 100644 index 00000000..2d54f3e0 --- /dev/null +++ b/g4f/Provider/needs_auth/MetaAIAccount.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +from ...typing import AsyncResult, Messages, Cookies +from ..helper import format_prompt, get_cookies +from ..MetaAI import MetaAI + +class MetaAIAccount(MetaAI): + needs_auth = True + parent = "MetaAI" + image_models = ["meta"] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + cookies: Cookies = None, + **kwargs + ) -> AsyncResult: + cookies = get_cookies(".meta.ai", True, True) if cookies is None else cookies + async for chunk in cls(proxy).prompt(format_prompt(messages), cookies): + yield chunk diff --git a/g4f/Provider/needs_auth/OpenRouter.py b/g4f/Provider/needs_auth/OpenRouter.py deleted file mode 100644 index 5e0bf336..00000000 --- a/g4f/Provider/needs_auth/OpenRouter.py +++ /dev/null @@ -1,32 +0,0 @@ -from __future__ import annotations - -import requests - -from .Openai import Openai -from ...typing import AsyncResult, Messages - -class OpenRouter(Openai): - label = "OpenRouter" - url = "https://openrouter.ai" - working = False - default_model = "mistralai/mistral-7b-instruct:free" - - @classmethod - def get_models(cls): - if not cls.models: - url = 'https://openrouter.ai/api/v1/models' - models = requests.get(url).json()["data"] - cls.models = [model['id'] for model in models] - return cls.models - - @classmethod - def create_async_generator( - cls, - model: str, - messages: Messages, - api_base: str = "https://openrouter.ai/api/v1", - **kwargs - ) -> AsyncResult: - return super().create_async_generator( - model, messages, api_base=api_base, **kwargs - ) diff --git a/g4f/Provider/needs_auth/Replicate.py b/g4f/Provider/needs_auth/Replicate.py new file mode 100644 index 00000000..ec993aa4 --- /dev/null +++ b/g4f/Provider/needs_auth/Replicate.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt, filter_none +from ...typing import AsyncResult, Messages +from ...requests import raise_for_status +from ...requests.aiohttp import StreamSession +from ...errors import ResponseError, MissingAuthError + +class 
Replicate(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://replicate.com" + working = True + needs_auth = True + default_model = "meta/meta-llama-3-70b-instruct" + model_aliases = { + "meta-llama/Meta-Llama-3-70B-Instruct": default_model + } + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + api_key: str = None, + proxy: str = None, + timeout: int = 180, + system_prompt: str = None, + max_new_tokens: int = None, + temperature: float = None, + top_p: float = None, + top_k: float = None, + stop: list = None, + extra_data: dict = {}, + headers: dict = { + "accept": "application/json", + }, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + if cls.needs_auth and api_key is None: + raise MissingAuthError("api_key is missing") + if api_key is not None: + headers["Authorization"] = f"Bearer {api_key}" + api_base = "https://api.replicate.com/v1/models/" + else: + api_base = "https://replicate.com/api/models/" + async with StreamSession( + proxy=proxy, + headers=headers, + timeout=timeout + ) as session: + data = { + "stream": True, + "input": { + "prompt": format_prompt(messages), + **filter_none( + system_prompt=system_prompt, + max_new_tokens=max_new_tokens, + temperature=temperature, + top_p=top_p, + top_k=top_k, + stop_sequences=",".join(stop) if stop else None + ), + **extra_data + }, + } + url = f"{api_base.rstrip('/')}/{model}/predictions" + async with session.post(url, json=data) as response: + message = "Model not found" if response.status == 404 else None + await raise_for_status(response, message) + result = await response.json() + if "id" not in result: + raise ResponseError(f"Invalid response: {result}") + async with session.get(result["urls"]["stream"], headers={"Accept": "text/event-stream"}) as response: + await raise_for_status(response) + event = None + async for line in response.iter_lines(): + if line.startswith(b"event: "): + event = line[7:] + if event == b"done": + break + elif event == b"output": + if line.startswith(b"data: "): + new_text = line[6:].decode() + if new_text: + yield new_text + else: + yield "\n" diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py index aa3547a5..0626a837 100644 --- a/g4f/Provider/needs_auth/__init__.py +++ b/g4f/Provider/needs_auth/__init__.py @@ -1,4 +1,7 @@ +from .gigachat import * + from .DeepInfra import DeepInfra +from .DeepInfraImage import DeepInfraImage from .Gemini import Gemini from .Raycast import Raycast from .Theb import Theb @@ -7,6 +10,9 @@ from .OpenaiChat import OpenaiChat from .Poe import Poe from .Openai import Openai from .Groq import Groq -from .OpenRouter import OpenRouter #from .OpenaiAccount import OpenaiAccount from .PerplexityApi import PerplexityApi +from .Replicate import Replicate +from .MetaAI import MetaAI +#from .MetaAIAccount import MetaAIAccount +from .HuggingFace import HuggingFace diff --git a/g4f/Provider/needs_auth/gigachat/GigaChat.py b/g4f/Provider/needs_auth/gigachat/GigaChat.py new file mode 100644 index 00000000..c9f1c011 --- /dev/null +++ b/g4f/Provider/needs_auth/gigachat/GigaChat.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +import os +import ssl +import time +import uuid + +import json +from aiohttp import ClientSession, TCPConnector, BaseConnector +from g4f.requests import raise_for_status + +from ....typing import AsyncResult, Messages +from ...base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ....errors import MissingAuthError +from ...helper import 
get_connector + +access_token = "" +token_expires_at = 0 + +class GigaChat(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://developers.sber.ru/gigachat" + working = True + supports_message_history = True + supports_system_message = True + supports_stream = True + needs_auth = True + default_model = "GigaChat:latest" + models = ["GigaChat:latest", "GigaChat-Plus", "GigaChat-Pro"] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + stream: bool = True, + proxy: str = None, + api_key: str = None, + connector: BaseConnector = None, + scope: str = "GIGACHAT_API_PERS", + update_interval: float = 0, + **kwargs + ) -> AsyncResult: + global access_token, token_expires_at + model = cls.get_model(model) + if not api_key: + raise MissingAuthError('Missing "api_key"') + + cafile = os.path.join(os.path.dirname(__file__), "russian_trusted_root_ca_pem.crt") + ssl_context = ssl.create_default_context(cafile=cafile) if os.path.exists(cafile) else None + if connector is None and ssl_context is not None: + connector = TCPConnector(ssl_context=ssl_context) + async with ClientSession(connector=get_connector(connector, proxy)) as session: + if token_expires_at - int(time.time() * 1000) < 60000: + async with session.post(url="https://ngw.devices.sberbank.ru:9443/api/v2/oauth", + headers={"Authorization": f"Bearer {api_key}", + "RqUID": str(uuid.uuid4()), + "Content-Type": "application/x-www-form-urlencoded"}, + data={"scope": scope}) as response: + await raise_for_status(response) + data = await response.json() + access_token = data['access_token'] + token_expires_at = data['expires_at'] + + async with session.post(url="https://gigachat.devices.sberbank.ru/api/v1/chat/completions", + headers={"Authorization": f"Bearer {access_token}"}, + json={ + "model": model, + "messages": messages, + "stream": stream, + "update_interval": update_interval, + **kwargs + }) as response: + await raise_for_status(response) + + async for line in response.content: + if not stream: + yield json.loads(line.decode("utf-8"))['choices'][0]['message']['content'] + return + + if line and line.startswith(b"data:"): + line = line[6:-1] # remove "data: " prefix and "\n" suffix + if line.strip() == b"[DONE]": + return + else: + msg = json.loads(line.decode("utf-8"))['choices'][0] + content = msg['delta']['content'] + + if content: + yield content + + if 'finish_reason' in msg: + return diff --git a/g4f/Provider/needs_auth/gigachat/__init__.py b/g4f/Provider/needs_auth/gigachat/__init__.py new file mode 100644 index 00000000..c9853742 --- /dev/null +++ b/g4f/Provider/needs_auth/gigachat/__init__.py @@ -0,0 +1,2 @@ +from .GigaChat import GigaChat + diff --git a/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt new file mode 100644 index 00000000..4c143a21 --- /dev/null +++ b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFwjCCA6qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwcDELMAkGA1UEBhMCUlUx +PzA9BgNVBAoMNlRoZSBNaW5pc3RyeSBvZiBEaWdpdGFsIERldmVsb3BtZW50IGFu +ZCBDb21tdW5pY2F0aW9uczEgMB4GA1UEAwwXUnVzc2lhbiBUcnVzdGVkIFJvb3Qg +Q0EwHhcNMjIwMzAxMjEwNDE1WhcNMzIwMjI3MjEwNDE1WjBwMQswCQYDVQQGEwJS +VTE/MD0GA1UECgw2VGhlIE1pbmlzdHJ5IG9mIERpZ2l0YWwgRGV2ZWxvcG1lbnQg +YW5kIENvbW11bmljYXRpb25zMSAwHgYDVQQDDBdSdXNzaWFuIFRydXN0ZWQgUm9v +dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMfFOZ8pUAL3+r2n +qqE0Zp52selXsKGFYoG0GM5bwz1bSFtCt+AZQMhkWQheI3poZAToYJu69pHLKS6Q 
+XBiwBC1cvzYmUYKMYZC7jE5YhEU2bSL0mX7NaMxMDmH2/NwuOVRj8OImVa5s1F4U +zn4Kv3PFlDBjjSjXKVY9kmjUBsXQrIHeaqmUIsPIlNWUnimXS0I0abExqkbdrXbX +YwCOXhOO2pDUx3ckmJlCMUGacUTnylyQW2VsJIyIGA8V0xzdaeUXg0VZ6ZmNUr5Y +Ber/EAOLPb8NYpsAhJe2mXjMB/J9HNsoFMBFJ0lLOT/+dQvjbdRZoOT8eqJpWnVD +U+QL/qEZnz57N88OWM3rabJkRNdU/Z7x5SFIM9FrqtN8xewsiBWBI0K6XFuOBOTD +4V08o4TzJ8+Ccq5XlCUW2L48pZNCYuBDfBh7FxkB7qDgGDiaftEkZZfApRg2E+M9 +G8wkNKTPLDc4wH0FDTijhgxR3Y4PiS1HL2Zhw7bD3CbslmEGgfnnZojNkJtcLeBH +BLa52/dSwNU4WWLubaYSiAmA9IUMX1/RpfpxOxd4Ykmhz97oFbUaDJFipIggx5sX +ePAlkTdWnv+RWBxlJwMQ25oEHmRguNYf4Zr/Rxr9cS93Y+mdXIZaBEE0KS2iLRqa +OiWBki9IMQU4phqPOBAaG7A+eP8PAgMBAAGjZjBkMB0GA1UdDgQWBBTh0YHlzlpf +BKrS6badZrHF+qwshzAfBgNVHSMEGDAWgBTh0YHlzlpfBKrS6badZrHF+qwshzAS +BgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsF +AAOCAgEAALIY1wkilt/urfEVM5vKzr6utOeDWCUczmWX/RX4ljpRdgF+5fAIS4vH +tmXkqpSCOVeWUrJV9QvZn6L227ZwuE15cWi8DCDal3Ue90WgAJJZMfTshN4OI8cq +W9E4EG9wglbEtMnObHlms8F3CHmrw3k6KmUkWGoa+/ENmcVl68u/cMRl1JbW2bM+ +/3A+SAg2c6iPDlehczKx2oa95QW0SkPPWGuNA/CE8CpyANIhu9XFrj3RQ3EqeRcS +AQQod1RNuHpfETLU/A2gMmvn/w/sx7TB3W5BPs6rprOA37tutPq9u6FTZOcG1Oqj +C/B7yTqgI7rbyvox7DEXoX7rIiEqyNNUguTk/u3SZ4VXE2kmxdmSh3TQvybfbnXV +4JbCZVaqiZraqc7oZMnRoWrXRG3ztbnbes/9qhRGI7PqXqeKJBztxRTEVj8ONs1d +WN5szTwaPIvhkhO3CO5ErU2rVdUr89wKpNXbBODFKRtgxUT70YpmJ46VVaqdAhOZ +D9EUUn4YaeLaS8AjSF/h7UkjOibNc4qVDiPP+rkehFWM66PVnP1Msh93tc+taIfC +EYVMxjh8zNbFuoc7fzvvrFILLe7ifvEIUqSVIC/AzplM/Jxw7buXFeGP1qVCBEHq +391d/9RAfaZ12zkwFsl+IKwE/OZxW8AHa9i1p4GO0YSNuczzEm4= +-----END CERTIFICATE----- \ No newline at end of file
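
For reference, a minimal usage sketch for one of the providers this patch adds (Replicate). This example is not part of the commit: it assumes a valid Replicate API token (the "r8_..." value below is a placeholder) and uses only the create_async_generator entry point defined above in g4f/Provider/needs_auth/Replicate.py, which yields text chunks and raises MissingAuthError when no api_key is supplied.

import asyncio

from g4f.Provider.needs_auth import Replicate

async def main() -> None:
    # Stream completion chunks from the provider's default model
    # ("meta/meta-llama-3-70b-instruct" per the class definition above).
    async for chunk in Replicate.create_async_generator(
        model="meta/meta-llama-3-70b-instruct",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
        api_key="r8_...",  # placeholder token, supply your own
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())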