author    | H Lohaus <hlohaus@users.noreply.github.com> | 2024-04-07 11:27:26 +0200
committer | GitHub <noreply@github.com> | 2024-04-07 11:27:26 +0200
commit    | d327afc60620913f5d2b0a9985b03a7934468ad4 (patch)
tree      | 395de9142af3e6b9c0e5e3968ee7f8234b8b25e2 /g4f/Provider/DeepInfra.py
parent    | Update Gemini.py (diff)
parent    | Update provider.py (diff)
Diffstat (limited to 'g4f/Provider/DeepInfra.py')
-rw-r--r-- | g4f/Provider/DeepInfra.py | 70
1 file changed, 20 insertions(+), 50 deletions(-)
diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/DeepInfra.py
index 6cf52694..53c8d6b9 100644
--- a/g4f/Provider/DeepInfra.py
+++ b/g4f/Provider/DeepInfra.py
@@ -1,42 +1,41 @@
 from __future__ import annotations
-import json
 import requests
 from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..requests import StreamSession, raise_for_status
+from .needs_auth.Openai import Openai
 
-class DeepInfra(AsyncGeneratorProvider, ProviderModelMixin):
+class DeepInfra(Openai):
     url = "https://deepinfra.com"
     working = True
+    needs_auth = False
     supports_stream = True
     supports_message_history = True
     default_model = 'meta-llama/Llama-2-70b-chat-hf'
-
+
     @classmethod
     def get_models(cls):
         if not cls.models:
             url = 'https://api.deepinfra.com/models/featured'
             models = requests.get(url).json()
-            cls.models = [model['model_name'] for model in models]
+            cls.models = [model['model_name'] for model in models if model["type"] == "text-generation"]
         return cls.models
 
     @classmethod
-    async def create_async_generator(
+    def create_async_generator(
         cls,
         model: str,
         messages: Messages,
         stream: bool,
-        proxy: str = None,
-        timeout: int = 120,
-        auth: str = None,
+        api_base: str = "https://api.deepinfra.com/v1/openai",
+        temperature: float = 0.7,
+        max_tokens: int = 1028,
         **kwargs
     ) -> AsyncResult:
         headers = {
             'Accept-Encoding': 'gzip, deflate, br',
             'Accept-Language': 'en-US',
             'Connection': 'keep-alive',
-            'Content-Type': 'application/json',
+            'Content-Type': None,
             'Origin': 'https://deepinfra.com',
             'Referer': 'https://deepinfra.com/',
             'Sec-Fetch-Dest': 'empty',
@@ -44,46 +43,17 @@ class DeepInfra(AsyncGeneratorProvider, ProviderModelMixin):
             'Sec-Fetch-Site': 'same-site',
             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
             'X-Deepinfra-Source': 'web-embed',
-            'accept': 'text/event-stream',
+            'Accept': None,
             'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
             'sec-ch-ua-mobile': '?0',
             'sec-ch-ua-platform': '"macOS"',
         }
-        if auth:
-            headers['Authorization'] = f"bearer {auth}"
-
-        async with StreamSession(headers=headers,
-            timeout=timeout,
-            proxies={"https": proxy},
-            impersonate="chrome110"
-        ) as session:
-            json_data = {
-                'model' : cls.get_model(model),
-                'messages': messages,
-                'temperature': kwargs.get("temperature", 0.7),
-                'max_tokens': kwargs.get("max_tokens", 512),
-                'stop': kwargs.get("stop", []),
-                'stream' : True
-            }
-            async with session.post('https://api.deepinfra.com/v1/openai/chat/completions',
-                json=json_data) as response:
-                await raise_for_status(response)
-                first = True
-                async for line in response.iter_lines():
-                    if not line.startswith(b"data: "):
-                        continue
-                    try:
-                        json_line = json.loads(line[6:])
-                        choices = json_line.get("choices", [{}])
-                        finish_reason = choices[0].get("finish_reason")
-                        if finish_reason:
-                            break
-                        token = choices[0].get("delta", {}).get("content")
-                        if token:
-                            if first:
-                                token = token.lstrip()
-                            if token:
-                                first = False
-                                yield token
-                    except Exception:
-                        raise RuntimeError(f"Response: {line}")
+        return super().create_async_generator(
+            model, messages,
+            stream=stream,
+            api_base=api_base,
+            temperature=temperature,
+            max_tokens=max_tokens,
+            headers=headers,
+            **kwargs
+        )
\ No newline at end of file
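
In short, the patch drops DeepInfra's hand-rolled StreamSession/SSE parsing and reuses the OpenAI-compatible Openai base provider against api_base https://api.deepinfra.com/v1/openai, while get_models() now keeps only models tagged "text-generation". A minimal sketch of how the refactored provider might be driven directly, assuming the provider interface at this commit (the message content is illustrative, and the isinstance guard is an assumption about what the base class may yield besides text):

import asyncio

from g4f.Provider import DeepInfra

async def main() -> None:
    # get_models() caches the featured-model list and, after this commit,
    # filters it to entries whose "type" is "text-generation".
    print(DeepInfra.get_models()[:5])

    # create_async_generator() is now a plain classmethod: it returns the
    # async generator built by the Openai base class, so it is still
    # consumed with `async for`.
    async for chunk in DeepInfra.create_async_generator(
        model=DeepInfra.default_model,
        messages=[{"role": "user", "content": "Say hello."}],
        stream=True,
    ):
        # Assumption: the base class may also yield non-text sentinels
        # (e.g. finish markers), so only print string chunks.
        if isinstance(chunk, str):
            print(chunk, end="", flush=True)

asyncio.run(main())

Because the class sets needs_auth = False, no API key is passed on this path; the browser-like headers (including X-Deepinfra-Source: web-embed) make the request resemble DeepInfra's own web embed.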