summary | refs | log | tree | commit | diff | stats
path: root/g4f/Provider
diff options
context:
space:
mode:
author: H Lohaus <hlohaus@users.noreply.github.com> 2024-04-10 08:14:50 +0200
committer: GitHub <noreply@github.com> 2024-04-10 08:14:50 +0200
commit: 00951eb79114adf74ad1a3f1ce596e9e0fa932bf (patch)
tree: fea75e7745d69b09d91b0003e5dbf12b77380223 /g4f/Provider
parent: Update Dockerfile (diff)
download: gpt4free-00951eb79114adf74ad1a3f1ce596e9e0fa932bf.tar
gpt4free-00951eb79114adf74ad1a3f1ce596e9e0fa932bf.tar.gz
gpt4free-00951eb79114adf74ad1a3f1ce596e9e0fa932bf.tar.bz2
gpt4free-00951eb79114adf74ad1a3f1ce596e9e0fa932bf.tar.lz
gpt4free-00951eb79114adf74ad1a3f1ce596e9e0fa932bf.tar.xz
gpt4free-00951eb79114adf74ad1a3f1ce596e9e0fa932bf.tar.zst
gpt4free-00951eb79114adf74ad1a3f1ce596e9e0fa932bf.zip
Diffstat (limited to 'g4f/Provider')
-rw-r--r-- g4f/Provider/Bing.py                     2
-rw-r--r-- g4f/Provider/DeepInfraImage.py          74
-rw-r--r-- g4f/Provider/You.py                     11
-rw-r--r-- g4f/Provider/__init__.py                 1
-rw-r--r-- g4f/Provider/needs_auth/OpenRouter.py   31
-rw-r--r-- g4f/Provider/needs_auth/Openai.py       13
-rw-r--r-- g4f/Provider/needs_auth/OpenaiChat.py    2
-rw-r--r-- g4f/Provider/needs_auth/__init__.py      3
8 files changed, 120 insertions, 17 deletions
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index 1e462084..955717a2 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -47,7 +47,7 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
proxy: str = None,
timeout: int = 900,
api_key: str = None,
- cookies: Cookies = None,
+ cookies: Cookies = {},
connector: BaseConnector = None,
tone: str = None,
image: ImageType = None,
diff --git a/g4f/Provider/DeepInfraImage.py b/g4f/Provider/DeepInfraImage.py
new file mode 100644
index 00000000..6099b793
--- /dev/null
+++ b/g4f/Provider/DeepInfraImage.py
@@ -0,0 +1,74 @@
+from __future__ import annotations
+
+import requests
+
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..typing import AsyncResult, Messages
+from ..requests import StreamSession, raise_for_status
+from ..image import ImageResponse
+
+class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://deepinfra.com"
+ working = True
+ default_model = 'stability-ai/sdxl'
+
+ @classmethod
+ def get_models(cls):
+ if not cls.models:
+ url = 'https://api.deepinfra.com/models/featured'
+ models = requests.get(url).json()
+ cls.models = [model['model_name'] for model in models if model["reported_type"] == "text-to-image"]
+ return cls.models
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ **kwargs
+ ) -> AsyncResult:
+ yield await cls.create_async(messages[-1]["content"], model, **kwargs)
+
+ @classmethod
+ async def create_async(
+ cls,
+ prompt: str,
+ model: str,
+ api_key: str = None,
+ api_base: str = "https://api.deepinfra.com/v1/inference",
+ proxy: str = None,
+ timeout: int = 180,
+ extra_data: dict = {},
+ **kwargs
+ ) -> ImageResponse:
+ headers = {
+ 'Accept-Encoding': 'gzip, deflate, br',
+ 'Accept-Language': 'en-US',
+ 'Connection': 'keep-alive',
+ 'Origin': 'https://deepinfra.com',
+ 'Referer': 'https://deepinfra.com/',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-site',
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+ 'X-Deepinfra-Source': 'web-embed',
+ 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ }
+ if api_key is not None:
+ headers["Authorization"] = f"Bearer {api_key}"
+ async with StreamSession(
+ proxies={"all": proxy},
+ headers=headers,
+ timeout=timeout
+ ) as session:
+ model = cls.get_model(model)
+ data = {"prompt": prompt, **extra_data}
+ data = {"input": data} if model == cls.default_model else data
+ async with session.post(f"{api_base.rstrip('/')}/{model}", json=data) as response:
+ await raise_for_status(response)
+ data = await response.json()
+ images = data["output"] if "output" in data else data["images"]
+ images = images[0] if len(images) == 1 else images
+ return ImageResponse(images, prompt) \ No newline at end of file
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 6256cda9..cfa2c7bf 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -8,8 +8,9 @@ import uuid
from ..typing import AsyncResult, Messages, ImageType, Cookies
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
-from ..image import to_bytes, ImageResponse
+from ..image import ImageResponse, to_bytes, is_accepted_format
from ..requests import StreamSession, FormData, raise_for_status
+from ..errors import MissingRequirementsError
from .you.har_file import get_dfp_telemetry_id
@@ -46,6 +47,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
image: ImageType = None,
image_name: str = None,
proxy: str = None,
+ timeout: int = 240,
chat_mode: str = "default",
**kwargs,
) -> AsyncResult:
@@ -55,12 +57,14 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
...
elif model.startswith("dall-e"):
chat_mode = "create"
+ messages = [messages[-1]]
else:
chat_mode = "custom"
model = cls.get_model(model)
async with StreamSession(
proxies={"all": proxy},
- impersonate="chrome"
+ impersonate="chrome",
+ timeout=(30, timeout)
) as session:
cookies = await cls.get_cookies(session) if chat_mode != "default" else None
upload = json.dumps([await cls.upload_file(session, cookies, to_bytes(image), image_name)]) if image else ""
@@ -73,7 +77,6 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
"q": format_prompt(messages),
"domain": "youchat",
"selectedChatMode": chat_mode,
- #"chat": json.dumps(chat),
}
params = {
"userFiles": upload,
@@ -113,7 +116,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
await raise_for_status(response)
upload_nonce = await response.text()
data = FormData()
- data.add_field('file', file, filename=filename)
+ data.add_field('file', file, content_type=is_accepted_format(file), filename=filename)
async with client.post(
f"{cls.url}/api/upload",
data=data,
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index b567305c..7a39d023 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -21,6 +21,7 @@ from .ChatgptFree import ChatgptFree
from .ChatgptNext import ChatgptNext
from .ChatgptX import ChatgptX
from .DeepInfra import DeepInfra
+from .DeepInfraImage import DeepInfraImage
from .DuckDuckGo import DuckDuckGo
from .FlowGpt import FlowGpt
from .FreeChatgpt import FreeChatgpt
diff --git a/g4f/Provider/needs_auth/OpenRouter.py b/g4f/Provider/needs_auth/OpenRouter.py
new file mode 100644
index 00000000..e5f87076
--- /dev/null
+++ b/g4f/Provider/needs_auth/OpenRouter.py
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+import requests
+
+from .Openai import Openai
+from ...typing import AsyncResult, Messages
+
+class OpenRouter(Openai):
+ url = "https://openrouter.ai"
+ working = True
+ default_model = "openrouter/auto"
+
+ @classmethod
+ def get_models(cls):
+ if not cls.models:
+ url = 'https://openrouter.ai/api/v1/models'
+ models = requests.get(url).json()["data"]
+ cls.models = [model['id'] for model in models]
+ return cls.models
+
+ @classmethod
+ def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ api_base: str = "https://openrouter.ai/api/v1",
+ **kwargs
+ ) -> AsyncResult:
+ return super().create_async_generator(
+ model, messages, api_base=api_base, **kwargs
+ ) \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/Openai.py b/g4f/Provider/needs_auth/Openai.py
index 6cd2cf86..ea09e950 100644
--- a/g4f/Provider/needs_auth/Openai.py
+++ b/g4f/Provider/needs_auth/Openai.py
@@ -2,10 +2,10 @@ from __future__ import annotations
import json
+from ..helper import filter_none
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, FinishReason
from ...typing import Union, Optional, AsyncResult, Messages
-from ...requests.raise_for_status import raise_for_status
-from ...requests import StreamSession
+from ...requests import StreamSession, raise_for_status
from ...errors import MissingAuthError, ResponseError
class Openai(AsyncGeneratorProvider, ProviderModelMixin):
@@ -98,11 +98,4 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
else {}
),
**({} if headers is None else headers)
- }
-
-def filter_none(**kwargs) -> dict:
- return {
- key: value
- for key, value in kwargs.items()
- if value is not None
- } \ No newline at end of file
+ } \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 64e3aeac..7491725f 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -334,7 +334,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
RuntimeError: If an error occurs during processing.
"""
async with StreamSession(
- proxies={"https": proxy},
+ proxies={"all": proxy},
impersonate="chrome",
timeout=timeout
) as session:
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 581335e1..7b793223 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -5,4 +5,5 @@ from .ThebApi import ThebApi
from .OpenaiChat import OpenaiChat
from .Poe import Poe
from .Openai import Openai
-from .Groq import Groq \ No newline at end of file
+from .Groq import Groq
+from .OpenRouter import OpenRouter \ No newline at end of file