path: root/g4f
author    H Lohaus <hlohaus@users.noreply.github.com>    2024-04-07 11:27:26 +0200
committer GitHub <noreply@github.com>    2024-04-07 11:27:26 +0200
commit    d327afc60620913f5d2b0a9985b03a7934468ad4 (patch)
tree      395de9142af3e6b9c0e5e3968ee7f8234b8b25e2 /g4f
parent    Update Gemini.py (diff)
parent    Update provider.py (diff)
Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/Aura.py | 2
-rw-r--r--  g4f/Provider/DeepInfra.py | 70
-rw-r--r--  g4f/Provider/GeminiPro.py | 2
-rw-r--r--  g4f/Provider/Local.py | 42
-rw-r--r--  g4f/Provider/You.py | 8
-rw-r--r--  g4f/Provider/__init__.py | 1
-rw-r--r--  g4f/Provider/deprecated/OpenAssistant.py (renamed from g4f/Provider/needs_auth/OpenAssistant.py) | 1
-rw-r--r--  g4f/Provider/deprecated/__init__.py | 3
-rw-r--r--  g4f/Provider/needs_auth/Gemini.py | 2
-rw-r--r--  g4f/Provider/needs_auth/Openai.py | 96
-rw-r--r--  g4f/Provider/needs_auth/ThebApi.py | 57
-rw-r--r--  g4f/Provider/needs_auth/__init__.py | 1
-rw-r--r--  g4f/Provider/you/__init__.py | 0
-rw-r--r--  g4f/Provider/you/har_file.py | 72
-rw-r--r--  g4f/__init__.py | 126
-rw-r--r--  g4f/api/__init__.py | 28
-rw-r--r--  g4f/client/__init__.py | 3
-rw-r--r--  g4f/client/async_client.py (renamed from g4f/client/async.py) | 104
-rw-r--r--  g4f/client/client.py (renamed from g4f/client.py) | 114
-rw-r--r--  g4f/client/helper.py | 20
-rw-r--r--  g4f/client/image_models.py | 10
-rw-r--r--  g4f/client/service.py | 114
-rw-r--r--  g4f/client/types.py | 12
-rw-r--r--  g4f/gui/__init__.py | 8
-rw-r--r--  g4f/gui/client/index.html | 38
-rw-r--r--  g4f/gui/client/static/css/style.css | 36
-rw-r--r--  g4f/gui/server/api.py | 8
-rw-r--r--  g4f/gui/webview.py | 2
-rw-r--r--  g4f/local/__init__.py | 21
-rw-r--r--  g4f/local/_engine.py | 42
-rw-r--r--  g4f/local/_models.py | 86
-rw-r--r--  g4f/local/models/model-here | 1
-rw-r--r--  g4f/locals/__init__.py | 0
-rw-r--r--  g4f/locals/models.py | 51
-rw-r--r--  g4f/locals/provider.py | 74
-rw-r--r--  g4f/providers/types.py | 1
36 files changed, 692 insertions, 564 deletions
diff --git a/g4f/Provider/Aura.py b/g4f/Provider/Aura.py
index 4501df2c..7e2b2831 100644
--- a/g4f/Provider/Aura.py
+++ b/g4f/Provider/Aura.py
@@ -18,7 +18,7 @@ class Aura(AsyncGeneratorProvider):
messages: Messages,
proxy: str = None,
temperature: float = 0.5,
- max_tokens: int = 8192.
+ max_tokens: int = 8192,
webdriver: WebDriver = None,
**kwargs
) -> AsyncResult:
diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/DeepInfra.py
index 6cf52694..53c8d6b9 100644
--- a/g4f/Provider/DeepInfra.py
+++ b/g4f/Provider/DeepInfra.py
@@ -1,42 +1,41 @@
from __future__ import annotations
-import json
import requests
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..requests import StreamSession, raise_for_status
+from .needs_auth.Openai import Openai
-class DeepInfra(AsyncGeneratorProvider, ProviderModelMixin):
+class DeepInfra(Openai):
url = "https://deepinfra.com"
working = True
+ needs_auth = False
supports_stream = True
supports_message_history = True
default_model = 'meta-llama/Llama-2-70b-chat-hf'
-
+
@classmethod
def get_models(cls):
if not cls.models:
url = 'https://api.deepinfra.com/models/featured'
models = requests.get(url).json()
- cls.models = [model['model_name'] for model in models]
+ cls.models = [model['model_name'] for model in models if model["type"] == "text-generation"]
return cls.models
@classmethod
- async def create_async_generator(
+ def create_async_generator(
cls,
model: str,
messages: Messages,
stream: bool,
- proxy: str = None,
- timeout: int = 120,
- auth: str = None,
+ api_base: str = "https://api.deepinfra.com/v1/openai",
+ temperature: float = 0.7,
+ max_tokens: int = 1028,
**kwargs
) -> AsyncResult:
headers = {
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US',
'Connection': 'keep-alive',
- 'Content-Type': 'application/json',
+ 'Content-Type': None,
'Origin': 'https://deepinfra.com',
'Referer': 'https://deepinfra.com/',
'Sec-Fetch-Dest': 'empty',
@@ -44,46 +43,17 @@ class DeepInfra(AsyncGeneratorProvider, ProviderModelMixin):
'Sec-Fetch-Site': 'same-site',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
'X-Deepinfra-Source': 'web-embed',
- 'accept': 'text/event-stream',
+ 'Accept': None,
'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
}
- if auth:
- headers['Authorization'] = f"bearer {auth}"
-
- async with StreamSession(headers=headers,
- timeout=timeout,
- proxies={"https": proxy},
- impersonate="chrome110"
- ) as session:
- json_data = {
- 'model' : cls.get_model(model),
- 'messages': messages,
- 'temperature': kwargs.get("temperature", 0.7),
- 'max_tokens': kwargs.get("max_tokens", 512),
- 'stop': kwargs.get("stop", []),
- 'stream' : True
- }
- async with session.post('https://api.deepinfra.com/v1/openai/chat/completions',
- json=json_data) as response:
- await raise_for_status(response)
- first = True
- async for line in response.iter_lines():
- if not line.startswith(b"data: "):
- continue
- try:
- json_line = json.loads(line[6:])
- choices = json_line.get("choices", [{}])
- finish_reason = choices[0].get("finish_reason")
- if finish_reason:
- break
- token = choices[0].get("delta", {}).get("content")
- if token:
- if first:
- token = token.lstrip()
- if token:
- first = False
- yield token
- except Exception:
- raise RuntimeError(f"Response: {line}")
+ return super().create_async_generator(
+ model, messages,
+ stream=stream,
+ api_base=api_base,
+ temperature=temperature,
+ max_tokens=max_tokens,
+ headers=headers,
+ **kwargs
+ ) \ No newline at end of file
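
The DeepInfra provider is reduced to a thin subclass of the OpenAI-compatible Openai provider, pointed at https://api.deepinfra.com/v1/openai and keeping only the browser-style headers. A minimal usage sketch, assuming the top-level g4f.ChatCompletion.create entry point keeps its usual signature and the default Llama-2 model listed above is still featured:

    import g4f
    from g4f.Provider import DeepInfra

    # Streams tokens through the refactored OpenAI-compatible code path.
    response = g4f.ChatCompletion.create(
        model="meta-llama/Llama-2-70b-chat-hf",
        messages=[{"role": "user", "content": "Say hello"}],
        provider=DeepInfra,
        stream=True,
    )
    for chunk in response:
        print(chunk, end="")
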
diff --git a/g4f/Provider/GeminiPro.py b/g4f/Provider/GeminiPro.py
index 4378a18c..214b7383 100644
--- a/g4f/Provider/GeminiPro.py
+++ b/g4f/Provider/GeminiPro.py
@@ -76,7 +76,7 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
if not response.ok:
data = await response.json()
data = data[0] if isinstance(data, list) else data
- raise RuntimeError(f"Response {response.status}: {data["error"]["message"]}")
+ raise RuntimeError(f"Response {response.status}: {data['error']['message']}")
if stream:
lines = []
async for chunk in response.content:
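
The GeminiPro change only swaps the quote style inside the f-string: reusing double quotes inside a double-quoted f-string is a SyntaxError before Python 3.12. A short sketch with a hypothetical error payload:

    data = {"error": {"message": "quota exceeded"}}  # hypothetical payload
    # SyntaxError on Python < 3.12 (the inner double quotes close the f-string):
    #   f"Response 429: {data["error"]["message"]}"
    # Portable form used by the patch:
    message = f"Response 429: {data['error']['message']}"
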
diff --git a/g4f/Provider/Local.py b/g4f/Provider/Local.py
new file mode 100644
index 00000000..b4d096de
--- /dev/null
+++ b/g4f/Provider/Local.py
@@ -0,0 +1,42 @@
+from __future__ import annotations
+
+from ..locals.models import get_models
+try:
+ from ..locals.provider import LocalProvider
+ has_requirements = True
+except ModuleNotFoundError:
+ has_requirements = False
+
+from ..typing import Messages, CreateResult
+from ..providers.base_provider import AbstractProvider, ProviderModelMixin
+from ..errors import MissingRequirementsError
+
+class Local(AbstractProvider, ProviderModelMixin):
+ working = True
+ supports_message_history = True
+ supports_system_message = True
+ supports_stream = True
+
+ @classmethod
+ def get_models(cls):
+ if not cls.models:
+ cls.models = list(get_models())
+ cls.default_model = cls.models[0]
+ return cls.models
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ **kwargs
+ ) -> CreateResult:
+ if not has_requirements:
+ raise MissingRequirementsError('Install "gpt4all" package | pip install -U g4f[local]')
+ return LocalProvider.create_completion(
+ cls.get_model(model),
+ messages,
+ stream,
+ **kwargs
+ ) \ No newline at end of file
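
The new Local provider exposes the gpt4all-backed LocalProvider behind the standard provider interface and raises MissingRequirementsError when the optional dependency is missing. A minimal sketch, assuming the g4f[local] extra is installed and get_models() reports at least one downloaded model:

    from g4f.Provider import Local

    # First reported local model; pick any name returned by Local.get_models().
    model = Local.get_models()[0]
    for token in Local.create_completion(
        model,
        [{"role": "user", "content": "Hello"}],
        stream=True,
    ):
        print(token, end="")
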
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 9b040367..231f953f 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -17,6 +17,8 @@ from ..image import to_bytes, ImageResponse
from ..requests import StreamSession, raise_for_status
from ..errors import MissingRequirementsError
+from .you.har_file import get_dfp_telemetry_id
+
class You(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://you.com"
working = True
@@ -45,6 +47,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
cls,
model: str,
messages: Messages,
+ stream: bool = True,
image: ImageType = None,
image_name: str = None,
proxy: str = None,
@@ -56,7 +59,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
if image is not None:
chat_mode = "agent"
elif not model or model == cls.default_model:
- chat_mode = "default"
+ ...
elif model.startswith("dall-e"):
chat_mode = "create"
else:
@@ -108,7 +111,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
data = json.loads(line[6:])
if event == "youChatToken" and event in data:
yield data[event]
- elif event == "youChatUpdate" and "t" in data:
+ elif event == "youChatUpdate" and "t" in data and data["t"] is not None:
match = re.search(r"!\[fig\]\((.+?)\)", data["t"])
if match:
yield ImageResponse(match.group(1), messages[-1]["content"])
@@ -177,6 +180,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
"X-SDK-Parent-Host": cls.url
},
json={
+ "dfp_telemetry_id": await get_dfp_telemetry_id(),
"email": f"{user_uuid}@gmail.com",
"password": f"{user_uuid}#{user_uuid}",
"session_duration_minutes": 129600
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 50a5da31..1db29e19 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -33,6 +33,7 @@ from .HuggingFace import HuggingFace
from .Koala import Koala
from .Liaobots import Liaobots
from .Llama2 import Llama2
+from .Local import Local
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Vercel import Vercel
diff --git a/g4f/Provider/needs_auth/OpenAssistant.py b/g4f/Provider/deprecated/OpenAssistant.py
index e549b517..80cae3c2 100644
--- a/g4f/Provider/needs_auth/OpenAssistant.py
+++ b/g4f/Provider/deprecated/OpenAssistant.py
@@ -8,7 +8,6 @@ from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt, get_cookies
-
class OpenAssistant(AsyncGeneratorProvider):
url = "https://open-assistant.io/chat"
needs_auth = True
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index f6b4a1d9..408f3913 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -31,4 +31,5 @@ from .FakeGpt import FakeGpt
from .GeekGpt import GeekGpt
from .GPTalk import GPTalk
from .Hashnode import Hashnode
-from .Ylokh import Ylokh \ No newline at end of file
+from .Ylokh import Ylokh
+from .OpenAssistant import OpenAssistant \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index 9013a4f8..fc9d9575 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -19,7 +19,7 @@ except ImportError:
from ...typing import Messages, Cookies, ImageType, AsyncResult
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt, get_cookies
-from requests.raise_for_status import raise_for_status
+from ...requests.raise_for_status import raise_for_status
from ...errors import MissingAuthError, MissingRequirementsError
from ...image import to_bytes, ImageResponse
from ...webdriver import get_browser, get_driver_cookies
diff --git a/g4f/Provider/needs_auth/Openai.py b/g4f/Provider/needs_auth/Openai.py
index b876cd0b..6cd2cf86 100644
--- a/g4f/Provider/needs_auth/Openai.py
+++ b/g4f/Provider/needs_auth/Openai.py
@@ -3,10 +3,10 @@ from __future__ import annotations
import json
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, FinishReason
-from ...typing import AsyncResult, Messages
+from ...typing import Union, Optional, AsyncResult, Messages
from ...requests.raise_for_status import raise_for_status
from ...requests import StreamSession
-from ...errors import MissingAuthError
+from ...errors import MissingAuthError, ResponseError
class Openai(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://openai.com"
@@ -27,48 +27,82 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
temperature: float = None,
max_tokens: int = None,
top_p: float = None,
- stop: str = None,
+ stop: Union[str, list[str]] = None,
stream: bool = False,
+ headers: dict = None,
+ extra_data: dict = {},
**kwargs
) -> AsyncResult:
- if api_key is None:
+ if cls.needs_auth and api_key is None:
raise MissingAuthError('Add a "api_key"')
async with StreamSession(
proxies={"all": proxy},
- headers=cls.get_headers(api_key),
+ headers=cls.get_headers(stream, api_key, headers),
timeout=timeout
) as session:
- data = {
- "messages": messages,
- "model": cls.get_model(model),
- "temperature": temperature,
- "max_tokens": max_tokens,
- "top_p": top_p,
- "stop": stop,
- "stream": stream,
- }
+ data = filter_none(
+ messages=messages,
+ model=cls.get_model(model),
+ temperature=temperature,
+ max_tokens=max_tokens,
+ top_p=top_p,
+ stop=stop,
+ stream=stream,
+ **extra_data
+ )
async with session.post(f"{api_base.rstrip('/')}/chat/completions", json=data) as response:
await raise_for_status(response)
- async for line in response.iter_lines():
- if line.startswith(b"data: ") or not stream:
- async for chunk in cls.read_line(line[6:] if stream else line, stream):
- yield chunk
+ if not stream:
+ data = await response.json()
+ choice = data["choices"][0]
+ if "content" in choice["message"]:
+ yield choice["message"]["content"].strip()
+ finish = cls.read_finish_reason(choice)
+ if finish is not None:
+ yield finish
+ else:
+ first = True
+ async for line in response.iter_lines():
+ if line.startswith(b"data: "):
+ chunk = line[6:]
+ if chunk == b"[DONE]":
+ break
+ data = json.loads(chunk)
+ if "error_message" in data:
+ raise ResponseError(data["error_message"])
+ choice = data["choices"][0]
+ if "content" in choice["delta"] and choice["delta"]["content"]:
+ delta = choice["delta"]["content"]
+ if first:
+ delta = delta.lstrip()
+ if delta:
+ first = False
+ yield delta
+ finish = cls.read_finish_reason(choice)
+ if finish is not None:
+ yield finish
@staticmethod
- async def read_line(line: str, stream: bool):
- if line == b"[DONE]":
- return
- choice = json.loads(line)["choices"][0]
- if stream and "content" in choice["delta"] and choice["delta"]["content"]:
- yield choice["delta"]["content"]
- elif not stream and "content" in choice["message"]:
- yield choice["message"]["content"]
+ def read_finish_reason(choice: dict) -> Optional[FinishReason]:
if "finish_reason" in choice and choice["finish_reason"] is not None:
- yield FinishReason(choice["finish_reason"])
+ return FinishReason(choice["finish_reason"])
- @staticmethod
- def get_headers(api_key: str) -> dict:
+ @classmethod
+ def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
return {
- "Authorization": f"Bearer {api_key}",
+ "Accept": "text/event-stream" if stream else "application/json",
"Content-Type": "application/json",
- } \ No newline at end of file
+ **(
+ {"Authorization": f"Bearer {api_key}"}
+ if cls.needs_auth and api_key is not None
+ else {}
+ ),
+ **({} if headers is None else headers)
+ }
+
+def filter_none(**kwargs) -> dict:
+ return {
+ key: value
+ for key, value in kwargs.items()
+ if value is not None
+ } \ No newline at end of file
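
The rewritten Openai provider builds its JSON body with the new filter_none helper, so unset options such as temperature or stop are omitted from the /chat/completions payload instead of being sent as null. A small sketch of the helper's effect, copied from the definition above:

    def filter_none(**kwargs) -> dict:
        return {key: value for key, value in kwargs.items() if value is not None}

    # Only explicitly set options reach the request body.
    print(filter_none(temperature=None, max_tokens=100, stop=None, stream=True))
    # -> {'max_tokens': 100, 'stream': True}
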
diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py
index 1c7baf8d..48879bcb 100644
--- a/g4f/Provider/needs_auth/ThebApi.py
+++ b/g4f/Provider/needs_auth/ThebApi.py
@@ -1,10 +1,7 @@
from __future__ import annotations
-import requests
-
-from ...typing import Any, CreateResult, Messages
-from ..base_provider import AbstractProvider, ProviderModelMixin
-from ...errors import MissingAuthError
+from ...typing import CreateResult, Messages
+from .Openai import Openai
models = {
"theb-ai": "TheB.AI",
@@ -30,7 +27,7 @@ models = {
"qwen-7b-chat": "Qwen 7B"
}
-class ThebApi(AbstractProvider, ProviderModelMixin):
+class ThebApi(Openai):
url = "https://theb.ai"
working = True
needs_auth = True
@@ -38,44 +35,26 @@ class ThebApi(AbstractProvider, ProviderModelMixin):
models = list(models)
@classmethod
- def create_completion(
+ def create_async_generator(
cls,
model: str,
messages: Messages,
- stream: bool,
- auth: str = None,
- proxy: str = None,
+ api_base: str = "https://api.theb.ai/v1",
+ temperature: float = 1,
+ top_p: float = 1,
**kwargs
) -> CreateResult:
- if not auth:
- raise MissingAuthError("Missing auth")
- headers = {
- 'accept': 'application/json',
- 'authorization': f'Bearer {auth}',
- 'content-type': 'application/json',
- }
- # response = requests.get("https://api.baizhi.ai/v1/models", headers=headers).json()["data"]
- # models = dict([(m["id"], m["name"]) for m in response])
- # print(json.dumps(models, indent=4))
- data: dict[str, Any] = {
- "model": cls.get_model(model),
- "messages": messages,
- "stream": False,
+ if "auth" in kwargs:
+ kwargs["api_key"] = kwargs["auth"]
+ system_message = "\n".join([message["content"] for message in messages if message["role"] == "system"])
+ if not system_message:
+ system_message = "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture."
+ messages = [message for message in messages if message["role"] != "system"]
+ data = {
"model_params": {
- "system_prompt": kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture."),
- "temperature": 1,
- "top_p": 1,
- **kwargs
+ "system_prompt": system_message,
+ "temperature": temperature,
+ "top_p": top_p,
}
}
- response = requests.post(
- "https://api.theb.ai/v1/chat/completions",
- headers=headers,
- json=data,
- proxies={"https": proxy}
- )
- try:
- response.raise_for_status()
- yield response.json()["choices"][0]["message"]["content"]
- except:
- raise RuntimeError(f"Response: {next(response.iter_lines()).decode()}") \ No newline at end of file
+ return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs) \ No newline at end of file
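
ThebApi now collects system messages into model_params["system_prompt"] and delegates the request itself to Openai.create_async_generator. A sketch of the message handling mirrored from the patch, with hypothetical message content:

    messages = [
        {"role": "system", "content": "You are terse."},  # hypothetical input
        {"role": "user", "content": "Hi"},
    ]
    system_message = "\n".join(m["content"] for m in messages if m["role"] == "system")
    messages = [m for m in messages if m["role"] != "system"]
    data = {"model_params": {"system_prompt": system_message, "temperature": 1, "top_p": 1}}
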
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 92fa165b..581335e1 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -3,7 +3,6 @@ from .Raycast import Raycast
from .Theb import Theb
from .ThebApi import ThebApi
from .OpenaiChat import OpenaiChat
-from .OpenAssistant import OpenAssistant
from .Poe import Poe
from .Openai import Openai
from .Groq import Groq \ No newline at end of file
diff --git a/g4f/Provider/you/__init__.py b/g4f/Provider/you/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/g4f/Provider/you/__init__.py
diff --git a/g4f/Provider/you/har_file.py b/g4f/Provider/you/har_file.py
new file mode 100644
index 00000000..281f37e2
--- /dev/null
+++ b/g4f/Provider/you/har_file.py
@@ -0,0 +1,72 @@
+from __future__ import annotations
+
+import json
+import os
+import random
+import uuid
+
+from ...requests import StreamSession, raise_for_status
+
+class NoValidHarFileError(Exception):
+ ...
+
+class arkReq:
+ def __init__(self, arkURL, arkHeaders, arkBody, arkCookies, userAgent):
+ self.arkURL = arkURL
+ self.arkHeaders = arkHeaders
+ self.arkBody = arkBody
+ self.arkCookies = arkCookies
+ self.userAgent = userAgent
+
+arkPreURL = "https://telemetry.stytch.com/submit"
+chatArks: list = None
+
+def readHAR():
+ dirPath = "./"
+ harPath = []
+ chatArks = []
+ for root, dirs, files in os.walk(dirPath):
+ for file in files:
+ if file.endswith(".har"):
+ harPath.append(os.path.join(root, file))
+ if harPath:
+ break
+ if not harPath:
+ raise NoValidHarFileError("No .har file found")
+ for path in harPath:
+ with open(path, 'rb') as file:
+ try:
+ harFile = json.load(file)
+ except json.JSONDecodeError:
+ # Error: not a HAR file!
+ continue
+ for v in harFile['log']['entries']:
+ if arkPreURL in v['request']['url']:
+ chatArks.append(parseHAREntry(v))
+ if not chatArks:
+ raise NoValidHarFileError("No telemetry in .har files found")
+ return chatArks
+
+def parseHAREntry(entry) -> arkReq:
+ tmpArk = arkReq(
+ arkURL=entry['request']['url'],
+ arkHeaders={h['name'].lower(): h['value'] for h in entry['request']['headers'] if h['name'].lower() not in ['content-length', 'cookie'] and not h['name'].startswith(':')},
+ arkBody=entry['request']['postData']['text'],
+ arkCookies={c['name']: c['value'] for c in entry['request']['cookies']},
+ userAgent=""
+ )
+ tmpArk.userAgent = tmpArk.arkHeaders.get('user-agent', '')
+ return tmpArk
+
+async def sendRequest(tmpArk: arkReq, proxy: str = None):
+ async with StreamSession(headers=tmpArk.arkHeaders, cookies=tmpArk.arkCookies, proxies={"all": proxy}) as session:
+ async with session.post(tmpArk.arkURL, data=tmpArk.arkBody) as response:
+ await raise_for_status(response)
+ return await response.text()
+
+async def get_dfp_telemetry_id(proxy: str = None):
+ return str(uuid.uuid4())
+ global chatArks
+ if chatArks is None:
+ chatArks = readHAR()
+ return await sendRequest(random.choice(chatArks), proxy) \ No newline at end of file
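
Note that get_dfp_telemetry_id returns the random UUID before the HAR lookup runs, so readHAR and sendRequest are effectively unreachable in this revision. A minimal sketch of the call as the You provider uses it:

    import asyncio
    from g4f.Provider.you.har_file import get_dfp_telemetry_id

    # Currently yields a fresh UUID string; no .har file is needed for the signup payload.
    print(asyncio.run(get_dfp_telemetry_id()))
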
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 14405e92..017eb2e6 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -2,95 +2,14 @@ from __future__ import annotations
import os
-from .errors import *
-from .models import Model, ModelUtils
-from .Provider import AsyncGeneratorProvider, ProviderUtils
-from .typing import Messages, CreateResult, AsyncResult, Union
-from .cookies import get_cookies, set_cookies
-from . import debug, version
-from .providers.types import BaseRetryProvider, ProviderType
-from .providers.base_provider import ProviderModelMixin
-from .providers.retry_provider import IterProvider
-
-def get_model_and_provider(model : Union[Model, str],
- provider : Union[ProviderType, str, None],
- stream : bool,
- ignored : list[str] = None,
- ignore_working: bool = False,
- ignore_stream: bool = False,
- **kwargs) -> tuple[str, ProviderType]:
- """
- Retrieves the model and provider based on input parameters.
-
- Args:
- model (Union[Model, str]): The model to use, either as an object or a string identifier.
- provider (Union[ProviderType, str, None]): The provider to use, either as an object, a string identifier, or None.
- stream (bool): Indicates if the operation should be performed as a stream.
- ignored (list[str], optional): List of provider names to be ignored.
- ignore_working (bool, optional): If True, ignores the working status of the provider.
- ignore_stream (bool, optional): If True, ignores the streaming capability of the provider.
-
- Returns:
- tuple[str, ProviderType]: A tuple containing the model name and the provider type.
-
- Raises:
- ProviderNotFoundError: If the provider is not found.
- ModelNotFoundError: If the model is not found.
- ProviderNotWorkingError: If the provider is not working.
- StreamNotSupportedError: If streaming is not supported by the provider.
- """
- if debug.version_check:
- debug.version_check = False
- version.utils.check_version()
-
- if isinstance(provider, str):
- if " " in provider:
- provider_list = [ProviderUtils.convert[p] for p in provider.split() if p in ProviderUtils.convert]
- if not provider_list:
- raise ProviderNotFoundError(f'Providers not found: {provider}')
- provider = IterProvider(provider_list)
- elif provider in ProviderUtils.convert:
- provider = ProviderUtils.convert[provider]
- elif provider:
- raise ProviderNotFoundError(f'Provider not found: {provider}')
-
- if isinstance(model, str):
- if model in ModelUtils.convert:
- model = ModelUtils.convert[model]
-
- if not provider:
- if isinstance(model, str):
- raise ModelNotFoundError(f'Model not found: {model}')
- provider = model.best_provider
-
- if not provider:
- raise ProviderNotFoundError(f'No provider found for model: {model}')
-
- if isinstance(model, Model):
- model = model.name
-
- if not ignore_working and not provider.working:
- raise ProviderNotWorkingError(f'{provider.__name__} is not working')
-
- if not ignore_working and isinstance(provider, BaseRetryProvider):
- provider.providers = [p for p in provider.providers if p.working]
-
- if ignored and isinstance(provider, BaseRetryProvider):
- provider.providers = [p for p in provider.providers if p.__name__ not in ignored]
-
- if not ignore_stream and not provider.supports_stream and stream:
- raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument')
-
- if debug.logging:
- if model:
- print(f'Using {provider.__name__} provider and {model} model')
- else:
- print(f'Using {provider.__name__} provider')
-
- debug.last_provider = provider
- debug.last_model = model
-
- return model, provider
+from . import debug, version
+from .models import Model
+from .typing import Messages, CreateResult, AsyncResult, Union
+from .errors import StreamNotSupportedError, ModelNotAllowedError
+from .cookies import get_cookies, set_cookies
+from .providers.types import ProviderType
+from .providers.base_provider import AsyncGeneratorProvider
+from .client.service import get_model_and_provider, get_last_provider
class ChatCompletion:
@staticmethod
@@ -134,7 +53,7 @@ class ChatCompletion:
ignore_stream or kwargs.get("ignore_stream_and_auth")
)
- if auth:
+ if auth is not None:
kwargs['auth'] = auth
if "proxy" not in kwargs:
@@ -154,6 +73,7 @@ class ChatCompletion:
provider : Union[ProviderType, str, None] = None,
stream : bool = False,
ignored : list[str] = None,
+ ignore_working: bool = False,
patch_provider: callable = None,
**kwargs) -> Union[AsyncResult, str]:
"""
@@ -174,7 +94,7 @@ class ChatCompletion:
Raises:
StreamNotSupportedError: If streaming is requested but not supported by the provider.
"""
- model, provider = get_model_and_provider(model, provider, False, ignored)
+ model, provider = get_model_and_provider(model, provider, False, ignored, ignore_working)
if stream:
if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
@@ -225,26 +145,4 @@ class Completion:
result = provider.create_completion(model, [{"role": "user", "content": prompt}], stream, **kwargs)
- return result if stream else ''.join(result)
-
-def get_last_provider(as_dict: bool = False) -> Union[ProviderType, dict[str, str]]:
- """
- Retrieves the last used provider.
-
- Args:
- as_dict (bool, optional): If True, returns the provider information as a dictionary.
-
- Returns:
- Union[ProviderType, dict[str, str]]: The last used provider, either as an object or a dictionary.
- """
- last = debug.last_provider
- if isinstance(last, BaseRetryProvider):
- last = last.last_provider
- if last and as_dict:
- return {
- "name": last.__name__,
- "url": last.url,
- "model": debug.last_model,
- "models": last.models if isinstance(last, ProviderModelMixin) else []
- }
- return last \ No newline at end of file
+ return result if stream else ''.join(result) \ No newline at end of file
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index b5af629e..579090fe 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -3,10 +3,13 @@ import json
import uvicorn
import nest_asyncio
-from fastapi import FastAPI, Response, Request
+from fastapi import FastAPI, Response, Request
from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse
-from pydantic import BaseModel
-from typing import List, Union
+from fastapi.exceptions import RequestValidationError
+from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
+from fastapi.encoders import jsonable_encoder
+from pydantic import BaseModel
+from typing import List, Union
import g4f
import g4f.debug
@@ -39,6 +42,25 @@ class Api:
self.app = FastAPI()
self.routes()
+ self.register_validation_exception_handler()
+
+ def register_validation_exception_handler(self):
+ @self.app.exception_handler(RequestValidationError)
+ async def validation_exception_handler(request: Request, exc: RequestValidationError):
+ details = exc.errors()
+ modified_details = []
+ for error in details:
+ modified_details.append(
+ {
+ "loc": error["loc"],
+ "message": error["msg"],
+ "type": error["type"],
+ }
+ )
+ return JSONResponse(
+ status_code=HTTP_422_UNPROCESSABLE_ENTITY,
+ content=jsonable_encoder({"detail": modified_details}),
+ )
def routes(self):
@self.app.get("/")
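
The new RequestValidationError handler flattens FastAPI's validation errors into a stable detail list. A hypothetical 422 body for a request that omits a required field (the exact loc and type values depend on the pydantic model in use):

    error_body = {
        "detail": [
            {
                "loc": ["body", "messages"],
                "message": "field required",
                "type": "value_error.missing",
            }
        ]
    }
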
diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py
index e69de29b..5bb4ba35 100644
--- a/g4f/client/__init__.py
+++ b/g4f/client/__init__.py
@@ -0,0 +1,3 @@
+from .stubs import ChatCompletion, ChatCompletionChunk, ImagesResponse
+from .client import Client
+from .async_client import AsyncClient \ No newline at end of file
diff --git a/g4f/client/async.py b/g4f/client/async_client.py
index 76e410fc..25de1c76 100644
--- a/g4f/client/async.py
+++ b/g4f/client/async_client.py
@@ -1,20 +1,21 @@
from __future__ import annotations
-import re
-import os
import time
import random
import string
from .types import Client as BaseClient
-from .types import BaseProvider, ProviderType, FinishReason
-from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
-from ..typing import Union, Iterator, Messages, ImageType, AsyncIerator
+from .types import ProviderType, FinishReason
+from .stubs import ChatCompletion, ChatCompletionChunk, ImagesResponse, Image
+from .types import AsyncIterResponse, ImageProvider
+from .image_models import ImageModels
+from .helper import filter_json, find_stop, filter_none, cast_iter_async
+from .service import get_last_provider, get_model_and_provider
+from ..typing import Union, Iterator, Messages, AsyncIterator, ImageType
+from ..errors import NoImageResponseError
from ..image import ImageResponse as ImageProviderResponse
-from ..errors import NoImageResponseError, RateLimitError, MissingAuthError
-from .. import get_model_and_provider, get_last_provider
-from .helper import read_json, find_stop, filter_none
-ä
+from ..providers.base_provider import AsyncGeneratorProvider
+
async def iter_response(
response: AsyncIterator[str],
stream: bool,
@@ -47,10 +48,10 @@ async def iter_response(
else:
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
- content = read_json(content)
+ content = filter_json(content)
yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))
-async def iter_append_model_and_provider(response: AsyncIterResponse) -> IterResponse:
+async def iter_append_model_and_provider(response: AsyncIterResponse) -> AsyncIterResponse:
last_provider = None
async for chunk in response:
last_provider = get_last_provider(True) if last_provider is None else last_provider
@@ -58,51 +59,50 @@ async def iter_append_model_and_provider(response: AsyncIterResponse) -> IterRes
chunk.provider = last_provider.get("name")
yield chunk
-class Client(BaseClient):
+class AsyncClient(BaseClient):
def __init__(
self,
+ provider: ProviderType = None,
+ image_provider: ImageProvider = None,
**kwargs
):
super().__init__(**kwargs)
self.chat: Chat = Chat(self, provider)
self.images: Images = Images(self, image_provider)
-async def cast_iter_async(iter):
- for chunk in iter:
- yield chunk
-
def create_response(
messages: Messages,
model: str,
provider: ProviderType = None,
stream: bool = False,
- response_format: dict = None,
+ proxy: str = None,
max_tokens: int = None,
- stop: Union[list[str], str] = None,
+ stop: list[str] = None,
api_key: str = None,
**kwargs
):
- if hasattr(provider, "create_async_generator):
+ has_asnyc = isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider)
+ if has_asnyc:
create = provider.create_async_generator
else:
create = provider.create_completion
response = create(
model, messages, stream,
**filter_none(
- proxy=self.client.get_proxy(),
+ proxy=proxy,
max_tokens=max_tokens,
stop=stop,
- api_key=self.client.api_key if api_key is None else api_key
+ api_key=api_key
),
**kwargs
)
- if not hasattr(provider, "create_async_generator")
+ if not has_asnyc:
response = cast_iter_async(response)
return response
class Completions():
- def __init__(self, client: Client, provider: ProviderType = None):
- self.client: Client = client
+ def __init__(self, client: AsyncClient, provider: ProviderType = None):
+ self.client: AsyncClient = client
self.provider: ProviderType = provider
def create(
@@ -111,6 +111,10 @@ class Completions():
model: str,
provider: ProviderType = None,
stream: bool = False,
+ proxy: str = None,
+ max_tokens: int = None,
+ stop: Union[list[str], str] = None,
+ api_key: str = None,
response_format: dict = None,
ignored : list[str] = None,
ignore_working: bool = False,
@@ -123,11 +127,18 @@ class Completions():
stream,
ignored,
ignore_working,
- ignore_stream,
- **kwargs
+ ignore_stream
)
stop = [stop] if isinstance(stop, str) else stop
- response = create_response(messages, model, provider, stream, **kwargs)
+ response = create_response(
+ messages, model,
+ provider, stream,
+ proxy=self.client.get_proxy() if proxy is None else proxy,
+ max_tokens=max_tokens,
+ stop=stop,
+ api_key=self.client.api_key if api_key is None else api_key,
+ **kwargs
+ )
response = iter_response(response, stream, response_format, max_tokens, stop)
response = iter_append_model_and_provider(response)
return response if stream else anext(response)
@@ -135,44 +146,40 @@ class Completions():
class Chat():
completions: Completions
- def __init__(self, client: Client, provider: ProviderType = None):
+ def __init__(self, client: AsyncClient, provider: ProviderType = None):
self.completions = Completions(client, provider)
async def iter_image_response(response: Iterator) -> Union[ImagesResponse, None]:
- async for chunk in list(response):
+ async for chunk in response:
if isinstance(chunk, ImageProviderResponse):
return ImagesResponse([Image(image) for image in chunk.get_list()])
-def create_image(client: Client, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> AsyncIterator:
+def create_image(client: AsyncClient, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> AsyncIterator:
prompt = f"create a image with: {prompt}"
+ if provider.__name__ == "You":
+ kwargs["chat_mode"] = "create"
return provider.create_async_generator(
model,
[{"role": "user", "content": prompt}],
- True,
+ stream=True,
proxy=client.get_proxy(),
**kwargs
)
class Images():
- def __init__(self, client: Client, provider: ImageProvider = None):
- self.client: Client = client
+ def __init__(self, client: AsyncClient, provider: ImageProvider = None):
+ self.client: AsyncClient = client
self.provider: ImageProvider = provider
self.models: ImageModels = ImageModels(client)
- async def generate(self, prompt, model: str = None, **kwargs) -> ImagesResponse:
+ async def generate(self, prompt, model: str = "", **kwargs) -> ImagesResponse:
provider = self.models.get(model, self.provider)
- if isinstance(provider, type) and issubclass(provider, BaseProvider):
+ if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
response = create_image(self.client, provider, prompt, **kwargs)
else:
- try:
- response = list(provider.create(prompt))
- except (RateLimitError, MissingAuthError) as e:
- # Fallback for default provider
- if self.provider is None:
- response = create_image(self.client, self.models.you, prompt, model or "dall-e", **kwargs)
- else:
- raise e
- image = iter_image_response(response)
+ response = await provider.create_async(prompt)
+ return ImagesResponse([Image(image) for image in response.get_list()])
+ image = await iter_image_response(response)
if image is None:
raise NoImageResponseError()
return image
@@ -180,7 +187,7 @@ class Images():
async def create_variation(self, image: ImageType, model: str = None, **kwargs):
provider = self.models.get(model, self.provider)
result = None
- if isinstance(provider, type) and issubclass(provider, BaseProvider):
+ if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
response = provider.create_async_generator(
"",
[{"role": "user", "content": "create a image like this"}],
@@ -189,10 +196,7 @@ class Images():
proxy=self.client.get_proxy(),
**kwargs
)
- async for chunk in response:
- if isinstance(chunk, ImageProviderResponse):
- result = ([chunk.images] if isinstance(chunk.images, str) else chunk.images)
- result = ImagesResponse([Image(image)for image in result])
+ result = iter_image_response(response)
if result is None:
raise NoImageResponseError()
- return result
+ return result \ No newline at end of file
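
The renamed async_client module exposes the client as g4f.client.AsyncClient. A minimal sketch, assuming the model name resolves to a working provider and that the ChatCompletion stubs expose OpenAI-style choices[0].message.content:

    import asyncio
    from g4f.client import AsyncClient

    async def main():
        client = AsyncClient()
        completion = await client.chat.completions.create(
            model="gpt-3.5-turbo",  # assumed model name
            messages=[{"role": "user", "content": "Hello"}],
        )
        print(completion.choices[0].message.content)

    asyncio.run(main())
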
diff --git a/g4f/client.py b/g4f/client/client.py
index 2c4fe788..8ce5d932 100644
--- a/g4f/client.py
+++ b/g4f/client/client.py
@@ -1,40 +1,19 @@
from __future__ import annotations
-import re
-import os
import time
import random
import string
+from ..typing import Union, Iterator, Messages, ImageType
+from ..providers.types import BaseProvider, ProviderType, FinishReason
+from ..image import ImageResponse as ImageProviderResponse
+from ..errors import NoImageResponseError
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
-from .typing import Union, Iterator, Messages, ImageType
-from .providers.types import BaseProvider, ProviderType, FinishReason
-from .image import ImageResponse as ImageProviderResponse
-from .errors import NoImageResponseError, RateLimitError, MissingAuthError
-from . import get_model_and_provider, get_last_provider
-
-from .Provider.BingCreateImages import BingCreateImages
-from .Provider.needs_auth import Gemini, OpenaiChat
-from .Provider.You import You
-
-ImageProvider = Union[BaseProvider, object]
-Proxies = Union[dict, str]
-IterResponse = Iterator[Union[ChatCompletion, ChatCompletionChunk]]
-
-def read_json(text: str) -> dict:
- """
- Parses JSON code block from a string.
-
- Args:
- text (str): A string containing a JSON code block.
-
- Returns:
- dict: A dictionary parsed from the JSON code block.
- """
- match = re.search(r"```(json|)\n(?P<code>[\S\s]+?)\n```", text)
- if match:
- return match.group("code")
- return text
+from .image_models import ImageModels
+from .types import IterResponse, ImageProvider
+from .types import Client as BaseClient
+from .service import get_model_and_provider, get_last_provider
+from .helper import find_stop, filter_json, filter_none
def iter_response(
response: iter[str],
@@ -53,20 +32,7 @@ def iter_response(
content += str(chunk)
if max_tokens is not None and idx + 1 >= max_tokens:
finish_reason = "length"
- first = -1
- word = None
- if stop is not None:
- for word in list(stop):
- first = content.find(word)
- if first != -1:
- content = content[:first]
- break
- if stream and first != -1:
- first = chunk.find(word)
- if first != -1:
- chunk = chunk[:first]
- else:
- first = 0
+ first, content, chunk = find_stop(stop, content, chunk if stream else None)
if first != -1:
finish_reason = "stop"
if stream:
@@ -79,7 +45,7 @@ def iter_response(
else:
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
- content = read_json(content)
+ content = filter_json(content)
yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))
def iter_append_model_and_provider(response: IterResponse) -> IterResponse:
@@ -90,37 +56,17 @@ def iter_append_model_and_provider(response: IterResponse) -> IterResponse:
chunk.provider = last_provider.get("name")
yield chunk
-class Client():
-
+class Client(BaseClient):
def __init__(
self,
- api_key: str = None,
- proxies: Proxies = None,
provider: ProviderType = None,
image_provider: ImageProvider = None,
**kwargs
) -> None:
- self.api_key: str = api_key
- self.proxies: Proxies = proxies
+ super().__init__(**kwargs)
self.chat: Chat = Chat(self, provider)
self.images: Images = Images(self, image_provider)
- def get_proxy(self) -> Union[str, None]:
- if isinstance(self.proxies, str):
- return self.proxies
- elif self.proxies is None:
- return os.environ.get("G4F_PROXY")
- elif "all" in self.proxies:
- return self.proxies["all"]
- elif "https" in self.proxies:
- return self.proxies["https"]
-
-def filter_none(**kwargs):
- for key in list(kwargs.keys()):
- if kwargs[key] is None:
- del kwargs[key]
- return kwargs
-
class Completions():
def __init__(self, client: Client, provider: ProviderType = None):
self.client: Client = client
@@ -132,6 +78,7 @@ class Completions():
model: str,
provider: ProviderType = None,
stream: bool = False,
+ proxy: str = None,
response_format: dict = None,
max_tokens: int = None,
stop: Union[list[str], str] = None,
@@ -148,13 +95,12 @@ class Completions():
ignored,
ignore_working,
ignore_stream,
- **kwargs
)
stop = [stop] if isinstance(stop, str) else stop
response = provider.create_completion(
model, messages, stream,
**filter_none(
- proxy=self.client.get_proxy(),
+ proxy=self.client.get_proxy() if proxy is None else proxy,
max_tokens=max_tokens,
stop=stop,
api_key=self.client.api_key if api_key is None else api_key
@@ -171,18 +117,6 @@ class Chat():
def __init__(self, client: Client, provider: ProviderType = None):
self.completions = Completions(client, provider)
-class ImageModels():
- gemini = Gemini
- openai = OpenaiChat
- you = You
-
- def __init__(self, client: Client) -> None:
- self.client = client
- self.default = BingCreateImages(proxy=self.client.get_proxy())
-
- def get(self, name: str, default: ImageProvider = None) -> ImageProvider:
- return getattr(self, name) if hasattr(self, name) else default or self.default
-
def iter_image_response(response: Iterator) -> Union[ImagesResponse, None]:
for chunk in list(response):
if isinstance(chunk, ImageProviderResponse):
@@ -190,10 +124,12 @@ def iter_image_response(response: Iterator) -> Union[ImagesResponse, None]:
def create_image(client: Client, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> Iterator:
prompt = f"create a image with: {prompt}"
+ if provider.__name__ == "You":
+ kwargs["chat_mode"] = "create"
return provider.create_completion(
model,
[{"role": "user", "content": prompt}],
- True,
+ stream=True,
proxy=client.get_proxy(),
**kwargs
)
@@ -209,14 +145,7 @@ class Images():
if isinstance(provider, type) and issubclass(provider, BaseProvider):
response = create_image(self.client, provider, prompt, **kwargs)
else:
- try:
- response = list(provider.create(prompt))
- except (RateLimitError, MissingAuthError) as e:
- # Fallback for default provider
- if self.provider is None:
- response = create_image(self.client, self.models.you, prompt, model or "dall-e", **kwargs)
- else:
- raise e
+ response = list(provider.create(prompt))
image = iter_image_response(response)
if image is None:
raise NoImageResponseError()
@@ -234,10 +163,7 @@ class Images():
proxy=self.client.get_proxy(),
**kwargs
)
- for chunk in response:
- if isinstance(chunk, ImageProviderResponse):
- result = ([chunk.images] if isinstance(chunk.images, str) else chunk.images)
- result = ImagesResponse([Image(image)for image in result])
+ result = iter_image_response(response)
if result is None:
raise NoImageResponseError()
return result \ No newline at end of file
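
The synchronous Client keeps the same surface but now inherits api_key and proxy handling from the shared base class. A streaming sketch, assuming the ChatCompletionChunk stubs expose OpenAI-style choices[0].delta.content:

    from g4f.client import Client

    client = Client()
    stream = client.chat.completions.create(
        model="gpt-3.5-turbo",  # assumed model name
        messages=[{"role": "user", "content": "Write a haiku"}],
        stream=True,
    )
    for chunk in stream:
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")
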
diff --git a/g4f/client/helper.py b/g4f/client/helper.py
index 32aa9183..c502d478 100644
--- a/g4f/client/helper.py
+++ b/g4f/client/helper.py
@@ -1,6 +1,9 @@
+from __future__ import annotations
+
import re
+from typing import Iterable, AsyncIterator
-def read_json(text: str) -> dict:
+def filter_json(text: str) -> str:
"""
Parses JSON code block from a string.
@@ -15,7 +18,7 @@ def read_json(text: str) -> dict:
return match.group("code")
return text
-def find_stop(stop, content: str, chunk: str):
+def find_stop(stop, content: str, chunk: str = None):
first = -1
word = None
if stop is not None:
@@ -24,10 +27,21 @@ def find_stop(stop, content: str, chunk: str):
if first != -1:
content = content[:first]
break
- if stream and first != -1:
+ if chunk is not None and first != -1:
first = chunk.find(word)
if first != -1:
chunk = chunk[:first]
else:
first = 0
return first, content, chunk
+
+def filter_none(**kwargs) -> dict:
+ return {
+ key: value
+ for key, value in kwargs.items()
+ if value is not None
+ }
+
+async def cast_iter_async(iter: Iterable) -> AsyncIterator:
+ for chunk in iter:
+ yield chunk \ No newline at end of file
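
cast_iter_async lets the async client reuse providers that only return synchronous generators. A tiny sketch of the wrapper:

    import asyncio
    from g4f.client.helper import cast_iter_async

    async def demo():
        # Wraps a plain iterable so it can be consumed with "async for".
        async for chunk in cast_iter_async(iter(["Hel", "lo"])):
            print(chunk, end="")

    asyncio.run(demo())
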
diff --git a/g4f/client/image_models.py b/g4f/client/image_models.py
index 1fd2d0b5..db2ce09a 100644
--- a/g4f/client/image_models.py
+++ b/g4f/client/image_models.py
@@ -1,8 +1,10 @@
-from .Provider.BingCreateImages import BingCreateImages
-from .Provider.needs_auth import Gemini, OpenaiChat
-from ..Provider.You import You
+from __future__ import annotations
+
+from .types import Client, ImageProvider
-from .types import Client
+from ..Provider.BingCreateImages import BingCreateImages
+from ..Provider.needs_auth import Gemini, OpenaiChat
+from ..Provider.You import You
class ImageModels():
gemini = Gemini
diff --git a/g4f/client/service.py b/g4f/client/service.py
new file mode 100644
index 00000000..f3565f6d
--- /dev/null
+++ b/g4f/client/service.py
@@ -0,0 +1,114 @@
+from __future__ import annotations
+
+from typing import Union
+
+from .. import debug, version
+from ..errors import ProviderNotFoundError, ModelNotFoundError, ProviderNotWorkingError, StreamNotSupportedError
+from ..models import Model, ModelUtils
+from ..Provider import ProviderUtils
+from ..providers.types import BaseRetryProvider, ProviderType
+from ..providers.retry_provider import IterProvider
+
+def convert_to_provider(provider: str) -> ProviderType:
+ if " " in provider:
+ provider_list = [ProviderUtils.convert[p] for p in provider.split() if p in ProviderUtils.convert]
+ if not provider_list:
+ raise ProviderNotFoundError(f'Providers not found: {provider}')
+ provider = IterProvider(provider_list)
+ elif provider in ProviderUtils.convert:
+ provider = ProviderUtils.convert[provider]
+ elif provider:
+ raise ProviderNotFoundError(f'Provider not found: {provider}')
+ return provider
+
+def get_model_and_provider(model : Union[Model, str],
+ provider : Union[ProviderType, str, None],
+ stream : bool,
+ ignored : list[str] = None,
+ ignore_working: bool = False,
+ ignore_stream: bool = False) -> tuple[str, ProviderType]:
+ """
+ Retrieves the model and provider based on input parameters.
+
+ Args:
+ model (Union[Model, str]): The model to use, either as an object or a string identifier.
+ provider (Union[ProviderType, str, None]): The provider to use, either as an object, a string identifier, or None.
+ stream (bool): Indicates if the operation should be performed as a stream.
+ ignored (list[str], optional): List of provider names to be ignored.
+ ignore_working (bool, optional): If True, ignores the working status of the provider.
+ ignore_stream (bool, optional): If True, ignores the streaming capability of the provider.
+
+ Returns:
+ tuple[str, ProviderType]: A tuple containing the model name and the provider type.
+
+ Raises:
+ ProviderNotFoundError: If the provider is not found.
+ ModelNotFoundError: If the model is not found.
+ ProviderNotWorkingError: If the provider is not working.
+ StreamNotSupportedError: If streaming is not supported by the provider.
+ """
+ if debug.version_check:
+ debug.version_check = False
+ version.utils.check_version()
+
+ if isinstance(provider, str):
+ provider = convert_to_provider(provider)
+
+ if isinstance(model, str):
+ if model in ModelUtils.convert:
+ model = ModelUtils.convert[model]
+
+ if not provider:
+ if isinstance(model, str):
+ raise ModelNotFoundError(f'Model not found: {model}')
+ provider = model.best_provider
+
+ if not provider:
+ raise ProviderNotFoundError(f'No provider found for model: {model}')
+
+ if isinstance(model, Model):
+ model = model.name
+
+ if not ignore_working and not provider.working:
+ raise ProviderNotWorkingError(f'{provider.__name__} is not working')
+
+ if not ignore_working and isinstance(provider, BaseRetryProvider):
+ provider.providers = [p for p in provider.providers if p.working]
+
+ if ignored and isinstance(provider, BaseRetryProvider):
+ provider.providers = [p for p in provider.providers if p.__name__ not in ignored]
+
+ if not ignore_stream and not provider.supports_stream and stream:
+ raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument')
+
+ if debug.logging:
+ if model:
+ print(f'Using {provider.__name__} provider and {model} model')
+ else:
+ print(f'Using {provider.__name__} provider')
+
+ debug.last_provider = provider
+ debug.last_model = model
+
+ return model, provider
+
+def get_last_provider(as_dict: bool = False) -> Union[ProviderType, dict[str, str]]:
+ """
+ Retrieves the last used provider.
+
+ Args:
+ as_dict (bool, optional): If True, returns the provider information as a dictionary.
+
+ Returns:
+ Union[ProviderType, dict[str, str]]: The last used provider, either as an object or a dictionary.
+ """
+ last = debug.last_provider
+ if isinstance(last, BaseRetryProvider):
+ last = last.last_provider
+ if last and as_dict:
+ return {
+ "name": last.__name__,
+ "url": last.url,
+ "model": debug.last_model,
+ }
+ return last \ No newline at end of file
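
get_model_and_provider and get_last_provider now live in g4f.client.service and are re-exported from g4f/__init__.py. A minimal sketch of resolving a model string, assuming "gpt-3.5-turbo" is registered in ModelUtils:

    from g4f.client.service import get_model_and_provider, get_last_provider

    model, provider = get_model_and_provider("gpt-3.5-turbo", provider=None, stream=False)
    print(model, provider.__name__)
    # get_last_provider(True) reports the provider chosen above as a dict.
    print(get_last_provider(as_dict=True))
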
diff --git a/g4f/client/types.py b/g4f/client/types.py
index b21ff03a..100be432 100644
--- a/g4f/client/types.py
+++ b/g4f/client/types.py
@@ -1,9 +1,15 @@
+from __future__ import annotations
+
+import os
+
+from .stubs import ChatCompletion, ChatCompletionChunk
from ..providers.types import BaseProvider, ProviderType, FinishReason
-from typing import Union, Iterator
+from typing import Union, Iterator, AsyncIterator
ImageProvider = Union[BaseProvider, object]
Proxies = Union[dict, str]
IterResponse = Iterator[Union[ChatCompletion, ChatCompletionChunk]]
+AsyncIterResponse = AsyncIterator[Union[ChatCompletion, ChatCompletionChunk]]
class ClientProxyMixin():
def get_proxy(self) -> Union[str, None]:
@@ -21,9 +27,7 @@ class Client(ClientProxyMixin):
self,
api_key: str = None,
proxies: Proxies = None,
- provider: ProviderType = None,
- image_provider: ImageProvider = None,
**kwargs
) -> None:
self.api_key: str = api_key
- self.proxies: Proxies = proxies
+ self.proxies: Proxies = proxies \ No newline at end of file
diff --git a/g4f/gui/__init__.py b/g4f/gui/__init__.py
index 16b03e4c..f5e448ad 100644
--- a/g4f/gui/__init__.py
+++ b/g4f/gui/__init__.py
@@ -10,7 +10,7 @@ except ImportError as e:
def run_gui(host: str = '0.0.0.0', port: int = 8080, debug: bool = False) -> None:
if import_error is not None:
- raise MissingRequirementsError(f'Install "gui" requirements | pip install g4f[gui] -U\n{import_error}')
+ raise MissingRequirementsError(f'Install "gui" requirements | pip install -U g4f[gui]\n{import_error}')
if debug:
from g4f import debug
@@ -20,7 +20,7 @@ def run_gui(host: str = '0.0.0.0', port: int = 8080, debug: bool = False) -> Non
'port' : port,
'debug': debug
}
-
+
site = Website(app)
for route in site.routes:
app.add_url_rule(
@@ -28,7 +28,7 @@ def run_gui(host: str = '0.0.0.0', port: int = 8080, debug: bool = False) -> Non
view_func = site.routes[route]['function'],
methods = site.routes[route]['methods'],
)
-
+
backend_api = Backend_Api(app)
for route in backend_api.routes:
app.add_url_rule(
@@ -36,7 +36,7 @@ def run_gui(host: str = '0.0.0.0', port: int = 8080, debug: bool = False) -> Non
view_func = backend_api.routes[route]['function'],
methods = backend_api.routes[route]['methods'],
)
-
+
print(f"Running on port {config['port']}")
app.run(**config)
print(f"Closing port {config['port']}")
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index f63932e7..7103b9c3 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -77,17 +77,35 @@
</div>
</div>
<div class="settings">
- <textarea name="OpenaiChat[api_key]" class="box" placeholder="OpenaiChat: accessToken"></textarea>
- <div class="field">
- <input id="auto_continue" type="checkbox" name="OpenaiChat[auto_continue]" checked/>
- <label for="auto_continue" title=""></label>
- <span class="about">Auto Continue</span>
+ <div class="field box">
+ <label for="OpenaiChat-api_key" class="label" title="">OpenaiChat: access_token</label>
+ <textarea id="OpenaiChat-api_key" name="OpenaiChat[api_key]" placeholder="..."></textarea>
+ </div>
+ <div class="field">
+ <span class="label">OpenaiChat: Auto continue</span>
+ <input id="OpenaiChat-auto_continue" type="checkbox" name="OpenaiChat[auto_continue]" checked/>
+ <label for="OpenaiChat-auto_continue" class="toogle" title=""></label>
+ </div>
+ <div class="field box">
+ <label for="Bing-api_key" class="label" title="">Bing: "_U" cookie</label>
+ <textarea id="Bing-api_key" name="Bing[api_key]" placeholder="..."></textarea>
+ </div>
+ <div class="field box">
+ <label for="Gemini-api_key" class="label" title="">Gemini: Auth cookies</label>
+ <textarea id="Gemini-api_key" name="Gemini[api_key]" placeholder="..."></textarea>
+ </div>
+ <div class="field box">
+ <label for="Openai-api_key" class="label" title="">Openai: api_key</label>
+ <textarea id="Openai-api_key" name="Openai[api_key]" placeholder="..."></textarea>
+ </div>
+ <div class="field box">
+ <label for="GeminiPro-api_key" class="label" title="">GeminiPro: api_key</label>
+ <textarea id="GeminiPro-api_key" name="GeminiPro[api_key]" placeholder="..."></textarea>
+ </div>
+ <div class="field box">
+ <label for="HuggingFace-api_key" class="label" title="">HuggingFace: api_key</label>
+ <textarea id="HuggingFace-api_key" name="HuggingFace[api_key]" placeholder="..."></textarea>
</div>
- <textarea name="Bing[api_key]" class="box" placeholder="Bing: _U cookie"></textarea>
- <textarea name="Gemini[api_key]" class="box" placeholder="Gemini: Auth cookies"></textarea>
- <textarea name="Openai[api_key]" class="box" placeholder="Openai: api_key></textarea>
- <textarea name="Grok[api_key]" class="box" placeholder="Grok: api_key"></textarea>
- <textarea name="GeminiPro[api_key]" class="box" placeholder="GeminiPro: api_key"></textarea>
</div>
<div class="conversation">
<textarea id="systemPrompt" class="box" placeholder="System prompt"></textarea>
diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css
index 25fc4911..b0f7a4a0 100644
--- a/g4f/gui/client/static/css/style.css
+++ b/g4f/gui/client/static/css/style.css
@@ -520,7 +520,7 @@ label[for="camera"] {
}
.buttons label,
-.settings label {
+.settings label.toogle {
cursor: pointer;
text-indent: -9999px;
width: 50px;
@@ -538,7 +538,7 @@ label[for="camera"] {
}
.buttons label:after,
-.settings label:after {
+.settings label.toogle:after {
content: "";
position: absolute;
top: 50%;
@@ -560,17 +560,13 @@ label[for="camera"] {
left: calc(100% - 5px - 20px);
}
-.buttons, .settings {
+.buttons {
display: flex;
align-items: center;
justify-content: left;
width: 100%;
}
-.settings textarea{
- height: 20px;
-}
-
.field {
height: fit-content;
display: flex;
@@ -1017,7 +1013,7 @@ a:-webkit-any-link {
border: 1px solid #e4d4ffc9;
}
-#systemPrompt {
+#systemPrompt, .settings textarea {
font-size: 15px;
width: 100%;
color: var(--colour-3);
@@ -1028,6 +1024,30 @@ a:-webkit-any-link {
resize: vertical;
}
+.settings {
+ width: 100%;
+ display: none;
+}
+
+.settings .field {
+ margin: var(--inner-gap) 0;
+}
+
+.settings textarea {
+ background-color: transparent;
+ border: none;
+ padding: var(--inner-gap) 0;
+}
+
+.settings .label {
+ font-size: 15px;
+ padding: var(--inner-gap) 0;
+ width: fit-content;
+ min-width: 190px;
+ margin-left: var(--section-gap);
+ white-space:nowrap;
+}
+
::-webkit-scrollbar {
width: 10px;
}
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index 2c6279d1..7c09fdbe 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -98,7 +98,7 @@ class Api():
if conversation_id and provider in conversations and conversation_id in conversations[provider]:
kwargs["conversation"] = conversations[provider][conversation_id]
- model = json_data.get('model', models.default)
+ model = json_data.get('model') or models.default
return {
"model": model,
@@ -169,4 +169,8 @@ def get_error_message(exception: Exception) -> str:
Returns:
str: A formatted error message string.
"""
- return f"{get_last_provider().__name__}: {type(exception).__name__}: {exception}" \ No newline at end of file
+ message = f"{type(exception).__name__}: {exception}"
+ provider = get_last_provider()
+ if provider is None:
+ return message
+ return f"{provider.__name__}: {message}" \ No newline at end of file
diff --git a/g4f/gui/webview.py b/g4f/gui/webview.py
index 1557279c..dce47ecc 100644
--- a/g4f/gui/webview.py
+++ b/g4f/gui/webview.py
@@ -31,7 +31,7 @@ def run_webview(
f"g4f - {g4f.version.utils.current_version}",
os.path.join(dirname, "client/index.html"),
text_select=True,
- js_api=Api(),
+ js_api=JsApi(),
)
if has_platformdirs and storage_path is None:
storage_path = user_config_dir("g4f-webview")
diff --git a/g4f/local/__init__.py b/g4f/local/__init__.py
index c9d3d74a..117cd48d 100644
--- a/g4f/local/__init__.py
+++ b/g4f/local/__init__.py
@@ -1,17 +1,19 @@
-from ..typing import Union, Iterator, Messages
-from ..stubs import ChatCompletion, ChatCompletionChunk
-from ._engine import LocalProvider
-from ._models import models
-from ..client import iter_response, filter_none, IterResponse
+from __future__ import annotations
+
+from ..typing import Union, Messages
+from ..locals.provider import LocalProvider
+from ..locals.models import get_models
+from ..client.client import iter_response, filter_none
+from ..client.types import IterResponse
class LocalClient():
def __init__(self, **kwargs) -> None:
self.chat: Chat = Chat(self)
-
+
@staticmethod
def list_models():
- return list(models.keys())
-
+ return list(get_models())
+
class Completions():
def __init__(self, client: LocalClient):
self.client: LocalClient = client
@@ -25,8 +27,7 @@ class Completions():
max_tokens: int = None,
stop: Union[list[str], str] = None,
**kwargs
- ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
-
+ ) -> IterResponse:
stop = [stop] if isinstance(stop, str) else stop
response = LocalProvider.create_completion(
model, messages, stream,
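
A hedged usage sketch for the reworked local client, assuming the gpt4all extra is installed and that the Chat/Completions wiring follows the same pattern as g4f's main client; the model id is illustrative and has to match a key returned by get_models().

    from g4f.local import LocalClient

    client = LocalClient()
    print(client.list_models())              # names discovered by g4f.locals.models.get_models()

    response = client.chat.completions.create(
        model="mistral-7b-openorca",          # illustrative id
        messages=[{"role": "user", "content": "Hello"}],
        stream=False,
    )
    print(response.choices[0].message.content)
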
diff --git a/g4f/local/_engine.py b/g4f/local/_engine.py
deleted file mode 100644
index 917de16c..00000000
--- a/g4f/local/_engine.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-
-from gpt4all import GPT4All
-from ._models import models
-
-class LocalProvider:
- @staticmethod
- def create_completion(model, messages, stream, **kwargs):
- if model not in models:
- raise ValueError(f"Model '{model}' not found / not yet implemented")
-
- model = models[model]
- model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models/')
- full_model_path = os.path.join(model_dir, model['path'])
-
- if not os.path.isfile(full_model_path):
- print(f"Model file '{full_model_path}' not found.")
- download = input(f'Do you want to download {model["path"]} ? [y/n]')
-
- if download in ['y', 'Y']:
- GPT4All.download_model(model['path'], model_dir)
- else:
- raise ValueError(f"Model '{model['path']}' not found.")
-
- model = GPT4All(model_name=model['path'],
- #n_threads=8,
- verbose=False,
- allow_download=False,
- model_path=model_dir)
-
- system_template = next((message['content'] for message in messages if message['role'] == 'system'),
- 'A chat between a curious user and an artificial intelligence assistant.')
-
- prompt_template = 'USER: {0}\nASSISTANT: '
- conversation = '\n'.join(f"{msg['role'].upper()}: {msg['content']}" for msg in messages) + "\nASSISTANT: "
-
- with model.chat_session(system_template, prompt_template):
- if stream:
- for token in model.generate(conversation, streaming=True):
- yield token
- else:
- yield model.generate(conversation) \ No newline at end of file
diff --git a/g4f/local/_models.py b/g4f/local/_models.py
deleted file mode 100644
index ec36fe41..00000000
--- a/g4f/local/_models.py
+++ /dev/null
@@ -1,86 +0,0 @@
-models = {
- "mistral-7b": {
- "path": "mistral-7b-openorca.gguf2.Q4_0.gguf",
- "ram": "8",
- "prompt": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n",
- "system": "<|im_start|>system\nYou are MistralOrca, a large language model trained by Alignment Lab AI. For multi-step problems, write out your reasoning for each step.\n<|im_end|>"
- },
- "mistral-7b-instruct": {
- "path": "mistral-7b-instruct-v0.1.Q4_0.gguf",
- "ram": "8",
- "prompt": "[INST] %1 [/INST]",
- "system": None
- },
- "gpt4all-falcon": {
- "path": "gpt4all-falcon-newbpe-q4_0.gguf",
- "ram": "8",
- "prompt": "### Instruction:\n%1\n### Response:\n",
- "system": None
- },
- "orca-2": {
- "path": "orca-2-13b.Q4_0.gguf",
- "ram": "16",
- "prompt": None,
- "system": None
- },
- "wizardlm-13b": {
- "path": "wizardlm-13b-v1.2.Q4_0.gguf",
- "ram": "16",
- "prompt": None,
- "system": None
- },
- "nous-hermes-llama2": {
- "path": "nous-hermes-llama2-13b.Q4_0.gguf",
- "ram": "16",
- "prompt": "### Instruction:\n%1\n### Response:\n",
- "system": None
- },
- "gpt4all-13b-snoozy": {
- "path": "gpt4all-13b-snoozy-q4_0.gguf",
- "ram": "16",
- "prompt": None,
- "system": None
- },
- "mpt-7b-chat": {
- "path": "mpt-7b-chat-newbpe-q4_0.gguf",
- "ram": "8",
- "prompt": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n",
- "system": "<|im_start|>system\n- You are a helpful assistant chatbot trained by MosaicML.\n- You answer questions.\n- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.\n- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>"
- },
- "orca-mini-3b": {
- "path": "orca-mini-3b-gguf2-q4_0.gguf",
- "ram": "4",
- "prompt": "### User:\n%1\n### Response:\n",
- "system": "### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n"
- },
- "replit-code-3b": {
- "path": "replit-code-v1_5-3b-newbpe-q4_0.gguf",
- "ram": "4",
- "prompt": "%1",
- "system": None
- },
- "starcoder": {
- "path": "starcoder-newbpe-q4_0.gguf",
- "ram": "4",
- "prompt": "%1",
- "system": None
- },
- "rift-coder-7b": {
- "path": "rift-coder-v0-7b-q4_0.gguf",
- "ram": "8",
- "prompt": "%1",
- "system": None
- },
- "all-MiniLM-L6-v2": {
- "path": "all-MiniLM-L6-v2-f16.gguf",
- "ram": "1",
- "prompt": None,
- "system": None
- },
- "mistral-7b-german": {
- "path": "em_german_mistral_v01.Q4_0.gguf",
- "ram": "8",
- "prompt": "USER: %1 ASSISTANT: ",
- "system": "Du bist ein hilfreicher Assistent. "
- }
-} \ No newline at end of file
diff --git a/g4f/local/models/model-here b/g4f/local/models/model-here
deleted file mode 100644
index 945c9b46..00000000
--- a/g4f/local/models/model-here
+++ /dev/null
@@ -1 +0,0 @@
-. \ No newline at end of file
diff --git a/g4f/locals/__init__.py b/g4f/locals/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/g4f/locals/__init__.py
diff --git a/g4f/locals/models.py b/g4f/locals/models.py
new file mode 100644
index 00000000..f82f5448
--- /dev/null
+++ b/g4f/locals/models.py
@@ -0,0 +1,51 @@
+from __future__ import annotations
+
+import os
+import requests
+import json
+
+from ..requests.raise_for_status import raise_for_status
+
+def load_models():
+ response = requests.get("https://gpt4all.io/models/models3.json")
+ raise_for_status(response)
+ return format_models(response.json())
+
+def get_model_name(filename: str) -> str:
+ name = filename.split(".", 1)[0]
+ for replace in ["-v1_5", "-v1", "-q4_0", "_v01", "-v0", "-f16", "-gguf2", "-newbpe"]:
+ name = name.replace(replace, "")
+ return name
+
+def format_models(models: list) -> dict:
+ return {get_model_name(model["filename"]): {
+ "path": model["filename"],
+ "ram": model["ramrequired"],
+ "prompt": model["promptTemplate"] if "promptTemplate" in model else None,
+ "system": model["systemPrompt"] if "systemPrompt" in model else None,
+ } for model in models}
+
+def read_models(file_path: str):
+ with open(file_path, "rb") as f:
+ return json.load(f)
+
+def save_models(file_path: str, data):
+ with open(file_path, 'w') as f:
+ json.dump(data, f, indent=4)
+
+def get_model_dir() -> str:
+ local_dir = os.path.dirname(os.path.abspath(__file__))
+ project_dir = os.path.dirname(os.path.dirname(local_dir))
+ model_dir = os.path.join(project_dir, "models")
+    os.makedirs(model_dir, exist_ok=True)
+    return model_dir
+
+def get_models() -> dict[str, dict]:
+ model_dir = get_model_dir()
+ file_path = os.path.join(model_dir, "models.json")
+ if os.path.isfile(file_path):
+ return read_models(file_path)
+ else:
+ models = load_models()
+ save_models(file_path, models)
+ return models \ No newline at end of file
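
An illustrative pass through the helpers above, using the mistral-7b entry that the removed _models.py used to hard-code; only the fields shown are assumed to be present in the gpt4all.io listing.

    sample = [{
        "filename": "mistral-7b-openorca.gguf2.Q4_0.gguf",
        "ramrequired": "8",
        "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n",
    }]
    get_model_name(sample[0]["filename"])   # -> "mistral-7b-openorca"
    format_models(sample)
    # -> {"mistral-7b-openorca": {"path": "mistral-7b-openorca.gguf2.Q4_0.gguf",
    #                             "ram": "8", "prompt": "<|im_start|>user...", "system": None}}
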
diff --git a/g4f/locals/provider.py b/g4f/locals/provider.py
new file mode 100644
index 00000000..45041539
--- /dev/null
+++ b/g4f/locals/provider.py
@@ -0,0 +1,74 @@
+from __future__ import annotations
+
+import os
+
+from gpt4all import GPT4All
+from .models import get_models
+from ..typing import Messages
+
+MODEL_LIST: dict[str, dict] = None
+
+def find_model_dir(model_file: str) -> str:
+ local_dir = os.path.dirname(os.path.abspath(__file__))
+ project_dir = os.path.dirname(os.path.dirname(local_dir))
+
+ new_model_dir = os.path.join(project_dir, "models")
+ new_model_file = os.path.join(new_model_dir, model_file)
+ if os.path.isfile(new_model_file):
+ return new_model_dir
+
+ old_model_dir = os.path.join(local_dir, "models")
+ old_model_file = os.path.join(old_model_dir, model_file)
+ if os.path.isfile(old_model_file):
+ return old_model_dir
+
+ working_dir = "./"
+ for root, dirs, files in os.walk(working_dir):
+ if model_file in files:
+ return root
+
+ return new_model_dir
+
+class LocalProvider:
+ @staticmethod
+ def create_completion(model: str, messages: Messages, stream: bool = False, **kwargs):
+ global MODEL_LIST
+ if MODEL_LIST is None:
+ MODEL_LIST = get_models()
+ if model not in MODEL_LIST:
+ raise ValueError(f'Model "{model}" not found / not yet implemented')
+
+ model = MODEL_LIST[model]
+ model_file = model["path"]
+ model_dir = find_model_dir(model_file)
+ if not os.path.isfile(os.path.join(model_dir, model_file)):
+ print(f'Model file "models/{model_file}" not found.')
+ download = input(f"Do you want to download {model_file}? [y/n]: ")
+ if download in ["y", "Y"]:
+ GPT4All.download_model(model_file, model_dir)
+ else:
+ raise ValueError(f'Model "{model_file}" not found.')
+
+ model = GPT4All(model_name=model_file,
+ #n_threads=8,
+ verbose=False,
+ allow_download=False,
+ model_path=model_dir)
+
+ system_message = "\n".join(message["content"] for message in messages if message["role"] == "system")
+        if not system_message:
+            system_message = "A chat between a curious user and an artificial intelligence assistant."
+
+ prompt_template = "USER: {0}\nASSISTANT: "
+        conversation = "\n".join(
+ f"{message['role'].upper()}: {message['content']}"
+ for message in messages
+ if message["role"] != "system"
+ ) + "\nASSISTANT: "
+
+ with model.chat_session(system_message, prompt_template):
+ if stream:
+ for token in model.generate(conversation, streaming=True):
+ yield token
+ else:
+ yield model.generate(conversation) \ No newline at end of file
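
A hedged usage sketch for the new provider, assuming the gpt4all package is installed and the referenced GGUF file already sits in one of the directories that find_model_dir() searches; the model id is illustrative and must be a key of get_models().

    from g4f.locals.provider import LocalProvider

    messages = [
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": "Name three prime numbers."},
    ]

    # create_completion is a generator; with stream=True it yields one token at a time.
    for token in LocalProvider.create_completion("mistral-7b-openorca", messages, stream=True):
        print(token, end="", flush=True)
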
diff --git a/g4f/providers/types.py b/g4f/providers/types.py
index a3eeb99e..f33ea969 100644
--- a/g4f/providers/types.py
+++ b/g4f/providers/types.py
@@ -96,6 +96,7 @@ class BaseRetryProvider(BaseProvider):
__name__: str = "RetryProvider"
supports_stream: bool = True
+ last_provider: Type[BaseProvider] = None
ProviderType = Union[Type[BaseProvider], BaseRetryProvider]
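
The new class attribute gives callers such as get_error_message() in g4f/gui/server/api.py (see the hunk above) a place to read back which concrete provider a retrying wrapper last dispatched to. A minimal consumer sketch, with the retry instance itself assumed to exist elsewhere:

    provider = retry_provider.last_provider    # retry_provider: BaseRetryProvider
    label = provider.__name__ if provider is not None else "unknown provider"
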