path: root/g4f
author     Tekky <98614666+xtekky@users.noreply.github.com>  2023-09-26 01:20:27 +0200
committer  GitHub <noreply@github.com>  2023-09-26 01:20:27 +0200
commit     fd5d28cf7b936855b36b540bb589219b84eea376 (patch)
tree       65f3b7b5c3a15d4681cf07d1bb120489d61072b7 /g4f
parent     Merge pull request #940 from hlohaus/myshell (diff)
parent     AItianhuSpace Provider with GPT 4 added (diff)
Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/AItianhuSpace.py   73
-rw-r--r--  g4f/Provider/Aivvm.py            5
-rw-r--r--  g4f/Provider/Vercel.py          30
-rw-r--r--  g4f/Provider/__init__.py         2
-rw-r--r--  g4f/Provider/base_provider.py   31
-rw-r--r--  g4f/models.py                    7
-rw-r--r--  g4f/requests.py                 78
7 files changed, 206 insertions, 20 deletions
diff --git a/g4f/Provider/AItianhuSpace.py b/g4f/Provider/AItianhuSpace.py
new file mode 100644
index 00000000..8beb3355
--- /dev/null
+++ b/g4f/Provider/AItianhuSpace.py
@@ -0,0 +1,73 @@
+from __future__ import annotations
+
+import random, json
+from typing import AsyncGenerator
+from g4f.requests import AsyncSession, StreamRequest
+from .base_provider import AsyncGeneratorProvider, format_prompt
+
+domains = {
+ "gpt-3.5-turbo": ".aitianhu.space",
+ "gpt-4": ".aitianhu.website",
+}
+
+class AItianhuSpace(AsyncGeneratorProvider):
+ url = "https://chat3.aiyunos.top/"
+ working = True
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool = True,
+ **kwargs
+    ) -> AsyncGenerator[str, None]:
+ if not model:
+ model = "gpt-3.5-turbo"
+        elif model not in domains:
+            raise ValueError(f"Model is not supported: {model}")
+
+ chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
+ rand = ''.join(random.choice(chars) for _ in range(6))
+ domain = domains[model]
+ url = f'https://{rand}{domain}/api/chat-process'
+
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
+ }
+ async with AsyncSession(headers=headers, impersonate="chrome107", verify=False) as session:
+ data = {
+ "prompt": format_prompt(messages),
+ "options": {},
+ "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
+ "temperature": 0.8,
+ "top_p": 1,
+ **kwargs
+ }
+ async with StreamRequest(session, "POST", url, json=data) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ line = json.loads(line.rstrip())
+ if "detail" in line:
+ content = line["detail"]["choices"][0]["delta"].get("content")
+ if content:
+ yield content
+                    elif "message" in line and "AI-4接口非常昂贵" in line["message"]:  # roughly: "the GPT-4 API is very expensive"
+ raise RuntimeError("Rate limit for GPT 4 reached")
+ else:
+                        raise RuntimeError(f"Response: {line}")
+
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("temperature", "float"),
+ ("top_p", "int"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
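
A minimal usage sketch for the new provider follows. It assumes the package
layout of this commit; the prompt text is illustrative.

    import asyncio
    from g4f.Provider import AItianhuSpace

    async def main():
        # create_async_generator streams the reply chunk by chunk
        async for token in AItianhuSpace.create_async_generator(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello"}],
        ):
            print(token, end="", flush=True)

    asyncio.run(main())
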
diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
index c38c4a74..7a3d57bd 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/Aivvm.py
@@ -60,9 +60,10 @@ class Aivvm(BaseProvider):
response = requests.post(
"https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
+ response.raise_for_status()
- for line in response.iter_content(chunk_size=1048):
- yield line.decode('utf-8')
+ for chunk in response.iter_content(chunk_size=None):
+ yield chunk.decode('utf-8')
@classmethod
@property
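
For context: with stream=True, requests' iter_content(chunk_size=None) yields
data as it arrives instead of buffering it into fixed 1048-byte blocks, so
tokens reach the caller sooner. A standalone sketch with an illustrative URL:

    import requests

    response = requests.get("https://example.com/stream", stream=True)
    response.raise_for_status()

    # chunk_size=None: each chunk is delivered as soon as it is received
    for chunk in response.iter_content(chunk_size=None):
        print(chunk.decode("utf-8", errors="replace"), end="")
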
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index ca124fec..4102c07b 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -18,7 +18,13 @@ class Vercel(BaseProvider):
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool, **kwargs ) -> CreateResult:
+ stream: bool,
+ **kwargs
+ ) -> CreateResult:
+ if not model:
+ model = "gpt-3.5-turbo"
+ elif model not in model_info:
+            raise ValueError(f"Model is not supported: {model}")
headers = {
'authority' : 'sdk.vercel.ai',
@@ -26,7 +32,7 @@ class Vercel(BaseProvider):
'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control' : 'no-cache',
'content-type' : 'application/json',
- 'custom-encoding' : AntiBotToken(),
+ 'custom-encoding' : get_anti_bot_token(),
'origin' : 'https://sdk.vercel.ai',
'pragma' : 'no-cache',
'referer' : 'https://sdk.vercel.ai/',
@@ -48,22 +54,20 @@ class Vercel(BaseProvider):
'playgroundId': str(uuid.uuid4()),
'chatIndex' : 0} | model_info[model]['default_params']
- server_error = True
- retries = 0
max_retries = kwargs.get('max_retries', 20)
-
- while server_error and not retries > max_retries:
+ for i in range(max_retries):
response = requests.post('https://sdk.vercel.ai/api/generate',
headers=headers, json=json_data, stream=True)
+ try:
+ response.raise_for_status()
+            except requests.HTTPError:
+ continue
+ for token in response.iter_content(chunk_size=8):
+ yield token.decode()
+ break
- for token in response.iter_content(chunk_size=2046):
- if token != b'Internal Server Error':
- server_error = False
- yield (token.decode())
-
- retries += 1
-def AntiBotToken() -> str:
+def get_anti_bot_token() -> str:
headers = {
'authority' : 'sdk.vercel.ai',
'accept' : '*/*',
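
The rewrite above replaces the server_error flag with a plain retry-and-break
loop. The same pattern in isolation, as a hypothetical helper with an
illustrative URL:

    import requests

    def fetch_with_retries(url: str, max_retries: int = 20):
        for _ in range(max_retries):
            response = requests.post(url, stream=True)
            try:
                response.raise_for_status()
            except requests.HTTPError:
                continue  # server error: retry with a fresh request
            for token in response.iter_content(chunk_size=8):
                yield token.decode()
            break  # streamed successfully: stop retrying
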
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index aa19ade3..ebe01603 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -4,6 +4,7 @@ from .Aichat import Aichat
from .Ails import Ails
from .AiService import AiService
from .AItianhu import AItianhu
+from .AItianhuSpace import AItianhuSpace
from .Aivvm import Aivvm
from .Bard import Bard
from .Bing import Bing
@@ -52,6 +53,7 @@ __all__ = [
'Ails',
'AiService',
'AItianhu',
+ 'AItianhuSpace',
'Aivvm',
'Bard',
'Bing',
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index ea81502f..e8a54f78 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -1,7 +1,9 @@
from __future__ import annotations
import asyncio
-from asyncio import SelectorEventLoop
+import functools
+from asyncio import SelectorEventLoop, AbstractEventLoop
+from concurrent.futures import ThreadPoolExecutor
from abc import ABC, abstractmethod
import browser_cookie3
@@ -27,6 +29,31 @@ class BaseProvider(ABC):
) -> CreateResult:
raise NotImplementedError()
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ *,
+ loop: AbstractEventLoop = None,
+ executor: ThreadPoolExecutor = None,
+ **kwargs
+ ) -> str:
+ if not loop:
+ loop = asyncio.get_event_loop()
+
+ partial_func = functools.partial(
+ cls.create_completion,
+ model,
+ messages,
+ False,
+ **kwargs
+ )
+ response = await loop.run_in_executor(
+ executor,
+ partial_func
+ )
+ return "".join(response)
@classmethod
@property
@@ -127,7 +154,7 @@ def create_event_loop() -> SelectorEventLoop:
except RuntimeError:
return SelectorEventLoop()
raise RuntimeError(
- 'Use "create_async" instead of "create" function in a async loop.')
+ 'Use "create_async" instead of "create" function in a running event loop.')
_cookies = {}
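
The new create_async helper runs the blocking create_completion in a thread
pool and joins the streamed chunks into a single string. A hedged usage
sketch, assuming a concrete BaseProvider subclass such as Aivvm from this
repository:

    import asyncio
    from g4f.Provider import Aivvm

    async def main():
        # Runs Aivvm.create_completion(..., stream=False) in an executor
        answer = await Aivvm.create_async(
            "gpt-3.5-turbo",
            [{"role": "user", "content": "Hello"}],
        )
        print(answer)

    asyncio.run(main())
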
diff --git a/g4f/models.py b/g4f/models.py
index 5cf8d9e9..23fc8e65 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -17,6 +17,7 @@ from .Provider import (
Wewordle,
Yqcloud,
AItianhu,
+ AItianhuSpace,
Aichat,
Myshell,
)
@@ -38,7 +39,7 @@ default = Model(
Wewordle, # Responds with markdown
Yqcloud, # Answers short questions in chinese
ChatBase, # Don't want to answer creatively
- DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, Myshell,
+ DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell,
])
)
@@ -47,7 +48,7 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
- DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, Myshell,
+ DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell,
])
)
@@ -55,7 +56,7 @@ gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = RetryProvider([
- Aivvm, Myshell
+ Aivvm, Myshell, AItianhuSpace,
])
)
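
The RetryProvider lists above are tried in turn when a provider fails. A
sketch of how the updated table is consumed, assuming the top-level
g4f.ChatCompletion entry point of this repository:

    import g4f

    response = g4f.ChatCompletion.create(
        model=g4f.models.gpt_4,  # now retries across Aivvm, Myshell, AItianhuSpace
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response)
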
diff --git a/g4f/requests.py b/g4f/requests.py
new file mode 100644
index 00000000..1a0c612c
--- /dev/null
+++ b/g4f/requests.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+import json, sys
+from aiohttp import StreamReader
+from aiohttp.base_protocol import BaseProtocol
+
+from curl_cffi.requests import AsyncSession
+from curl_cffi.requests.cookies import Request
+from curl_cffi.requests.cookies import Response
+
+
+class StreamResponse:
+ def __init__(self, inner: Response, content: StreamReader, request: Request):
+ self.inner = inner
+ self.content = content
+ self.request = request
+ self.status_code = inner.status_code
+ self.reason = inner.reason
+ self.ok = inner.ok
+
+ async def text(self) -> str:
+ content = await self.content.read()
+ return content.decode()
+
+ def raise_for_status(self):
+ if not self.ok:
+ raise RuntimeError(f"HTTP Error {self.status_code}: {self.reason}")
+
+ async def json(self, **kwargs):
+ return json.loads(await self.content.read(), **kwargs)
+
+
+class StreamRequest:
+ def __init__(self, session: AsyncSession, method: str, url: str, **kwargs):
+ self.session = session
+ self.loop = session.loop
+ self.content = StreamReader(
+ BaseProtocol(session.loop),
+ sys.maxsize,
+ loop=session.loop
+ )
+ self.method = method
+ self.url = url
+ self.options = kwargs
+
+    def on_content(self, data):  # curl content_callback: feed each chunk to the StreamReader
+ if not self.enter.done():
+ self.enter.set_result(None)
+ self.content.feed_data(data)
+
+    def on_done(self, task):  # transfer task finished: signal end of stream
+ self.content.feed_eof()
+
+ async def __aenter__(self) -> StreamResponse:
+ self.curl = await self.session.pop_curl()
+ self.enter = self.session.loop.create_future()
+        request, buffer, header_buffer = self.session._set_curl_options(
+ self.curl,
+ self.method,
+ self.url,
+ content_callback=self.on_content,
+ **self.options
+ )
+ handle = self.session.acurl.add_handle(self.curl)
+ self.handle = self.session.loop.create_task(handle)
+ self.handle.add_done_callback(self.on_done)
+ await self.enter
+ return StreamResponse(
+            self.session._parse_response(self.curl, request, buffer, header_buffer),
+ self.content,
+ request
+ )
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self.handle
+ self.curl.clean_after_perform()
+ self.curl.reset()
+        self.session.push_curl(self.curl)
\ No newline at end of file
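
A usage sketch for the streaming wrapper defined above, with an illustrative
URL; response.content is an aiohttp StreamReader, so it can be iterated line
by line:

    import asyncio
    from g4f.requests import AsyncSession, StreamRequest

    async def main():
        async with AsyncSession(impersonate="chrome107") as session:
            async with StreamRequest(session, "GET", "https://example.com/stream") as response:
                response.raise_for_status()
                async for line in response.content:
                    print(line.decode().rstrip())

    asyncio.run(main())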