Diffstat (limited to 'g4f')
-rw-r--r-- | g4f/Provider/GeminiProChat.py | 56
-rw-r--r-- | g4f/Provider/__init__.py      |  3
-rw-r--r-- | g4f/models.py                 |  6
3 files changed, 59 insertions, 6 deletions
diff --git a/g4f/Provider/GeminiProChat.py b/g4f/Provider/GeminiProChat.py
new file mode 100644
index 00000000..32c86332
--- /dev/null
+++ b/g4f/Provider/GeminiProChat.py
@@ -0,0 +1,56 @@
+from __future__ import annotations
+
+import time
+from hashlib import sha256
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
+
+class GeminiProChat(AsyncGeneratorProvider):
+    url = "https://geminiprochat.com"
+    working = True
+    supports_gpt_35_turbo = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Content-Type": "text/plain;charset=UTF-8",
+            "Referer": "https://geminiprochat.com/",
+            "Origin": "https://geminiprochat.com",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Connection": "keep-alive",
+            "TE": "trailers",
+        }
+        async with ClientSession(headers=headers) as session:
+            timestamp = int(time.time() * 1e3)
+            data = {
+                "messages":[{
+                    "role": "model" if message["role"] == "assistant" else "user",
+                    "parts": [{"text": message["content"]}]
+                } for message in messages],
+                "time": timestamp,
+                "pass": None,
+                "sign": generate_signature(timestamp, messages[-1]["content"]),
+            }
+            async with session.post(f"{cls.url}/api/generate", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content.iter_any():
+                    yield chunk.decode()
+
+def generate_signature(time: int, text: str):
+    message = f'{time}:{text}:9C4680FB-A4E1-6BC7-052A-7F68F9F5AD1F';
+    return sha256(message.encode()).hexdigest()
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 9703983a..19212836 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -31,6 +31,8 @@ from .Chatxyz import Chatxyz
 from .DeepInfra import DeepInfra
 from .FakeGpt import FakeGpt
 from .FreeGpt import FreeGpt
+from .GeekGpt import GeekGpt
+from .GeminiProChat import GeminiProChat
 from .Gpt6 import Gpt6
 from .GPTalk import GPTalk
 from .GptChatly import GptChatly
@@ -53,7 +55,6 @@ from .Vercel import Vercel
 from .Ylokh import Ylokh
 from .You import You
 from .Yqcloud import Yqcloud
-from .GeekGpt import GeekGpt
 from .Bestim import Bestim
 
 import sys
diff --git a/g4f/models.py b/g4f/models.py
index 264cd40e..6103ca03 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -4,7 +4,6 @@ from .Provider import RetryProvider, ProviderType
 from .Provider import (
     Chatgpt4Online,
     ChatgptDemoAi,
-    ChatAnywhere,
     ChatgptNext,
     HuggingChat,
     ChatgptDemo,
@@ -48,8 +47,7 @@ default = Model(
         Bing,
         ChatgptAi, GptGo, GeekGpt,
         You,
-        Chatgpt4Online,
-        ChatAnywhere
+        Chatgpt4Online
     ])
 )
 
@@ -62,7 +60,6 @@ gpt_35_long = Model(
         GeekGpt, FakeGpt,
         Berlin, Koala,
         Chatgpt4Online,
-        ChatAnywhere,
         ChatgptDemoAi,
         OnlineGpt,
         ChatgptNext,
@@ -79,7 +76,6 @@ gpt_35_turbo = Model(
         GptGo, You,
         GptForLove, ChatBase,
         Chatgpt4Online,
-        ChatAnywhere,
     ])
 )
 
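
For reference, a minimal usage sketch of the new provider, not part of the commit: it calls GeminiProChat.create_async_generator directly and streams the decoded chunks. The provider signs each request by SHA-256 hashing the millisecond timestamp, the last message text, and the fixed key shown above. The model name and prompt below are illustrative placeholders; g4f (with this change) and aiohttp are assumed to be installed and geminiprochat.com reachable.

# Sketch only: exercising the new GeminiProChat provider directly.
import asyncio

from g4f.Provider import GeminiProChat

async def main():
    messages = [{"role": "user", "content": "Hello, what can you do?"}]
    # The model argument is accepted to satisfy the create_async_generator
    # signature but is not sent to the endpoint by this provider.
    async for chunk in GeminiProChat.create_async_generator("gemini-pro", messages):
        print(chunk, end="", flush=True)

if __name__ == "__main__":
    asyncio.run(main())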