Diffstat (limited to '')
-rw-r--r--   g4f/Provider/ChatForAi.py | 53
1 file changed, 53 insertions(+), 0 deletions(-)
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py
new file mode 100644
index 00000000..86b29639
--- /dev/null
+++ b/g4f/Provider/ChatForAi.py
@@ -0,0 +1,53 @@
+from __future__ import annotations
+
+from ..typing import AsyncGenerator
+from ..requests import StreamSession
+from .base_provider import AsyncGeneratorProvider
+
+
+class ChatForAi(AsyncGeneratorProvider):
+    url = "https://chatforai.com"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        timeout: int = 30,
+        **kwargs
+    ) -> AsyncGenerator:
+        async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
+            prompt = messages[-1]["content"]
+            data = {
+                "conversationId": "temp",
+                "conversationType": "chat_continuous",
+                "botId": "chat_continuous",
+                "globalSettings": {
+                    "baseUrl": "https://api.openai.com",
+                    "model": model if model else "gpt-3.5-turbo",
+                    "messageHistorySize": 5,
+                    "temperature": 0.7,
+                    "top_p": 1,
+                    **kwargs
+                },
+                "botSettings": {},
+                "prompt": prompt,
+                "messages": messages,
+            }
+            async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
+                response.raise_for_status()
+                async for chunk in response.iter_content():
+                    yield chunk.decode()
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
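
For context, a minimal usage sketch of the provider this commit adds. It assumes the package is importable as g4f and that ChatForAi is re-exported from g4f.Provider the way other providers in this repository are; neither of those details is shown in this diff:

import asyncio

from g4f.Provider import ChatForAi  # assumes the package re-exports the new class


async def main():
    # create_async_generator streams the reply; each chunk is a decoded str.
    async for chunk in ChatForAi.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
    ):
        print(chunk, end="", flush=True)


asyncio.run(main())

Under the same assumptions, the params classmethod-property gives a one-line capability summary: print(ChatForAi.params) would output "g4f.provider.ChatForAi supports: (model: str, messages: list[dict[str, str]], stream: bool)".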