Diffstat (limited to 'g4f/Provider/deprecated/ChatForAi.py')
-rw-r--r--  g4f/Provider/deprecated/ChatForAi.py  55
1 file changed, 0 insertions(+), 55 deletions(-)
diff --git a/g4f/Provider/deprecated/ChatForAi.py b/g4f/Provider/deprecated/ChatForAi.py
deleted file mode 100644
index ab4cd89c..00000000
--- a/g4f/Provider/deprecated/ChatForAi.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from __future__ import annotations
-
-from ...typing import AsyncResult, Messages
-from ...requests import StreamSession
-from ..base_provider import AsyncGeneratorProvider
-
-
-class ChatForAi(AsyncGeneratorProvider):
-    url = "https://chatforai.com"
-    supports_gpt_35_turbo = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        timeout: int = 120,
-        **kwargs
-    ) -> AsyncResult:
-        async with StreamSession(impersonate="chrome107", proxies={"https": proxy}, timeout=timeout) as session:
-            prompt = messages[-1]["content"]
-            data = {
-                "conversationId": "temp",
-                "conversationType": "chat_continuous",
-                "botId": "chat_continuous",
-                "globalSettings":{
-                    "baseUrl": "https://api.openai.com",
-                    "model": model if model else "gpt-3.5-turbo",
-                    "messageHistorySize": 5,
-                    "temperature": 0.7,
-                    "top_p": 1,
-                    **kwargs
-                },
-                "botSettings": {},
-                "prompt": prompt,
-                "messages": messages,
-            }
-            async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
-                response.raise_for_status()
-                async for chunk in response.iter_content():
-                    if b"https://chatforai.store" in chunk:
-                        raise RuntimeError(f"Response: {chunk.decode()}")
-                    yield chunk.decode()
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file