from __future__ import annotations

import codecs

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider


class GptTalkRu(AsyncGeneratorProvider):
    """Async generator provider for the gpttalk.ru chat API."""

    url = "https://gpttalk.ru"
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str | None = None,
        **kwargs
    ) -> AsyncResult:
        # Default to gpt-3.5-turbo when the caller does not pick a model.
        if not model:
            model = "gpt-3.5-turbo"
        # Browser-like headers matching a Chrome XHR sent from the site's
        # own frontend.
        headers = {
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "en-US",
            "Connection": "keep-alive",
            "Content-Type": "application/json",
            "Origin": "https://gpttalk.ru",
            "Referer": "https://gpttalk.ru/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
            "sec-ch-ua": '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
        }
        async with ClientSession(headers=headers) as session:
            data = {
                "model": model,
                "modelType": 1,
                # The endpoint takes the whole message history as "prompt".
                "prompt": messages,
                "responseType": "stream",
            }
            async with session.post(f"{cls.url}/gpt2", json=data, proxy=proxy) as response:
                response.raise_for_status()
                # Decode incrementally so a multi-byte UTF-8 character split
                # across chunk boundaries cannot raise UnicodeDecodeError.
                decoder = codecs.getincrementaldecoder("utf-8")()
                async for chunk in response.content.iter_any():
                    yield decoder.decode(chunk)
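

# Minimal usage sketch (illustrative, not part of the provider): consuming the
# async generator from inside the package via asyncio. The message dicts are
# assumed to follow the OpenAI-style role/content shape that `Messages`
# aliases in this project.
#
#     import asyncio
#
#     async def main() -> None:
#         async for chunk in GptTalkRu.create_async_generator(
#             model="gpt-3.5-turbo",
#             messages=[{"role": "user", "content": "Hello!"}],
#         ):
#             print(chunk, end="", flush=True)
#
#     asyncio.run(main())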