from __future__ import annotations

import random
import json

from ..typing import AsyncResult, Messages
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies

# Each supported model is served from its own upstream domain; a random
# subdomain is generated per request below.
domains = {
    "gpt-3.5-turbo": "aitianhu.space",
    "gpt-4": "aitianhu.website",
}


class AItianhuSpace(AsyncGeneratorProvider):
    url = "https://chat3.aiyunos.top/"
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        domain: str = None,
        cookies: dict = None,
        timeout: int = 120,
        **kwargs
    ) -> AsyncResult:
        if not model:
            model = "gpt-3.5-turbo"
        elif model not in domains:
            raise ValueError(f"Model is not supported: {model}")

        # Pick a random six-character subdomain of the model's domain
        # unless the caller supplied one explicitly.
        if not domain:
            chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
            rand = ''.join(random.choice(chars) for _ in range(6))
            domain = f"{rand}.{domains[model]}"

        # The endpoint rejects requests without valid cookies for the domain.
        if not cookies:
            cookies = get_cookies(domain)
        if not cookies:
            raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies")

        url = f'https://{domain}'
        async with StreamSession(
            proxies={"https": proxy},
            cookies=cookies,
            timeout=timeout,
            impersonate="chrome110",
            verify=False
        ) as session:
            data = {
                "prompt": format_prompt(messages),
                "options": {},
                "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
                "temperature": 0.8,
                "top_p": 1,
                **kwargs
            }
            headers = {
                "Authority": url,
                "Accept": "application/json, text/plain, */*",
                "Origin": url,
                "Referer": f"{url}/"
            }
            async with session.post(f"{url}/api/chat-process", json=data, headers=headers) as response:
                response.raise_for_status()
                # The response is streamed line by line; each line is expected
                # to be a JSON object carrying an incremental completion delta.
                async for line in response.iter_lines():
                    if line == b"<script>":
                        raise RuntimeError("Solve challenge and pass cookies")
                    if b"platform's risk control" in line:
                        raise RuntimeError("Platform's Risk Control")
                    line = json.loads(line)
                    if "detail" in line:
                        content = line["detail"]["choices"][0]["delta"].get("content")
                        if content:
                            yield content
                    else:
                        raise RuntimeError(f"Response: {line}")
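

# A minimal usage sketch, assuming this module sits inside the g4f package
# (the relative imports require running it as a module, e.g.
# `python -m g4f.Provider.AItianhuSpace`) and that browser cookies for the
# generated domain are available to get_cookies().
if __name__ == "__main__":
    import asyncio

    async def demo():
        messages = [{"role": "user", "content": "Say hello."}]
        # create_async_generator is an async generator: iterate it to stream
        # the completion chunk by chunk.
        async for chunk in AItianhuSpace.create_async_generator("gpt-3.5-turbo", messages):
            print(chunk, end="", flush=True)

    asyncio.run(demo())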