summaryrefslogtreecommitdiffstats
path: root/g4f/Provider/AItianhu.py
blob: e8e5714a6e0fe13ddaf8ab356407d2dc14fc4c77 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
import json

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class AItianhu(BaseProvider):
    """Provider backed by the aitianhu.com ``chat-process`` endpoint.

    Flattens the chat history into a single role-prefixed prompt and posts
    it as JSON; the endpoint streams newline-delimited JSON objects, of
    which the last line carries the complete accumulated answer.
    """

    url = "https://www.aitianhu.com/api/chat-process"
    working = False  # marked non-functional upstream; kept for completeness
    supports_gpt_35_turbo = True

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        """Yield the model's reply for *messages*.

        Args:
            model: Ignored by this endpoint (it always serves gpt-3.5-turbo).
            messages: Chat history as ``{"role": ..., "content": ...}`` dicts.
            stream: Accepted for interface compatibility; the full reply is
                yielded in one piece regardless.
            **kwargs: Optional ``temperature`` (default 0.8), ``top_p``
                (default 1) and ``timeout`` in seconds (default 30).

        Raises:
            requests.HTTPError: If the endpoint returns an error status.
        """
        # Flatten the conversation into "role: content" lines, ending with
        # an open "assistant:" turn for the model to complete.
        prompt = "".join(
            f"{message['role']}: {message['content']}\n" for message in messages
        )
        prompt += "assistant:"

        headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
        }
        data: dict[str, Any] = {
            "prompt": prompt,
            "options": {},
            "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
            "temperature": kwargs.get("temperature", 0.8),
            "top_p": kwargs.get("top_p", 1),
        }
        # Use the class-level URL (previously duplicated as a local literal)
        # and bound the request so a stalled server cannot hang the caller.
        response = requests.post(
            cls.url,
            headers=headers,
            json=data,
            timeout=kwargs.get("timeout", 30),
        )
        response.raise_for_status()
        # The endpoint streams NDJSON; the final line holds the full reply.
        lines = response.text.strip().split("\n")
        result = json.loads(lines[-1])
        yield result["text"]

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the parameters this provider supports.

        NOTE(review): stacking ``@classmethod`` on ``@property`` is deprecated
        in Python 3.11 and removed in 3.13; kept as-is because callers access
        this as an attribute (``Provider.params``).
        """
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("top_p", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"