summaryrefslogtreecommitdiffstats
path: root/g4f/Provider/OpenaiChat.py
blob: 9ca0cd584fe8bc677c0447c3a8dd062e16a7d080 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
# Optional dependency guard: revChatGPT is required for this provider.
has_module = True
try:
    from revChatGPT.V1 import AsyncChatbot
except ImportError:
    # Consumed by OpenaiChat.working below so the provider is marked unusable
    # instead of crashing at import time.
    has_module = False

from .base_provider import AsyncGeneratorProvider, get_cookies, format_prompt
from ..typing import AsyncGenerator
from httpx import AsyncClient
import json


class OpenaiChat(AsyncGeneratorProvider):
    """Provider backed by revChatGPT's AsyncChatbot talking to chat.openai.com."""

    url                   = "https://chat.openai.com"
    needs_auth            = True
    working               = has_module
    supports_gpt_35_turbo = True
    supports_gpt_4        = True
    supports_stream       = True
    _access_token         = None  # token cache shared by all calls

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        access_token: str = None,
        cookies: dict = None,
        **kwargs: dict
    ) -> AsyncGenerator:
        """Yield the assistant's reply incrementally.

        Args:
            model: Model name passed through to revChatGPT.
            messages: Chat history as [{"role": ..., "content": ...}, ...].
            proxy: Optional proxy; a scheme is prepended when missing.
            access_token: Bearer token; when omitted, the cached token is
                used, or a fresh one is fetched via browser cookies.
            cookies: Cookies for chat.openai.com used to fetch a token.

        Yields:
            New text fragments of the streamed response.
        """
        # BUG FIX: the previous default `access_token=_access_token` was
        # evaluated once at class-creation time (always None), so the token
        # cached below was never reused. Read the cache at call time instead.
        if not access_token:
            access_token = cls._access_token

        config = {"access_token": access_token, "model": model}
        if proxy:
            # revChatGPT expects a full proxy URL including a scheme.
            if "://" not in proxy:
                proxy = f"http://{proxy}"
            config["proxy"] = proxy

        bot = AsyncChatbot(
            config=config
        )

        if not access_token:
            cookies = cookies if cookies else get_cookies("chat.openai.com")
            cls._access_token = await get_access_token(bot.session, cookies)
            bot.set_access_token(cls._access_token)

        # revChatGPT yields the *cumulative* message each time; emit only the
        # suffix that is new relative to the previously seen text.
        returned = None
        async for message in bot.ask(format_prompt(messages)):
            message = message["message"]
            if returned:
                if message.startswith(returned):
                    new = message[len(returned):]
                    if new:
                        yield new
            else:
                yield message
            returned = message

        # Best-effort cleanup so conversations don't pile up server-side.
        await bot.delete_conversation(bot.conversation_id)


    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the supported keyword parameters.

        NOTE(review): classmethod+property chaining is deprecated in
        Python 3.11 and removed in 3.13 — confirm target version.
        """
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
    

async def get_access_token(session: "AsyncClient", cookies: dict) -> str:
    """Fetch the ChatGPT bearer token from the auth/session endpoint.

    Args:
        session: An httpx AsyncClient (e.g. the revChatGPT bot's session).
        cookies: Cookies for chat.openai.com carrying the login session.

    Returns:
        The "accessToken" string from the session payload.

    Raises:
        RuntimeError: If the response is not JSON or lacks "accessToken"
            (typically an expired or missing login session); the raw
            response text is included for diagnosis.
    """
    response = await session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
    response.raise_for_status()
    try:
        return response.json()["accessToken"]
    # BUG FIX: a valid-JSON response without "accessToken" previously
    # escaped as a bare KeyError; fold it into the diagnostic RuntimeError.
    except (KeyError, json.decoder.JSONDecodeError) as err:
        raise RuntimeError(f"Response: {response.text}") from err