path: root/g4f/Provider/FreeChatgpt.py

from __future__ import annotations

import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider


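# Aliases mapping g4f model names to the identifiers expected by the upstream API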
models = {
    "claude-v2": "claude-2.0",
    "gemini-pro": "google-gemini-pro"
}

class FreeChatgpt(AsyncGeneratorProvider):
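    """Async generator provider streaming chat completions from the free.chatgpt.org.uk OpenAI-compatible endpoint."""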
    url = "https://free.chatgpt.org.uk"
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    supports_message_history = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
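        # Resolve known aliases; fall back to gpt-3.5-turbo when no model is given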
        if model in models:
            model = models[model]
        elif not model:
            model = "gpt-3.5-turbo"
        headers = {
            "Accept": "application/json, text/event-stream",
            "Content-Type": "application/json",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "en-US,en;q=0.5",
            "Host": "free.chatgpt.org.uk",
            "Referer": f"{cls.url}/",
            "Origin": cls.url,
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        }
        async with ClientSession(headers=headers) as session:
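            # OpenAI-style chat completions payload; values passed via **kwargs override these defaults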
            data = {
                "messages": messages,
                "stream": True,
                "model": model,
                "temperature": 0.5,
                "presence_penalty": 0,
                "frequency_penalty": 0,
                "top_p": 1,
                **kwargs
            }
            async with session.post(f'{cls.url}/api/openai/v1/chat/completions', json=data, proxy=proxy) as response:
                response.raise_for_status()
                # Track whether any content was received so empty responses can be reported
                started = False
                # Parse the SSE stream line by line, yielding content deltas as they arrive
                async for line in response.content:
                    if line.startswith(b"data: [DONE]"):
                        break
                    elif line.startswith(b"data: "):
                        line = json.loads(line[6:])
                        if not line["choices"]:
                            continue
                        chunk = line["choices"][0]["delta"].get("content")
                        if chunk:
                            started = True
                            yield chunk
                if not started:
                    raise RuntimeError("Empty response")
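

# Minimal usage sketch (not part of the original module): iterating the streamed
# chunks with asyncio. Because of the relative imports above, this only runs when
# executed inside the g4f package, e.g. `python -m g4f.Provider.FreeChatgpt`.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        messages = [{"role": "user", "content": "Hello!"}]
        async for chunk in FreeChatgpt.create_async_generator("gpt-3.5-turbo", messages):
            print(chunk, end="", flush=True)

    asyncio.run(_demo())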