path: root/g4f/Provider/DarkAI.py
from __future__ import annotations

import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt


class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
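    """Async generator provider for the DarkAI chat endpoint.

    Sends prompts to darkai.foundation/chat and reads the reply back as a
    server-sent-event stream.
    """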
    url = "https://www.aiuncensored.info"
    api_endpoint = "https://darkai.foundation/chat"
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True
    
    default_model = 'gpt-4o'
    models = [
        default_model,    # Uncensored
        'gpt-3.5-turbo',  # Uncensored
        'llama-3-70b',    # Uncensored
        'llama-3-405b',
    ]

    model_aliases = {
        "llama-3.1-70b": "llama-3-70b",
        "llama-3.1-405b": "llama-3-405b",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        # Resolve aliases (e.g. "llama-3.1-70b" -> "llama-3-70b") and fall
        # back to the default model for unknown names.
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        
        # Mimic the aiuncensored.info web client; "accept: text/event-stream"
        # requests a server-sent-event response.
        headers = {
            "accept": "text/event-stream",
            "content-type": "application/json",
            "origin": "https://www.aiuncensored.info",
            "referer": "https://www.aiuncensored.info/",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "query": prompt,
                "model": model,
            }
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                full_text = ""
                async for chunk in response.content:
                    if chunk:
                        try:
                            chunk_str = chunk.decode().strip()
                            if chunk_str.startswith('data: '):
                                chunk_data = json.loads(chunk_str[6:])
                                if chunk_data['event'] == 'text-chunk':
                                    full_text += chunk_data['data']['text']
                                elif chunk_data['event'] == 'stream-end':
                                    if full_text:
                                        yield full_text.strip()
                                    return
                        except json.JSONDecodeError:
                            print(f"Failed to decode JSON: {chunk_str}")
                        except Exception as e:
                            print(f"Error processing chunk: {e}")
                
                if full_text:
                    yield full_text.strip()
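

# Minimal usage sketch, not part of the provider. The message format follows
# g4f's standard role/content dicts; the prompt here is an illustrative
# placeholder.
if __name__ == "__main__":
    import asyncio

    async def demo():
        messages = [{"role": "user", "content": "Hello!"}]
        async for text in DarkAI.create_async_generator("gpt-4o", messages):
            print(text)

    asyncio.run(demo())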