from __future__ import annotations

import json
import requests
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..requests import StreamSession, raise_for_status

class DeepInfra(AsyncGeneratorProvider, ProviderModelMixin):
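    """Async generator provider for DeepInfra's OpenAI-compatible chat completions endpoint."""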
    url = "https://deepinfra.com"
    working = True
    supports_stream = True
    supports_message_history = True
    default_model = 'meta-llama/Llama-2-70b-chat-hf'
    
    @classmethod
    def get_models(cls):
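        # Fetch DeepInfra's featured model list once and cache it on the class.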
        if not cls.models:
            url = 'https://api.deepinfra.com/models/featured'
            response = requests.get(url)
            response.raise_for_status()
            cls.models = [model['model_name'] for model in response.json()]
        return cls.models

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        timeout: int = 120,
        auth: str = None,
        **kwargs
    ) -> AsyncResult:
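        # Browser-style headers: the request mimics DeepInfra's own web embed and
        # asks for a server-sent-events (text/event-stream) response.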
        headers = {
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US',
            'Connection': 'keep-alive',
            'Content-Type': 'application/json',
            'Origin': 'https://deepinfra.com',
            'Referer': 'https://deepinfra.com/',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-site',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
            'X-Deepinfra-Source': 'web-embed',
            'accept': 'text/event-stream',
            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
        }
        # Optional API token for authenticated requests.
        if auth:
            headers['Authorization'] = f"Bearer {auth}"
            
        async with StreamSession(headers=headers,
            timeout=timeout,
            proxies={"https": proxy},
            impersonate="chrome110"
        ) as session:
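            # OpenAI-compatible payload; streaming is always requested from the API.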
            json_data = {
                'model': cls.get_model(model),
                'messages': messages,
                'temperature': kwargs.get("temperature", 0.7),
                'max_tokens': kwargs.get("max_tokens", 512),
                'stop': kwargs.get("stop", []),
                'stream': True
            }
            async with session.post('https://api.deepinfra.com/v1/openai/chat/completions',
                                    json=json_data) as response:
                await raise_for_status(response)
                first = True
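                # Parse the SSE stream: each "data: " line carries a JSON chunk
                # whose delta holds the next piece of generated text.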
                async for line in response.iter_lines():
                    if not line.startswith(b"data: "):
                        continue
                    # OpenAI-style streams terminate with a "[DONE]" sentinel that is not valid JSON.
                    if line == b"data: [DONE]":
                        break
                    try:
                        json_line = json.loads(line[6:])
                        choices = json_line.get("choices", [{}])
                        finish_reason = choices[0].get("finish_reason")
                        if finish_reason:
                            break
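                        # Trim leading whitespace from the first emitted token only.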
                        token = choices[0].get("delta", {}).get("content")
                        if token:
                            if first:
                                token = token.lstrip()
                            if token:
                                first = False
                                yield token
                    except Exception as e:
                        raise RuntimeError(f"Response: {line}") from e