# g4f/Provider/Vercel.py
from __future__ import annotations

import json, base64, requests, execjs, random, uuid
from aiohttp import ClientSession

from ..typing       import Any, TypedDict, AsyncGenerator
from .base_provider import AsyncGeneratorProvider


class Vercel(AsyncGeneratorProvider):
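    """Streaming provider backed by the Vercel AI SDK playground (sdk.vercel.ai).

    Every request must carry an anti-bot token in the 'custom-encoding'
    header; see get_anti_bot_token below for how it is obtained.
    """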
    url                   = 'https://sdk.vercel.ai'
    working               = True
    supports_gpt_35_turbo = True
    supports_stream       = True
    _anti_bot_token       = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        proxy: str | None = None,
        **kwargs
    ) -> AsyncGenerator:
        if not model:
            model = "gpt-3.5-turbo"
        elif model not in model_info:
            raise ValueError(f"Model is not supported: {model}")

        # The anti-bot token is fetched once and cached on the class; it is
        # only refreshed when a request is rejected (see the retry loop below).
        if not cls._anti_bot_token:
            cls._anti_bot_token = get_anti_bot_token(proxy)

        json_data = {
            'model'       : model_info[model]['id'],
            'messages'    : messages,
            'playgroundId': str(uuid.uuid4()),
            'chatIndex'   : 0
        } | model_info[model]['default_params']

        # Retry up to 100 times; a rejected request usually means the cached
        # anti-bot token has expired and needs refreshing.
        for tries in range(100):
            headers = {
                'authority'         : 'sdk.vercel.ai',
                'accept'            : '*/*',
                'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
                'cache-control'     : 'no-cache',
                'content-type'      : 'application/json',
                'custom-encoding'   : cls._anti_bot_token,
                'origin'            : 'https://sdk.vercel.ai',
                'pragma'            : 'no-cache',
                'referer'           : 'https://sdk.vercel.ai/',
                'sec-ch-ua'         : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
                'sec-ch-ua-mobile'  : '?0',
                'sec-ch-ua-platform': '"macOS"',
                'sec-fetch-dest'    : 'empty',
                'sec-fetch-mode'    : 'cors',
                'sec-fetch-site'    : 'same-origin',
                'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % (
                    random.randint(99, 999),
                    random.randint(99, 999)
                )
            }
            async with ClientSession(
                headers=headers
            ) as session:
                async with session.post(f"{cls.url}/api/generate", proxy=proxy, json=json_data) as response:
                    try:
                        response.raise_for_status()
                    except Exception as e:
                        if tries >= 99:
                            raise e
                        # Maybe the token is the reason for failing
                        cls._anti_bot_token = get_anti_bot_token(proxy)
                        continue
                    async for token in response.content.iter_any():
                        yield token.decode()
                    break

def get_anti_bot_token(proxy: str | None = None) -> str:
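    """Solve the JavaScript challenge served at /openai.jpeg and return the
    token expected in the 'custom-encoding' request header."""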
    headers = {
        'authority'         : 'sdk.vercel.ai',
        'accept'            : '*/*',
        'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'cache-control'     : 'no-cache',
        'pragma'            : 'no-cache',
        'referer'           : 'https://sdk.vercel.ai/',
        'sec-ch-ua'         : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
        'sec-ch-ua-mobile'  : '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest'    : 'empty',
        'sec-fetch-mode'    : 'cors',
        'sec-fetch-site'    : 'same-origin',
        'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % (
            random.randint(99, 999), 
            random.randint(99, 999)
        )
    }
    
    # Fetch the challenge synchronously: this endpoint does not work with
    # async (aiohttp) requests.
    response = requests.get('https://sdk.vercel.ai/openai.jpeg',
                            headers=headers, proxies={"https": proxy}).text

    # The endpoint returns a base64-encoded JSON challenge with three fields:
    # 'c' (a JavaScript function), 'a' (its argument) and 't' (a server token).
    raw_data = json.loads(base64.b64decode(response, validate=True))

    # Evaluate the challenge with execjs; the globalThis and
    # String.prototype.fontcolor shims emulate just enough of a browser.
    js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
        return (%s)(%s)''' % (raw_data['c'], raw_data['a'])

    # Combine the challenge result with the server token, then encode the JSON
    # as UTF-16LE base64, the format expected in the 'custom-encoding' header.
    raw_token = json.dumps({'r': execjs.compile(js_script).call(''), 't': raw_data['t']},
                           separators=(",", ":"))

    return base64.b64encode(raw_token.encode('utf-16le')).decode()

class ModelInfo(TypedDict):
    id: str
    default_params: dict[str, Any]

# Public model name -> provider-qualified id plus the default sampling
# parameters sent with each request.
model_info: dict[str, ModelInfo] = {
    'claude-instant-v1': {
        'id': 'anthropic:claude-instant-v1',
        'default_params': {
            'temperature': 1,
            'maximumLength': 1024,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': ['\n\nHuman:'],
        },
    },
    'claude-v1': {
        'id': 'anthropic:claude-v1',
        'default_params': {
            'temperature': 1,
            'maximumLength': 1024,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': ['\n\nHuman:'],
        },
    },
    'claude-v2': {
        'id': 'anthropic:claude-v2',
        'default_params': {
            'temperature': 1,
            'maximumLength': 1024,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': ['\n\nHuman:'],
        },
    },
    'a16z-infra/llama7b-v2-chat': {
        'id': 'replicate:a16z-infra/llama7b-v2-chat',
        'default_params': {
            'temperature': 0.75,
            'maximumLength': 3000,
            'topP': 1,
            'repetitionPenalty': 1,
        },
    },
    'a16z-infra/llama13b-v2-chat': {
        'id': 'replicate:a16z-infra/llama13b-v2-chat',
        'default_params': {
            'temperature': 0.75,
            'maximumLength': 3000,
            'topP': 1,
            'repetitionPenalty': 1,
        },
    },
    'replicate/llama-2-70b-chat': {
        'id': 'replicate:replicate/llama-2-70b-chat',
        'default_params': {
            'temperature': 0.75,
            'maximumLength': 3000,
            'topP': 1,
            'repetitionPenalty': 1,
        },
    },
    'bigscience/bloom': {
        'id': 'huggingface:bigscience/bloom',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
        },
    },
    'google/flan-t5-xxl': {
        'id': 'huggingface:google/flan-t5-xxl',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
        },
    },
    'EleutherAI/gpt-neox-20b': {
        'id': 'huggingface:EleutherAI/gpt-neox-20b',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
            'stopSequences': [],
        },
    },
    'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {
        'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
        'default_params': {
            'maximumLength': 1024,
            'typicalP': 0.2,
            'repetitionPenalty': 1,
        },
    },
    'OpenAssistant/oasst-sft-1-pythia-12b': {
        'id': 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
        'default_params': {
            'maximumLength': 1024,
            'typicalP': 0.2,
            'repetitionPenalty': 1,
        },
    },
    'bigcode/santacoder': {
        'id': 'huggingface:bigcode/santacoder',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
        },
    },
    'command-light-nightly': {
        'id': 'cohere:command-light-nightly',
        'default_params': {
            'temperature': 0.9,
            'maximumLength': 1024,
            'topP': 1,
            'topK': 0,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'command-nightly': {
        'id': 'cohere:command-nightly',
        'default_params': {
            'temperature': 0.9,
            'maximumLength': 1024,
            'topP': 1,
            'topK': 0,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'gpt-4': {
        'id': 'openai:gpt-4',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 8192,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'gpt-4-0613': {
        'id': 'openai:gpt-4-0613',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 8192,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'code-davinci-002': {
        'id': 'openai:code-davinci-002',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'gpt-3.5-turbo': {
        'id': 'openai:gpt-3.5-turbo',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 4096,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': [],
        },
    },
    'gpt-3.5-turbo-16k': {
        'id': 'openai:gpt-3.5-turbo-16k',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 16280,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': [],
        },
    },
    'gpt-3.5-turbo-16k-0613': {
        'id': 'openai:gpt-3.5-turbo-16k-0613',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 16280,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': [],
        },
    },
    'text-ada-001': {
        'id': 'openai:text-ada-001',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-babbage-001': {
        'id': 'openai:text-babbage-001',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-curie-001': {
        'id': 'openai:text-curie-001',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-davinci-002': {
        'id': 'openai:text-davinci-002',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-davinci-003': {
        'id': 'openai:text-davinci-003',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 4097,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
}
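

# A minimal usage sketch (not part of the original module): stream a reply
# from the default model. Assumes network access to sdk.vercel.ai and a
# JavaScript runtime available to execjs for the anti-bot challenge.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        async for chunk in Vercel.create_async_generator(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello!"}],
            stream=True,
        ):
            print(chunk, end="", flush=True)

    asyncio.run(_demo())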