author    abc <98614666+xtekky@users.noreply.github.com>  2023-07-24 03:36:23 +0200
committer abc <98614666+xtekky@users.noreply.github.com>  2023-07-24 03:36:23 +0200
commit    a8f24951427bbc6115649e039c059707327f8b57
tree      b367bcb5d62d623eac1c5fdedbea5c1778c741a1
parent    Update setup.py
-rw-r--r--  g4f/Provider/Providers/Vercel.py  117
1 file changed, 8 insertions(+), 109 deletions(-)
diff --git a/g4f/Provider/Providers/Vercel.py b/g4f/Provider/Providers/Vercel.py
index e1e84f9a..03d9be17 100644
--- a/g4f/Provider/Providers/Vercel.py
+++ b/g4f/Provider/Providers/Vercel.py
@@ -42,120 +42,19 @@ vercel_models = {'anthropic:claude-instant-v1': {'id': 'anthropic:claude-instant
 'id': 'huggingface:bigcode/santacoder', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigCode', 'instructions': 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. # the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'santacoder'}, 'cohere:command-medium-nightly': {'id': 'cohere:command-medium-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-medium-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'cohere:command-xlarge-nightly': {'id': 'cohere:command-xlarge-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-xlarge-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:gpt-4': {'id': 'openai:gpt-4', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'gpt-4', 'minBillingTier': 'pro', 'parameters': {'temperature': {'value': 0.7, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:code-cushman-001': {'id': 'openai:code-cushman-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-cushman-001'}, 'openai:code-davinci-002': {'id': 'openai:code-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-davinci-002'}, 'openai:gpt-3.5-turbo': {'id': 'openai:gpt-3.5-turbo', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.7, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-3.5-turbo'}, 'openai:text-ada-001': {'id': 'openai:text-ada-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-ada-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-babbage-001': {'id': 'openai:text-babbage-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-babbage-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-curie-001': {'id': 'openai:text-curie-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-curie-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-002': {'id': 'openai:text-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-002', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-003': {'id': 'openai:text-davinci-003', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-003', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}}
-# based on https://github.com/ading2210/vercel-llm-api // modified
-class Client:
-    def __init__(self):
-        self.session = requests.Session()
-        self.headers = {
-            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110 Safari/537.36',
-            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
-            'Accept-Encoding': 'gzip, deflate, br',
-            'Accept-Language': 'en-US,en;q=0.5',
-            'Te': 'trailers',
-            'Upgrade-Insecure-Requests': '1'
-        }
-        self.session.headers.update(self.headers)
-
-    def get_token(self):
-        b64 = self.session.get('https://sdk.vercel.ai/openai.jpeg').text
-        data = json.loads(base64.b64decode(b64))
-
-        code = 'const globalThis = {data: `sentinel`}; function token() {return (%s)(%s)}' % (
-            data['c'], data['a'])
-
-        token_string = json.dumps(separators=(',', ':'),
-            obj={'r': execjs.compile(code).call('token'), 't': data['t']})
-
-        return base64.b64encode(token_string.encode()).decode()
-
-    def get_default_params(self, model_id):
-        return {key: param['value'] for key, param in vercel_models[model_id]['parameters'].items()}
-
-    def generate(self, model_id: str, prompt: str, params: dict = {}):
-        if not ':' in model_id:
-            model_id = models[model_id]
-
-        defaults = self.get_default_params(model_id)
-
-        payload = defaults | params | {
-            'prompt': prompt,
-            'model': model_id,
-        }
-
-        headers = self.headers | {
-            'Accept-Encoding': 'gzip, deflate, br',
-            'Custom-Encoding': self.get_token(),
-            'Host': 'sdk.vercel.ai',
-            'Origin': 'https://sdk.vercel.ai',
-            'Referrer': 'https://sdk.vercel.ai',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-origin',
-        }
-
-        chunks_queue = queue.Queue()
-        error = None
-        response = None
-
-        def callback(data):
-            chunks_queue.put(data.decode())
-
-        def request_thread():
-            nonlocal response, error
-            for _ in range(3):
-                try:
-                    response = self.session.post('https://sdk.vercel.ai/api/generate',
-                        json=payload, headers=headers, content_callback=callback)
-                    response.raise_for_status()
-
-                except Exception as e:
-                    if _ == 2:
-                        error = e
-
-                    else:
-                        continue
-
-        thread = threading.Thread(target=request_thread, daemon=True)
-        thread.start()
-
-        text = ''
-        index = 0
-        while True:
-            try:
-                chunk = chunks_queue.get(block=True, timeout=0.1)
-
-            except queue.Empty:
-                if error:
-                    raise error
-
-                elif response:
-                    break
-
-                else:
-                    continue
-
-            text += chunk
-            lines = text.split('\n')
-
-            if len(lines) - 1 > index:
-                new = lines[index:-1]
-                for word in new:
-                    yield json.loads(word)
-                index = len(lines) - 1
-
 def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-    conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
-
-    for message in messages:
-        conversation += '%s: %s\n' % (message['role'], message['content'])
-    conversation += 'assistant: '
-    completion = Client().generate(model, conversation)
-    for token in completion:
-        yield token
+    return
+    # conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
+    # for message in messages:
+    #     conversation += '%s: %s\n' % (message['role'], message['content'])
+    # conversation += 'assistant: '
+    # completion = Client().generate(model, conversation)
+    # for token in completion:
+    #     yield token
 params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
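
Note on the removed Client.get_token(): it implemented the anti-bot challenge that gated https://sdk.vercel.ai/api/generate. The sketch below is a minimal standalone reconstruction of that flow from the removed lines above, not a maintained API; it assumes the dependencies the file appears to have used (a requests-compatible session, likely curl_cffi's given the content_callback usage, plus PyExecJS as execjs), and the free function get_token is purely illustrative.

import base64
import json

import execjs    # PyExecJS, used to evaluate the challenge JavaScript
import requests  # stand-in for the requests-compatible session the file used

def get_token(session: requests.Session) -> str:
    # Despite the name, the endpoint returns base64-encoded JSON, not an image.
    b64 = session.get('https://sdk.vercel.ai/openai.jpeg').text
    data = json.loads(base64.b64decode(b64))

    # data['c'] is JavaScript source for a function and data['a'] its argument;
    # evaluating (c)(a) in a JS runtime yields the challenge response.
    code = 'const globalThis = {data: `sentinel`}; function token() {return (%s)(%s)}' % (
        data['c'], data['a'])
    result = execjs.compile(code).call('token')

    # The final value is the compact-encoded JSON {"r": <result>, "t": <server-supplied t>},
    # base64-encoded; the removed generate() sent it as the Custom-Encoding header.
    token_string = json.dumps({'r': result, 't': data['t']}, separators=(',', ':'))
    return base64.b64encode(token_string.encode()).decode()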
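
The removed generate() is also a compact example of a producer/consumer streaming pattern: a daemon thread feeds raw HTTP chunks into a queue.Queue while the generator drains the queue, buffering text until each newline-terminated JSON record is complete. Below is a self-contained sketch of that pattern; stream_json_lines and start_request are illustrative names, not from the original file, and the original's retry loop and error plumbing are omitted for brevity.

import json
import queue
import threading
from typing import Callable, Iterator

def stream_json_lines(start_request: Callable[[Callable[[bytes], None]], None]) -> Iterator[dict]:
    chunks: queue.Queue = queue.Queue()
    done = threading.Event()

    def worker():
        # Run the blocking request; every raw chunk is handed to the queue.
        try:
            start_request(lambda data: chunks.put(data.decode()))
        finally:
            done.set()

    threading.Thread(target=worker, daemon=True).start()

    buffer = ''
    while not (done.is_set() and chunks.empty()):
        try:
            buffer += chunks.get(timeout=0.1)
        except queue.Empty:
            continue
        # Yield every complete newline-terminated JSON record and keep any
        # trailing partial line buffered until its newline arrives.
        *complete, buffer = buffer.split('\n')
        for line in complete:
            if line:
                yield json.loads(line)

A caller would wrap its HTTP client so that start_request invokes the supplied callback with each received chunk, for example by passing it as curl_cffi's content_callback the way the removed code did.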