Diffstat (limited to 'g4f/Provider/Vercel.py')
 g4f/Provider/Vercel.py | 132 ++++++++++++++++++++++++++++++++++------------------------------
 1 file changed, 72 insertions(+), 60 deletions(-)
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index 2d856664..9b1292a0 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -2,9 +2,10 @@ from __future__ import annotations

 import json, base64, requests, execjs, random, uuid

-from ..typing import Messages, TypedDict, CreateResult
+from ..typing import Messages, TypedDict, CreateResult, Any
 from .base_provider import BaseProvider
 from abc import abstractmethod
+from ..debug import logging


 class Vercel(BaseProvider):
@@ -19,14 +20,16 @@ class Vercel(BaseProvider):
         model: str,
         messages: Messages,
         stream: bool,
-        proxy: str = None,
-        **kwargs
-    ) -> CreateResult:
+        proxy: str = None, **kwargs) -> CreateResult:
+
+        if logging: print(model)
+
         if not model:
             model = "gpt-3.5-turbo"
+
         elif model not in model_info:
-            raise ValueError(f"Model are not supported: {model}")
-
+            raise ValueError(f"Vercel does not support {model}")
+
         headers = {
             'authority' : 'sdk.vercel.ai',
             'accept'    : '*/*',
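Note: extracted from the hunk above, the new model-selection flow amounts to the sketch below. resolve_model is a hypothetical name used only for illustration; model_info is the table defined later in this file, and logging is the flag imported from ..debug.

    # Hypothetical helper mirroring the validation flow added above.
    def resolve_model(model: str) -> str:
        if logging:
            print(model)            # debug trace, emitted only when enabled
        if not model:
            return "gpt-3.5-turbo"  # empty model name falls back to the default
        if model not in model_info:
            raise ValueError(f"Vercel does not support {model}")
        return model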
@@ -110,40 +113,49 @@ class ModelInfo(TypedDict):
     default_params: dict[str, Any]

 model_info: dict[str, ModelInfo] = {
-    'claude-instant-v1': {
-        'id': 'anthropic:claude-instant-v1',
-        'default_params': {
-            'temperature': 1,
-            'maximumLength': 1024,
-            'topP': 1,
-            'topK': 1,
-            'presencePenalty': 1,
-            'frequencyPenalty': 1,
-            'stopSequences': ['\n\nHuman:'],
-        },
-    },
-    'claude-v1': {
-        'id': 'anthropic:claude-v1',
-        'default_params': {
-            'temperature': 1,
-            'maximumLength': 1024,
-            'topP': 1,
-            'topK': 1,
-            'presencePenalty': 1,
-            'frequencyPenalty': 1,
-            'stopSequences': ['\n\nHuman:'],
-        },
-    },
-    'claude-v2': {
-        'id': 'anthropic:claude-v2',
+    # 'claude-instant-v1': {
+    #     'id': 'anthropic:claude-instant-v1',
+    #     'default_params': {
+    #         'temperature': 1,
+    #         'maximumLength': 1024,
+    #         'topP': 1,
+    #         'topK': 1,
+    #         'presencePenalty': 1,
+    #         'frequencyPenalty': 1,
+    #         'stopSequences': ['\n\nHuman:'],
+    #     },
+    # },
+    # 'claude-v1': {
+    #     'id': 'anthropic:claude-v1',
+    #     'default_params': {
+    #         'temperature': 1,
+    #         'maximumLength': 1024,
+    #         'topP': 1,
+    #         'topK': 1,
+    #         'presencePenalty': 1,
+    #         'frequencyPenalty': 1,
+    #         'stopSequences': ['\n\nHuman:'],
+    #     },
+    # },
+    # 'claude-v2': {
+    #     'id': 'anthropic:claude-v2',
+    #     'default_params': {
+    #         'temperature': 1,
+    #         'maximumLength': 1024,
+    #         'topP': 1,
+    #         'topK': 1,
+    #         'presencePenalty': 1,
+    #         'frequencyPenalty': 1,
+    #         'stopSequences': ['\n\nHuman:'],
+    #     },
+    # },
+    'replicate/llama70b-v2-chat': {
+        'id': 'replicate:replicate/llama-2-70b-chat',
         'default_params': {
-            'temperature': 1,
-            'maximumLength': 1024,
+            'temperature': 0.75,
+            'maximumLength': 3000,
             'topP': 1,
-            'topK': 1,
-            'presencePenalty': 1,
-            'frequencyPenalty': 1,
-            'stopSequences': ['\n\nHuman:'],
+            'repetitionPenalty': 1,
         },
     },
     'a16z-infra/llama7b-v2-chat': {
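Note: this hunk only swaps entries in the model table; the code that consumes default_params is unchanged and not shown in this diff. Assuming the provider merges per-model defaults with caller overrides in the usual dict-unpacking way, the effect of the new replicate entry would look like:

    # Assumption: defaults are merged with caller kwargs, caller values
    # winning. The merge itself happens outside this diff.
    defaults = model_info['replicate/llama70b-v2-chat']['default_params']
    params = {**defaults, 'temperature': 0.5}
    # -> {'temperature': 0.5, 'maximumLength': 3000,
    #     'topP': 1, 'repetitionPenalty': 1}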
@@ -254,28 +266,28 @@ model_info: dict[str, ModelInfo] = {
             'stopSequences': [],
         },
     },
-    'gpt-4': {
-        'id': 'openai:gpt-4',
-        'default_params': {
-            'temperature': 0.7,
-            'maximumLength': 8192,
-            'topP': 1,
-            'presencePenalty': 0,
-            'frequencyPenalty': 0,
-            'stopSequences': [],
-        },
-    },
-    'gpt-4-0613': {
-        'id': 'openai:gpt-4-0613',
-        'default_params': {
-            'temperature': 0.7,
-            'maximumLength': 8192,
-            'topP': 1,
-            'presencePenalty': 0,
-            'frequencyPenalty': 0,
-            'stopSequences': [],
-        },
-    },
+    # 'gpt-4': {
+    #     'id': 'openai:gpt-4',
+    #     'default_params': {
+    #         'temperature': 0.7,
+    #         'maximumLength': 8192,
+    #         'topP': 1,
+    #         'presencePenalty': 0,
+    #         'frequencyPenalty': 0,
+    #         'stopSequences': [],
+    #     },
+    # },
+    # 'gpt-4-0613': {
+    #     'id': 'openai:gpt-4-0613',
+    #     'default_params': {
+    #         'temperature': 0.7,
+    #         'maximumLength': 8192,
+    #         'topP': 1,
+    #         'presencePenalty': 0,
+    #         'frequencyPenalty': 0,
+    #         'stopSequences': [],
+    #     },
+    # },
     'code-davinci-002': {
         'id': 'openai:code-davinci-002',
         'default_params': {
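Note: after this change, requesting any of the commented-out models ('claude-instant-v1', 'claude-v1', 'claude-v2', 'gpt-4', 'gpt-4-0613') raises the new ValueError. A minimal usage sketch against the patched provider, assuming the standard g4f package layout:

    from g4f.Provider import Vercel

    # Streams tokens for a model still present in model_info...
    for token in Vercel.create_completion(
        model='replicate/llama70b-v2-chat',
        messages=[{'role': 'user', 'content': 'Hello'}],
        stream=True):
        print(token, end='')

    # ...while a removed one now fails fast:
    # Vercel.create_completion(model='gpt-4', messages=[], stream=True)
    # -> ValueError: Vercel does not support gpt-4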