summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorabc <98614666+xtekky@users.noreply.github.com>2023-09-23 12:16:19 +0200
committerabc <98614666+xtekky@users.noreply.github.com>2023-09-23 12:16:19 +0200
commit6c2e3cc53cdb769f80d7fbb4df418cd4ab8aaabb (patch)
treed57ab49f5f168cf602b31c4508b3f2e232d09aeb
parent~ (diff)
downloadgpt4free-6c2e3cc53cdb769f80d7fbb4df418cd4ab8aaabb.tar
gpt4free-6c2e3cc53cdb769f80d7fbb4df418cd4ab8aaabb.tar.gz
gpt4free-6c2e3cc53cdb769f80d7fbb4df418cd4ab8aaabb.tar.bz2
gpt4free-6c2e3cc53cdb769f80d7fbb4df418cd4ab8aaabb.tar.lz
gpt4free-6c2e3cc53cdb769f80d7fbb4df418cd4ab8aaabb.tar.xz
gpt4free-6c2e3cc53cdb769f80d7fbb4df418cd4ab8aaabb.tar.zst
gpt4free-6c2e3cc53cdb769f80d7fbb4df418cd4ab8aaabb.zip
-rw-r--r--g4f/Provider/Vercel.py8
-rw-r--r--g4f/__init__.py27
2 files changed, 33 insertions, 2 deletions
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index df6a5df6..ca124fec 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -48,9 +48,11 @@ class Vercel(BaseProvider):
'playgroundId': str(uuid.uuid4()),
'chatIndex' : 0} | model_info[model]['default_params']
-
server_error = True
- while server_error:
+ retries = 0
+ max_retries = kwargs.get('max_retries', 20)
+
+ while server_error and not retries > max_retries:
response = requests.post('https://sdk.vercel.ai/api/generate',
headers=headers, json=json_data, stream=True)
@@ -58,6 +60,8 @@ class Vercel(BaseProvider):
if token != b'Internal Server Error':
server_error = False
yield (token.decode())
+
+ retries += 1
def AntiBotToken() -> str:
headers = {
diff --git a/g4f/__init__.py b/g4f/__init__.py
index f3a887f6..c0f70d8a 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -68,3 +68,30 @@ class ChatCompletion:
raise Exception(f"Provider: {provider.__name__} doesn't support create_async")
return await provider.create_async(model.name, messages, **kwargs)
+
+class Completion:
+ @staticmethod
+ def create(
+ model : Union[models.Model, str],
+ prompt : str,
+ provider : Union[type[BaseProvider], None] = None,
+ stream : bool = False, **kwargs) -> Union[CreateResult, str]:
+
+ allowed_models = [
+ 'code-davinci-002',
+ 'text-ada-001',
+ 'text-babbage-001',
+ 'text-curie-001',
+ 'text-davinci-002',
+ 'text-davinci-003'
+ ]
+
+ if model not in allowed_models:
+ raise Exception(f'ValueError: Can\'t use {model} with Completion.create()')
+
+ model, provider = get_model_and_provider(model, provider, stream)
+
+ result = provider.create_completion(model.name,
+ [{"role": "user", "content": prompt}], stream, **kwargs)
+
+    return result if stream else ''.join(result)
\ No newline at end of file