author | Heiner Lohaus <hlohaus@users.noreply.github.com> | 2023-11-17 11:16:49 +0100
committer | Heiner Lohaus <hlohaus@users.noreply.github.com> | 2023-11-17 11:16:49 +0100
commit | e4caf1d0ca06b83bc0e272eca0e5330feb417f60 (patch)
tree | a745c83959f02cb903f3a559473e37b379f6af24 /g4f/Provider/needs_auth/ThebApi.py
parent | Update Readme (diff)
Diffstat
-rw-r--r-- | g4f/Provider/needs_auth/ThebApi.py | 77
1 file changed, 77 insertions(+), 0 deletions(-)
diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py
new file mode 100644
index 00000000..0441f352
--- /dev/null
+++ b/g4f/Provider/needs_auth/ThebApi.py
@@ -0,0 +1,77 @@
+from __future__ import annotations
+
+import requests
+
+from ...typing import Any, CreateResult, Messages
+from ..base_provider import BaseProvider
+
+models = {
+    "theb-ai": "TheB.AI",
+    "gpt-3.5-turbo": "GPT-3.5",
+    "gpt-3.5-turbo-16k": "GPT-3.5-16K",
+    "gpt-4-turbo": "GPT-4 Turbo",
+    "gpt-4": "GPT-4",
+    "gpt-4-32k": "GPT-4 32K",
+    "claude-2": "Claude 2",
+    "claude-1": "Claude",
+    "claude-1-100k": "Claude 100K",
+    "claude-instant-1": "Claude Instant",
+    "claude-instant-1-100k": "Claude Instant 100K",
+    "palm-2": "PaLM 2",
+    "palm-2-codey": "Codey",
+    "vicuna-13b-v1.5": "Vicuna v1.5 13B",
+    "llama-2-7b-chat": "Llama 2 7B",
+    "llama-2-13b-chat": "Llama 2 13B",
+    "llama-2-70b-chat": "Llama 2 70B",
+    "code-llama-7b": "Code Llama 7B",
+    "code-llama-13b": "Code Llama 13B",
+    "code-llama-34b": "Code Llama 34B",
+    "qwen-7b-chat": "Qwen 7B"
+}
+
+class ThebApi(BaseProvider):
+    url = "https://theb.ai"
+    working = True
+    needs_auth = True
+
+    @staticmethod
+    def create_completion(
+        model: str,
+        messages: Messages,
+        stream: bool,
+        auth: str,
+        proxy: str = None,
+        **kwargs
+    ) -> CreateResult:
+        if model and model not in models:
+            raise ValueError(f"Model is not supported: {model}")
+        headers = {
+            'accept': 'application/json',
+            'authorization': f'Bearer {auth}',
+            'content-type': 'application/json',
+        }
+        # response = requests.get("https://api.baizhi.ai/v1/models", headers=headers).json()["data"]
+        # models = dict([(m["id"], m["name"]) for m in response])
+        # print(json.dumps(models, indent=4))
+        data: dict[str, Any] = {
+            "model": model if model else "gpt-3.5-turbo",
+            "messages": messages,
+            "stream": False,
+            "model_params": {
+                "system_prompt": kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture."),
+                "temperature": 1,
+                "top_p": 1,
+                **kwargs
+            }
+        }
+        response = requests.post(
+            "https://api.theb.ai/v1/chat/completions",
+            headers=headers,
+            json=data,
+            proxies={"https": proxy}
+        )
+        try:
+            response.raise_for_status()
+            yield response.json()["choices"][0]["message"]["content"]
+        except Exception:
+            raise RuntimeError(f"Response: {next(response.iter_lines()).decode()}")
\ No newline at end of file
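
For context, a minimal sketch of how the provider added in this commit could be exercised directly. The import path mirrors the new file's location, but the prompt and the `auth` value (a TheB.AI API key) are placeholders; in normal use the class would typically be selected through g4f's higher-level interface rather than called this way.

```python
# Hypothetical usage sketch for the ThebApi provider introduced above.
# The API key and prompt are placeholders; create_completion yields the
# completion text once the POST to api.theb.ai succeeds.
from g4f.Provider.needs_auth.ThebApi import ThebApi

for chunk in ThebApi.create_completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    stream=False,
    auth="YOUR_THEB_API_KEY",  # placeholder token
):
    print(chunk)
```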