summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.github/workflows/close-inactive-issues.yml31
-rw-r--r--.vscode/settings.json5
-rw-r--r--README.md6
-rw-r--r--g4f/Provider/Providers/DeepAi.py78
-rw-r--r--g4f/Provider/Providers/opchatgpts.py42
-rw-r--r--g4f/Provider/__init__.py1
-rw-r--r--g4f/__init__.py6
-rw-r--r--g4f/models.py468
8 files changed, 376 insertions, 261 deletions
diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml
new file mode 100644
index 00000000..d81b727a
--- /dev/null
+++ b/.github/workflows/close-inactive-issues.yml
@@ -0,0 +1,31 @@
+name: Close inactive issues
+
+on:
+ schedule:
+ - cron: "5 0 * * *"
+
+jobs:
+ close-issues:
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ pull-requests: write
+ steps:
+ - uses: actions/stale@v5
+ with:
+ days-before-issue-stale: 7
+ days-before-issue-close: 7
+
+ days-before-pr-stale: 7
+ days-before-pr-close: 7
+
+ stale-issue-label: "stale"
+ stale-pr-label: "stale"
+
+ stale-issue-message: "Bumping this issue because it has been open for 7 days with no activity. Closing automatically in 7 days unless it becomes active again."
+ close-issue-message: "Closing due to inactivity."
+
+ stale-pr-message: "Bumping this pull request because it has been open for 7 days with no activity. Closing automatically in 7 days unless it becomes active again."
+ close-pr-message: "Closing due to inactivity."
+
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 9ee86e71..ae2a0b0e 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,6 +1,7 @@
{
"[python]": {
- "editor.defaultFormatter": "ms-python.autopep8"
+ "editor.defaultFormatter": "ms-python.black-formatter",
+ "editor.formatOnSave": true,
},
"python.formatting.provider": "none"
-} \ No newline at end of file
+}
diff --git a/README.md b/README.md
index fb267642..d167f3df 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,5 @@
-![image](https://github.com/onlpx/gpt4free-v2/assets/98614666/7886223b-c1d1-4260-82aa-da5741f303bb)
+
+![248433934-7886223b-c1d1-4260-82aa-da5741f303bb](https://github.com/xtekky/gpt4free/assets/98614666/ea012c87-76e0-496a-8ac4-e2de090cc6c9)
By using this repository or any code related to it, you agree to the [legal notice](./LEGAL_NOTICE.md). The author is not responsible for any copies, forks, or reuploads made by other users. This is the author's only account and repository. To prevent impersonation or irresponsible actions, you must comply with the GNU GPL license that this repository uses.
@@ -94,7 +95,7 @@ for message in response:
print(message)
# normal response
-response = g4f.ChatCompletion.create(model=g4f.Model.gpt_4, messages=[
+response = g4f.ChatCompletion.create(model=g4f.models.gpt_4, messages=[
    {"role": "user", "content": "hi"}]) # alternative model setting
print(response)
@@ -186,6 +187,7 @@ for token in chat_completion:
| [b.ai-huan.xyz](https://b.ai-huan.xyz) | `g4f.Provider.BingHuan` | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [wewordle.org](https://wewordle.org/gptapi/v1/android/turbo) | `g4f.Provider.Wewordle` | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [chatgpt.ai](https://chatgpt.ai/gpt-4/) | `g4f.Provider.ChatgptAi` | ❌ | ✔️ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [opchatgpts.net](https://opchatgpts.net) | `g4f.Provider.opchatgpts` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
### Other Models
diff --git a/g4f/Provider/Providers/DeepAi.py b/g4f/Provider/Providers/DeepAi.py
index b34dd60d..27618cbb 100644
--- a/g4f/Provider/Providers/DeepAi.py
+++ b/g4f/Provider/Providers/DeepAi.py
@@ -1,48 +1,74 @@
-import os
import json
-import random
-import hashlib
+import os
import requests
-
+import js2py
from ...typing import sha256, Dict, get_type_hints
-url = 'https://deepai.org'
+
+url = "https://api.deepai.org/"
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
working = True
+token_js = """
+var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
+var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;
+h = Math.round(1E11 * Math.random()) + "";
+f = function () {
+ for (var p = [], q = 0; 64 > q;) p[q] = 0 | 4294967296 * Math.sin(++q % Math.PI);
+
+ return function (t) {
+ var v, y, H, ea = [v = 1732584193, y = 4023233417, ~v, ~y],
+ Z = [],
+ A = unescape(encodeURI(t)) + "\u0080",
+ z = A.length;
+ t = --z / 4 + 2 | 15;
+ for (Z[--t] = 8 * z; ~z;) Z[z >> 2] |= A.charCodeAt(z) << 8 * z--;
+ for (q = A = 0; q < t; q += 16) {
+ for (z = ea; 64 > A; z = [H = z[3], v + ((H = z[0] + [v & y | ~v & H, H & v | ~H & y, v ^ y ^ H, y ^ (v | ~H)][z = A >> 4] + p[A] + ~~Z[q | [A, 5 * A + 1, 3 * A + 5, 7 * A][z] & 15]) << (z = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * z + A++ % 4]) | H >>> -z), v, y]) v = z[1] | 0, y = z[2];
+ for (A = 4; A;) ea[--A] += z[A]
+ }
+ for (t = ""; 32 > A;) t += (ea[A >> 3] >> 4 * (1 ^ A++) & 15).toString(16);
+ return t.split("").reverse().join("")
+ }
+}();
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- def md5(text: str) -> str:
- return hashlib.md5(text.encode()).hexdigest()[::-1]
+"tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x")));
+"""
+uuid4_js = """
+function uuidv4() {
+ for (var a = [], b = 0; 36 > b; b++) a[b] = "0123456789abcdef".substr(Math.floor(16 * Math.random()), 1);
+ a[14] = "4";
+ a[19] = "0123456789abcdef".substr(a[19] & 3 | 8, 1);
+ a[8] = a[13] = a[18] = a[23] = "-";
+ return a.join("")
+}
+uuidv4();"""
- def get_api_key(user_agent: str) -> str:
- part1 = str(random.randint(0, 10**11))
- part2 = md5(user_agent + md5(user_agent + md5(user_agent + part1 + "x")))
-
- return f"tryit-{part1}-{part2}"
+def create_session():
+ url = "https://api.deepai.org/save_chat_session"
- user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
+ payload = {'uuid': js2py.eval_js(uuid4_js), "title":"", "chat_style": "chat", "messages": '[]'}
+ headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"}
- headers = {
- "api-key": get_api_key(user_agent),
- "user-agent": user_agent
- }
+ response = requests.request("POST", url, headers=headers, data=payload)
+ return response
- files = {
- "chat_style": (None, "chat"),
- "chatHistory": (None, json.dumps(messages))
- }
+def _create_completion(model: str, messages:list, stream: bool = True, **kwargs):
+ create_session()
+ url = "https://api.deepai.org/make_me_a_pizza"
- r = requests.post("https://api.deepai.org/chat_response", headers=headers, files=files, stream=True)
+ payload = {'chas_style': "chat", "chatHistory": json.dumps(messages)}
+ api_key = js2py.eval_js(token_js)
+ headers = {"api-key": api_key, "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"}
- for chunk in r.iter_content(chunk_size=None):
- r.raise_for_status()
+ response = requests.request("POST", url, headers=headers, data=payload, stream=True)
+ for chunk in response.iter_content(chunk_size=None):
+ response.raise_for_status()
yield chunk.decode()
-
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
'(%s)' % ', '.join(
[f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/opchatgpts.py b/g4f/Provider/Providers/opchatgpts.py
new file mode 100644
index 00000000..0ff652fb
--- /dev/null
+++ b/g4f/Provider/Providers/opchatgpts.py
@@ -0,0 +1,42 @@
+import os
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://opchatgpts.net'
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+working = True
+
+def _create_completion(model: str, messages: list, stream: bool = False, temperature: float = 0.8, max_tokens: int = 1024, system_prompt: str = "Converse as if you were an AI assistant. Be friendly, creative.", **kwargs):
+
+ data = {
+ 'env': 'chatbot',
+ 'session': 'N/A',
+ 'prompt': "\n",
+ 'context': system_prompt,
+ 'messages': messages,
+ 'newMessage': messages[::-1][0]["content"],
+ 'userName': '<div class="mwai-name-text">User:</div>',
+ 'aiName': '<div class="mwai-name-text">AI:</div>',
+ 'model': 'gpt-3.5-turbo',
+ 'temperature': temperature,
+ 'maxTokens': max_tokens,
+ 'maxResults': 1,
+ 'apiKey': '',
+ 'service': 'openai',
+ 'embeddingsIndex': '',
+ 'stop': ''
+ }
+
+ response = requests.post('https://opchatgpts.net/wp-json/ai-chatbot/v1/chat', json=data).json()
+
+ if response["success"]:
+
+ return response["reply"] # `yield (response["reply"])` doesn't work
+
+ raise Exception("Request failed: " + response)
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join(
+ [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index b64e44f5..ee434400 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -23,6 +23,7 @@ from .Providers import (
BingHuan,
Wewordle,
ChatgptAi,
+ opchatgpts,
)
Palm = Bard
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 09b24b55..e5d3d4bf 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -1,12 +1,12 @@
import sys
from . import Provider
-from g4f.models import Model, ModelUtils
+from g4f import models
logging = False
class ChatCompletion:
@staticmethod
- def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs):
+ def create(model: models.Model | str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs):
kwargs['auth'] = auth
if provider and provider.working == False:
return f'{provider.__name__} is not working'
@@ -19,7 +19,7 @@ class ChatCompletion:
try:
if isinstance(model, str):
try:
- model = ModelUtils.convert[model]
+ model = models.ModelUtils.convert[model]
except KeyError:
raise Exception(f'The model: {model} does not exist')
diff --git a/g4f/models.py b/g4f/models.py
index 95be4849..3a049614 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -1,232 +1,244 @@
-from g4f import Provider
+from types import ModuleType
+from . import Provider
+from dataclasses import dataclass
+@dataclass
class Model:
- class model:
- name: str
- base_provider: str
- best_provider: str
-
- class gpt_35_turbo:
- name: str = 'gpt-3.5-turbo'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Forefront
-
- class gpt_4:
- name: str = 'gpt-4'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Bing
- best_providers: list = [Provider.Bing, Provider.Lockchat]
-
- class claude_instant_v1_100k:
- name: str = 'claude-instant-v1-100k'
- base_provider: str = 'anthropic'
- best_provider: Provider.Provider = Provider.Vercel
-
- class claude_instant_v1:
- name: str = 'claude-instant-v1'
- base_provider: str = 'anthropic'
- best_provider: Provider.Provider = Provider.Vercel
-
- class claude_v1_100k:
- name: str = 'claude-v1-100k'
- base_provider: str = 'anthropic'
- best_provider: Provider.Provider = Provider.Vercel
-
- class claude_v1:
- name: str = 'claude-v1'
- base_provider: str = 'anthropic'
- best_provider: Provider.Provider = Provider.Vercel
-
- class alpaca_7b:
- name: str = 'alpaca-7b'
- base_provider: str = 'replicate'
- best_provider: Provider.Provider = Provider.Vercel
-
- class stablelm_tuned_alpha_7b:
- name: str = 'stablelm-tuned-alpha-7b'
- base_provider: str = 'replicate'
- best_provider: Provider.Provider = Provider.Vercel
-
- class bloom:
- name: str = 'bloom'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class bloomz:
- name: str = 'bloomz'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class flan_t5_xxl:
- name: str = 'flan-t5-xxl'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class flan_ul2:
- name: str = 'flan-ul2'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class gpt_neox_20b:
- name: str = 'gpt-neox-20b'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class oasst_sft_4_pythia_12b_epoch_35:
- name: str = 'oasst-sft-4-pythia-12b-epoch-3.5'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class santacoder:
- name: str = 'santacoder'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class command_medium_nightly:
- name: str = 'command-medium-nightly'
- base_provider: str = 'cohere'
- best_provider: Provider.Provider = Provider.Vercel
-
- class command_xlarge_nightly:
- name: str = 'command-xlarge-nightly'
- base_provider: str = 'cohere'
- best_provider: Provider.Provider = Provider.Vercel
-
- class code_cushman_001:
- name: str = 'code-cushman-001'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class code_davinci_002:
- name: str = 'code-davinci-002'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_ada_001:
- name: str = 'text-ada-001'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_babbage_001:
- name: str = 'text-babbage-001'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_curie_001:
- name: str = 'text-curie-001'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_davinci_002:
- name: str = 'text-davinci-002'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_davinci_003:
- name: str = 'text-davinci-003'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class palm:
- name: str = 'palm'
- base_provider: str = 'google'
- best_provider: Provider.Provider = Provider.Bard
-
-
- """ 'falcon-40b': Model.falcon_40b,
- 'falcon-7b': Model.falcon_7b,
- 'llama-13b': Model.llama_13b,"""
-
- class falcon_40b:
- name: str = 'falcon-40b'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.H2o
-
- class falcon_7b:
- name: str = 'falcon-7b'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.H2o
-
- class llama_13b:
- name: str = 'llama-13b'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.H2o
-
- class gpt_35_turbo_16k:
- name: str = 'gpt-3.5-turbo-16k'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.EasyChat
-
- class gpt_35_turbo_0613:
- name: str = 'gpt-3.5-turbo-0613'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.EasyChat
-
- class gpt_35_turbo_16k_0613:
- name: str = 'gpt-3.5-turbo-16k-0613'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.EasyChat
-
- class gpt_4_32k:
- name: str = 'gpt-4-32k'
- base_provider: str = 'openai'
- best_provider = None
-
- class gpt_4_0613:
- name: str = 'gpt-4-0613'
- base_provider: str = 'openai'
- best_provider = None
-
+ name: str
+ base_provider: str
+ best_provider: ModuleType | None
+
+
+gpt_35_turbo = Model(
+ name="gpt-3.5-turbo",
+ base_provider="openai",
+ best_provider=Provider.Forefront,
+)
+
+gpt_4 = Model(
+ name="gpt-4",
+ base_provider="openai",
+ best_provider=Provider.Bing,
+)
+
+claude_instant_v1_100k = Model(
+ name="claude-instant-v1-100k",
+ base_provider="anthropic",
+ best_provider=Provider.Vercel,
+)
+
+claude_instant_v1 = Model(
+ name="claude-instant-v1",
+ base_provider="anthropic",
+ best_provider=Provider.Vercel,
+)
+
+claude_v1_100k = Model(
+ name="claude-v1-100k",
+ base_provider="anthropic",
+ best_provider=Provider.Vercel,
+)
+
+claude_v1 = Model(
+ name="claude-v1",
+ base_provider="anthropic",
+ best_provider=Provider.Vercel,
+)
+
+alpaca_7b = Model(
+ name="alpaca-7b",
+ base_provider="replicate",
+ best_provider=Provider.Vercel,
+)
+
+stablelm_tuned_alpha_7b = Model(
+ name="stablelm-tuned-alpha-7b",
+ base_provider="replicate",
+ best_provider=Provider.Vercel,
+)
+
+bloom = Model(
+ name="bloom",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+bloomz = Model(
+ name="bloomz",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+flan_t5_xxl = Model(
+ name="flan-t5-xxl",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+flan_ul2 = Model(
+ name="flan-ul2",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+gpt_neox_20b = Model(
+ name="gpt-neox-20b",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+oasst_sft_4_pythia_12b_epoch_35 = Model(
+ name="oasst-sft-4-pythia-12b-epoch-3.5",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+santacoder = Model(
+ name="santacoder",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+command_medium_nightly = Model(
+ name="command-medium-nightly",
+ base_provider="cohere",
+ best_provider=Provider.Vercel,
+)
+
+command_xlarge_nightly = Model(
+ name="command-xlarge-nightly",
+ base_provider="cohere",
+ best_provider=Provider.Vercel,
+)
+
+code_cushman_001 = Model(
+ name="code-cushman-001",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+code_davinci_002 = Model(
+ name="code-davinci-002",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_ada_001 = Model(
+ name="text-ada-001",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_babbage_001 = Model(
+ name="text-babbage-001",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_curie_001 = Model(
+ name="text-curie-001",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_davinci_002 = Model(
+ name="text-davinci-002",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_davinci_003 = Model(
+ name="text-davinci-003",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+palm = Model(
+ name="palm",
+ base_provider="google",
+ best_provider=Provider.Bard,
+)
+
+falcon_40b = Model(
+ name="falcon-40b",
+ base_provider="huggingface",
+ best_provider=Provider.H2o,
+)
+
+falcon_7b = Model(
+ name="falcon-7b",
+ base_provider="huggingface",
+ best_provider=Provider.H2o,
+)
+
+llama_13b = Model(
+ name="llama-13b",
+ base_provider="huggingface",
+ best_provider=Provider.H2o,
+)
+
+gpt_35_turbo_16k = Model(
+ name="gpt-3.5-turbo-16k",
+ base_provider="openai",
+ best_provider=Provider.EasyChat,
+)
+
+gpt_35_turbo_0613 = Model(
+ name="gpt-3.5-turbo-0613",
+ base_provider="openai",
+ best_provider=Provider.EasyChat,
+)
+
+gpt_35_turbo_16k_0613 = Model(
+ name="gpt-3.5-turbo-16k-0613",
+ base_provider="openai",
+ best_provider=Provider.EasyChat,
+)
+
+gpt_4_32k = Model(name="gpt-4-32k", base_provider="openai", best_provider=None)
+
+gpt_4_0613 = Model(name="gpt-4-0613", base_provider="openai", best_provider=None)
+
+
class ModelUtils:
- convert: dict = {
- 'gpt-3.5-turbo': Model.gpt_35_turbo,
- 'gpt-3.6-turbo-16k': Model.gpt_35_turbo_16k,
- 'gpt-3.5-turbo-0613': Model.gpt_35_turbo_0613,
- 'gpt-3.5-turbo-16k-0613': Model.gpt_35_turbo_16k_0613,
-
- 'gpt-4': Model.gpt_4,
- 'gpt-4-32k': Model.gpt_4_32k,
- 'gpt-4-0613': Model.gpt_4_0613,
-
- 'claude-instant-v1-100k': Model.claude_instant_v1_100k,
- 'claude-v1-100k': Model.claude_v1_100k,
- 'claude-instant-v1': Model.claude_instant_v1,
- 'claude-v1': Model.claude_v1,
-
- 'alpaca-7b': Model.alpaca_7b,
- 'stablelm-tuned-alpha-7b': Model.stablelm_tuned_alpha_7b,
-
- 'bloom': Model.bloom,
- 'bloomz': Model.bloomz,
-
- 'flan-t5-xxl': Model.flan_t5_xxl,
- 'flan-ul2': Model.flan_ul2,
-
- 'gpt-neox-20b': Model.gpt_neox_20b,
- 'oasst-sft-4-pythia-12b-epoch-3.5': Model.oasst_sft_4_pythia_12b_epoch_35,
- 'santacoder': Model.santacoder,
-
- 'command-medium-nightly': Model.command_medium_nightly,
- 'command-xlarge-nightly': Model.command_xlarge_nightly,
-
- 'code-cushman-001': Model.code_cushman_001,
- 'code-davinci-002': Model.code_davinci_002,
-
- 'text-ada-001': Model.text_ada_001,
- 'text-babbage-001': Model.text_babbage_001,
- 'text-curie-001': Model.text_curie_001,
- 'text-davinci-002': Model.text_davinci_002,
- 'text-davinci-003': Model.text_davinci_003,
-
- 'palm2': Model.palm,
- 'palm': Model.palm,
- 'google': Model.palm,
- 'google-bard': Model.palm,
- 'google-palm': Model.palm,
- 'bard': Model.palm,
-
- 'falcon-40b': Model.falcon_40b,
- 'falcon-7b': Model.falcon_7b,
- 'llama-13b': Model.llama_13b,
- } \ No newline at end of file
+ convert: dict[str, Model] = {
+ "gpt-3.5-turbo": gpt_35_turbo,
+ "gpt-3.5-turbo-16k": gpt_35_turbo_16k,
+ "gpt-3.5-turbo-0613": gpt_35_turbo_0613,
+ "gpt-3.5-turbo-16k-0613": gpt_35_turbo_16k_0613,
+ "gpt-4": gpt_4,
+ "gpt-4-32k": gpt_4_32k,
+ "gpt-4-0613": gpt_4_0613,
+ "claude-instant-v1-100k": claude_instant_v1_100k,
+ "claude-v1-100k": claude_v1_100k,
+ "claude-instant-v1": claude_instant_v1,
+ "claude-v1": claude_v1,
+ "alpaca-7b": alpaca_7b,
+ "stablelm-tuned-alpha-7b": stablelm_tuned_alpha_7b,
+ "bloom": bloom,
+ "bloomz": bloomz,
+ "flan-t5-xxl": flan_t5_xxl,
+ "flan-ul2": flan_ul2,
+ "gpt-neox-20b": gpt_neox_20b,
+ "oasst-sft-4-pythia-12b-epoch-3.5": oasst_sft_4_pythia_12b_epoch_35,
+ "santacoder": santacoder,
+ "command-medium-nightly": command_medium_nightly,
+ "command-xlarge-nightly": command_xlarge_nightly,
+ "code-cushman-001": code_cushman_001,
+ "code-davinci-002": code_davinci_002,
+ "text-ada-001": text_ada_001,
+ "text-babbage-001": text_babbage_001,
+ "text-curie-001": text_curie_001,
+ "text-davinci-002": text_davinci_002,
+ "text-davinci-003": text_davinci_003,
+ "palm2": palm,
+ "palm": palm,
+ "google": palm,
+ "google-bard": palm,
+ "google-palm": palm,
+ "bard": palm,
+ "falcon-40b": falcon_40b,
+ "falcon-7b": falcon_7b,
+ "llama-13b": llama_13b,
+ }