summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorMIDORIBIN <aquarion123@gmail.com>2023-07-25 02:56:49 +0200
committerMIDORIBIN <aquarion123@gmail.com>2023-07-25 02:56:49 +0200
commit6dd8a5a1f44c2e480f1a7f8751eff321cae0876b (patch)
tree665b9be6f7bfbe89c06a04befe4a3566d602c3aa
parentMerge pull request #769 from fungamer2-2/main (diff)
downloadgpt4free-6dd8a5a1f44c2e480f1a7f8751eff321cae0876b.tar
gpt4free-6dd8a5a1f44c2e480f1a7f8751eff321cae0876b.tar.gz
gpt4free-6dd8a5a1f44c2e480f1a7f8751eff321cae0876b.tar.bz2
gpt4free-6dd8a5a1f44c2e480f1a7f8751eff321cae0876b.tar.lz
gpt4free-6dd8a5a1f44c2e480f1a7f8751eff321cae0876b.tar.xz
gpt4free-6dd8a5a1f44c2e480f1a7f8751eff321cae0876b.tar.zst
gpt4free-6dd8a5a1f44c2e480f1a7f8751eff321cae0876b.zip
-rw-r--r--.vscode/settings.json5
-rw-r--r--README.md2
-rw-r--r--g4f/__init__.py6
-rw-r--r--g4f/models.py468
4 files changed, 247 insertions, 234 deletions
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 9ee86e71..ae2a0b0e 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,6 +1,7 @@
{
"[python]": {
- "editor.defaultFormatter": "ms-python.autopep8"
+ "editor.defaultFormatter": "ms-python.black-formatter",
+ "editor.formatOnSave": true,
},
"python.formatting.provider": "none"
-} \ No newline at end of file
+}
diff --git a/README.md b/README.md
index fb267642..8d71beb6 100644
--- a/README.md
+++ b/README.md
@@ -94,7 +94,7 @@ for message in response:
print(message)
# normal response
-response = g4f.ChatCompletion.create(model=g4f.Model.gpt_4, messages=[
+response = g4f.ChatCompletion.create(model=g4f.models.gpt_4, messages=[
{"role": "user", "content": "hi"}]) # alterative model setting
print(response)
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 09b24b55..e5d3d4bf 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -1,12 +1,12 @@
import sys
from . import Provider
-from g4f.models import Model, ModelUtils
+from g4f import models
logging = False
class ChatCompletion:
@staticmethod
- def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs):
+ def create(model: models.Model | str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs):
kwargs['auth'] = auth
if provider and provider.working == False:
return f'{provider.__name__} is not working'
@@ -19,7 +19,7 @@ class ChatCompletion:
try:
if isinstance(model, str):
try:
- model = ModelUtils.convert[model]
+ model = models.ModelUtils.convert[model]
except KeyError:
raise Exception(f'The model: {model} does not exist')
diff --git a/g4f/models.py b/g4f/models.py
index 95be4849..3a049614 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -1,232 +1,244 @@
-from g4f import Provider
+from types import ModuleType
+from . import Provider
+from dataclasses import dataclass
+@dataclass
class Model:
- class model:
- name: str
- base_provider: str
- best_provider: str
-
- class gpt_35_turbo:
- name: str = 'gpt-3.5-turbo'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Forefront
-
- class gpt_4:
- name: str = 'gpt-4'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Bing
- best_providers: list = [Provider.Bing, Provider.Lockchat]
-
- class claude_instant_v1_100k:
- name: str = 'claude-instant-v1-100k'
- base_provider: str = 'anthropic'
- best_provider: Provider.Provider = Provider.Vercel
-
- class claude_instant_v1:
- name: str = 'claude-instant-v1'
- base_provider: str = 'anthropic'
- best_provider: Provider.Provider = Provider.Vercel
-
- class claude_v1_100k:
- name: str = 'claude-v1-100k'
- base_provider: str = 'anthropic'
- best_provider: Provider.Provider = Provider.Vercel
-
- class claude_v1:
- name: str = 'claude-v1'
- base_provider: str = 'anthropic'
- best_provider: Provider.Provider = Provider.Vercel
-
- class alpaca_7b:
- name: str = 'alpaca-7b'
- base_provider: str = 'replicate'
- best_provider: Provider.Provider = Provider.Vercel
-
- class stablelm_tuned_alpha_7b:
- name: str = 'stablelm-tuned-alpha-7b'
- base_provider: str = 'replicate'
- best_provider: Provider.Provider = Provider.Vercel
-
- class bloom:
- name: str = 'bloom'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class bloomz:
- name: str = 'bloomz'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class flan_t5_xxl:
- name: str = 'flan-t5-xxl'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class flan_ul2:
- name: str = 'flan-ul2'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class gpt_neox_20b:
- name: str = 'gpt-neox-20b'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class oasst_sft_4_pythia_12b_epoch_35:
- name: str = 'oasst-sft-4-pythia-12b-epoch-3.5'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class santacoder:
- name: str = 'santacoder'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class command_medium_nightly:
- name: str = 'command-medium-nightly'
- base_provider: str = 'cohere'
- best_provider: Provider.Provider = Provider.Vercel
-
- class command_xlarge_nightly:
- name: str = 'command-xlarge-nightly'
- base_provider: str = 'cohere'
- best_provider: Provider.Provider = Provider.Vercel
-
- class code_cushman_001:
- name: str = 'code-cushman-001'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class code_davinci_002:
- name: str = 'code-davinci-002'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_ada_001:
- name: str = 'text-ada-001'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_babbage_001:
- name: str = 'text-babbage-001'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_curie_001:
- name: str = 'text-curie-001'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_davinci_002:
- name: str = 'text-davinci-002'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_davinci_003:
- name: str = 'text-davinci-003'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class palm:
- name: str = 'palm'
- base_provider: str = 'google'
- best_provider: Provider.Provider = Provider.Bard
-
-
- """ 'falcon-40b': Model.falcon_40b,
- 'falcon-7b': Model.falcon_7b,
- 'llama-13b': Model.llama_13b,"""
-
- class falcon_40b:
- name: str = 'falcon-40b'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.H2o
-
- class falcon_7b:
- name: str = 'falcon-7b'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.H2o
-
- class llama_13b:
- name: str = 'llama-13b'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.H2o
-
- class gpt_35_turbo_16k:
- name: str = 'gpt-3.5-turbo-16k'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.EasyChat
-
- class gpt_35_turbo_0613:
- name: str = 'gpt-3.5-turbo-0613'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.EasyChat
-
- class gpt_35_turbo_16k_0613:
- name: str = 'gpt-3.5-turbo-16k-0613'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.EasyChat
-
- class gpt_4_32k:
- name: str = 'gpt-4-32k'
- base_provider: str = 'openai'
- best_provider = None
-
- class gpt_4_0613:
- name: str = 'gpt-4-0613'
- base_provider: str = 'openai'
- best_provider = None
-
+ name: str
+ base_provider: str
+ best_provider: ModuleType | None
+
+
+gpt_35_turbo = Model(
+ name="gpt-3.5-turbo",
+ base_provider="openai",
+ best_provider=Provider.Forefront,
+)
+
+gpt_4 = Model(
+ name="gpt-4",
+ base_provider="openai",
+ best_provider=Provider.Bing,
+)
+
+claude_instant_v1_100k = Model(
+ name="claude-instant-v1-100k",
+ base_provider="anthropic",
+ best_provider=Provider.Vercel,
+)
+
+claude_instant_v1 = Model(
+ name="claude-instant-v1",
+ base_provider="anthropic",
+ best_provider=Provider.Vercel,
+)
+
+claude_v1_100k = Model(
+ name="claude-v1-100k",
+ base_provider="anthropic",
+ best_provider=Provider.Vercel,
+)
+
+claude_v1 = Model(
+ name="claude-v1",
+ base_provider="anthropic",
+ best_provider=Provider.Vercel,
+)
+
+alpaca_7b = Model(
+ name="alpaca-7b",
+ base_provider="replicate",
+ best_provider=Provider.Vercel,
+)
+
+stablelm_tuned_alpha_7b = Model(
+ name="stablelm-tuned-alpha-7b",
+ base_provider="replicate",
+ best_provider=Provider.Vercel,
+)
+
+bloom = Model(
+ name="bloom",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+bloomz = Model(
+ name="bloomz",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+flan_t5_xxl = Model(
+ name="flan-t5-xxl",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+flan_ul2 = Model(
+ name="flan-ul2",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+gpt_neox_20b = Model(
+ name="gpt-neox-20b",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+oasst_sft_4_pythia_12b_epoch_35 = Model(
+ name="oasst-sft-4-pythia-12b-epoch-3.5",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+santacoder = Model(
+ name="santacoder",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+command_medium_nightly = Model(
+ name="command-medium-nightly",
+ base_provider="cohere",
+ best_provider=Provider.Vercel,
+)
+
+command_xlarge_nightly = Model(
+ name="command-xlarge-nightly",
+ base_provider="cohere",
+ best_provider=Provider.Vercel,
+)
+
+code_cushman_001 = Model(
+ name="code-cushman-001",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+code_davinci_002 = Model(
+ name="code-davinci-002",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_ada_001 = Model(
+ name="text-ada-001",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_babbage_001 = Model(
+ name="text-babbage-001",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_curie_001 = Model(
+ name="text-curie-001",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_davinci_002 = Model(
+ name="text-davinci-002",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_davinci_003 = Model(
+ name="text-davinci-003",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+palm = Model(
+ name="palm",
+ base_provider="google",
+ best_provider=Provider.Bard,
+)
+
+falcon_40b = Model(
+ name="falcon-40b",
+ base_provider="huggingface",
+ best_provider=Provider.H2o,
+)
+
+falcon_7b = Model(
+ name="falcon-7b",
+ base_provider="huggingface",
+ best_provider=Provider.H2o,
+)
+
+llama_13b = Model(
+ name="llama-13b",
+ base_provider="huggingface",
+ best_provider=Provider.H2o,
+)
+
+gpt_35_turbo_16k = Model(
+ name="gpt-3.5-turbo-16k",
+ base_provider="openai",
+ best_provider=Provider.EasyChat,
+)
+
+gpt_35_turbo_0613 = Model(
+ name="gpt-3.5-turbo-0613",
+ base_provider="openai",
+ best_provider=Provider.EasyChat,
+)
+
+gpt_35_turbo_16k_0613 = Model(
+ name="gpt-3.5-turbo-16k-0613",
+ base_provider="openai",
+ best_provider=Provider.EasyChat,
+)
+
+gpt_4_32k = Model(name="gpt-4-32k", base_provider="openai", best_provider=None)
+
+gpt_4_0613 = Model(name="gpt-4-0613", base_provider="openai", best_provider=None)
+
+
class ModelUtils:
- convert: dict = {
- 'gpt-3.5-turbo': Model.gpt_35_turbo,
- 'gpt-3.6-turbo-16k': Model.gpt_35_turbo_16k,
- 'gpt-3.5-turbo-0613': Model.gpt_35_turbo_0613,
- 'gpt-3.5-turbo-16k-0613': Model.gpt_35_turbo_16k_0613,
-
- 'gpt-4': Model.gpt_4,
- 'gpt-4-32k': Model.gpt_4_32k,
- 'gpt-4-0613': Model.gpt_4_0613,
-
- 'claude-instant-v1-100k': Model.claude_instant_v1_100k,
- 'claude-v1-100k': Model.claude_v1_100k,
- 'claude-instant-v1': Model.claude_instant_v1,
- 'claude-v1': Model.claude_v1,
-
- 'alpaca-7b': Model.alpaca_7b,
- 'stablelm-tuned-alpha-7b': Model.stablelm_tuned_alpha_7b,
-
- 'bloom': Model.bloom,
- 'bloomz': Model.bloomz,
-
- 'flan-t5-xxl': Model.flan_t5_xxl,
- 'flan-ul2': Model.flan_ul2,
-
- 'gpt-neox-20b': Model.gpt_neox_20b,
- 'oasst-sft-4-pythia-12b-epoch-3.5': Model.oasst_sft_4_pythia_12b_epoch_35,
- 'santacoder': Model.santacoder,
-
- 'command-medium-nightly': Model.command_medium_nightly,
- 'command-xlarge-nightly': Model.command_xlarge_nightly,
-
- 'code-cushman-001': Model.code_cushman_001,
- 'code-davinci-002': Model.code_davinci_002,
-
- 'text-ada-001': Model.text_ada_001,
- 'text-babbage-001': Model.text_babbage_001,
- 'text-curie-001': Model.text_curie_001,
- 'text-davinci-002': Model.text_davinci_002,
- 'text-davinci-003': Model.text_davinci_003,
-
- 'palm2': Model.palm,
- 'palm': Model.palm,
- 'google': Model.palm,
- 'google-bard': Model.palm,
- 'google-palm': Model.palm,
- 'bard': Model.palm,
-
- 'falcon-40b': Model.falcon_40b,
- 'falcon-7b': Model.falcon_7b,
- 'llama-13b': Model.llama_13b,
- } \ No newline at end of file
+ convert: dict[str, Model] = {
+ "gpt-3.5-turbo": gpt_35_turbo,
+ "gpt-3.5-turbo-16k": gpt_35_turbo_16k,
+ "gpt-3.5-turbo-0613": gpt_35_turbo_0613,
+ "gpt-3.5-turbo-16k-0613": gpt_35_turbo_16k_0613,
+ "gpt-4": gpt_4,
+ "gpt-4-32k": gpt_4_32k,
+ "gpt-4-0613": gpt_4_0613,
+ "claude-instant-v1-100k": claude_instant_v1_100k,
+ "claude-v1-100k": claude_v1_100k,
+ "claude-instant-v1": claude_instant_v1,
+ "claude-v1": claude_v1,
+ "alpaca-7b": alpaca_7b,
+ "stablelm-tuned-alpha-7b": stablelm_tuned_alpha_7b,
+ "bloom": bloom,
+ "bloomz": bloomz,
+ "flan-t5-xxl": flan_t5_xxl,
+ "flan-ul2": flan_ul2,
+ "gpt-neox-20b": gpt_neox_20b,
+ "oasst-sft-4-pythia-12b-epoch-3.5": oasst_sft_4_pythia_12b_epoch_35,
+ "santacoder": santacoder,
+ "command-medium-nightly": command_medium_nightly,
+ "command-xlarge-nightly": command_xlarge_nightly,
+ "code-cushman-001": code_cushman_001,
+ "code-davinci-002": code_davinci_002,
+ "text-ada-001": text_ada_001,
+ "text-babbage-001": text_babbage_001,
+ "text-curie-001": text_curie_001,
+ "text-davinci-002": text_davinci_002,
+ "text-davinci-003": text_davinci_003,
+ "palm2": palm,
+ "palm": palm,
+ "google": palm,
+ "google-bard": palm,
+ "google-palm": palm,
+ "bard": palm,
+ "falcon-40b": falcon_40b,
+ "falcon-7b": falcon_7b,
+ "llama-13b": llama_13b,
+ }