summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    Tekky <98614666+xtekky@users.noreply.github.com>  2023-09-17 23:22:07 +0200
committer GitHub <noreply@github.com>                       2023-09-17 23:22:07 +0200
commit    489748892b5a22e23f1b77e91fa33500ab119353 (patch)
tree      81b3014035813641350c2a5c01ed35151d8f5fc9
parent~ (diff)
parent. (diff)
download  gpt4free-489748892b5a22e23f1b77e91fa33500ab119353.tar
gpt4free-489748892b5a22e23f1b77e91fa33500ab119353.tar.gz
gpt4free-489748892b5a22e23f1b77e91fa33500ab119353.tar.bz2
gpt4free-489748892b5a22e23f1b77e91fa33500ab119353.tar.lz
gpt4free-489748892b5a22e23f1b77e91fa33500ab119353.tar.xz
gpt4free-489748892b5a22e23f1b77e91fa33500ab119353.tar.zst
gpt4free-489748892b5a22e23f1b77e91fa33500ab119353.zip
-rw-r--r--  README.md                     |  10
-rw-r--r--  g4f/Provider/AItianhu.py      |   2
-rw-r--r--  g4f/Provider/Ails.py          |   2
-rw-r--r--  g4f/Provider/Aivvm.py         |   2
-rw-r--r--  g4f/Provider/Bard.py          |   2
-rw-r--r--  g4f/Provider/ChatBase.py      |   2
-rw-r--r--  g4f/Provider/ChatgptLogin.py  |   2
-rw-r--r--  g4f/Provider/CodeLinkAva.py   |   2
-rw-r--r--  g4f/Provider/DfeHub.py        |   2
-rw-r--r--  g4f/Provider/EasyChat.py      |   2
-rw-r--r--  g4f/Provider/Equing.py        |   2
-rw-r--r--  g4f/Provider/FastGpt.py       |   2
-rw-r--r--  g4f/Provider/GetGpt.py        |   2
-rw-r--r--  g4f/Provider/H2o.py           |   2
-rw-r--r--  g4f/Provider/HuggingChat.py   |   2
-rw-r--r--  g4f/Provider/Liaobots.py      |   2
-rw-r--r--  g4f/Provider/Lockchat.py      |   2
-rw-r--r--  g4f/Provider/OpenAssistant.py |   2
-rw-r--r--  g4f/Provider/OpenaiChat.py    |   2
-rw-r--r--  g4f/Provider/Raycast.py       |   2
-rw-r--r--  g4f/Provider/Theb.py          |   2
-rw-r--r--  g4f/Provider/V50.py           |   2
-rw-r--r--  g4f/Provider/Vitalentum.py    |   2
-rw-r--r--  g4f/Provider/Wuguokai.py      |   2
-rw-r--r--  g4f/Provider/Ylokh.py         |   2
-rw-r--r--  g4f/Provider/base_provider.py |   2
-rw-r--r--  g4f/__init__.py               |   4
-rw-r--r--  g4f/models.py                 | 212
-rw-r--r--  g4f/typing.py                 |   5
-rw-r--r--  tool/provider_init.py         |   4
-rw-r--r--  tool/readme_table.py          |   4
31 files changed, 39 insertions, 250 deletions
diff --git a/README.md b/README.md
index 98757e56..968743f3 100644
--- a/README.md
+++ b/README.md
@@ -145,7 +145,7 @@ import g4f
print(g4f.Provider.Ails.params) # supported args
-# Automatic selection of provider
+# Automatic selection of Provider
# streamed completion
response = g4f.ChatCompletion.create(
@@ -166,7 +166,7 @@ response = g4f.ChatCompletion.create(
print(response)
-# Set with provider
+# Set with Provider
response = g4f.ChatCompletion.create(
model="gpt-3.5-turbo",
provider=g4f.Provider.DeepAi,
@@ -474,8 +474,8 @@ if __name__ == "__main__":
to add another provider, its very simple:
-1. create a new file in [g4f/provider](./g4f/provider) with the name of the Provider
-2. Implement a class that extends [BaseProvider](./g4f/provider/base_provider.py).
+1. create a new file in [g4f/Provider](./g4f/Provider) with the name of the Provider
+2. Implement a class that extends [BaseProvider](./g4f/Provider/base_provider.py).
```py
from .base_provider import BaseProvider
@@ -499,7 +499,7 @@ class HogeService(BaseProvider):
3. Here, you can adjust the settings, for example if the website does support streaming, set `working` to `True`...
4. Write code to request the provider in `create_completion` and `yield` the response, _even if_ its a one-time response, do not hesitate to look at other providers for inspiration
-5. Add the Provider Name in [g4f/provider/**init**.py](./g4f/provider/__init__.py)
+5. Add the Provider Name in [g4f/provider/\_\_init__.py](./g4f/Provider/__init__.py)
```py
from .base_provider import BaseProvider
diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/AItianhu.py
index 2e129896..6aec2065 100644
--- a/g4f/Provider/AItianhu.py
+++ b/g4f/Provider/AItianhu.py
@@ -70,4 +70,4 @@ class AItianhu(AsyncGeneratorProvider):
("top_p", "int"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Ails.py b/g4f/Provider/Ails.py
index d533ae24..9ead63d9 100644
--- a/g4f/Provider/Ails.py
+++ b/g4f/Provider/Ails.py
@@ -85,7 +85,7 @@ class Ails(AsyncGeneratorProvider):
("temperature", "float"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
def _hash(json_data: dict[str, str]) -> SHA256:
diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
index dbfc588d..c65fb6f1 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/Aivvm.py
@@ -75,4 +75,4 @@ class Aivvm(AsyncGeneratorProvider):
("temperature", "float"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Bard.py b/g4f/Provider/Bard.py
index 2137d820..9583dbef 100644
--- a/g4f/Provider/Bard.py
+++ b/g4f/Provider/Bard.py
@@ -88,4 +88,4 @@ class Bard(AsyncProvider):
("proxy", "str"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/ChatBase.py b/g4f/Provider/ChatBase.py
index b98fe565..6e596e2a 100644
--- a/g4f/Provider/ChatBase.py
+++ b/g4f/Provider/ChatBase.py
@@ -59,4 +59,4 @@ class ChatBase(AsyncGeneratorProvider):
("stream", "bool"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return f"g4f.Provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/ChatgptLogin.py b/g4f/Provider/ChatgptLogin.py
index 8b868f8e..165cc5ca 100644
--- a/g4f/Provider/ChatgptLogin.py
+++ b/g4f/Provider/ChatgptLogin.py
@@ -64,4 +64,4 @@ class ChatgptLogin(AsyncProvider):
("temperature", "float"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return f"g4f.Provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/CodeLinkAva.py b/g4f/Provider/CodeLinkAva.py
index 3ab4e264..31a0fabb 100644
--- a/g4f/Provider/CodeLinkAva.py
+++ b/g4f/Provider/CodeLinkAva.py
@@ -60,4 +60,4 @@ class CodeLinkAva(AsyncGeneratorProvider):
("temperature", "float"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return f"g4f.Provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/DfeHub.py b/g4f/Provider/DfeHub.py
index d40e0380..c1f4c059 100644
--- a/g4f/Provider/DfeHub.py
+++ b/g4f/Provider/DfeHub.py
@@ -74,4 +74,4 @@ class DfeHub(BaseProvider):
("top_p", "int"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/EasyChat.py b/g4f/Provider/EasyChat.py
index dae5196d..3b3b6a30 100644
--- a/g4f/Provider/EasyChat.py
+++ b/g4f/Provider/EasyChat.py
@@ -108,4 +108,4 @@ class EasyChat(BaseProvider):
("active_server", "int"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Equing.py b/g4f/Provider/Equing.py
index 261c53c0..7dfdce9a 100644
--- a/g4f/Provider/Equing.py
+++ b/g4f/Provider/Equing.py
@@ -78,4 +78,4 @@ class Equing(BaseProvider):
("stream", "bool"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return f"g4f.Provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/FastGpt.py b/g4f/Provider/FastGpt.py
index ef47f752..e8893965 100644
--- a/g4f/Provider/FastGpt.py
+++ b/g4f/Provider/FastGpt.py
@@ -83,4 +83,4 @@ class FastGpt(ABC):
("stream", "bool"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return f"g4f.Provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/GetGpt.py b/g4f/Provider/GetGpt.py
index b96efaac..6687eb93 100644
--- a/g4f/Provider/GetGpt.py
+++ b/g4f/Provider/GetGpt.py
@@ -66,7 +66,7 @@ class GetGpt(BaseProvider):
('max_tokens', 'int'),
]
param = ', '.join([': '.join(p) for p in params])
- return f'g4f.provider.{cls.__name__} supports: ({param})'
+ return f'g4f.Provider.{cls.__name__} supports: ({param})'
def _encrypt(e: str):
diff --git a/g4f/Provider/H2o.py b/g4f/Provider/H2o.py
index 30090a58..fa837156 100644
--- a/g4f/Provider/H2o.py
+++ b/g4f/Provider/H2o.py
@@ -98,4 +98,4 @@ class H2o(AsyncGeneratorProvider):
("return_full_text", "bool"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 85f879f3..ce21bfda 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -103,4 +103,4 @@ class HuggingChat(AsyncGeneratorProvider):
("proxy", "str"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 33224d2e..e9078651 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -88,4 +88,4 @@ class Liaobots(AsyncGeneratorProvider):
("auth", "str"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Lockchat.py b/g4f/Provider/Lockchat.py
index c15eec8d..c9db82f1 100644
--- a/g4f/Provider/Lockchat.py
+++ b/g4f/Provider/Lockchat.py
@@ -61,4 +61,4 @@ class Lockchat(BaseProvider):
("temperature", "float"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/OpenAssistant.py b/g4f/Provider/OpenAssistant.py
index 3a931597..bef50ffb 100644
--- a/g4f/Provider/OpenAssistant.py
+++ b/g4f/Provider/OpenAssistant.py
@@ -99,4 +99,4 @@ class OpenAssistant(AsyncGeneratorProvider):
("proxy", "str"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/OpenaiChat.py b/g4f/Provider/OpenaiChat.py
index cbe886f0..999d6247 100644
--- a/g4f/Provider/OpenaiChat.py
+++ b/g4f/Provider/OpenaiChat.py
@@ -91,4 +91,4 @@ class OpenaiChat(AsyncProvider):
("cookies", "dict[str, str]")
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return f"g4f.Provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/Raycast.py b/g4f/Provider/Raycast.py
index 7ddc8acd..9d5f3ac9 100644
--- a/g4f/Provider/Raycast.py
+++ b/g4f/Provider/Raycast.py
@@ -69,4 +69,4 @@ class Raycast(BaseProvider):
("auth", "str"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Theb.py b/g4f/Provider/Theb.py
index 72fce3ac..500837f7 100644
--- a/g4f/Provider/Theb.py
+++ b/g4f/Provider/Theb.py
@@ -94,4 +94,4 @@ class Theb(BaseProvider):
("top_p", "int")
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return f"g4f.Provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/V50.py b/g4f/Provider/V50.py
index 81a95ba8..7b873979 100644
--- a/g4f/Provider/V50.py
+++ b/g4f/Provider/V50.py
@@ -64,4 +64,4 @@ class V50(BaseProvider):
("top_p", "int"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Vitalentum.py b/g4f/Provider/Vitalentum.py
index 31ad8b80..61e84409 100644
--- a/g4f/Provider/Vitalentum.py
+++ b/g4f/Provider/Vitalentum.py
@@ -63,4 +63,4 @@ class Vitalentum(AsyncGeneratorProvider):
("temperature", "float"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return f"g4f.Provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/Wuguokai.py b/g4f/Provider/Wuguokai.py
index 0a46f6ee..25cacd3d 100644
--- a/g4f/Provider/Wuguokai.py
+++ b/g4f/Provider/Wuguokai.py
@@ -60,4 +60,4 @@ class Wuguokai(BaseProvider):
("stream", "bool")
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Ylokh.py b/g4f/Provider/Ylokh.py
index 1986b6d3..111ba160 100644
--- a/g4f/Provider/Ylokh.py
+++ b/g4f/Provider/Ylokh.py
@@ -73,4 +73,4 @@ class Ylokh(AsyncGeneratorProvider):
("temperature", "float"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return f"g4f.Provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index 0f499c8c..d6bcc8e7 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -34,7 +34,7 @@ class BaseProvider(ABC):
("stream", "bool"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ return f"g4f.Provider.{cls.__name__} supports: ({param})"
_cookies = {}
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 90b05c85..1e8664c1 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -6,6 +6,7 @@ import random
logging = False
+
class ChatCompletion:
@staticmethod
def create(
@@ -21,9 +22,8 @@ class ChatCompletion:
except KeyError:
raise Exception(f'The model: {model} does not exist')
-
if not provider:
- if isinstance(model.best_provider, tuple):
+ if isinstance(model.best_provider, list):
provider = random.choice(model.best_provider)
else:
provider = model.best_provider
diff --git a/g4f/models.py b/g4f/models.py
deleted file mode 100644
index e095ce7e..00000000
--- a/g4f/models.py
+++ /dev/null
@@ -1,212 +0,0 @@
-from __future__ import annotations
-from dataclasses import dataclass
-from .Provider import BaseProvider, Bard, H2o, Vercel
-from .Provider import Aichat, Aivvm, ChatBase, ChatgptAi, ChatgptLogin, CodeLinkAva
-from .Provider import DeepAi, Vercel, Vitalentum, Ylokh, You, Yqcloud
-from .typing import Union
-
-@dataclass
-class Model:
- name: str
- base_provider: str
- best_provider: Union[type[BaseProvider], tuple[type[BaseProvider]]] = None
-
-# Config for HuggingChat, OpenAssistant
-# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
-default = Model(
- name="",
- base_provider="huggingface"
-)
-
-# GPT-3.5 / GPT-4
-gpt_35_turbo = Model(
- name = 'gpt-3.5-turbo',
- base_provider = 'openai',
- best_provider = (
- Vercel, Aichat, Aivvm, ChatBase, ChatgptAi, ChatgptLogin,
- CodeLinkAva, DeepAi, Vitalentum, Ylokh, You, Yqcloud
- )
-)
-
-gpt_4 = Model(
- name = 'gpt-4',
- base_provider = 'openai',
-)
-
-# Bard
-palm = Model(
- name = 'palm',
- base_provider = 'google',
- best_provider = Bard)
-
-# H2o
-falcon_7b = Model(
- name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
- base_provider = 'huggingface',
- best_provider = H2o)
-
-falcon_40b = Model(
- name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
- base_provider = 'huggingface',
- best_provider = H2o)
-
-llama_13b = Model(
- name = 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b',
- base_provider = 'huggingface',
- best_provider = H2o)
-
-# Vercel
-claude_instant_v1 = Model(
- name = 'anthropic:claude-instant-v1',
- base_provider = 'anthropic',
- best_provider = Vercel)
-
-claude_v1 = Model(
- name = 'anthropic:claude-v1',
- base_provider = 'anthropic',
- best_provider = Vercel)
-
-claude_v2 = Model(
- name = 'anthropic:claude-v2',
- base_provider = 'anthropic',
- best_provider = Vercel)
-
-command_light_nightly = Model(
- name = 'cohere:command-light-nightly',
- base_provider = 'cohere',
- best_provider = Vercel)
-
-command_nightly = Model(
- name = 'cohere:command-nightly',
- base_provider = 'cohere',
- best_provider = Vercel)
-
-gpt_neox_20b = Model(
- name = 'huggingface:EleutherAI/gpt-neox-20b',
- base_provider = 'huggingface',
- best_provider = Vercel)
-
-oasst_sft_1_pythia_12b = Model(
- name = 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
- base_provider = 'huggingface',
- best_provider = Vercel)
-
-oasst_sft_4_pythia_12b_epoch_35 = Model(
- name = 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
- base_provider = 'huggingface',
- best_provider = Vercel)
-
-santacoder = Model(
- name = 'huggingface:bigcode/santacoder',
- base_provider = 'huggingface',
- best_provider = Vercel)
-
-bloom = Model(
- name = 'huggingface:bigscience/bloom',
- base_provider = 'huggingface',
- best_provider = Vercel)
-
-flan_t5_xxl = Model(
- name = 'huggingface:google/flan-t5-xxl',
- base_provider = 'huggingface',
- best_provider = Vercel)
-
-code_davinci_002 = Model(
- name = 'openai:code-davinci-002',
- base_provider = 'openai',
- best_provider = Vercel)
-
-gpt_35_turbo_16k = Model(
- name = 'openai:gpt-3.5-turbo-16k',
- base_provider = 'openai',
- best_provider = Vercel)
-
-gpt_35_turbo_16k_0613 = Model(
- name = 'openai:gpt-3.5-turbo-16k-0613',
- base_provider = 'openai')
-
-gpt_4_0613 = Model(
- name = 'openai:gpt-4-0613',
- base_provider = 'openai',
- best_provider = Vercel)
-
-text_ada_001 = Model(
- name = 'openai:text-ada-001',
- base_provider = 'openai',
- best_provider = Vercel)
-
-text_babbage_001 = Model(
- name = 'openai:text-babbage-001',
- base_provider = 'openai',
- best_provider = Vercel)
-
-text_curie_001 = Model(
- name = 'openai:text-curie-001',
- base_provider = 'openai',
- best_provider = Vercel)
-
-text_davinci_002 = Model(
- name = 'openai:text-davinci-002',
- base_provider = 'openai',
- best_provider = Vercel)
-
-text_davinci_003 = Model(
- name = 'openai:text-davinci-003',
- base_provider = 'openai',
- best_provider = Vercel)
-
-llama13b_v2_chat = Model(
- name = 'replicate:a16z-infra/llama13b-v2-chat',
- base_provider = 'replicate',
- best_provider = Vercel)
-
-llama7b_v2_chat = Model(
- name = 'replicate:a16z-infra/llama7b-v2-chat',
- base_provider = 'replicate',
- best_provider = Vercel)
-
-
-class ModelUtils:
- convert: dict[str, Model] = {
- # GPT-3.5 / GPT-4
- 'gpt-3.5-turbo' : gpt_35_turbo,
- 'gpt-4' : gpt_4,
-
- # Bard
- 'palm2' : palm,
- 'palm' : palm,
- 'google' : palm,
- 'google-bard' : palm,
- 'google-palm' : palm,
- 'bard' : palm,
-
- # H2o
- 'falcon-40b' : falcon_40b,
- 'falcon-7b' : falcon_7b,
- 'llama-13b' : llama_13b,
-
- # Vercel
- 'claude-instant-v1' : claude_instant_v1,
- 'claude-v1' : claude_v1,
- 'claude-v2' : claude_v2,
- 'command-nightly' : command_nightly,
- 'gpt-neox-20b' : gpt_neox_20b,
- 'santacoder' : santacoder,
- 'bloom' : bloom,
- 'flan-t5-xxl' : flan_t5_xxl,
- 'code-davinci-002' : code_davinci_002,
- 'gpt-3.5-turbo-16k' : gpt_35_turbo_16k,
- 'gpt-4-0613' : gpt_4_0613,
- 'text-ada-001' : text_ada_001,
- 'text-babbage-001' : text_babbage_001,
- 'text-curie-001' : text_curie_001,
- 'text-davinci-002' : text_davinci_002,
- 'text-davinci-003' : text_davinci_003,
- 'llama13b-v2-chat' : llama13b_v2_chat,
- 'llama7b-v2-chat' : llama7b_v2_chat,
-
- 'oasst-sft-1-pythia-12b' : oasst_sft_1_pythia_12b,
- 'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35,
- 'command-light-nightly' : command_light_nightly,
- 'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
- } \ No newline at end of file
diff --git a/g4f/typing.py b/g4f/typing.py
index 5f63c222..2a9396a8 100644
--- a/g4f/typing.py
+++ b/g4f/typing.py
@@ -12,9 +12,10 @@ CreateResult = Generator[str, None, None]
__all__ = [
'Any',
'AsyncGenerator',
+ 'CreateResult',
'Generator',
+ 'SHA256',
'Tuple',
'TypedDict',
- 'SHA256',
- 'CreateResult',
+ 'Union',
]
diff --git a/tool/provider_init.py b/tool/provider_init.py
index cd7f9333..fac099ed 100644
--- a/tool/provider_init.py
+++ b/tool/provider_init.py
@@ -3,13 +3,13 @@ from pathlib import Path
def main():
content = create_content()
- with open("g4f/provider/__init__.py", "w", encoding="utf-8") as f:
+ with open("g4f/Provider/__init__.py", "w", encoding="utf-8") as f:
f.write(content)
def create_content():
path = Path()
- paths = path.glob("g4f/provider/*.py")
+ paths = path.glob("g4f/Provider/*.py")
paths = [p for p in paths if p.name not in ["__init__.py", "base_provider.py"]]
classnames = [p.stem for p in paths]
diff --git a/tool/readme_table.py b/tool/readme_table.py
index 9e43b0ae..0598584d 100644
--- a/tool/readme_table.py
+++ b/tool/readme_table.py
@@ -81,7 +81,7 @@ def print_providers():
netloc = urlparse(_provider.url).netloc
website = f"[{netloc}]({_provider.url})"
- provider_name = f"g4f.provider.{_provider.__name__}"
+ provider_name = f"g4f.Provider.{_provider.__name__}"
has_gpt_35 = "✔️" if _provider.supports_gpt_35_turbo else "❌"
has_gpt_4 = "✔️" if _provider.supports_gpt_4 else "❌"
@@ -128,7 +128,7 @@ def print_models():
name = re.split(r":|/", model.name)[-1]
base_provider = base_provider_names[model.base_provider]
- provider_name = f"g4f.provider.{model.best_provider.__name__}"
+ provider_name = f"g4f.Provider.{model.best_provider.__name__}"
provider_url = provider_urls[model.best_provider.__name__]
netloc = urlparse(provider_url).netloc
website = f"[{netloc}]({provider_url})"