author     Heiner Lohaus <heiner@lohaus.eu>    2023-10-07 19:10:26 +0200
committer  Heiner Lohaus <heiner@lohaus.eu>    2023-10-07 19:10:26 +0200
commit     3430b04f870d982d7fba34e3b9d6e5cf3bd3b847
tree       0756a28c14a8796c37dd8eee30539ef6aa388ddd
parent     Improve code with ai
-rw-r--r--  README.md                                                               |  3
-rw-r--r--  etc/testing/test_chat_completion.py                                     |  2
-rw-r--r--  g4f/Provider/__init__.py                                                |  1
-rw-r--r--  g4f/Provider/deprecated/Aivvm.py (renamed from g4f/Provider/Aivvm.py)   |  8
-rw-r--r--  g4f/Provider/deprecated/__init__.py                                     |  3
-rw-r--r--  g4f/models.py                                                           | 24
6 files changed, 16 insertions(+), 25 deletions(-)
diff --git a/README.md b/README.md
index 66c91ad6..d79454f0 100644
--- a/README.md
+++ b/README.md
@@ -215,7 +215,6 @@ from g4f.Provider import (
    Acytoo,
    Aichat,
    Ails,
-    Aivvm,
    Bard,
    Bing,
    ChatBase,
@@ -278,7 +277,6 @@ import g4f, asyncio
_providers = [
    g4f.Provider.Aichat,
-    g4f.Provider.Aivvm,
    g4f.Provider.ChatBase,
    g4f.Provider.Bing,
    g4f.Provider.CodeLinkAva,
@@ -371,7 +369,6 @@ if __name__ == "__main__":
| [chat.acytoo.com](https://chat.acytoo.com) | `g4f.Provider.Acytoo` | ✔️ | ❌ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [chat-gpt.org](https://chat-gpt.org/chat) | `g4f.Provider.Aichat` | ✔️ | ❌ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [ai.ls](https://ai.ls) | `g4f.Provider.Ails` | ✔️ | ❌ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chat.aivvm.com](https://chat.aivvm.com) | `g4f.Provider.Aivvm` | ✔️ | ✔️ | ✔️ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [bard.google.com](https://bard.google.com) | `g4f.Provider.Bard` | ❌ | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [www.chatbase.co](https://www.chatbase.co) | `g4f.Provider.ChatBase` | ✔️ | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
diff --git a/etc/testing/test_chat_completion.py b/etc/testing/test_chat_completion.py
index d5fb5b29..ee523b86 100644
--- a/etc/testing/test_chat_completion.py
+++ b/etc/testing/test_chat_completion.py
@@ -19,7 +19,7 @@ print()
async def run_async():
    response = await g4f.ChatCompletion.create_async(
        model=g4f.models.gpt_35_turbo_16k_0613,
-        provider=g4f.Provider.Aivvm,
+        provider=g4f.Provider.GptGod,
        messages=[{"role": "user", "content": "hello!"}],
    )
    print("create_async:", response)
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 697f6185..7609744e 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -6,7 +6,6 @@ from .Aichat import Aichat
from .Ails import Ails
from .AItianhu import AItianhu
from .AItianhuSpace import AItianhuSpace
-from .Aivvm import Aivvm
from .Bing import Bing
from .ChatBase import ChatBase
from .ChatForAi import ChatForAi
diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/deprecated/Aivvm.py
index 1e780953..bceb6faf 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/deprecated/Aivvm.py
@@ -1,8 +1,8 @@
from __future__ import annotations
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider
-from ..typing import AsyncGenerator
+from ...requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider
+from ...typing import AsyncGenerator
# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
models = {
@@ -18,8 +18,6 @@ models = {
class Aivvm(AsyncGeneratorProvider):
    url = 'https://chat.aivvm.com'
-    supports_stream = True
-    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
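As an aside, the comment carried over in Aivvm.py above describes how the `models` mapping was originally generated. A minimal sketch of that request follows, assuming the chat.aivvm.com endpoint is still reachable and returns JSON; since this commit deprecates the provider, it may no longer respond.

import requests

# POST to the endpoint mentioned in the Aivvm.py comment; the response is
# assumed to be JSON describing the available model ids, from which the
# `models` dict in Aivvm.py was built.
response = requests.post("https://chat.aivvm.com/api/models", timeout=10)
response.raise_for_status()
print(response.json())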
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index 5c66c87f..d6b93e8d 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -11,4 +11,5 @@ from .Equing import Equing
from .Wuguokai import Wuguokai
from .V50 import V50
from .FastGpt import FastGpt
-from .ChatgptLogin import ChatgptLogin
\ No newline at end of file
+from .ChatgptLogin import ChatgptLogin
+from .Aivvm import Aivvm
\ No newline at end of file
diff --git a/g4f/models.py b/g4f/models.py
index a91a3df4..ddd39993 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -20,7 +20,6 @@ from .Provider import (
    DeepAi,
    Aichat,
    AiAsk,
-    Aivvm,
    GptGo,
    Ylokh,
    Bard,
@@ -44,7 +43,7 @@ default = Model(
        Yqcloud, # Answers short questions in chinese
        ChatBase, # Don't want to answer creatively
        ChatgptDuo, # Include search results
-        Aibn, Aichat, Aivvm, ChatForAi, ChatgptAi, ChatgptLogin, DeepAi, FreeGpt, GptGo, Myshell, Ylokh,
+        Aibn, Aichat, ChatForAi, ChatgptAi, ChatgptLogin, DeepAi, FreeGpt, GptGo, Myshell, Ylokh,
    ])
)
@@ -53,7 +52,7 @@ gpt_35_long = Model(
    name = 'gpt-3.5-turbo',
    base_provider = 'openai',
    best_provider = RetryProvider([
-        AiAsk, Aibn, Aichat, Aivvm, ChatForAi, ChatgptAi, ChatgptDemo, ChatgptDuo,
+        AiAsk, Aibn, Aichat, ChatForAi, ChatgptAi, ChatgptDemo, ChatgptDuo,
        FreeGpt, GptGo, Liaobots, Myshell, Vitalentum, Ylokh, You, Yqcloud
    ])
)
@@ -63,7 +62,7 @@ gpt_35_turbo = Model(
    name = 'gpt-3.5-turbo',
    base_provider = 'openai',
    best_provider = RetryProvider([
-        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh
+        DeepAi, ChatgptLogin, ChatgptAi, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh
    ])
)
@@ -167,26 +166,23 @@ gpt_35_turbo_16k_0613 = Model(
gpt_35_turbo_0613 = Model(
    name = 'gpt-3.5-turbo-0613',
-    base_provider = 'openai',
-    best_provider = RetryProvider([
-        Aivvm, ChatgptLogin
-    ])
+    base_provider = 'openai'
)

gpt_4_0613 = Model(
    name = 'gpt-4-0613',
-    base_provider = 'openai',
-    best_provider = Aivvm)
+    base_provider = 'openai'
+)

gpt_4_32k = Model(
    name = 'gpt-4-32k',
-    base_provider = 'openai',
-    best_provider = Aivvm)
+    base_provider = 'openai'
+)

gpt_4_32k_0613 = Model(
    name = 'gpt-4-32k-0613',
-    base_provider = 'openai',
-    best_provider = Aivvm)
+    base_provider = 'openai'
+)

text_ada_001 = Model(
    name = 'text-ada-001',