author     Ryan Jordan <ryjordan@gmail.com>  2023-09-06 02:39:57 +0200
committer  GitHub <noreply@github.com>  2023-09-06 02:39:57 +0200
commit     f81e618958318a092ca4c70a1b3ea15260bda97c (patch)
tree       3ce022a8d719011268da7f1a0befc97bf32a60d8
parent     feat(docker): add Docker and Docker Compose support (diff)
parent     ~ | Merge pull request #869 from ahobsonsayers/add-console-script (diff)
-rw-r--r--  README.md  160
-rw-r--r--  g4f/Provider/AItianhu.py  15
-rw-r--r--  g4f/Provider/Acytoo.py  44
-rw-r--r--  g4f/Provider/AiService.py  10
-rw-r--r--  g4f/Provider/Aichat.py  18
-rw-r--r--  g4f/Provider/Ails.py  15
-rw-r--r--  g4f/Provider/Bard.py  45
-rw-r--r--  g4f/Provider/Bing.py  489
-rw-r--r--  g4f/Provider/ChatgptAi.py  68
-rw-r--r--  g4f/Provider/ChatgptLogin.py  120
-rw-r--r--  g4f/Provider/DeepAi.py  18
-rw-r--r--  g4f/Provider/DfeHub.py  59
-rw-r--r--  g4f/Provider/EasyChat.py  87
-rw-r--r--  g4f/Provider/Equing.py  68
-rw-r--r--  g4f/Provider/FastGpt.py  73
-rw-r--r--  g4f/Provider/Forefront.py  35
-rw-r--r--  g4f/Provider/GetGpt.py  89
-rw-r--r--  g4f/Provider/H2o.py  136
-rw-r--r--  g4f/Provider/HuggingChat.py  110
-rw-r--r--  g4f/Provider/Liaobots.py  100
-rw-r--r--  g4f/Provider/Lockchat.py  40
-rw-r--r--  g4f/Provider/Opchatgpts.py  72
-rw-r--r--  g4f/Provider/OpenAssistant.py  102
-rw-r--r--  g4f/Provider/OpenaiChat.py  86
-rw-r--r--  g4f/Provider/Raycast.py  17
-rw-r--r--  g4f/Provider/Theb.py  93
-rw-r--r--  g4f/Provider/V50.py  67
-rw-r--r--  g4f/Provider/Vercel.py  19
-rw-r--r--  g4f/Provider/Wewordle.py  81
-rw-r--r--  g4f/Provider/Wuguokai.py  68
-rw-r--r--  g4f/Provider/You.py  77
-rw-r--r--  g4f/Provider/Yqcloud.py  42
-rw-r--r--  g4f/Provider/__init__.py  122
-rw-r--r--  g4f/Provider/base_provider.py  107
-rw-r--r--  g4f/__init__.py  38
-rw-r--r--  g4f/models.py  278
-rw-r--r--  g4f/typing.py  19
-rw-r--r--  interference/app.py  7
-rw-r--r--  requirements.txt  3
-rw-r--r--  setup.py  16
-rw-r--r--  testing/log_time.py  25
-rw-r--r--  testing/test_chat_completion.py  2
-rw-r--r--  testing/test_needs_auth.py  96
-rw-r--r--  testing/test_providers.py  63
-rw-r--r--  tool/provider_init.py  2
-rw-r--r--  tool/readme_table.py  99
46 files changed, 2030 insertions, 1370 deletions
diff --git a/README.md b/README.md
index 4d2ce6fc..4a84c914 100644
--- a/README.md
+++ b/README.md
@@ -178,26 +178,100 @@ for message in response:
print(message)
```
-providers:
+##### Providers:
```py
from g4f.Provider import (
Acytoo,
Aichat,
Ails,
- AiService,
- AItianhu,
Bard,
Bing,
ChatgptAi,
ChatgptLogin,
DeepAi,
- GetGpt
+ EasyChat,
+ Equing,
+ GetGpt,
+ H2o,
+ HuggingChat,
+ Opchatgpts,
+ OpenAssistant,
+ OpenaiChat,
+ Raycast,
+ Theb,
+ Vercel,
+ Wewordle,
+ Wuguokai,
+ You,
+ Yqcloud
)
+# Usage:
+response = g4f.ChatCompletion.create(..., provider=ProviderName)
+```
+##### Needs cookies:
-# usage:
-response = g4f.ChatCompletion.create(..., provider=ProviderName)
+Many providers need cookies to work.
+For Bing you need a session where you have passed the captcha.
+For other providers you have to log in to your account.
+If you run the g4f package locally,
+cookies from your browsers are read with `get_cookies`.
+Otherwise you have to pass them in the `cookies` parameter:
+```py
+import g4f
+from g4f.Provider import (
+ Bard,
+ Bing,
+ H2o,
+ HuggingChat,
+ OpenAssistant,
+ OpenaiChat,
+ You,
+)
+# Usage:
+response = g4f.ChatCompletion.create(
+ model=g4f.models.default,
+ messages=[{"role": "user", "content": "Hello"}],
+ provider=Bard,
+ #cookies=g4f.get_cookies(".google.com"),
+ cookies={"cookie_name": "value", "cookie_name2": "value2"},
+ auth=True
+)
+```
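For reference, `get_cookies` presumably wraps `browser_cookie3` the same way the inline code this commit removes from `Bard.py` did. A rough sketch (the actual helper lives in `g4f/Provider/base_provider.py`, whose diff is not shown in this excerpt):

```py
import browser_cookie3

# Hypothetical sketch of get_cookies, inferred from the inline browser_cookie3
# loop this commit removes from Bard.py; the real helper is in base_provider.py.
def get_cookies(domain_name: str) -> dict[str, str]:
    return {
        cookie.name: cookie.value
        for cookie in browser_cookie3.load(domain_name=domain_name)
    }
```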
+
+##### Async support:
+
+Run providers asynchronously to improve speed and performance.
+The total execution time then corresponds to that of the slowest provider.
+
+```py
+import g4f, asyncio
+
+async def run_async():
+ _providers = [
+ g4f.Provider.Bard,
+ g4f.Provider.Bing,
+ g4f.Provider.H2o,
+ g4f.Provider.HuggingChat,
+ g4f.Provider.Liaobots,
+ g4f.Provider.OpenAssistant,
+ g4f.Provider.OpenaiChat,
+ g4f.Provider.You,
+ g4f.Provider.Yqcloud,
+ ]
+ responses = [
+ provider.create_async(
+ model=None,
+ messages=[{"role": "user", "content": "Hello"}],
+ )
+ for provider in _providers
+ ]
+ responses = await asyncio.gather(*responses)
+ for idx, provider in enumerate(_providers):
+ print(f"{provider.__name__}:", responses[idx])
+
+asyncio.run(run_async())
```
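Note that with plain `asyncio.gather`, a single failing provider raises and aborts the whole batch. An illustrative variant (not part of this commit) passes `return_exceptions=True` so failures are collected alongside results:

```py
# Sketch: replace the gather call above so one broken provider
# does not cancel the others; exceptions come back as values.
responses = await asyncio.gather(*responses, return_exceptions=True)
for provider, response in zip(_providers, responses):
    if isinstance(response, Exception):
        print(f"{provider.__name__} failed:", response)
    else:
        print(f"{provider.__name__}:", response)
```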
### interference openai-proxy api (use with openai python package)
@@ -247,32 +321,40 @@ if __name__ == "__main__":
### gpt-3.5 / gpt-4
-| Website | Provider | gpt-3.5 | gpt-4 | Streaming | Status | Auth |
-| ----------------------------------------------------------------------------- | ------------------------- | ------- | ----- | --------- | ---------------------------------------------------------- | ---- |
-| [www.aitianhu.com](https://www.aitianhu.com/api/chat-process) | g4f.Provider.AItianhu | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chat.acytoo.com](https://chat.acytoo.com/api/completions) | g4f.Provider.Acytoo | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [aiservice.vercel.app](https://aiservice.vercel.app/api/chat/answer) | g4f.Provider.AiService | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chat-gpt.org](https://chat-gpt.org/chat) | g4f.Provider.Aichat | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [ai.ls](https://ai.ls) | g4f.Provider.Ails | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [bard.google.com](https://bard.google.com) | g4f.Provider.Bard | ❌ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
-| [bing.com](https://bing.com/chat) | g4f.Provider.Bing | ❌ | ✔️ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatgpt.ai](https://chatgpt.ai/gpt-4/) | g4f.Provider.ChatgptAi | ❌ | ✔️ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chatgptlogin.ac](https://chatgptlogin.ac) | g4f.Provider.ChatgptLogin | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [deepai.org](https://deepai.org) | g4f.Provider.DeepAi | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chat.dfehub.com](https://chat.dfehub.com/api/chat) | g4f.Provider.DfeHub | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [free.easychat.work](https://free.easychat.work) | g4f.Provider.EasyChat | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [forefront.com](https://forefront.com) | g4f.Provider.Forefront | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chat.getgpt.world](https://chat.getgpt.world/) | g4f.Provider.GetGpt | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [gpt-gm.h2o.ai](https://gpt-gm.h2o.ai) | g4f.Provider.H2o | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [liaobots.com](https://liaobots.com) | g4f.Provider.Liaobots | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ✔️ |
-| [supertest.lockchat.app](http://supertest.lockchat.app) | g4f.Provider.Lockchat | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [opchatgpts.net](https://opchatgpts.net) | g4f.Provider.Opchatgpts | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [backend.raycast.com](https://backend.raycast.com/api/v1/ai/chat_completions) | g4f.Provider.Raycast | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
-| [theb.ai](https://theb.ai) | g4f.Provider.Theb | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [play.vercel.ai](https://play.vercel.ai) | g4f.Provider.Vercel | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [wewordle.org](https://wewordle.org/gptapi/v1/android/turbo) | g4f.Provider.Wewordle | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [you.com](https://you.com) | g4f.Provider.You | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chat9.yqcloud.top](https://chat9.yqcloud.top/) | g4f.Provider.Yqcloud | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| Website | Provider | gpt-3.5 | gpt-4 | Streaming | Status | Auth |
+| ------ | ------- | ------- | ----- | --------- | ------ | ---- |
+| [chat.acytoo.com](https://chat.acytoo.com/) | g4f.provider.Acytoo | ✔️ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chat-gpt.org](https://chat-gpt.org/chat) | g4f.provider.Aichat | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [ai.ls](https://ai.ls) | g4f.provider.Ails | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [bard.google.com](https://bard.google.com) | g4f.provider.Bard | ❌ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
+| [bing.com](https://bing.com/chat) | g4f.provider.Bing | ❌ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
+| [chatgpt.ai](https://chatgpt.ai/gpt-4/) | g4f.provider.ChatgptAi | ❌ | ✔️ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [opchatgpts.net](https://opchatgpts.net) | g4f.provider.ChatgptLogin | ✔️ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [deepai.org](https://deepai.org) | g4f.provider.DeepAi | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [free.easychat.work](https://free.easychat.work) | g4f.provider.EasyChat | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [next.eqing.tech](https://next.eqing.tech/) | g4f.provider.Equing | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chat.getgpt.world](https://chat.getgpt.world/) | g4f.provider.GetGpt | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [gpt-gm.h2o.ai](https://gpt-gm.h2o.ai) | g4f.provider.H2o | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [huggingface.co](https://huggingface.co/chat/) | g4f.provider.HuggingChat | ❌ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
+| [liaobots.com](https://liaobots.com) | g4f.provider.Liaobots | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [opchatgpts.net](https://opchatgpts.net) | g4f.provider.Opchatgpts | ✔️ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [open-assistant.io](https://open-assistant.io/chat) | g4f.provider.OpenAssistant | ❌ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
+| [chat.openai.com](https://chat.openai.com) | g4f.provider.OpenaiChat | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
+| [raycast.com](https://raycast.com) | g4f.provider.Raycast | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
+| [theb.ai](https://theb.ai) | g4f.provider.Theb | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
+| [play.vercel.ai](https://play.vercel.ai) | g4f.provider.Vercel | ✔️ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [wewordle.org](https://wewordle.org/) | g4f.provider.Wewordle | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat.wuguokai.xyz](https://chat.wuguokai.xyz) | g4f.provider.Wuguokai | ✔️ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [you.com](https://you.com) | g4f.provider.You | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat9.yqcloud.top](https://chat9.yqcloud.top/) | g4f.provider.Yqcloud | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [www.aitianhu.com](https://www.aitianhu.com/) | g4f.provider.AItianhu | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [aiservice.vercel.app](https://aiservice.vercel.app/) | g4f.provider.AiService | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chat.dfehub.com](https://chat.dfehub.com/) | g4f.provider.DfeHub | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chat9.fastgpt.me](https://chat9.fastgpt.me/) | g4f.provider.FastGpt | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [forefront.com](https://forefront.com) | g4f.provider.Forefront | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [supertest.lockchat.app](http://supertest.lockchat.app) | g4f.provider.Lockchat | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [p5.v50.ltd](https://p5.v50.ltd) | g4f.provider.V50 | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+
### Other Models
@@ -360,6 +442,20 @@ if __name__ == "__main__":
<td><a href="https://github.com/HexyeDEV/Telegram-Chatbot-Gpt4Free/issues"><img alt="Issues" src="https://img.shields.io/github/issues/HexyeDEV/Telegram-Chatbot-Gpt4Free?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/HexyeDEV/Telegram-Chatbot-Gpt4Free/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/HexyeDEV/Telegram-Chatbot-Gpt4Free?style=flat-square&labelColor=343b41"/></a></td>
</tr>
+ <tr>
+ <td><a href="https://github.com/Lin-jun-xiang/action-translate-readme"><b>Action Translate Readme</b></a></td>
+ <td><a href="https://github.com/Lin-jun-xiang/action-translate-readme/stargazers"><img alt="Stars" src="https://img.shields.io/github/stars/Lin-jun-xiang/action-translate-readme?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/Lin-jun-xiang/action-translate-readme/network/members"><img alt="Forks" src="https://img.shields.io/github/forks/Lin-jun-xiang/action-translate-readme?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/Lin-jun-xiang/action-translate-readme/issues"><img alt="Issues" src="https://img.shields.io/github/issues/Lin-jun-xiang/action-translate-readme?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/Lin-jun-xiang/action-translate-readme/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/Lin-jun-xiang/action-translate-readme?style=flat-square&labelColor=343b41"/></a></td>
+ </tr>
+ <tr>
+ <td><a href="https://github.com/Lin-jun-xiang/docGPT-streamlit"><b>Langchain Document GPT</b></a></td>
+ <td><a href="https://github.com/Lin-jun-xiang/docGPT-streamlit/stargazers"><img alt="Stars" src="https://img.shields.io/github/stars/Lin-jun-xiang/docGPT-streamlit?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/Lin-jun-xiang/docGPT-streamlit/network/members"><img alt="Forks" src="https://img.shields.io/github/forks/Lin-jun-xiang/docGPT-streamlit?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/Lin-jun-xiang/docGPT-streamlit/issues"><img alt="Issues" src="https://img.shields.io/github/issues/Lin-jun-xiang/docGPT-streamlit?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/Lin-jun-xiang/docGPT-streamlit/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/Lin-jun-xiang/docGPT-streamlit?style=flat-square&labelColor=343b41"/></a></td>
+ </tr>
</tbody>
</table>
diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/AItianhu.py
index e8e5714a..0982d3c6 100644
--- a/g4f/Provider/AItianhu.py
+++ b/g4f/Provider/AItianhu.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import json
import requests
@@ -7,7 +9,7 @@ from .base_provider import BaseProvider
class AItianhu(BaseProvider):
- url = "https://www.aitianhu.com/api/chat-process"
+ url = "https://www.aitianhu.com/"
working = False
supports_gpt_35_turbo = True
@@ -15,13 +17,10 @@ class AItianhu(BaseProvider):
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
- base = ""
- for message in messages:
- base += "%s: %s\n" % (message["role"], message["content"])
- base += "assistant:"
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ base = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
+ base += "\nassistant: "
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
diff --git a/g4f/Provider/Acytoo.py b/g4f/Provider/Acytoo.py
index 2edd9efd..48a3a344 100644
--- a/g4f/Provider/Acytoo.py
+++ b/g4f/Provider/Acytoo.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import time
import requests
@@ -7,42 +9,42 @@ from .base_provider import BaseProvider
class Acytoo(BaseProvider):
- url = "https://chat.acytoo.com/api/completions"
- working = True
+ url = 'https://chat.acytoo.com/'
+ working = True
supports_gpt_35_turbo = True
- @staticmethod
+ @classmethod
def create_completion(
+ cls,
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
- headers = _create_header()
- payload = _create_payload(messages, kwargs.get('temperature', 0.5))
-
- url = "https://chat.acytoo.com/api/completions"
- response = requests.post(url=url, headers=headers, json=payload)
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ response = requests.post(f'{cls.url}api/completions',
+ headers=_create_header(), json=_create_payload(messages, kwargs.get('temperature', 0.5)))
+
response.raise_for_status()
- response.encoding = "utf-8"
+ response.encoding = 'utf-8'
+
yield response.text
def _create_header():
return {
- "accept": "*/*",
- "content-type": "application/json",
+ 'accept': '*/*',
+ 'content-type': 'application/json',
}
def _create_payload(messages: list[dict[str, str]], temperature):
payload_messages = [
- message | {"createdAt": int(time.time()) * 1000} for message in messages
+ message | {'createdAt': int(time.time()) * 1000} for message in messages
]
+
return {
- "key": "",
- "model": "gpt-3.5-turbo",
- "messages": payload_messages,
- "temperature": temperature,
- "password": "",
- }
+ 'key' : '',
+ 'model' : 'gpt-3.5-turbo',
+ 'messages' : payload_messages,
+ 'temperature' : temperature,
+ 'password' : ''
+ } \ No newline at end of file
diff --git a/g4f/Provider/AiService.py b/g4f/Provider/AiService.py
index 2c0d5de2..2b5a6e7d 100644
--- a/g4f/Provider/AiService.py
+++ b/g4f/Provider/AiService.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import requests
from ..typing import Any, CreateResult
@@ -5,7 +7,7 @@ from .base_provider import BaseProvider
class AiService(BaseProvider):
- url = "https://aiservice.vercel.app/api/chat/answer"
+ url = "https://aiservice.vercel.app/"
working = False
supports_gpt_35_turbo = True
@@ -16,10 +18,8 @@ class AiService(BaseProvider):
stream: bool,
**kwargs: Any,
) -> CreateResult:
- base = ""
- for message in messages:
- base += "%s: %s\n" % (message["role"], message["content"])
- base += "assistant:"
+ base = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
+ base += "\nassistant: "
headers = {
"accept": "*/*",
diff --git a/g4f/Provider/Aichat.py b/g4f/Provider/Aichat.py
index a1d90db7..59640533 100644
--- a/g4f/Provider/Aichat.py
+++ b/g4f/Provider/Aichat.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import requests
from ..typing import Any, CreateResult
@@ -5,22 +7,18 @@ from .base_provider import BaseProvider
class Aichat(BaseProvider):
- url = "https://chat-gpt.org/chat"
- working = True
+ url = "https://chat-gpt.org/chat"
+ working = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
- base = ""
-
- for message in messages:
- base += "%s: %s\n" % (message["role"], message["content"])
- base += "assistant:"
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ chat = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
+ chat += "\nassistant: "
headers = {
"authority": "chat-gpt.org",
diff --git a/g4f/Provider/Ails.py b/g4f/Provider/Ails.py
index 52b3745d..4eb21729 100644
--- a/g4f/Provider/Ails.py
+++ b/g4f/Provider/Ails.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import hashlib
import json
import time
@@ -11,18 +13,17 @@ from .base_provider import BaseProvider
class Ails(BaseProvider):
- url: str = "https://ai.ls"
- working = True
- supports_stream = True
+ url: str = "https://ai.ls"
+ working = True
+ supports_stream = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
+ stream: bool, **kwargs: Any) -> CreateResult:
+
headers = {
"authority": "api.caipacity.com",
"accept": "*/*",
@@ -72,6 +73,8 @@ class Ails(BaseProvider):
if b"content" in token:
completion_chunk = json.loads(token.decode().replace("data: ", ""))
token = completion_chunk["choices"][0]["delta"].get("content")
+ if "ai.ls" in token.lower() or "ai.ci" in token.lower():
+ raise Exception("Response Error: " + token)
if token != None:
yield token
diff --git a/g4f/Provider/Bard.py b/g4f/Provider/Bard.py
index cbe728cd..2137d820 100644
--- a/g4f/Provider/Bard.py
+++ b/g4f/Provider/Bard.py
@@ -1,51 +1,33 @@
+from __future__ import annotations
+
import json
import random
import re
-import browser_cookie3
from aiohttp import ClientSession
-import asyncio
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from .base_provider import AsyncProvider, format_prompt, get_cookies
+
-class Bard(BaseProvider):
+class Bard(AsyncProvider):
url = "https://bard.google.com"
needs_auth = True
working = True
@classmethod
- def create_completion(
- cls,
- model: str,
- messages: list[dict[str, str]],
- stream: bool,
- proxy: str = None,
- cookies: dict = {},
- **kwargs: Any,
- ) -> CreateResult:
- yield asyncio.run(cls.create_async(str, messages, proxy, cookies))
-
- @classmethod
async def create_async(
cls,
model: str,
messages: list[dict[str, str]],
proxy: str = None,
- cookies: dict = {},
- **kwargs: Any,
+ cookies: dict = None,
+ **kwargs
) -> str:
- if not cookies:
- for cookie in browser_cookie3.load(domain_name='.google.com'):
- cookies[cookie.name] = cookie.value
-
- formatted = "\n".join(
- ["%s: %s" % (message["role"], message["content"]) for message in messages]
- )
- prompt = f"{formatted}\nAssistant:"
-
+ prompt = format_prompt(messages)
if proxy and "://" not in proxy:
proxy = f"http://{proxy}"
+ if not cookies:
+ cookies = get_cookies(".google.com")
headers = {
'authority': 'bard.google.com',
@@ -62,10 +44,11 @@ class Bard(BaseProvider):
) as session:
async with session.get(cls.url, proxy=proxy) as response:
text = await response.text()
-
+
match = re.search(r'SNlM0e\":\"(.*?)\"', text)
- if match:
- snlm0e = match.group(1)
+ if not match:
+ raise RuntimeError("No snlm0e value.")
+ snlm0e = match.group(1)
params = {
'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
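`format_prompt` comes from the reworked `base_provider` module. Judging from the inline code it replaces here, it presumably behaves like this sketch (the exact implementation is in `g4f/Provider/base_provider.py`, not shown in this excerpt):

```py
# Hypothetical sketch of format_prompt, inferred from the removed Bard code.
def format_prompt(messages: list[dict[str, str]]) -> str:
    formatted = "\n".join(f"{m['role']}: {m['content']}" for m in messages)
    return f"{formatted}\nAssistant:"
```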
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index 48b5477d..cec82108 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -1,294 +1,94 @@
+from __future__ import annotations
+
import asyncio
import json
import os
import random
-import ssl
-import uuid
import aiohttp
-import certifi
-import requests
-
-from ..typing import Any, AsyncGenerator, CreateResult, Tuple, Union
-from .base_provider import BaseProvider
+from aiohttp import ClientSession
+from ..typing import Any, AsyncGenerator, CreateResult, Union
+from .base_provider import AsyncGeneratorProvider, get_cookies
-class Bing(BaseProvider):
- url = "https://bing.com/chat"
- supports_gpt_4 = True
+class Bing(AsyncGeneratorProvider):
+ url = "https://bing.com/chat"
+ needs_auth = True
+ working = True
+ supports_gpt_4 = True
+ supports_stream = True
+
@staticmethod
- def create_completion(
- model: str,
- messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
+ def create_async_generator(
+ model: str,
+ messages: list[dict[str, str]],
+ cookies: dict = None, **kwargs) -> AsyncGenerator:
+
+ if not cookies:
+ cookies = get_cookies(".bing.com")
if len(messages) < 2:
prompt = messages[0]["content"]
- context = False
-
+ context = None
else:
prompt = messages[-1]["content"]
- context = convert(messages[:-1])
-
- response = run(stream_generate(prompt, jailbreak, context))
- for token in response:
- yield token
-
-
-def convert(messages: list[dict[str, str]]):
- context = ""
+ context = create_context(messages[:-1])
+
+ if cookies and "SRCHD" in cookies:
+ #TODO: Will implement proper cookie retrieval later and use a try-except mechanism in 'stream_generate' instead of defaulting the cookie value like this
+ cookies_dict = {
+ 'SRCHD' : cookies["SRCHD"],
+ 'PPLState' : '1',
+ 'KievRPSSecAuth': '',
+ 'SUID' : '',
+ 'SRCHUSR' : '',
+ 'SRCHHPGUSR' : '',
+ }
+
+ return stream_generate(prompt, context, cookies_dict)
- for message in messages:
- context += "[%s](#message)\n%s\n\n" % (message["role"], message["content"])
+def create_context(messages: list[dict[str, str]]):
+ context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages)
return context
-
-jailbreak = {
- "optionsSets": [
- "saharasugg",
- "enablenewsfc",
- "clgalileo",
- "gencontentv3",
- "nlu_direct_response_filter",
- "deepleo",
- "disable_emoji_spoken_text",
- "responsible_ai_policy_235",
- "enablemm",
- "h3precise"
- # "harmonyv3",
- "dtappid",
- "cricinfo",
- "cricinfov2",
- "dv3sugg",
- "nojbfedge",
- ]
-}
-
-
-ssl_context = ssl.create_default_context()
-ssl_context.load_verify_locations(certifi.where())
-
-
-def _format(msg: dict[str, Any]) -> str:
- return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
-
-
-async def stream_generate(
- prompt: str,
- mode: dict[str, list[str]] = jailbreak,
- context: Union[bool, str] = False,
-):
- timeout = aiohttp.ClientTimeout(total=900)
- session = aiohttp.ClientSession(timeout=timeout)
-
- conversationId, clientId, conversationSignature = await create_conversation()
-
- wss = await session.ws_connect(
- "wss://sydney.bing.com/sydney/ChatHub",
- ssl=ssl_context,
- autoping=False,
- headers={
- "accept": "application/json",
- "accept-language": "en-US,en;q=0.9",
- "content-type": "application/json",
- "sec-ch-ua": '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
- "sec-ch-ua-arch": '"x86"',
- "sec-ch-ua-bitness": '"64"',
- "sec-ch-ua-full-version": '"109.0.1518.78"',
- "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-model": "",
- "sec-ch-ua-platform": '"Windows"',
- "sec-ch-ua-platform-version": '"15.0.0"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "x-ms-client-request-id": str(uuid.uuid4()),
- "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32",
- "Referer": "https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx",
- "Referrer-Policy": "origin-when-cross-origin",
- "x-forwarded-for": Defaults.ip_address,
- },
- )
-
- await wss.send_str(_format({"protocol": "json", "version": 1}))
- await wss.receive(timeout=900)
-
- argument: dict[str, Any] = {
- **mode,
+class Conversation():
+ def __init__(self, conversationId: str, clientId: str, conversationSignature: str) -> None:
+ self.conversationId = conversationId
+ self.clientId = clientId
+ self.conversationSignature = conversationSignature
+
+async def create_conversation(session: ClientSession) -> Conversation:
+ url = 'https://www.bing.com/turing/conversation/create'
+ async with await session.get(url) as response:
+ response = await response.json()
+ conversationId = response.get('conversationId')
+ clientId = response.get('clientId')
+ conversationSignature = response.get('conversationSignature')
+
+ if not conversationId or not clientId or not conversationSignature:
+ raise Exception('Failed to create conversation.')
+
+ return Conversation(conversationId, clientId, conversationSignature)
+
+async def list_conversations(session: ClientSession) -> list:
+ url = "https://www.bing.com/turing/conversation/chats"
+ async with session.get(url) as response:
+ response = await response.json()
+ return response["chats"]
+
+async def delete_conversation(session: ClientSession, conversation: Conversation) -> list:
+ url = "https://sydney.bing.com/sydney/DeleteSingleConversation"
+ json = {
+ "conversationId": conversation.conversationId,
+ "conversationSignature": conversation.conversationSignature,
+ "participant": {"id": conversation.clientId},
"source": "cib",
- "allowedMessageTypes": Defaults.allowedMessageTypes,
- "sliceIds": Defaults.sliceIds,
- "traceId": os.urandom(16).hex(),
- "isStartOfSession": True,
- "message": Defaults.location
- | {
- "author": "user",
- "inputMethod": "Keyboard",
- "text": prompt,
- "messageType": "Chat",
- },
- "conversationSignature": conversationSignature,
- "participant": {"id": clientId},
- "conversationId": conversationId,
+ "optionsSets": ["autosave"]
}
-
- if context:
- argument["previousMessages"] = [
- {
- "author": "user",
- "description": context,
- "contextType": "WebPage",
- "messageType": "Context",
- "messageId": "discover-web--page-ping-mriduna-----",
- }
- ]
-
- struct: dict[str, list[dict[str, Any]] | str | int] = {
- "arguments": [argument],
- "invocationId": "0",
- "target": "chat",
- "type": 4,
- }
-
- await wss.send_str(_format(struct))
-
- final = False
- draw = False
- resp_txt = ""
- result_text = ""
- resp_txt_no_link = ""
- cache_text = ""
-
- while not final:
- msg = await wss.receive(timeout=900)
- objects = msg.data.split(Defaults.delimiter) # type: ignore
-
- for obj in objects: # type: ignore
- if obj is None or not obj:
- continue
-
- response = json.loads(obj) # type: ignore
- if response.get("type") == 1 and response["arguments"][0].get(
- "messages",
- ):
- if not draw:
- if (
- response["arguments"][0]["messages"][0]["contentOrigin"]
- != "Apology"
- ) and not draw:
- resp_txt = result_text + response["arguments"][0]["messages"][
- 0
- ]["adaptiveCards"][0]["body"][0].get("text", "")
- resp_txt_no_link = result_text + response["arguments"][0][
- "messages"
- ][0].get("text", "")
-
- if response["arguments"][0]["messages"][0].get(
- "messageType",
- ):
- resp_txt = (
- resp_txt
- + response["arguments"][0]["messages"][0][
- "adaptiveCards"
- ][0]["body"][0]["inlines"][0].get("text")
- + "\n"
- )
- result_text = (
- result_text
- + response["arguments"][0]["messages"][0][
- "adaptiveCards"
- ][0]["body"][0]["inlines"][0].get("text")
- + "\n"
- )
-
- if cache_text.endswith(" "):
- final = True
- if wss and not wss.closed:
- await wss.close()
- if session and not session.closed:
- await session.close()
-
- yield (resp_txt.replace(cache_text, ""))
- cache_text = resp_txt
-
- elif response.get("type") == 2:
- if response["item"]["result"].get("error"):
- if wss and not wss.closed:
- await wss.close()
- if session and not session.closed:
- await session.close()
-
- raise Exception(
- f"{response['item']['result']['value']}: {response['item']['result']['message']}"
- )
-
- if draw:
- cache = response["item"]["messages"][1]["adaptiveCards"][0]["body"][
- 0
- ]["text"]
- response["item"]["messages"][1]["adaptiveCards"][0]["body"][0][
- "text"
- ] = (cache + resp_txt)
-
- if (
- response["item"]["messages"][-1]["contentOrigin"] == "Apology"
- and resp_txt
- ):
- response["item"]["messages"][-1]["text"] = resp_txt_no_link
- response["item"]["messages"][-1]["adaptiveCards"][0]["body"][0][
- "text"
- ] = resp_txt
-
- # print('Preserved the message from being deleted', file=sys.stderr)
-
- final = True
- if wss and not wss.closed:
- await wss.close()
- if session and not session.closed:
- await session.close()
-
-
-async def create_conversation() -> Tuple[str, str, str]:
- create = requests.get(
- "https://www.bing.com/turing/conversation/create",
- headers={
- "authority": "edgeservices.bing.com",
- "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "max-age=0",
- "sec-ch-ua": '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
- "sec-ch-ua-arch": '"x86"',
- "sec-ch-ua-bitness": '"64"',
- "sec-ch-ua-full-version": '"110.0.1587.69"',
- "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-model": '""',
- "sec-ch-ua-platform": '"Windows"',
- "sec-ch-ua-platform-version": '"15.0.0"',
- "sec-fetch-dest": "document",
- "sec-fetch-mode": "navigate",
- "sec-fetch-site": "none",
- "sec-fetch-user": "?1",
- "upgrade-insecure-requests": "1",
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69",
- "x-edge-shopping-flag": "1",
- "x-forwarded-for": Defaults.ip_address,
- },
- )
-
- conversationId = create.json().get("conversationId")
- clientId = create.json().get("clientId")
- conversationSignature = create.json().get("conversationSignature")
-
- if not conversationId or not clientId or not conversationSignature:
- raise Exception("Failed to create conversation.")
-
- return conversationId, clientId, conversationSignature
-
+ async with session.post(url, json=json) as response:
+ response = await response.json()
+ return response["result"]["value"] == "Success"
class Defaults:
delimiter = "\x1e"
@@ -309,9 +109,6 @@ class Defaults:
]
sliceIds = [
- # "222dtappid",
- # "225cricinfo",
- # "224locals0"
"winmuid3tf",
"osbsdusgreccf",
"ttstmout",
@@ -349,6 +146,149 @@ class Defaults:
],
}
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'max-age=0',
+ 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+ 'sec-ch-ua-arch': '"x86"',
+ 'sec-ch-ua-bitness': '"64"',
+ 'sec-ch-ua-full-version': '"110.0.1587.69"',
+ 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-model': '""',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-ch-ua-platform-version': '"15.0.0"',
+ 'sec-fetch-dest': 'document',
+ 'sec-fetch-mode': 'navigate',
+ 'sec-fetch-site': 'none',
+ 'sec-fetch-user': '?1',
+ 'upgrade-insecure-requests': '1',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
+ 'x-edge-shopping-flag': '1',
+ 'x-forwarded-for': ip_address,
+ }
+
+ optionsSets = {
+ "optionsSets": [
+ 'saharasugg',
+ 'enablenewsfc',
+ 'clgalileo',
+ 'gencontentv3',
+ "nlu_direct_response_filter",
+ "deepleo",
+ "disable_emoji_spoken_text",
+ "responsible_ai_policy_235",
+ "enablemm",
+ "h3precise"
+ "dtappid",
+ "cricinfo",
+ "cricinfov2",
+ "dv3sugg",
+ "nojbfedge"
+ ]
+ }
+
+def format_message(msg: dict) -> str:
+ return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
+
+def create_message(conversation: Conversation, prompt: str, context: str=None) -> str:
+ struct = {
+ 'arguments': [
+ {
+ **Defaults.optionsSets,
+ 'source': 'cib',
+ 'allowedMessageTypes': Defaults.allowedMessageTypes,
+ 'sliceIds': Defaults.sliceIds,
+ 'traceId': os.urandom(16).hex(),
+ 'isStartOfSession': True,
+ 'message': Defaults.location | {
+ 'author': 'user',
+ 'inputMethod': 'Keyboard',
+ 'text': prompt,
+ 'messageType': 'Chat'
+ },
+ 'conversationSignature': conversation.conversationSignature,
+ 'participant': {
+ 'id': conversation.clientId
+ },
+ 'conversationId': conversation.conversationId
+ }
+ ],
+ 'invocationId': '0',
+ 'target': 'chat',
+ 'type': 4
+ }
+
+ if context:
+ struct['arguments'][0]['previousMessages'] = [{
+ "author": "user",
+ "description": context,
+ "contextType": "WebPage",
+ "messageType": "Context",
+ "messageId": "discover-web--page-ping-mriduna-----"
+ }]
+ return format_message(struct)
+
+async def stream_generate(
+ prompt: str,
+ context: str=None,
+ cookies: dict=None
+ ):
+ async with ClientSession(
+ timeout=aiohttp.ClientTimeout(total=900),
+ cookies=cookies,
+ headers=Defaults.headers,
+ ) as session:
+ conversation = await create_conversation(session)
+ try:
+ async with session.ws_connect(
+ 'wss://sydney.bing.com/sydney/ChatHub',
+ autoping=False,
+ ) as wss:
+
+ await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
+ msg = await wss.receive(timeout=900)
+
+ await wss.send_str(create_message(conversation, prompt, context))
+
+ response_txt = ''
+ result_text = ''
+ returned_text = ''
+ final = False
+
+ while not final:
+ msg = await wss.receive(timeout=900)
+ objects = msg.data.split(Defaults.delimiter)
+ for obj in objects:
+ if obj is None or not obj:
+ continue
+
+ response = json.loads(obj)
+ if response.get('type') == 1 and response['arguments'][0].get('messages'):
+ message = response['arguments'][0]['messages'][0]
+ if (message['contentOrigin'] != 'Apology'):
+ response_txt = result_text + \
+ message['adaptiveCards'][0]['body'][0].get('text', '')
+
+ if message.get('messageType'):
+ inline_txt = message['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
+ response_txt += inline_txt + '\n'
+ result_text += inline_txt + '\n'
+
+ if response_txt.startswith(returned_text):
+ new = response_txt[len(returned_text):]
+ if new != "\n":
+ yield new
+ returned_text = response_txt
+ elif response.get('type') == 2:
+ result = response['item']['result']
+ if result.get('error'):
+ raise Exception(f"{result['value']}: {result['message']}")
+ final = True
+ break
+ finally:
+ await delete_conversation(session, conversation)
def run(generator: AsyncGenerator[Union[Any, str], Any]):
loop = asyncio.get_event_loop()
@@ -360,3 +300,4 @@ def run(generator: AsyncGenerator[Union[Any, str], Any]):
except StopAsyncIteration:
break
+
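The rewritten `stream_generate` yields only the suffix of the accumulated text that has not been returned yet, instead of the old cache-and-replace logic. The pattern in isolation:

```py
# Sketch of the incremental-yield pattern used in stream_generate:
# each event carries the full text so far; emit only the new delta.
def yield_new_suffix(events):
    returned = ""
    for full_text in events:
        if full_text.startswith(returned):
            new = full_text[len(returned):]
            if new != "\n":
                yield new
            returned = full_text

print(list(yield_new_suffix(["Hel", "Hello", "Hello, world"])))
# ['Hel', 'lo', ', world']
```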
diff --git a/g4f/Provider/ChatgptAi.py b/g4f/Provider/ChatgptAi.py
index 53518f65..7613ccf1 100644
--- a/g4f/Provider/ChatgptAi.py
+++ b/g4f/Provider/ChatgptAi.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import re
import requests
@@ -7,55 +9,51 @@ from .base_provider import BaseProvider
class ChatgptAi(BaseProvider):
- url = "https://chatgpt.ai/gpt-4/"
- working = True
- supports_gpt_4 = True
+ url: str = "https://chatgpt.ai/gpt-4/"
+ working = True
+ supports_gpt_4 = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
- chat = ""
- for message in messages:
- chat += "%s: %s\n" % (message["role"], message["content"])
- chat += "assistant: "
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ chat = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
+ chat += "\nassistant: "
response = requests.get("https://chatgpt.ai/")
nonce, post_id, _, bot_id = re.findall(
r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width',
- response.text,
- )[0]
+ response.text)[0]
headers = {
- "authority": "chatgpt.ai",
- "accept": "*/*",
- "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
- "cache-control": "no-cache",
- "origin": "https://chatgpt.ai",
- "pragma": "no-cache",
- "referer": "https://chatgpt.ai/gpt-4/",
- "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Windows"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ "authority" : "chatgpt.ai",
+ "accept" : "*/*",
+ "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "cache-control" : "no-cache",
+ "origin" : "https://chatgpt.ai",
+ "pragma" : "no-cache",
+ "referer" : "https://chatgpt.ai/gpt-4/",
+ "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ "sec-ch-ua-mobile" : "?0",
+ "sec-ch-ua-platform" : '"Windows"',
+ "sec-fetch-dest" : "empty",
+ "sec-fetch-mode" : "cors",
+ "sec-fetch-site" : "same-origin",
+ "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
}
data = {
- "_wpnonce": nonce,
- "post_id": post_id,
- "url": "https://chatgpt.ai/gpt-4",
- "action": "wpaicg_chat_shortcode_message",
- "message": chat,
- "bot_id": bot_id,
+ "_wpnonce" : nonce,
+ "post_id" : post_id,
+ "url" : "https://chatgpt.ai/gpt-4",
+ "action" : "wpaicg_chat_shortcode_message",
+ "message" : chat,
+ "bot_id" : bot_id,
}
response = requests.post(
- "https://chatgpt.ai/wp-admin/admin-ajax.php", headers=headers, data=data
- )
+ "https://chatgpt.ai/wp-admin/admin-ajax.php", headers=headers, data=data)
+
response.raise_for_status()
- yield response.json()["data"]
+ yield response.json()["data"] \ No newline at end of file
diff --git a/g4f/Provider/ChatgptLogin.py b/g4f/Provider/ChatgptLogin.py
index da9fda40..e4584d32 100644
--- a/g4f/Provider/ChatgptLogin.py
+++ b/g4f/Provider/ChatgptLogin.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import base64
import os
import re
@@ -9,61 +11,58 @@ from .base_provider import BaseProvider
class ChatgptLogin(BaseProvider):
- url = "https://opchatgpts.net"
+ url = "https://opchatgpts.net"
supports_gpt_35_turbo = True
- working = True
+ working = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
+ stream: bool, **kwargs: Any) -> CreateResult:
+
headers = {
- "authority": "chatgptlogin.ac",
- "accept": "*/*",
- "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
- "content-type": "application/json",
- "origin": "https://opchatgpts.net",
- "referer": "https://opchatgpts.net/chatgpt-free-use/",
- "sec-ch-ua": '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Windows"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
- "x-wp-nonce": _get_nonce(),
+ "authority" : "chatgptlogin.ac",
+ "accept" : "*/*",
+ "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "content-type" : "application/json",
+ "origin" : "https://opchatgpts.net",
+ "referer" : "https://opchatgpts.net/chatgpt-free-use/",
+ "sec-ch-ua" : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+ "sec-ch-ua-mobile" : "?0",
+ "sec-ch-ua-platform" : '"Windows"',
+ "sec-fetch-dest" : "empty",
+ "sec-fetch-mode" : "cors",
+ "sec-fetch-site" : "same-origin",
+ "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+ "x-wp-nonce" : _get_nonce(),
}
conversation = _transform(messages)
json_data = {
- "env": "chatbot",
- "session": "N/A",
- "prompt": "Converse as if you were an AI assistant. Be friendly, creative.",
- "context": "Converse as if you were an AI assistant. Be friendly, creative.",
- "messages": conversation,
- "newMessage": messages[-1]["content"],
- "userName": '<div class="mwai-name-text">User:</div>',
- "aiName": '<div class="mwai-name-text">AI:</div>',
- "model": "gpt-3.5-turbo",
- "temperature": kwargs.get("temperature", 0.8),
- "maxTokens": 1024,
- "maxResults": 1,
- "apiKey": "",
- "service": "openai",
+ "env" : "chatbot",
+ "session" : "N/A",
+ "prompt" : "Converse as if you were an AI assistant. Be friendly, creative.",
+ "context" : "Converse as if you were an AI assistant. Be friendly, creative.",
+ "messages" : conversation,
+ "newMessage" : messages[-1]["content"],
+ "userName" : '<div class="mwai-name-text">User:</div>',
+ "aiName" : '<div class="mwai-name-text">AI:</div>',
+ "model" : "gpt-3.5-turbo",
+ "temperature" : kwargs.get("temperature", 0.8),
+ "maxTokens" : 1024,
+ "maxResults" : 1,
+ "apiKey" : "",
+ "service" : "openai",
"embeddingsIndex": "",
- "stop": "",
- "clientId": os.urandom(6).hex(),
+ "stop" : "",
+ "clientId" : os.urandom(6).hex()
}
- response = requests.post(
- "https://opchatgpts.net/wp-json/ai-chatbot/v1/chat",
- headers=headers,
- json=json_data,
- )
+ response = requests.post("https://opchatgpts.net/wp-json/ai-chatbot/v1/chat",
+ headers=headers, json=json_data)
+
response.raise_for_status()
yield response.json()["reply"]
@@ -81,24 +80,21 @@ class ChatgptLogin(BaseProvider):
def _get_nonce() -> str:
- res = requests.get(
- "https://opchatgpts.net/chatgpt-free-use/",
- headers={
- "Referer": "https://opchatgpts.net/chatgpt-free-use/",
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
- },
- )
+ res = requests.get("https://opchatgpts.net/chatgpt-free-use/",
+ headers = {
+ "Referer" : "https://opchatgpts.net/chatgpt-free-use/",
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"})
result = re.search(
r'class="mwai-chat mwai-chatgpt">.*<span>Send</span></button></div></div></div> <script defer src="(.*?)">',
- res.text,
- )
+ res.text)
+
if result is None:
return ""
-
- src = result.group(1)
+
+ src = result.group(1)
decoded_string = base64.b64decode(src.split(",")[-1]).decode("utf-8")
- result = re.search(r"let restNonce = '(.*?)';", decoded_string)
+ result = re.search(r"let restNonce = '(.*?)';", decoded_string)
return "" if result is None else result.group(1)
@@ -106,11 +102,11 @@ def _get_nonce() -> str:
def _transform(messages: list[dict[str, str]]) -> list[dict[str, Any]]:
return [
{
- "id": os.urandom(6).hex(),
- "role": message["role"],
+ "id" : os.urandom(6).hex(),
+ "role" : message["role"],
"content": message["content"],
- "who": "AI: " if message["role"] == "assistant" else "User: ",
- "html": _html_encode(message["content"]),
+ "who" : "AI: " if message["role"] == "assistant" else "User: ",
+ "html" : _html_encode(message["content"]),
}
for message in messages
]
@@ -118,14 +114,14 @@ def _transform(messages: list[dict[str, str]]) -> list[dict[str, Any]]:
def _html_encode(string: str) -> str:
table = {
- '"': "&quot;",
- "'": "&#39;",
- "&": "&amp;",
- ">": "&gt;",
- "<": "&lt;",
+ '"' : "&quot;",
+ "'" : "&#39;",
+ "&" : "&amp;",
+ ">" : "&gt;",
+ "<" : "&lt;",
"\n": "<br>",
"\t": "&nbsp;&nbsp;&nbsp;&nbsp;",
- " ": "&nbsp;",
+ " " : "&nbsp;",
}
for key in table:
diff --git a/g4f/Provider/DeepAi.py b/g4f/Provider/DeepAi.py
index 01dc426d..feba6b41 100644
--- a/g4f/Provider/DeepAi.py
+++ b/g4f/Provider/DeepAi.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import json
import js2py
@@ -8,19 +10,17 @@ from .base_provider import BaseProvider
class DeepAi(BaseProvider):
- url = "https://deepai.org"
- working = True
- supports_stream = True
+ url: str = "https://deepai.org"
+ working = True
+ supports_stream = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
- url = "https://api.deepai.org/make_me_a_pizza"
+ stream: bool, **kwargs: Any) -> CreateResult:
+
token_js = """
var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;
@@ -54,7 +54,9 @@ f = function () {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
}
- response = requests.post(url, headers=headers, data=payload, stream=True)
+ response = requests.post("https://api.deepai.org/make_me_a_pizza",
+ headers=headers, data=payload, stream=True)
+
for chunk in response.iter_content(chunk_size=None):
response.raise_for_status()
yield chunk.decode()
diff --git a/g4f/Provider/DfeHub.py b/g4f/Provider/DfeHub.py
index 4093d0e4..d40e0380 100644
--- a/g4f/Provider/DfeHub.py
+++ b/g4f/Provider/DfeHub.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import json
import re
import time
@@ -9,48 +11,45 @@ from .base_provider import BaseProvider
class DfeHub(BaseProvider):
- url = "https://chat.dfehub.com/api/chat"
- supports_stream = True
+ url = "https://chat.dfehub.com/"
+ supports_stream = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
+ stream: bool, **kwargs: Any) -> CreateResult:
+
headers = {
- "authority": "chat.dfehub.com",
- "accept": "*/*",
- "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
- "content-type": "application/json",
- "origin": "https://chat.dfehub.com",
- "referer": "https://chat.dfehub.com/",
- "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- "sec-ch-ua-mobile": "?0",
+ "authority" : "chat.dfehub.com",
+ "accept" : "*/*",
+ "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "content-type" : "application/json",
+ "origin" : "https://chat.dfehub.com",
+ "referer" : "https://chat.dfehub.com/",
+ "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ "sec-ch-ua-mobile" : "?0",
"sec-ch-ua-platform": '"macOS"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
- "x-requested-with": "XMLHttpRequest",
+ "sec-fetch-dest" : "empty",
+ "sec-fetch-mode" : "cors",
+ "sec-fetch-site" : "same-origin",
+ "user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ "x-requested-with" : "XMLHttpRequest",
}
json_data = {
- "messages": messages,
- "model": "gpt-3.5-turbo",
- "temperature": kwargs.get("temperature", 0.5),
- "presence_penalty": kwargs.get("presence_penalty", 0),
- "frequency_penalty": kwargs.get("frequency_penalty", 0),
- "top_p": kwargs.get("top_p", 1),
- "stream": True,
+ "messages" : messages,
+ "model" : "gpt-3.5-turbo",
+ "temperature" : kwargs.get("temperature", 0.5),
+ "presence_penalty" : kwargs.get("presence_penalty", 0),
+ "frequency_penalty" : kwargs.get("frequency_penalty", 0),
+ "top_p" : kwargs.get("top_p", 1),
+ "stream" : True
}
- response = requests.post(
- "https://chat.dfehub.com/api/openai/v1/chat/completions",
- headers=headers,
- json=json_data,
- )
+
+ response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions",
+ headers=headers, json=json_data, timeout=3)
for chunk in response.iter_lines():
if b"detail" in chunk:
diff --git a/g4f/Provider/EasyChat.py b/g4f/Provider/EasyChat.py
index 2a61346c..946d4a4d 100644
--- a/g4f/Provider/EasyChat.py
+++ b/g4f/Provider/EasyChat.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
import json
+import random
import requests
@@ -7,18 +10,17 @@ from .base_provider import BaseProvider
class EasyChat(BaseProvider):
- url = "https://free.easychat.work"
- supports_stream = True
+ url: str = "https://free.easychat.work"
+ supports_stream = True
supports_gpt_35_turbo = True
- working = True
+ working = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
+ stream: bool, **kwargs: Any) -> CreateResult:
+
active_servers = [
"https://chat10.fastgpt.me",
"https://chat9.fastgpt.me",
@@ -28,64 +30,69 @@ class EasyChat(BaseProvider):
"https://chat4.fastgpt.me",
"https://gxos1h1ddt.fastgpt.me"
]
- server = active_servers[kwargs.get("active_server", 0)]
+
+ server = active_servers[kwargs.get("active_server", random.randint(0, 5))]
headers = {
- "authority": f"{server}".replace("https://", ""),
- "accept": "text/event-stream",
- "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
- "content-type": "application/json",
- "origin": f"{server}",
- "referer": f"{server}/",
- "x-requested-with": "XMLHttpRequest",
- 'plugins': '0',
- 'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
- 'sec-ch-ua-mobile': '?0',
+ "authority" : f"{server}".replace("https://", ""),
+ "accept" : "text/event-stream",
+ "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
+ "content-type" : "application/json",
+ "origin" : f"{server}",
+ "referer" : f"{server}/",
+ "x-requested-with" : "XMLHttpRequest",
+ 'plugins' : '0',
+ 'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+ 'sec-ch-ua-mobile' : '?0',
'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
- 'usesearch': 'false',
- 'x-requested-with': 'XMLHttpRequest'
+ 'sec-fetch-dest' : 'empty',
+ 'sec-fetch-mode' : 'cors',
+ 'sec-fetch-site' : 'same-origin',
+ 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
+ 'usesearch' : 'false',
+ 'x-requested-with' : 'XMLHttpRequest'
}
json_data = {
- "messages": messages,
- "stream": stream,
- "model": model,
- "temperature": kwargs.get("temperature", 0.5),
- "presence_penalty": kwargs.get("presence_penalty", 0),
- "frequency_penalty": kwargs.get("frequency_penalty", 0),
- "top_p": kwargs.get("top_p", 1),
+ "messages" : messages,
+ "stream" : stream,
+ "model" : model,
+ "temperature" : kwargs.get("temperature", 0.5),
+ "presence_penalty" : kwargs.get("presence_penalty", 0),
+ "frequency_penalty" : kwargs.get("frequency_penalty", 0),
+ "top_p" : kwargs.get("top_p", 1)
}
session = requests.Session()
# init cookies from server
session.get(f"{server}/")
- response = session.post(
- f"{server}/api/openai/v1/chat/completions",
- headers=headers,
- json=json_data,
- stream=stream,
- )
+ response = session.post(f"{server}/api/openai/v1/chat/completions",
+ headers=headers, json=json_data, stream=stream)
+
if response.status_code == 200:
+
if stream == False:
json_data = response.json()
+
if "choices" in json_data:
yield json_data["choices"][0]["message"]["content"]
else:
- yield Exception("No response from server")
+ raise Exception("No response from server")
+
else:
for chunk in response.iter_lines():
+
if b"content" in chunk:
- splitData = chunk.decode().split("data: ")
+ splitData = chunk.decode().split("data:")
+
if len(splitData) > 1:
yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
+ else:
+ continue
else:
- yield Exception(f"Error {response.status_code} from server")
-
+ raise Exception(f"Error {response.status_code} from server : {response.reason}")
+
@classmethod
@property
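The change from `yield Exception(...)` to `raise Exception(...)` in EasyChat matters: yielding an exception merely hands the consumer an `Exception` object as if it were a text chunk, while raising actually propagates the failure:

```py
def bad():
    yield Exception("No response from server")   # consumer gets an object, no error

def good():
    raise Exception("No response from server")
    yield  # unreachable; only makes this function a generator

print(list(bad()))       # [Exception('No response from server')] -- silent "success"
try:
    list(good())
except Exception as e:
    print("raised:", e)  # raised: No response from server
```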
diff --git a/g4f/Provider/Equing.py b/g4f/Provider/Equing.py
index 90c865d9..0ebb93a5 100644
--- a/g4f/Provider/Equing.py
+++ b/g4f/Provider/Equing.py
@@ -1,58 +1,62 @@
-import requests, json
+from __future__ import annotations
+
+import json
from abc import ABC, abstractmethod
+import requests
+
from ..typing import Any, CreateResult
class Equing(ABC):
- url: str = 'https://next.eqing.tech/'
- working = True
- needs_auth = False
- supports_stream = True
+ url: str = 'https://next.eqing.tech/'
+ working = True
+ needs_auth = False
+ supports_stream = True
supports_gpt_35_turbo = True
- supports_gpt_4 = False
+ supports_gpt_4 = False
@staticmethod
@abstractmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any) -> CreateResult:
+ stream: bool, **kwargs: Any) -> CreateResult:
headers = {
- 'authority': 'next.eqing.tech',
- 'accept': 'text/event-stream',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'cache-control': 'no-cache',
- 'content-type': 'application/json',
- 'origin': 'https://next.eqing.tech',
- 'plugins': '0',
- 'pragma': 'no-cache',
- 'referer': 'https://next.eqing.tech/',
- 'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
- 'sec-ch-ua-mobile': '?0',
+ 'authority' : 'next.eqing.tech',
+ 'accept' : 'text/event-stream',
+ 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'cache-control' : 'no-cache',
+ 'content-type' : 'application/json',
+ 'origin' : 'https://next.eqing.tech',
+ 'plugins' : '0',
+ 'pragma' : 'no-cache',
+ 'referer' : 'https://next.eqing.tech/',
+ 'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
+ 'sec-ch-ua-mobile' : '?0',
'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
- 'usesearch': 'false',
- 'x-requested-with': 'XMLHttpRequest',
+ 'sec-fetch-dest' : 'empty',
+ 'sec-fetch-mode' : 'cors',
+ 'sec-fetch-site' : 'same-origin',
+ 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
+ 'usesearch' : 'false',
+ 'x-requested-with' : 'XMLHttpRequest'
}
json_data = {
- 'messages': messages,
- 'stream': stream,
- 'model': model,
- 'temperature': kwargs.get('temperature', 0.5),
- 'presence_penalty': kwargs.get('presence_penalty', 0),
- 'frequency_penalty': kwargs.get('frequency_penalty', 0),
- 'top_p': kwargs.get('top_p', 1),
+ 'messages' : messages,
+ 'stream' : stream,
+ 'model' : model,
+ 'temperature' : kwargs.get('temperature', 0.5),
+ 'presence_penalty' : kwargs.get('presence_penalty', 0),
+ 'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+ 'top_p' : kwargs.get('top_p', 1),
}
response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
headers=headers, json=json_data, stream=stream)
+
if not stream:
yield response.json()["choices"][0]["message"]["content"]
return
diff --git a/g4f/Provider/FastGpt.py b/g4f/Provider/FastGpt.py
index 950abab1..ef47f752 100644
--- a/g4f/Provider/FastGpt.py
+++ b/g4f/Provider/FastGpt.py
@@ -1,55 +1,58 @@
-import requests, json, random
+from __future__ import annotations
+
+import json
+import random
from abc import ABC, abstractmethod
+import requests
+
from ..typing import Any, CreateResult
class FastGpt(ABC):
- url: str = 'https://chat9.fastgpt.me/'
- working = True
- needs_auth = False
- supports_stream = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = False
+ url: str = 'https://chat9.fastgpt.me/'
+ working = False
+ needs_auth = False
+ supports_stream = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = False
@staticmethod
@abstractmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any) -> CreateResult:
+ stream: bool, **kwargs: Any) -> CreateResult:
headers = {
- 'authority': 'chat9.fastgpt.me',
- 'accept': 'text/event-stream',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'cache-control': 'no-cache',
- 'content-type': 'application/json',
- # 'cookie': 'cf_clearance=idIAwtoSCn0uCzcWLGuD.KtiAJv9a1GsPduEOqIkyHU-1692278595-0-1-cb11fd7a.ab1546d4.ccf35fd7-0.2.1692278595; Hm_lvt_563fb31e93813a8a7094966df6671d3f=1691966491,1692278597; Hm_lpvt_563fb31e93813a8a7094966df6671d3f=1692278597',
- 'origin': 'https://chat9.fastgpt.me',
- 'plugins': '0',
- 'pragma': 'no-cache',
- 'referer': 'https://chat9.fastgpt.me/',
- 'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
- 'sec-ch-ua-mobile': '?0',
+ 'authority' : 'chat9.fastgpt.me',
+ 'accept' : 'text/event-stream',
+ 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'cache-control' : 'no-cache',
+ 'content-type' : 'application/json',
+ 'origin' : 'https://chat9.fastgpt.me',
+ 'plugins' : '0',
+ 'pragma' : 'no-cache',
+ 'referer' : 'https://chat9.fastgpt.me/',
+ 'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
+ 'sec-ch-ua-mobile' : '?0',
'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
- 'usesearch': 'false',
- 'x-requested-with': 'XMLHttpRequest',
+ 'sec-fetch-dest' : 'empty',
+ 'sec-fetch-mode' : 'cors',
+ 'sec-fetch-site' : 'same-origin',
+ 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
+ 'usesearch' : 'false',
+ 'x-requested-with' : 'XMLHttpRequest',
}
json_data = {
- 'messages': messages,
- 'stream': stream,
- 'model': model,
- 'temperature': kwargs.get('temperature', 0.5),
- 'presence_penalty': kwargs.get('presence_penalty', 0),
- 'frequency_penalty': kwargs.get('frequency_penalty', 0),
- 'top_p': kwargs.get('top_p', 1),
+ 'messages' : messages,
+ 'stream' : stream,
+ 'model' : model,
+ 'temperature' : kwargs.get('temperature', 0.5),
+ 'presence_penalty' : kwargs.get('presence_penalty', 0),
+ 'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+ 'top_p' : kwargs.get('top_p', 1),
}
subdomain = random.choice([
@@ -58,7 +61,7 @@ class FastGpt(ABC):
])
response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
- headers=headers, json=json_data, stream=stream)
+ headers=headers, json=json_data, stream=stream)
for line in response.iter_lines():
if line:
diff --git a/g4f/Provider/Forefront.py b/g4f/Provider/Forefront.py
index 76f6c780..8f51fb57 100644
--- a/g4f/Provider/Forefront.py
+++ b/g4f/Provider/Forefront.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import json
import requests
@@ -7,34 +9,31 @@ from .base_provider import BaseProvider
class Forefront(BaseProvider):
- url = "https://forefront.com"
- supports_stream = True
+ url = "https://forefront.com"
+ supports_stream = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
+ stream: bool, **kwargs: Any) -> CreateResult:
+
json_data = {
- "text": messages[-1]["content"],
- "action": "noauth",
- "id": "",
- "parentId": "",
- "workspaceId": "",
+ "text" : messages[-1]["content"],
+ "action" : "noauth",
+ "id" : "",
+ "parentId" : "",
+ "workspaceId" : "",
"messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
- "model": "gpt-4",
- "messages": messages[:-1] if len(messages) > 1 else [],
- "internetMode": "auto",
+ "model" : "gpt-4",
+ "messages" : messages[:-1] if len(messages) > 1 else [],
+ "internetMode" : "auto",
}
- response = requests.post(
- "https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
- json=json_data,
- stream=True,
- )
+ response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
+ json=json_data, stream=True)
+
response.raise_for_status()
for token in response.iter_lines():
if b"delta" in token:
diff --git a/g4f/Provider/GetGpt.py b/g4f/Provider/GetGpt.py
index fb581ecb..74e772b0 100644
--- a/g4f/Provider/GetGpt.py
+++ b/g4f/Provider/GetGpt.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import json
import os
import uuid
@@ -10,78 +12,77 @@ from .base_provider import BaseProvider
class GetGpt(BaseProvider):
- url = "https://chat.getgpt.world/"
- supports_stream = True
- working = True
+ url = 'https://chat.getgpt.world/'
+ supports_stream = True
+ working = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
+ stream: bool, **kwargs: Any) -> CreateResult:
+
headers = {
- "Content-Type": "application/json",
- "Referer": "https://chat.getgpt.world/",
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ 'Content-Type' : 'application/json',
+ 'Referer' : 'https://chat.getgpt.world/',
+ 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
}
+
data = json.dumps(
{
- "messages": messages,
- "frequency_penalty": kwargs.get("frequency_penalty", 0),
- "max_tokens": kwargs.get("max_tokens", 4000),
- "model": "gpt-3.5-turbo",
- "presence_penalty": kwargs.get("presence_penalty", 0),
- "temperature": kwargs.get("temperature", 1),
- "top_p": kwargs.get("top_p", 1),
- "stream": True,
- "uuid": str(uuid.uuid4()),
+ 'messages' : messages,
+ 'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+ 'max_tokens' : kwargs.get('max_tokens', 4000),
+ 'model' : 'gpt-3.5-turbo',
+ 'presence_penalty' : kwargs.get('presence_penalty', 0),
+ 'temperature' : kwargs.get('temperature', 1),
+ 'top_p' : kwargs.get('top_p', 1),
+ 'stream' : True,
+ 'uuid' : str(uuid.uuid4())
}
)
- res = requests.post(
- "https://chat.getgpt.world/api/chat/stream",
- headers=headers,
- json={"signature": _encrypt(data)},
- stream=True,
- )
+ res = requests.post('https://chat.getgpt.world/api/chat/stream',
+ headers=headers, json={'signature': _encrypt(data)}, stream=True)
res.raise_for_status()
for line in res.iter_lines():
- if b"content" in line:
- line_json = json.loads(line.decode("utf-8").split("data: ")[1])
- yield (line_json["choices"][0]["delta"]["content"])
+ if b'content' in line:
+ line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+ yield (line_json['choices'][0]['delta']['content'])
@classmethod
@property
def params(cls):
params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ("presence_penalty", "int"),
- ("frequency_penalty", "int"),
- ("top_p", "int"),
- ("max_tokens", "int"),
+ ('model', 'str'),
+ ('messages', 'list[dict[str, str]]'),
+ ('stream', 'bool'),
+ ('temperature', 'float'),
+ ('presence_penalty', 'int'),
+ ('frequency_penalty', 'int'),
+ ('top_p', 'int'),
+ ('max_tokens', 'int'),
]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ param = ', '.join([': '.join(p) for p in params])
+ return f'g4f.provider.{cls.__name__} supports: ({param})'
def _encrypt(e: str):
- t = os.urandom(8).hex().encode("utf-8")
- n = os.urandom(8).hex().encode("utf-8")
- r = e.encode("utf-8")
- cipher = AES.new(t, AES.MODE_CBC, n)
+ t = os.urandom(8).hex().encode('utf-8')
+ n = os.urandom(8).hex().encode('utf-8')
+ r = e.encode('utf-8')
+
+ cipher = AES.new(t, AES.MODE_CBC, n)
ciphertext = cipher.encrypt(_pad_data(r))
- return ciphertext.hex() + t.decode("utf-8") + n.decode("utf-8")
+
+ return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
def _pad_data(data: bytes) -> bytes:
- block_size = AES.block_size
+ block_size = AES.block_size
padding_size = block_size - len(data) % block_size
- padding = bytes([padding_size] * padding_size)
+ padding = bytes([padding_size] * padding_size)
+
return data + padding
diff --git a/g4f/Provider/H2o.py b/g4f/Provider/H2o.py
index f9b799bb..b62da977 100644
--- a/g4f/Provider/H2o.py
+++ b/g4f/Provider/H2o.py
@@ -1,86 +1,88 @@
+from __future__ import annotations
+
import json
import uuid
-import requests
+from aiohttp import ClientSession
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt
-class H2o(BaseProvider):
+class H2o(AsyncGeneratorProvider):
url = "https://gpt-gm.h2o.ai"
working = True
supports_stream = True
+ model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
- @staticmethod
- def create_completion(
+ @classmethod
+ async def create_async_generator(
+ cls,
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
- conversation = ""
- for message in messages:
- conversation += "%s: %s\n" % (message["role"], message["content"])
- conversation += "assistant: "
-
- session = requests.Session()
-
- headers = {"Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"}
- data = {
- "ethicsModalAccepted": "true",
- "shareConversationsWithModelAuthors": "true",
- "ethicsModalAcceptedAt": "",
- "activeModel": model,
- "searchEnabled": "true",
- }
- session.post(
- "https://gpt-gm.h2o.ai/settings",
- headers=headers,
- data=data,
- )
-
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncGenerator:
+ model = model if model else cls.model
headers = {"Referer": "https://gpt-gm.h2o.ai/"}
- data = {"model": model}
-
- response = session.post(
- "https://gpt-gm.h2o.ai/conversation",
- headers=headers,
- json=data,
- )
- conversation_id = response.json()["conversationId"]
- data = {
- "inputs": conversation,
- "parameters": {
- "temperature": kwargs.get("temperature", 0.4),
- "truncate": kwargs.get("truncate", 2048),
- "max_new_tokens": kwargs.get("max_new_tokens", 1024),
- "do_sample": kwargs.get("do_sample", True),
- "repetition_penalty": kwargs.get("repetition_penalty", 1.2),
- "return_full_text": kwargs.get("return_full_text", False),
- },
- "stream": True,
- "options": {
- "id": kwargs.get("id", str(uuid.uuid4())),
- "response_id": kwargs.get("response_id", str(uuid.uuid4())),
- "is_retry": False,
- "use_cache": False,
- "web_search_id": "",
- },
- }
+ async with ClientSession(
+ headers=headers
+ ) as session:
+ data = {
+ "ethicsModalAccepted": "true",
+ "shareConversationsWithModelAuthors": "true",
+ "ethicsModalAcceptedAt": "",
+ "activeModel": model,
+ "searchEnabled": "true",
+ }
+ async with session.post(
+ "https://gpt-gm.h2o.ai/settings",
+ proxy=proxy,
+ data=data
+ ) as response:
+ response.raise_for_status()
- response = session.post(
- f"https://gpt-gm.h2o.ai/conversation/{conversation_id}",
- headers=headers,
- json=data,
- )
- response.raise_for_status()
- response.encoding = "utf-8"
- generated_text = response.text.replace("\n", "").split("data:")
- generated_text = json.loads(generated_text[-1])
+ async with session.post(
+ "https://gpt-gm.h2o.ai/conversation",
+ proxy=proxy,
+ json={"model": model},
+ ) as response:
+ response.raise_for_status()
+ conversationId = (await response.json())["conversationId"]
- yield generated_text["generated_text"]
+ data = {
+ "inputs": format_prompt(messages),
+ "parameters": {
+ "temperature": 0.4,
+ "truncate": 2048,
+ "max_new_tokens": 1024,
+ "do_sample": True,
+ "repetition_penalty": 1.2,
+ "return_full_text": False,
+ **kwargs
+ },
+ "stream": True,
+ "options": {
+ "id": str(uuid.uuid4()),
+ "response_id": str(uuid.uuid4()),
+ "is_retry": False,
+ "use_cache": False,
+ "web_search_id": "",
+ },
+ }
+ async with session.post(
+ f"https://gpt-gm.h2o.ai/conversation/{conversationId}",
+ proxy=proxy,
+ json=data
+ ) as response:
+ start = "data:"
+ async for line in response.content:
+ line = line.decode("utf-8")
+ if line and line.startswith(start):
+ line = json.loads(line[len(start):-1])
+ if not line["token"]["special"]:
+ yield line["token"]["text"]
@classmethod
@property
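With the move to `AsyncGeneratorProvider`, H2o can be driven directly from asyncio code. A minimal consumption sketch, using the default model from the `model` attribute above:

```py
import asyncio

from g4f.Provider import H2o

async def main():
    async for token in H2o.create_async_generator(
        model="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
        messages=[{"role": "user", "content": "Hello"}],
    ):
        print(token, end="", flush=True)

asyncio.run(main())
```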
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
new file mode 100644
index 00000000..7b62b342
--- /dev/null
+++ b/g4f/Provider/HuggingChat.py
@@ -0,0 +1,110 @@
+from __future__ import annotations
+
+import json
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+
+
+class HuggingChat(AsyncGeneratorProvider):
+ url = "https://huggingface.co/chat/"
+ needs_auth = True
+ working = True
+ model = "OpenAssistant/oasst-sft-6-llama-30b-xor"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool = True,
+ proxy: str = None,
+ cookies: dict = None,
+ **kwargs
+ ) -> AsyncGenerator:
+ if not cookies:
+ cookies = get_cookies(".huggingface.co")
+ model = model if model else cls.model
+ if proxy and "://" not in proxy:
+ proxy = f"http://{proxy}"
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+ }
+ async with ClientSession(
+ cookies=cookies,
+ headers=headers
+ ) as session:
+ async with session.post("https://huggingface.co/chat/conversation", proxy=proxy, json={"model": model}) as response:
+ conversation_id = (await response.json())["conversationId"]
+
+ send = {
+ "inputs": format_prompt(messages),
+ "parameters": {
+ "temperature": 0.2,
+ "truncate": 1000,
+ "max_new_tokens": 1024,
+ "stop": ["</s>"],
+ "top_p": 0.95,
+ "repetition_penalty": 1.2,
+ "top_k": 50,
+ "return_full_text": False,
+ **kwargs
+ },
+ "stream": stream,
+ "options": {
+ "id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
+ "response_id": "04ce2602-3bea-45e8-8efc-cef00680376a",
+ "is_retry": False,
+ "use_cache": False,
+ "web_search_id": ""
+ }
+ }
+ start = "data:"
+ first = True
+ async with session.post(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy, json=send) as response:
+ async for line in response.content:
+ line = line.decode("utf-8")
+ if not line:
+ continue
+ if not stream:
+ try:
+ data = json.loads(line)
+ except json.decoder.JSONDecodeError:
+ raise RuntimeError(f"No json: {line}")
+ if "error" in data:
+ raise RuntimeError(data["error"])
+ elif isinstance(data, list):
+ yield data[0]["generated_text"]
+ else:
+ raise RuntimeError(f"Response: {line}")
+ elif line.startswith(start):
+ line = json.loads(line[len(start):-1])
+ if not line:
+ continue
+ if "token" not in line:
+ raise RuntimeError(f"Response: {line}")
+ if not line["token"]["special"]:
+ if first:
+ yield line["token"]["text"].lstrip()
+ first = False
+ else:
+ yield line["token"]["text"]
+
+ async with session.delete(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy) as response:
+ response.raise_for_status()
+
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("proxy", "str"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
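HuggingChat is flagged `needs_auth`, so a logged-in huggingface.co session is required. When browser cookies cannot be read via `get_cookies`, they can be passed explicitly; the cookie name below is a placeholder, not a documented value:

```py
import asyncio

from g4f.Provider import HuggingChat

cookies = {"hf-chat": "<your-session-cookie>"}  # placeholder name and value

async def main():
    async for token in HuggingChat.create_async_generator(
        model="OpenAssistant/oasst-sft-6-llama-30b-xor",
        messages=[{"role": "user", "content": "Hello"}],
        cookies=cookies,
    ):
        print(token, end="", flush=True)

asyncio.run(main())
```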
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index a969b643..2360c8a5 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -1,64 +1,81 @@
+from __future__ import annotations
+
+import json
import uuid
-import requests
+from aiohttp import ClientSession
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider
+models = {
+ "gpt-4": {
+ "id": "gpt-4",
+ "name": "GPT-4",
+ "maxLength": 24000,
+ "tokenLimit": 8000,
+ },
+ "gpt-3.5-turbo": {
+ "id": "gpt-3.5-turbo",
+ "name": "GPT-3.5",
+ "maxLength": 12000,
+ "tokenLimit": 4000,
+ },
+ "gpt-3.5-turbo-16k": {
+ "id": "gpt-3.5-turbo-16k",
+ "name": "GPT-3.5-16k",
+ "maxLength": 48000,
+ "tokenLimit": 16000,
+ },
+}
-class Liaobots(BaseProvider):
+class Liaobots(AsyncGeneratorProvider):
url = "https://liaobots.com"
+ working = True
supports_stream = True
- needs_auth = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
+ _auth_code = None
- @staticmethod
- def create_completion(
+ @classmethod
+ async def create_async_generator(
+ cls,
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
+ auth: str = None,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncGenerator:
+ if proxy and "://" not in proxy:
+ proxy = f"http://{proxy}"
headers = {
"authority": "liaobots.com",
"content-type": "application/json",
"origin": "https://liaobots.com",
"referer": "https://liaobots.com/",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
- "x-auth-code": str(kwargs.get("auth")),
- }
- models = {
- "gpt-4": {
- "id": "gpt-4",
- "name": "GPT-4",
- "maxLength": 24000,
- "tokenLimit": 8000,
- },
- "gpt-3.5-turbo": {
- "id": "gpt-3.5-turbo",
- "name": "GPT-3.5",
- "maxLength": 12000,
- "tokenLimit": 4000,
- },
- }
- json_data = {
- "conversationId": str(uuid.uuid4()),
- "model": models[model],
- "messages": messages,
- "key": "",
- "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
}
+ async with ClientSession(
+ headers=headers
+ ) as session:
+ model = model if model in models else "gpt-3.5-turbo"
+ auth_code = auth if isinstance(auth, str) else cls._auth_code
+ if not auth_code:
+ async with session.post("https://liaobots.com/api/user", proxy=proxy, json={"authcode": ""}) as response:
+ response.raise_for_status()
+ auth_code = cls._auth_code = json.loads((await response.text()))["authCode"]
+ data = {
+ "conversationId": str(uuid.uuid4()),
+ "model": models[model],
+ "messages": messages,
+ "key": "",
+ "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
+ }
+ async with session.post("https://liaobots.com/api/chat", proxy=proxy, json=data, headers={"x-auth-code": auth_code}) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ yield line.decode("utf-8")
- response = requests.post(
- "https://liaobots.com/api/chat",
- headers=headers,
- json=json_data,
- stream=True,
- )
- response.raise_for_status()
- for token in response.iter_content(chunk_size=2046):
- yield token.decode("utf-8")
@classmethod
@property
@@ -67,6 +84,7 @@ class Liaobots(BaseProvider):
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
+ ("proxy", "str"),
("auth", "str"),
]
param = ", ".join([": ".join(p) for p in params])
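Liaobots now fetches an anonymous auth code once and caches it on the class (`_auth_code`), so later calls skip the extra round trip. The same lazy-cache pattern in isolation, as a sketch that mirrors the request above:

```py
import json

class AuthCodeCache:
    _auth_code = None  # class-level cache, shared across calls

    @classmethod
    async def get(cls, session) -> str:
        # Fetch once, then reuse for the lifetime of the process.
        if not cls._auth_code:
            async with session.post("https://liaobots.com/api/user",
                                    json={"authcode": ""}) as response:
                response.raise_for_status()
                cls._auth_code = json.loads(await response.text())["authCode"]
        return cls._auth_code
```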
diff --git a/g4f/Provider/Lockchat.py b/g4f/Provider/Lockchat.py
index 974d1331..c15eec8d 100644
--- a/g4f/Provider/Lockchat.py
+++ b/g4f/Provider/Lockchat.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import json
import requests
@@ -7,46 +9,42 @@ from .base_provider import BaseProvider
class Lockchat(BaseProvider):
- url = "http://supertest.lockchat.app"
- supports_stream = True
+ url: str = "http://supertest.lockchat.app"
+ supports_stream = True
supports_gpt_35_turbo = True
- supports_gpt_4 = True
+ supports_gpt_4 = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
+ stream: bool, **kwargs: Any) -> CreateResult:
+
temperature = float(kwargs.get("temperature", 0.7))
payload = {
"temperature": temperature,
- "messages": messages,
- "model": model,
- "stream": True,
+ "messages" : messages,
+ "model" : model,
+ "stream" : True,
}
headers = {
"user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
}
- response = requests.post(
- "http://supertest.lockchat.app/v1/chat/completions",
- json=payload,
- headers=headers,
- stream=True,
- )
+ response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
+ json=payload, headers=headers, stream=True)
+
response.raise_for_status()
for token in response.iter_lines():
if b"The model: `gpt-4` does not exist" in token:
print("error, retrying...")
Lockchat.create_completion(
- model=model,
- messages=messages,
- stream=stream,
- temperature=temperature,
- **kwargs,
- )
+ model = model,
+ messages = messages,
+ stream = stream,
+ temperature = temperature,
+ **kwargs)
+
if b"content" in token:
token = json.loads(token.decode("utf-8").split("data: ")[1])
token = token["choices"][0]["delta"].get("content")
diff --git a/g4f/Provider/Opchatgpts.py b/g4f/Provider/Opchatgpts.py
index 9daa0ed9..241646f0 100644
--- a/g4f/Provider/Opchatgpts.py
+++ b/g4f/Provider/Opchatgpts.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import requests
from ..typing import Any, CreateResult
@@ -5,33 +7,30 @@ from .base_provider import BaseProvider
class Opchatgpts(BaseProvider):
- url = "https://opchatgpts.net"
- working = True
+ url = "https://opchatgpts.net"
+ working = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
- temperature = kwargs.get("temperature", 0.8)
- max_tokens = kwargs.get("max_tokens", 1024)
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ temperature = kwargs.get("temperature", 0.8)
+ max_tokens = kwargs.get("max_tokens", 1024)
system_prompt = kwargs.get(
"system_prompt",
- "Converse as if you were an AI assistant. Be friendly, creative.",
- )
+ "Converse as if you were an AI assistant. Be friendly, creative.")
+
payload = _create_payload(
- messages=messages,
- temperature=temperature,
- max_tokens=max_tokens,
- system_prompt=system_prompt,
- )
-
- response = requests.post(
- "https://opchatgpts.net/wp-json/ai-chatbot/v1/chat", json=payload
- )
+ messages = messages,
+ temperature = temperature,
+ max_tokens = max_tokens,
+ system_prompt = system_prompt)
+
+ response = requests.post("https://opchatgpts.net/wp-json/ai-chatbot/v1/chat", json=payload)
+
response.raise_for_status()
yield response.json()["reply"]
@@ -39,24 +38,23 @@ class Opchatgpts(BaseProvider):
def _create_payload(
messages: list[dict[str, str]],
temperature: float,
- max_tokens: int,
- system_prompt: str,
-):
+ max_tokens: int, system_prompt: str) -> dict:
+
return {
- "env": "chatbot",
- "session": "N/A",
- "prompt": "\n",
- "context": system_prompt,
- "messages": messages,
- "newMessage": messages[::-1][0]["content"],
- "userName": '<div class="mwai-name-text">User:</div>',
- "aiName": '<div class="mwai-name-text">AI:</div>',
- "model": "gpt-3.5-turbo",
- "temperature": temperature,
- "maxTokens": max_tokens,
- "maxResults": 1,
- "apiKey": "",
- "service": "openai",
- "embeddingsIndex": "",
- "stop": "",
+ "env" : "chatbot",
+ "session" : "N/A",
+ "prompt" : "\n",
+ "context" : system_prompt,
+ "messages" : messages,
+ "newMessage" : messages[::-1][0]["content"],
+ "userName" : '<div class="mwai-name-text">User:</div>',
+ "aiName" : '<div class="mwai-name-text">AI:</div>',
+ "model" : "gpt-3.5-turbo",
+ "temperature" : temperature,
+ "maxTokens" : max_tokens,
+ "maxResults" : 1,
+ "apiKey" : "",
+ "service" : "openai",
+ "embeddingsIndex" : "",
+ "stop" : "",
}
diff --git a/g4f/Provider/OpenAssistant.py b/g4f/Provider/OpenAssistant.py
new file mode 100644
index 00000000..3a931597
--- /dev/null
+++ b/g4f/Provider/OpenAssistant.py
@@ -0,0 +1,102 @@
+from __future__ import annotations
+
+import json
+
+from aiohttp import ClientSession
+
+from ..typing import Any, AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+
+
+class OpenAssistant(AsyncGeneratorProvider):
+ url = "https://open-assistant.io/chat"
+ needs_auth = True
+ working = True
+ model = "OA_SFT_Llama_30B_6"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ proxy: str = None,
+ cookies: dict = None,
+ **kwargs: Any
+ ) -> AsyncGenerator:
+ if proxy and "://" not in proxy:
+ proxy = f"http://{proxy}"
+ if not cookies:
+ cookies = get_cookies("open-assistant.io")
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+ }
+ async with ClientSession(
+ cookies=cookies,
+ headers=headers
+ ) as session:
+ async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response:
+ chat_id = (await response.json())["id"]
+
+ data = {
+ "chat_id": chat_id,
+ "content": f"<s>[INST]\n{format_prompt(messages)}\n[/INST]",
+ "parent_id": None
+ }
+ async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response:
+ parent_id = (await response.json())["id"]
+
+ data = {
+ "chat_id": chat_id,
+ "parent_id": parent_id,
+ "model_config_name": model if model else cls.model,
+ "sampling_parameters":{
+ "top_k": 50,
+ "top_p": None,
+ "typical_p": None,
+ "temperature": 0.35,
+ "repetition_penalty": 1.1111111111111112,
+ "max_new_tokens": 1024,
+ **kwargs
+ },
+ "plugins":[]
+ }
+ async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response:
+ data = await response.json()
+ if "id" in data:
+ message_id = data["id"]
+ elif "message" in data:
+ raise RuntimeError(data["message"])
+ else:
+ response.raise_for_status()
+
+ params = {
+ 'chat_id': chat_id,
+ 'message_id': message_id,
+ }
+ async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response:
+ start = "data: "
+ async for line in response.content:
+ line = line.decode("utf-8")
+ if line and line.startswith(start):
+ line = json.loads(line[len(start):])
+ if line["event_type"] == "token":
+ yield line["text"]
+
+ params = {
+ 'chat_id': chat_id,
+ }
+ async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
+ response.raise_for_status()
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("proxy", "str"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
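Because extra keyword arguments are merged into `sampling_parameters`, generation settings can be overridden per call. A sketch (requires open-assistant.io login cookies, read via `get_cookies` or passed explicitly):

```py
import asyncio

from g4f.Provider import OpenAssistant

async def main():
    async for token in OpenAssistant.create_async_generator(
        model="OA_SFT_Llama_30B_6",
        messages=[{"role": "user", "content": "Hello"}],
        temperature=0.2,      # overrides the 0.35 default above
        max_new_tokens=256,
    ):
        print(token, end="", flush=True)

asyncio.run(main())
```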
diff --git a/g4f/Provider/OpenaiChat.py b/g4f/Provider/OpenaiChat.py
new file mode 100644
index 00000000..f2d1ed6f
--- /dev/null
+++ b/g4f/Provider/OpenaiChat.py
@@ -0,0 +1,86 @@
+from __future__ import annotations
+
+has_module = True
+try:
+ from revChatGPT.V1 import AsyncChatbot
+except ImportError:
+ has_module = False
+
+import json
+
+from httpx import AsyncClient
+
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+
+
+class OpenaiChat(AsyncGeneratorProvider):
+ url = "https://chat.openai.com"
+ needs_auth = True
+ working = has_module
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ supports_stream = True
+ _access_token = None
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ proxy: str = None,
+ access_token: str = _access_token,
+ cookies: dict = None,
+ **kwargs: dict
+ ) -> AsyncGenerator:
+
+ config = {"access_token": access_token, "model": model}
+ if proxy:
+ if "://" not in proxy:
+ proxy = f"http://{proxy}"
+ config["proxy"] = proxy
+
+ bot = AsyncChatbot(
+ config=config
+ )
+
+ if not access_token:
+ cookies = cookies if cookies else get_cookies("chat.openai.com")
+ cls._access_token = await get_access_token(bot.session, cookies)
+ bot.set_access_token(cls._access_token)
+
+ returned = None
+ async for message in bot.ask(format_prompt(messages)):
+ message = message["message"]
+ if returned:
+ if message.startswith(returned):
+ new = message[len(returned):]
+ if new:
+ yield new
+ else:
+ yield message
+ returned = message
+
+ await bot.delete_conversation(bot.conversation_id)
+
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("proxy", "str"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
+async def get_access_token(session: AsyncClient, cookies: dict):
+ response = await session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
+ response.raise_for_status()
+ try:
+ return response.json()["accessToken"]
+ except json.decoder.JSONDecodeError:
+ raise RuntimeError(f"Response: {response.text}") \ No newline at end of file
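`get_access_token` exchanges session cookies for the bearer token that revChatGPT then uses. The same exchange can be done standalone; the cookie name below is the usual session cookie but should be treated as an assumption:

```py
import asyncio

from httpx import AsyncClient

async def main():
    cookies = {"__Secure-next-auth.session-token": "<placeholder>"}
    async with AsyncClient() as client:
        response = await client.get(
            "https://chat.openai.com/api/auth/session", cookies=cookies)
        response.raise_for_status()
        print(response.json()["accessToken"])

asyncio.run(main())
```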
diff --git a/g4f/Provider/Raycast.py b/g4f/Provider/Raycast.py
index 1f13c9fa..7ddc8acd 100644
--- a/g4f/Provider/Raycast.py
+++ b/g4f/Provider/Raycast.py
@@ -1,17 +1,20 @@
+from __future__ import annotations
+
import json
+
import requests
+
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
class Raycast(BaseProvider):
- url = "https://raycast.com"
- # model = ['gpt-3.5-turbo', 'gpt-4']
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
- supports_stream = True
- needs_auth = True
- working = True
+ url = "https://raycast.com"
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ supports_stream = True
+ needs_auth = True
+ working = True
@staticmethod
def create_completion(
diff --git a/g4f/Provider/Theb.py b/g4f/Provider/Theb.py
index 09c94c24..72fce3ac 100644
--- a/g4f/Provider/Theb.py
+++ b/g4f/Provider/Theb.py
@@ -1,74 +1,75 @@
-import json,random,requests
-# from curl_cffi import requests
+from __future__ import annotations
+
+import json
+import random
+
+import requests
+
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
class Theb(BaseProvider):
- url = "https://theb.ai"
- working = True
- supports_stream = True
- supports_gpt_35_turbo = True
- needs_auth = True
+ url = "https://theb.ai"
+ working = True
+ supports_stream = True
+ supports_gpt_35_turbo = True
+ needs_auth = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
- conversation = ''
- for message in messages:
- conversation += '%s: %s\n' % (message['role'], message['content'])
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
+ conversation += "\nassistant: "
- conversation += 'assistant: '
auth = kwargs.get("auth", {
"bearer_token":"free",
"org_id":"theb",
})
+
bearer_token = auth["bearer_token"]
- org_id = auth["org_id"]
+ org_id = auth["org_id"]
+
headers = {
- 'authority': 'beta.theb.ai',
- 'accept': 'text/event-stream',
- 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
- 'authorization': 'Bearer '+bearer_token,
- 'content-type': 'application/json',
- 'origin': 'https://beta.theb.ai',
- 'referer': 'https://beta.theb.ai/home',
- 'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
- 'sec-ch-ua-mobile': '?0',
+ 'authority' : 'beta.theb.ai',
+ 'accept' : 'text/event-stream',
+ 'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+ 'authorization' : 'Bearer '+bearer_token,
+ 'content-type' : 'application/json',
+ 'origin' : 'https://beta.theb.ai',
+ 'referer' : 'https://beta.theb.ai/home',
+ 'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+ 'sec-ch-ua-mobile' : '?0',
'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
- 'x-ai-model': 'ee8d4f29cb7047f78cbe84313ed6ace8',
+ 'sec-fetch-dest' : 'empty',
+ 'sec-fetch-mode' : 'cors',
+ 'sec-fetch-site' : 'same-origin',
+ 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
+ 'x-ai-model' : 'ee8d4f29cb7047f78cbe84313ed6ace8',
}
- # generate 10 random number
- # 0.1 - 0.9
+
req_rand = random.randint(100000000, 9999999999)
json_data: dict[str, Any] = {
- "text": conversation,
- "category": "04f58f64a4aa4191a957b47290fee864",
- "model": "ee8d4f29cb7047f78cbe84313ed6ace8",
+ "text" : conversation,
+ "category" : "04f58f64a4aa4191a957b47290fee864",
+ "model" : "ee8d4f29cb7047f78cbe84313ed6ace8",
"model_params": {
- "system_prompt": "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}",
- "temperature": kwargs.get("temperature", 1),
- "top_p": kwargs.get("top_p", 1),
- "frequency_penalty": kwargs.get("frequency_penalty", 0),
- "presence_penalty": kwargs.get("presence_penalty", 0),
- "long_term_memory": "auto"
+ "system_prompt" : "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}",
+ "temperature" : kwargs.get("temperature", 1),
+ "top_p" : kwargs.get("top_p", 1),
+ "frequency_penalty" : kwargs.get("frequency_penalty", 0),
+ "presence_penalty" : kwargs.get("presence_penalty", 0),
+ "long_term_memory" : "auto"
}
}
- response = requests.post(
- "https://beta.theb.ai/api/conversation?org_id="+org_id+"&req_rand="+str(req_rand),
- headers=headers,
- json=json_data,
- stream=True,
- )
+
+ response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
+ headers=headers, json=json_data, stream=True)
+
response.raise_for_status()
content = ""
next_content = ""
diff --git a/g4f/Provider/V50.py b/g4f/Provider/V50.py
index 125dd7c5..81a95ba8 100644
--- a/g4f/Provider/V50.py
+++ b/g4f/Provider/V50.py
@@ -1,52 +1,57 @@
-import uuid, requests
+from __future__ import annotations
+
+import uuid
+
+import requests
+
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
class V50(BaseProvider):
- url = 'https://p5.v50.ltd'
- supports_gpt_35_turbo = True
- supports_stream = False
- needs_auth = False
- working = True
+ url = 'https://p5.v50.ltd'
+ supports_gpt_35_turbo = True
+ supports_stream = False
+ needs_auth = False
+ working = False
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
- conversation = ''
- for message in messages:
- conversation += '%s: %s\n' % (message['role'], message['content'])
+ stream: bool, **kwargs: Any) -> CreateResult:
- conversation += 'assistant: '
+ conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
+ conversation += "\nassistant: "
+
payload = {
- "prompt": conversation,
- "options": {},
- "systemMessage": ".",
- "temperature": kwargs.get("temperature", 0.4),
- "top_p": kwargs.get("top_p", 0.4),
- "model": model,
- "user": str(uuid.uuid4())
+ "prompt" : conversation,
+ "options" : {},
+ "systemMessage" : ".",
+ "temperature" : kwargs.get("temperature", 0.4),
+ "top_p" : kwargs.get("top_p", 0.4),
+ "model" : model,
+ "user" : str(uuid.uuid4())
}
+
headers = {
- 'authority': 'p5.v50.ltd',
- 'accept': 'application/json, text/plain, */*',
- 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
- 'content-type': 'application/json',
- 'origin': 'https://p5.v50.ltd',
- 'referer': 'https://p5.v50.ltd/',
+ 'authority' : 'p5.v50.ltd',
+ 'accept' : 'application/json, text/plain, */*',
+ 'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+ 'content-type' : 'application/json',
+ 'origin' : 'https://p5.v50.ltd',
+ 'referer' : 'https://p5.v50.ltd/',
'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
+ 'sec-fetch-dest' : 'empty',
+ 'sec-fetch-mode' : 'cors',
+ 'sec-fetch-site' : 'same-origin',
+ 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
}
response = requests.post("https://p5.v50.ltd/api/chat-process",
json=payload, headers=headers, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
- yield response.text
+
+ if "https://fk1.v50.ltd" not in response.text:
+ yield response.text
@classmethod
@property
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index 186662c4..8aaf5656 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import base64
import json
import uuid
@@ -10,17 +12,16 @@ from .base_provider import BaseProvider
class Vercel(BaseProvider):
- url = "https://play.vercel.ai"
- working = True
+ url = "https://play.vercel.ai"
+ working = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
+ stream: bool, **kwargs: Any) -> CreateResult:
+
if model in ["gpt-3.5-turbo", "gpt-4"]:
model = "openai:" + model
yield _chat(model_id=model, messages=messages)
@@ -29,8 +30,8 @@ class Vercel(BaseProvider):
def _chat(model_id: str, messages: list[dict[str, str]]) -> str:
session = requests.Session(impersonate="chrome107")
- url = "https://sdk.vercel.ai/api/generate"
- header = _create_header(session)
+ url = "https://sdk.vercel.ai/api/generate"
+ header = _create_header(session)
payload = _create_payload(model_id, messages)
response = session.post(url=url, headers=header, json=payload)
@@ -44,15 +45,13 @@ def _create_payload(model_id: str, messages: list[dict[str, str]]) -> dict[str,
"messages": messages,
"playgroundId": str(uuid.uuid4()),
"chatIndex": 0,
- "model": model_id,
- } | default_params
+ "model": model_id} | default_params
def _create_header(session: requests.Session):
custom_encoding = _get_custom_encoding(session)
return {"custom-encoding": custom_encoding}
-
# based on https://github.com/ading2210/vercel-llm-api
def _get_custom_encoding(session: requests.Session):
url = "https://sdk.vercel.ai/openai.jpeg"
diff --git a/g4f/Provider/Wewordle.py b/g4f/Provider/Wewordle.py
index f7f47ee0..99c81a84 100644
--- a/g4f/Provider/Wewordle.py
+++ b/g4f/Provider/Wewordle.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import json
import random
import string
@@ -10,65 +12,62 @@ from .base_provider import BaseProvider
class Wewordle(BaseProvider):
- url = "https://wewordle.org/gptapi/v1/android/turbo"
- working = True
- supports_gpt_35_turbo = True
+ url = "https://wewordle.org/"
+ working = True
+ supports_gpt_35_turbo = True
- @staticmethod
+ @classmethod
def create_completion(
+ cls,
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
- base = ""
-
- for message in messages:
- base += "%s: %s\n" % (message["role"], message["content"])
- base += "assistant:"
+ stream: bool, **kwargs: Any) -> CreateResult:
+
# randomize user id and app id
_user_id = "".join(
- random.choices(f"{string.ascii_lowercase}{string.digits}", k=16)
- )
+ random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))
+
_app_id = "".join(
- random.choices(f"{string.ascii_lowercase}{string.digits}", k=31)
- )
+ random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))
+
# make current date with format utc
_request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
headers = {
- "accept": "*/*",
- "pragma": "no-cache",
- "Content-Type": "application/json",
- "Connection": "keep-alive"
+ "accept" : "*/*",
+ "pragma" : "no-cache",
+ "Content-Type" : "application/json",
+ "Connection" : "keep-alive"
# user agent android client
# 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 10; SM-G975F Build/QP1A.190711.020)',
}
+
data: dict[str, Any] = {
- "user": _user_id,
- "messages": [{"role": "user", "content": base}],
+ "user" : _user_id,
+ "messages" : messages,
"subscriber": {
- "originalPurchaseDate": None,
- "originalApplicationVersion": None,
- "allPurchaseDatesMillis": {},
- "entitlements": {"active": {}, "all": {}},
- "allPurchaseDates": {},
- "allExpirationDatesMillis": {},
- "allExpirationDates": {},
- "originalAppUserId": f"$RCAnonymousID:{_app_id}",
- "latestExpirationDate": None,
- "requestDate": _request_date,
- "latestExpirationDateMillis": None,
- "nonSubscriptionTransactions": [],
- "originalPurchaseDateMillis": None,
- "managementURL": None,
+ "originalPurchaseDate" : None,
+ "originalApplicationVersion" : None,
+ "allPurchaseDatesMillis" : {},
+ "entitlements" : {"active": {}, "all": {}},
+ "allPurchaseDates" : {},
+ "allExpirationDatesMillis" : {},
+ "allExpirationDates" : {},
+ "originalAppUserId" : f"$RCAnonymousID:{_app_id}",
+ "latestExpirationDate" : None,
+ "requestDate" : _request_date,
+ "latestExpirationDateMillis" : None,
+ "nonSubscriptionTransactions" : [],
+ "originalPurchaseDateMillis" : None,
+ "managementURL" : None,
"allPurchasedProductIdentifiers": [],
- "firstSeen": _request_date,
- "activeSubscriptions": [],
- },
+ "firstSeen" : _request_date,
+ "activeSubscriptions" : [],
+ }
}
- url = "https://wewordle.org/gptapi/v1/android/turbo"
- response = requests.post(url, headers=headers, data=json.dumps(data))
+ response = requests.post(f"{cls.url}gptapi/v1/android/turbo",
+ headers=headers, data=json.dumps(data))
+
response.raise_for_status()
_json = response.json()
if "message" in _json:
diff --git a/g4f/Provider/Wuguokai.py b/g4f/Provider/Wuguokai.py
new file mode 100644
index 00000000..a9614626
--- /dev/null
+++ b/g4f/Provider/Wuguokai.py
@@ -0,0 +1,68 @@
+from __future__ import annotations
+
+import random
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Wuguokai(BaseProvider):
+ url = 'https://chat.wuguokai.xyz'
+ supports_gpt_35_turbo = True
+ working = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ base = ''
+ for message in messages:
+ base += '%s: %s\n' % (message['role'], message['content'])
+ base += 'assistant:'
+
+ headers = {
+ 'authority': 'ai-api.wuguokai.xyz',
+ 'accept': 'application/json, text/plain, */*',
+ 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat.wuguokai.xyz',
+ 'referer': 'https://chat.wuguokai.xyz/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-site',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
+ }
+ data ={
+ "prompt": base,
+ "options": {},
+ "userId": f"#/chat/{random.randint(1,99999999)}",
+ "usingContext": True
+ }
+ response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process", headers=headers, timeout=3, json=data, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
+ _split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
+ if response.status_code == 200:
+ if len(_split) > 1:
+ yield _split[1].strip()
+ else:
+ yield _split[0].strip()
+ else:
+ raise Exception(f"Error: {response.status_code} {response.reason}")
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool")
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 0d8114a8..121d1dbd 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -1,59 +1,40 @@
-import re
-import urllib.parse
+from __future__ import annotations
-from curl_cffi import requests
+import json
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from aiohttp import ClientSession
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
-class You(BaseProvider):
+
+class You(AsyncGeneratorProvider):
url = "https://you.com"
working = True
supports_gpt_35_turbo = True
+ supports_stream = True
@staticmethod
- def create_completion(
+ async def create_async_generator(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
- url_param = _create_url_param(messages)
- headers = _create_header()
- url = f"https://you.com/api/streamingSearch?{url_param}"
- response = requests.get(
- url,
- headers=headers,
- impersonate="chrome107",
- )
- response.raise_for_status()
- yield _parse_output(response.text)
-
-
-def _create_url_param(messages: list[dict[str, str]]):
- prompt = messages.pop()["content"]
- chat = _convert_chat(messages)
- param = {"q": prompt, "domain": "youchat", "chat": chat}
- return urllib.parse.urlencode(param)
-
-
-def _convert_chat(messages: list[dict[str, str]]):
- message_iter = iter(messages)
- return [
- {"question": user["content"], "answer": assistant["content"]}
- for user, assistant in zip(message_iter, message_iter)
- ]
-
-
-def _create_header():
- return {
- "accept": "text/event-stream",
- "referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
- }
-
-
-def _parse_output(output: str) -> str:
- regex = r"^data:\s{\"youChatToken\": \"(.*)\"}$"
- tokens = [token for token in re.findall(regex, output, re.MULTILINE)]
- return "".join(tokens)
+ cookies: dict = None,
+ **kwargs,
+ ) -> AsyncGenerator:
+ if not cookies:
+ cookies = get_cookies("you.com")
+ headers = {
+ "Accept": "text/event-stream",
+ "Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0"
+ }
+ async with ClientSession(headers=headers, cookies=cookies) as session:
+ async with session.get(
+ "https://you.com/api/streamingSearch",
+ params={"q": format_prompt(messages), "domain": "youchat", "chat": ""},
+ ) as response:
+ start = 'data: {"youChatToken": '
+ async for line in response.content:
+ line = line.decode('utf-8')
+ if line.startswith(start):
+ yield json.loads(line[len(start): -2]) \ No newline at end of file
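The `You` parser relies on each event line ending with `}\n`: it slices off the fixed prefix and the last two characters, leaving a bare JSON string literal. Worked through on a fabricated line:

```py
import json

start = 'data: {"youChatToken": '
line = 'data: {"youChatToken": "Hello"}\n'

if line.startswith(start):
    # line[len(start):-2] drops the prefix and the trailing '}\n',
    # leaving the JSON string literal '"Hello"'.
    token = json.loads(line[len(start):-2])
    assert token == "Hello"
```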
diff --git a/g4f/Provider/Yqcloud.py b/g4f/Provider/Yqcloud.py
index a3147c2d..731e4ecb 100644
--- a/g4f/Provider/Yqcloud.py
+++ b/g4f/Provider/Yqcloud.py
@@ -1,45 +1,45 @@
-import requests
+from __future__ import annotations
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from aiohttp import ClientSession
+from .base_provider import AsyncProvider, format_prompt
-class Yqcloud(BaseProvider):
+
+class Yqcloud(AsyncProvider):
url = "https://chat9.yqcloud.top/"
working = True
supports_gpt_35_turbo = True
@staticmethod
- def create_completion(
+ async def create_async(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
- headers = _create_header()
- payload = _create_payload(messages)
-
- url = "https://api.aichatos.cloud/api/generateStream"
- response = requests.post(url=url, headers=headers, json=payload)
- response.raise_for_status()
- response.encoding = 'utf-8'
- yield response.text
+ proxy: str = None,
+ **kwargs,
+ ) -> str:
+ async with ClientSession(
+ headers=_create_header()
+ ) as session:
+ payload = _create_payload(messages)
+ async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
+ response.raise_for_status()
+ return await response.text()
def _create_header():
return {
- "accept": "application/json, text/plain, */*",
- "content-type": "application/json",
- "origin": "https://chat9.yqcloud.top",
+ "accept" : "application/json, text/plain, */*",
+ "content-type" : "application/json",
+ "origin" : "https://chat9.yqcloud.top",
}
def _create_payload(messages: list[dict[str, str]]):
- prompt = messages[-1]["content"]
return {
- "prompt": prompt,
+ "prompt": format_prompt(messages),
"network": True,
"system": "",
"withoutContext": False,
"stream": False,
+ "userId": "#/chat/1693025544336"
}
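Yqcloud now follows the plain `AsyncProvider` contract: `create_async` returns the whole reply as a single string rather than streaming it. A usage sketch:

```py
import asyncio

from g4f.Provider import Yqcloud

text = asyncio.run(Yqcloud.create_async(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
))
print(text)
```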
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index e27dee5d..fa1bdb87 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -1,59 +1,69 @@
-from .Acytoo import Acytoo
-from .Aichat import Aichat
-from .Ails import Ails
-from .AiService import AiService
-from .AItianhu import AItianhu
-from .Bard import Bard
-from .base_provider import BaseProvider
-from .Bing import Bing
-from .ChatgptAi import ChatgptAi
-from .ChatgptLogin import ChatgptLogin
-from .DeepAi import DeepAi
-from .DfeHub import DfeHub
-from .EasyChat import EasyChat
-from .Forefront import Forefront
-from .GetGpt import GetGpt
-from .H2o import H2o
-from .Liaobots import Liaobots
-from .Lockchat import Lockchat
-from .Opchatgpts import Opchatgpts
-from .Raycast import Raycast
-from .Theb import Theb
-from .Vercel import Vercel
-from .Wewordle import Wewordle
-from .You import You
-from .Yqcloud import Yqcloud
-from .Equing import Equing
-from .FastGpt import FastGpt
-from .V50 import V50
+from __future__ import annotations
+from .Acytoo import Acytoo
+from .Aichat import Aichat
+from .Ails import Ails
+from .AiService import AiService
+from .AItianhu import AItianhu
+from .Bard import Bard
+from .Bing import Bing
+from .ChatgptAi import ChatgptAi
+from .ChatgptLogin import ChatgptLogin
+from .DeepAi import DeepAi
+from .DfeHub import DfeHub
+from .EasyChat import EasyChat
+from .Forefront import Forefront
+from .GetGpt import GetGpt
+from .H2o import H2o
+from .HuggingChat import HuggingChat
+from .Liaobots import Liaobots
+from .Lockchat import Lockchat
+from .Opchatgpts import Opchatgpts
+from .OpenaiChat import OpenaiChat
+from .OpenAssistant import OpenAssistant
+from .Raycast import Raycast
+from .Theb import Theb
+from .Vercel import Vercel
+from .Wewordle import Wewordle
+from .You import You
+from .Yqcloud import Yqcloud
+from .Equing import Equing
+from .FastGpt import FastGpt
+from .V50 import V50
+from .Wuguokai import Wuguokai
+
+from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
__all__ = [
- "BaseProvider",
- "Acytoo",
- "Aichat",
- "Ails",
- "AiService",
- "AItianhu",
- "Bard",
- "Bing",
- "ChatgptAi",
- "ChatgptLogin",
- "DeepAi",
- "DfeHub",
- "EasyChat",
- "Forefront",
- "GetGpt",
- "H2o",
- "Liaobots",
- "Lockchat",
- "Opchatgpts",
- "Raycast",
- "Theb",
- "Vercel",
- "Wewordle",
- "You",
- "Yqcloud",
- "Equing",
- "FastGpt",
- "V50"
+ 'BaseProvider',
+ 'Acytoo',
+ 'Aichat',
+ 'Ails',
+ 'AiService',
+ 'AItianhu',
+ 'Bard',
+ 'Bing',
+ 'ChatgptAi',
+ 'ChatgptLogin',
+ 'DeepAi',
+ 'DfeHub',
+ 'EasyChat',
+ 'Forefront',
+ 'GetGpt',
+ 'H2o',
+ 'HuggingChat',
+ 'Liaobots',
+ 'Lockchat',
+ 'Opchatgpts',
+ 'Raycast',
+ 'OpenaiChat',
+ 'OpenAssistant',
+ 'Theb',
+ 'Vercel',
+ 'Wewordle',
+ 'You',
+ 'Yqcloud',
+ 'Equing',
+ 'FastGpt',
+ 'Wuguokai',
+ 'V50'
]
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index 98ad3514..e667819a 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -1,24 +1,28 @@
+from __future__ import annotations
+
+import asyncio
from abc import ABC, abstractmethod
-from ..typing import Any, CreateResult
+import browser_cookie3
+
+from ..typing import Any, AsyncGenerator, CreateResult, Union
class BaseProvider(ABC):
url: str
- working = False
- needs_auth = False
- supports_stream = False
+ working = False
+ needs_auth = False
+ supports_stream = False
supports_gpt_35_turbo = False
- supports_gpt_4 = False
+ supports_gpt_4 = False
@staticmethod
@abstractmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
+ stream: bool, **kwargs: Any) -> CreateResult:
+
raise NotImplementedError()
@classmethod
@@ -30,4 +34,89 @@ class BaseProvider(ABC):
("stream", "bool"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
+_cookies = {}
+
+def get_cookies(cookie_domain: str) -> dict:
+ if cookie_domain not in _cookies:
+ _cookies[cookie_domain] = {}
+
+ for cookie in browser_cookie3.load(cookie_domain):
+ _cookies[cookie_domain][cookie.name] = cookie.value
+
+ return _cookies[cookie_domain]
+
+
+def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
+ if add_special_tokens or len(messages) > 1:
+ formatted = "\n".join(
+ ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
+ )
+ return f"{formatted}\nAssistant:"
+ else:
+ return messages.pop()["content"]
+
+
+
+class AsyncProvider(BaseProvider):
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool = False, **kwargs: Any) -> CreateResult:
+
+ yield asyncio.run(cls.create_async(model, messages, **kwargs))
+
+ @staticmethod
+ @abstractmethod
+ async def create_async(
+ model: str,
+ messages: list[dict[str, str]], **kwargs: Any) -> str:
+ raise NotImplementedError()
+
+
+class AsyncGeneratorProvider(AsyncProvider):
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool = True,
+ **kwargs
+ ) -> CreateResult:
+ yield from run_generator(cls.create_async_generator(model, messages, stream=stream, **kwargs))
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ **kwargs
+ ) -> str:
+ chunks = [chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)]
+ if chunks:
+ return "".join(chunks)
+
+ @staticmethod
+ @abstractmethod
+ def create_async_generator(
+ model: str,
+ messages: list[dict[str, str]],
+ **kwargs
+ ) -> AsyncGenerator:
+ raise NotImplementedError()
+
+
+def run_generator(generator: AsyncGenerator[Union[Any, str], Any]):
+ loop = asyncio.new_event_loop()
+ gen = generator.__aiter__()
+
+ while True:
+ try:
+ yield loop.run_until_complete(gen.__anext__())
+
+ except StopAsyncIteration:
+ break
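
Taken together, the new helpers give providers a small toolkit: `get_cookies` caches browser cookies per domain, `format_prompt` flattens a chat history into a single `Role: content` transcript, and `AsyncGeneratorProvider` adapts an async generator to the synchronous `create_completion` interface via `run_generator`. A minimal sketch of a provider built on these pieces; the class name, domain, and echo body are illustrative only, not part of this commit:

```py
# Illustrative sketch only: ExampleProvider and example.com are placeholders.
from g4f.Provider.base_provider import (
    AsyncGeneratorProvider,
    format_prompt,
    get_cookies,
)
from g4f.typing import AsyncGenerator


class ExampleProvider(AsyncGeneratorProvider):
    url                   = "https://example.com"
    working               = True
    supports_gpt_35_turbo = True

    @staticmethod
    async def create_async_generator(
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        # Reuse browser cookies unless the caller passed their own.
        cookies = kwargs.get("cookies") or get_cookies(".example.com")
        # Flatten the chat history into one "Role: content" prompt.
        prompt = format_prompt(messages)
        # A real provider would stream chunks from an HTTP response here.
        yield f"echo: {prompt}"
```

Because `create_completion` drives the generator on a private event loop, the same subclass can be called from plain synchronous code, while `create_async` keeps it awaitable.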
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 47d2a7a3..065acee6 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -1,45 +1,43 @@
-from . import models
-from .Provider import BaseProvider
-from .typing import Any, CreateResult, Union
+from __future__ import annotations
+from . import models
+from .Provider import BaseProvider
+from .typing import Any, CreateResult, Union
logging = False
-
class ChatCompletion:
@staticmethod
def create(
- model: Union[models.Model, str],
- messages: list[dict[str, str]],
- provider: Union[type[BaseProvider], None] = None,
- stream: bool = False,
- auth: Union[str, None] = None,
- **kwargs: Any,
- ) -> Union[CreateResult, str]:
+ model : Union[models.Model, str],
+ messages : list[dict[str, str]],
+ provider : Union[type[BaseProvider], None] = None,
+ stream : bool = False,
+ auth : Union[str, None] = None, **kwargs: Any) -> Union[CreateResult, str]:
+
if isinstance(model, str):
try:
model = models.ModelUtils.convert[model]
except KeyError:
- raise Exception(f"The model: {model} does not exist")
+ raise Exception(f'The model: {model} does not exist')
provider = model.best_provider if provider == None else provider
if not provider.working:
- raise Exception(f"{provider.__name__} is not working")
+ raise Exception(f'{provider.__name__} is not working')
if provider.needs_auth and not auth:
raise Exception(
- f'ValueError: {provider.__name__} requires authentication (use auth="cookie or token or jwt ..." param)'
- )
+ f'ValueError: {provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')
+
if provider.needs_auth:
- kwargs["auth"] = auth
+ kwargs['auth'] = auth
if not provider.supports_stream and stream:
raise Exception(
- f"ValueError: {provider.__name__} does not support 'stream' argument"
- )
+ f'ValueError: {provider.__name__} does not support "stream" argument')
if logging:
- print(f"Using {provider.__name__} provider")
+ print(f'Using {provider.__name__} provider')
result = provider.create_completion(model.name, messages, stream, **kwargs)
- return result if stream else "".join(result)
+ return result if stream else ''.join(result)
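
After the rewrite, `create` resolves string models through `models.ModelUtils.convert`, falls back to the model's `best_provider` when none is passed, checks `working`, `needs_auth` and `supports_stream`, and joins the chunk generator into one string only when `stream` is false. A short sketch of both call styles, assuming the resolved provider supports streaming:

```py
import g4f

# Non-streaming: create() joins the provider's chunks into one string.
text = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",  # resolved via models.ModelUtils.convert
    messages=[{"role": "user", "content": "Hi"}],
)
print(text)

# Streaming: create() hands back the chunk generator unjoined.
for chunk in g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi"}],
    stream=True,
):
    print(chunk, end="")
```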
diff --git a/g4f/models.py b/g4f/models.py
index 64951792..0c5eb961 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -1,225 +1,207 @@
+from __future__ import annotations
from dataclasses import dataclass
-
from .Provider import Bard, BaseProvider, GetGpt, H2o, Liaobots, Vercel, Equing
-
@dataclass
class Model:
name: str
base_provider: str
best_provider: type[BaseProvider]
+# Config for HuggingChat, OpenAssistant
+# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
+default = Model(
+ name="",
+ base_provider="huggingface",
+ best_provider=H2o,
+)
# GPT-3.5 / GPT-4
gpt_35_turbo = Model(
- name="gpt-3.5-turbo",
- base_provider="openai",
- best_provider=GetGpt,
-)
+ name = 'gpt-3.5-turbo',
+ base_provider = 'openai',
+ best_provider = GetGpt)
gpt_4 = Model(
- name="gpt-4",
- base_provider="openai",
- best_provider=Liaobots,
-)
+ name = 'gpt-4',
+ base_provider = 'openai',
+ best_provider = Liaobots)
# Bard
palm = Model(
- name="palm",
- base_provider="google",
- best_provider=Bard,
-)
+ name = 'palm',
+ base_provider = 'google',
+ best_provider = Bard)
# H2o
falcon_7b = Model(
- name="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3",
- base_provider="huggingface",
- best_provider=H2o,
-)
+ name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
+ base_provider = 'huggingface',
+ best_provider = H2o)
falcon_40b = Model(
- name="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
- base_provider="huggingface",
- best_provider=H2o,
-)
+ name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
+ base_provider = 'huggingface',
+ best_provider = H2o)
llama_13b = Model(
- name="h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b",
- base_provider="huggingface",
- best_provider=H2o,
-)
+ name = 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b',
+ base_provider = 'huggingface',
+ best_provider = H2o)
# Vercel
claude_instant_v1 = Model(
- name="anthropic:claude-instant-v1",
- base_provider="anthropic",
- best_provider=Vercel,
-)
+ name = 'anthropic:claude-instant-v1',
+ base_provider = 'anthropic',
+ best_provider = Vercel)
claude_v1 = Model(
- name="anthropic:claude-v1",
- base_provider="anthropic",
- best_provider=Vercel,
-)
+ name = 'anthropic:claude-v1',
+ base_provider = 'anthropic',
+ best_provider = Vercel)
claude_v2 = Model(
- name="anthropic:claude-v2",
- base_provider="anthropic",
- best_provider=Vercel,
-)
+ name = 'anthropic:claude-v2',
+ base_provider = 'anthropic',
+ best_provider = Vercel)
command_light_nightly = Model(
- name="cohere:command-light-nightly",
- base_provider="cohere",
- best_provider=Vercel,
-)
+ name = 'cohere:command-light-nightly',
+ base_provider = 'cohere',
+ best_provider = Vercel)
command_nightly = Model(
- name="cohere:command-nightly",
- base_provider="cohere",
- best_provider=Vercel,
-)
+ name = 'cohere:command-nightly',
+ base_provider = 'cohere',
+ best_provider = Vercel)
gpt_neox_20b = Model(
- name="huggingface:EleutherAI/gpt-neox-20b",
- base_provider="huggingface",
- best_provider=Vercel,
-)
+ name = 'huggingface:EleutherAI/gpt-neox-20b',
+ base_provider = 'huggingface',
+ best_provider = Vercel)
oasst_sft_1_pythia_12b = Model(
- name="huggingface:OpenAssistant/oasst-sft-1-pythia-12b",
- base_provider="huggingface",
- best_provider=Vercel,
-)
+ name = 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
+ base_provider = 'huggingface',
+ best_provider = Vercel)
oasst_sft_4_pythia_12b_epoch_35 = Model(
- name="huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
- base_provider="huggingface",
- best_provider=Vercel,
-)
+ name = 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
+ base_provider = 'huggingface',
+ best_provider = Vercel)
santacoder = Model(
- name="huggingface:bigcode/santacoder",
- base_provider="huggingface",
- best_provider=Vercel,
-)
+ name = 'huggingface:bigcode/santacoder',
+ base_provider = 'huggingface',
+ best_provider = Vercel)
bloom = Model(
- name="huggingface:bigscience/bloom",
- base_provider="huggingface",
- best_provider=Vercel,
-)
+ name = 'huggingface:bigscience/bloom',
+ base_provider = 'huggingface',
+ best_provider = Vercel)
flan_t5_xxl = Model(
- name="huggingface:google/flan-t5-xxl",
- base_provider="huggingface",
- best_provider=Vercel,
-)
+ name = 'huggingface:google/flan-t5-xxl',
+ base_provider = 'huggingface',
+ best_provider = Vercel)
code_davinci_002 = Model(
- name="openai:code-davinci-002",
- base_provider="openai",
- best_provider=Vercel,
-)
+ name = 'openai:code-davinci-002',
+ base_provider = 'openai',
+ best_provider = Vercel)
gpt_35_turbo_16k = Model(
- name="openai:gpt-3.5-turbo-16k",
- base_provider="openai",
- best_provider=Vercel,
-)
+ name = 'openai:gpt-3.5-turbo-16k',
+ base_provider = 'openai',
+ best_provider = Vercel)
gpt_35_turbo_16k_0613 = Model(
- name="openai:gpt-3.5-turbo-16k-0613",
- base_provider="openai",
- best_provider=Equing,
-)
+ name = 'openai:gpt-3.5-turbo-16k-0613',
+ base_provider = 'openai',
+ best_provider = Equing)
gpt_4_0613 = Model(
- name="openai:gpt-4-0613",
- base_provider="openai",
- best_provider=Vercel,
-)
+ name = 'openai:gpt-4-0613',
+ base_provider = 'openai',
+ best_provider = Vercel)
text_ada_001 = Model(
- name="openai:text-ada-001",
- base_provider="openai",
- best_provider=Vercel,
-)
+ name = 'openai:text-ada-001',
+ base_provider = 'openai',
+ best_provider = Vercel)
text_babbage_001 = Model(
- name="openai:text-babbage-001",
- base_provider="openai",
- best_provider=Vercel,
-)
+ name = 'openai:text-babbage-001',
+ base_provider = 'openai',
+ best_provider = Vercel)
text_curie_001 = Model(
- name="openai:text-curie-001",
- base_provider="openai",
- best_provider=Vercel,
-)
+ name = 'openai:text-curie-001',
+ base_provider = 'openai',
+ best_provider = Vercel)
text_davinci_002 = Model(
- name="openai:text-davinci-002",
- base_provider="openai",
- best_provider=Vercel,
-)
+ name = 'openai:text-davinci-002',
+ base_provider = 'openai',
+ best_provider = Vercel)
text_davinci_003 = Model(
- name="openai:text-davinci-003",
- base_provider="openai",
- best_provider=Vercel,
-)
+ name = 'openai:text-davinci-003',
+ base_provider = 'openai',
+ best_provider = Vercel)
llama13b_v2_chat = Model(
- name="replicate:a16z-infra/llama13b-v2-chat",
- base_provider="replicate",
- best_provider=Vercel,
-)
+ name = 'replicate:a16z-infra/llama13b-v2-chat',
+ base_provider = 'replicate',
+ best_provider = Vercel)
llama7b_v2_chat = Model(
- name="replicate:a16z-infra/llama7b-v2-chat",
- base_provider="replicate",
- best_provider=Vercel,
-)
+ name = 'replicate:a16z-infra/llama7b-v2-chat',
+ base_provider = 'replicate',
+ best_provider = Vercel)
class ModelUtils:
convert: dict[str, Model] = {
# GPT-3.5 / GPT-4
- "gpt-3.5-turbo": gpt_35_turbo,
- "gpt-4": gpt_4,
+ 'gpt-3.5-turbo' : gpt_35_turbo,
+ 'gpt-4' : gpt_4,
+
# Bard
- "palm2": palm,
- "palm": palm,
- "google": palm,
- "google-bard": palm,
- "google-palm": palm,
- "bard": palm,
+ 'palm2' : palm,
+ 'palm' : palm,
+ 'google' : palm,
+ 'google-bard' : palm,
+ 'google-palm' : palm,
+ 'bard' : palm,
+
# H2o
- "falcon-40b": falcon_40b,
- "falcon-7b": falcon_7b,
- "llama-13b": llama_13b,
+ 'falcon-40b' : falcon_40b,
+ 'falcon-7b' : falcon_7b,
+ 'llama-13b' : llama_13b,
+
# Vercel
- "claude-instant-v1": claude_instant_v1,
- "claude-v1": claude_v1,
- "claude-v2": claude_v2,
- "command-light-nightly": command_light_nightly,
- "command-nightly": command_nightly,
- "gpt-neox-20b": gpt_neox_20b,
- "oasst-sft-1-pythia-12b": oasst_sft_1_pythia_12b,
- "oasst-sft-4-pythia-12b-epoch-3.5": oasst_sft_4_pythia_12b_epoch_35,
- "santacoder": santacoder,
- "bloom": bloom,
- "flan-t5-xxl": flan_t5_xxl,
- "code-davinci-002": code_davinci_002,
- "gpt-3.5-turbo-16k": gpt_35_turbo_16k,
- "gpt-3.5-turbo-16k-0613": gpt_35_turbo_16k_0613,
- "gpt-4-0613": gpt_4_0613,
- "text-ada-001": text_ada_001,
- "text-babbage-001": text_babbage_001,
- "text-curie-001": text_curie_001,
- "text-davinci-002": text_davinci_002,
- "text-davinci-003": text_davinci_003,
- "llama13b-v2-chat": llama13b_v2_chat,
- "llama7b-v2-chat": llama7b_v2_chat,
- }
+ 'claude-instant-v1' : claude_instant_v1,
+ 'claude-v1' : claude_v1,
+ 'claude-v2' : claude_v2,
+ 'command-nightly' : command_nightly,
+ 'gpt-neox-20b' : gpt_neox_20b,
+ 'santacoder' : santacoder,
+ 'bloom' : bloom,
+ 'flan-t5-xxl' : flan_t5_xxl,
+ 'code-davinci-002' : code_davinci_002,
+ 'gpt-3.5-turbo-16k' : gpt_35_turbo_16k,
+ 'gpt-4-0613' : gpt_4_0613,
+ 'text-ada-001' : text_ada_001,
+ 'text-babbage-001' : text_babbage_001,
+ 'text-curie-001' : text_curie_001,
+ 'text-davinci-002' : text_davinci_002,
+ 'text-davinci-003' : text_davinci_003,
+ 'llama13b-v2-chat' : llama13b_v2_chat,
+ 'llama7b-v2-chat' : llama7b_v2_chat,
+
+ 'oasst-sft-1-pythia-12b' : oasst_sft_1_pythia_12b,
+ 'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35,
+ 'command-light-nightly' : command_light_nightly,
+ 'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
+ }
\ No newline at end of file
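
`ModelUtils.convert` is the alias table that the rewritten `ChatCompletion.create` consults, so several spellings resolve to the same `Model` entry:

```py
from g4f import models

# 'bard', 'palm', 'palm2', 'google', 'google-bard' and 'google-palm'
# all map to the same Model dataclass.
model = models.ModelUtils.convert["bard"]
print(model.name, model.base_provider, model.best_provider.__name__)
# -> palm google Bard
```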
diff --git a/g4f/typing.py b/g4f/typing.py
index 2e123112..02386037 100644
--- a/g4f/typing.py
+++ b/g4f/typing.py
@@ -1,15 +1,14 @@
from typing import Any, AsyncGenerator, Generator, NewType, Tuple, TypedDict, Union
-SHA256 = NewType("sha_256_hash", str)
+SHA256 = NewType('sha_256_hash', str)
CreateResult = Generator[str, None, None]
-
__all__ = [
- "Any",
- "AsyncGenerator",
- "Generator",
- "Tuple",
- "TypedDict",
- "SHA256",
- "CreateResult",
-]
+ 'Any',
+ 'AsyncGenerator',
+ 'Generator',
+ 'Tuple',
+ 'TypedDict',
+ 'SHA256',
+ 'CreateResult',
+]
\ No newline at end of file
diff --git a/interference/app.py b/interference/app.py
index 8018356f..836a751d 100644
--- a/interference/app.py
+++ b/interference/app.py
@@ -12,6 +12,7 @@ from g4f import ChatCompletion
app = Flask(__name__)
CORS(app)
+
@app.route("/chat/completions", methods=["POST"])
def chat_completions():
model = request.get_json().get("model", "gpt-3.5-turbo")
@@ -87,5 +88,9 @@ def chat_completions():
return app.response_class(streaming(), mimetype="text/event-stream")
+def main():
+ app.run(host="0.0.0.0", port=1337, debug=True)
+
+
if __name__ == "__main__":
- app.run(host="0.0.0.0", port=1337, debug=True)
\ No newline at end of file
+ main()
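
Factoring the `app.run` call into `main()` lets the new `g4f` console script (wired up in `setup.py` below) start the server. A client-side sketch against a locally running instance, using `httpx` since this commit adds it to `requirements.txt`; the request body mirrors the OpenAI chat-completions schema the route parses, and the exact response shape depends on the chosen provider:

```py
# Assumes the interference server is already running on localhost:1337.
import httpx

resp = httpx.post(
    "http://localhost:1337/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": False,
    },
    timeout=None,  # some providers are slow; disable the client timeout
)
print(resp.json())
```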
diff --git a/requirements.txt b/requirements.txt
index 5caad54e..6188ef5b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,4 +8,5 @@ websockets
js2py
quickjs
flask
-flask-cors
\ No newline at end of file
+flask-cors
+httpx
diff --git a/setup.py b/setup.py
index 10743750..698c6f6d 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,10 @@ with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
with open("requirements.txt") as f:
required = f.read().splitlines()
-VERSION = '0.0.2.3'
+with open("interference/requirements.txt") as f:
+ api_required = f.read().splitlines()
+
+VERSION = "0.0.2.6"
DESCRIPTION = (
"The official gpt4free repository | various collection of powerful language models"
)
@@ -26,11 +29,16 @@ setup(
long_description_content_type="text/markdown",
long_description=long_description,
packages=find_packages(),
+ data_files=["interference/app.py"],
install_requires=required,
- url='https://github.com/xtekky/gpt4free', # Link to your GitHub repository
+ extras_require={"api": api_required},
+ entry_points={
+ "console_scripts": ["g4f=interference.app:main"],
+ },
+ url="https://github.com/xtekky/gpt4free", # Link to your GitHub repository
project_urls={
- 'Source Code': 'https://github.com/xtekky/gpt4free', # GitHub link
- 'Bug Tracker': 'https://github.com/xtekky/gpt4free/issues', # Link to issue tracker
+ "Source Code": "https://github.com/xtekky/gpt4free", # GitHub link
+ "Bug Tracker": "https://github.com/xtekky/gpt4free/issues", # Link to issue tracker
},
keywords=[
"python",
diff --git a/testing/log_time.py b/testing/log_time.py
new file mode 100644
index 00000000..7d268128
--- /dev/null
+++ b/testing/log_time.py
@@ -0,0 +1,25 @@
+from time import time
+
+
+async def log_time_async(method: callable, **kwargs):
+ start = time()
+ result = await method(**kwargs)
+ secs = f"{round(time() - start, 2)} secs"
+ if result:
+ return " ".join([result, secs])
+ return secs
+
+
+def log_time_yield(method: callable, **kwargs):
+ start = time()
+ result = yield from method(**kwargs)
+ yield f" {round(time() - start, 2)} secs"
+
+
+def log_time(method: callable, **kwargs):
+ start = time()
+ result = method(**kwargs)
+ secs = f"{round(time() - start, 2)} secs"
+ if result:
+ return " ".join([result, secs])
+ return secs
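
The three wrappers time the three call styles the new tests use: plain calls, awaited coroutines, and generators that stream chunks before a timing suffix. A self-contained sketch with illustrative `hello` helpers:

```py
import asyncio

from testing.log_time import log_time, log_time_async, log_time_yield


def sync_hello():
    return "hello"

async def async_hello():
    return "hello"

def gen_hello():
    yield "hel"
    yield "lo"

print(log_time(sync_hello))                      # e.g. "hello 0.0 secs"
print(asyncio.run(log_time_async(async_hello)))  # e.g. "hello 0.0 secs"
for part in log_time_yield(gen_hello):           # yields "hel", "lo", then " 0.0 secs"
    print(part, end="")
print()
```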
diff --git a/testing/test_chat_completion.py b/testing/test_chat_completion.py
index d091d47b..32c069be 100644
--- a/testing/test_chat_completion.py
+++ b/testing/test_chat_completion.py
@@ -14,4 +14,4 @@ response = g4f.ChatCompletion.create(
active_server=5,
)
-print(response)
\ No newline at end of file
+print(response)
diff --git a/testing/test_needs_auth.py b/testing/test_needs_auth.py
new file mode 100644
index 00000000..3cef1c61
--- /dev/null
+++ b/testing/test_needs_auth.py
@@ -0,0 +1,96 @@
+import sys
+from pathlib import Path
+import asyncio
+
+sys.path.append(str(Path(__file__).parent.parent))
+
+import g4f
+from testing.log_time import log_time, log_time_async, log_time_yield
+
+
+_providers = [
+ g4f.Provider.H2o,
+ g4f.Provider.You,
+ g4f.Provider.HuggingChat,
+ g4f.Provider.OpenAssistant,
+ g4f.Provider.Bing,
+ g4f.Provider.Bard
+]
+
+_instruct = "Hello, tell about you in one sentence."
+
+_example = """
+OpenaiChat: Hello! How can I assist you today? 2.0 secs
+Bard: Hello! How can I help you today? 3.44 secs
+Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
+Async Total: 4.25 secs
+
+OpenaiChat: Hello! How can I assist you today? 1.85 secs
+Bard: Hello! How can I help you today? 3.38 secs
+Bing: Hello, this is Bing. How can I help? 😊 6.14 secs
+Stream Total: 11.37 secs
+
+OpenaiChat: Hello! How can I help you today? 3.28 secs
+Bard: Hello there! How can I help you today? 3.58 secs
+Bing: Hello! How can I help you today? 3.28 secs
+No Stream Total: 10.14 secs
+"""
+
+print("Bing: ", end="")
+for response in log_time_yield(
+ g4f.ChatCompletion.create,
+ model=g4f.models.gpt_35_turbo,
+ messages=[{"role": "user", "content": _instruct}],
+ provider=g4f.Provider.Bing,
+ #cookies=g4f.get_cookies(".huggingface.co"),
+ #stream=True,
+ auth=True
+):
+ print(response, end="")
+print()
+print()
+
+
+async def run_async():
+ responses = [
+ log_time_async(
+ provider.create_async,
+ model=None,
+ messages=[{"role": "user", "content": _instruct}],
+ )
+ for provider in _providers
+ ]
+ responses = await asyncio.gather(*responses)
+ for idx, provider in enumerate(_providers):
+ print(f"{provider.__name__}:", responses[idx])
+print("Async Total:", asyncio.run(log_time_async(run_async)))
+print()
+
+
+def run_stream():
+ for provider in _providers:
+ print(f"{provider.__name__}: ", end="")
+ for response in log_time_yield(
+ provider.create_completion,
+ model=None,
+ messages=[{"role": "user", "content": _instruct}],
+ ):
+ print(response, end="")
+ print()
+print("Stream Total:", log_time(run_stream))
+print()
+
+
+def create_no_stream():
+ for provider in _providers:
+ print(f"{provider.__name__}:", end=" ")
+ for response in log_time_yield(
+ provider.create_completion,
+ model=None,
+ messages=[{"role": "user", "content": _instruct}],
+ stream=False
+ ):
+ print(response, end="")
+ print()
+print("No Stream Total:", log_time(create_no_stream))
+print()
\ No newline at end of file
diff --git a/testing/test_providers.py b/testing/test_providers.py
index a5c6f87b..c4fcbc0c 100644
--- a/testing/test_providers.py
+++ b/testing/test_providers.py
@@ -1,67 +1,76 @@
import sys
from pathlib import Path
+from colorama import Fore
sys.path.append(str(Path(__file__).parent.parent))
-from g4f import BaseProvider, models, provider
+from g4f import BaseProvider, models, Provider
+logging = False
def main():
providers = get_providers()
- results: list[list[str | bool]] = []
+ failed_providers = []
for _provider in providers:
- print("start", _provider.__name__)
- actual_working = judge(_provider)
- expected_working = _provider.working
- match = actual_working == expected_working
+ if _provider.needs_auth:
+ continue
+ print("Provider:", _provider.__name__)
+ result = test(_provider)
+ print("Result:", result)
+ if _provider.working and not result:
+ failed_providers.append(_provider)
- results.append([_provider.__name__, expected_working, actual_working, match])
+ print()
- print("failed provider list")
- for result in results:
- if not result[3]:
- print(result)
+ if failed_providers:
+ print(f"{Fore.RED}Failed providers:\n")
+ for _provider in failed_providers:
+ print(f"{Fore.RED}{_provider.__name__}")
+ else:
+ print(f"{Fore.GREEN}All providers are working")
def get_providers() -> list[type[BaseProvider]]:
- provider_names = dir(provider)
+ provider_names = dir(Provider)
ignore_names = [
"base_provider",
- "BaseProvider",
+ "BaseProvider"
]
provider_names = [
provider_name
for provider_name in provider_names
if not provider_name.startswith("__") and provider_name not in ignore_names
]
- return [getattr(provider, provider_name) for provider_name in provider_names]
+ return [getattr(Provider, provider_name) for provider_name in sorted(provider_names)]
def create_response(_provider: type[BaseProvider]) -> str:
- model = (
- models.gpt_35_turbo.name
- if _provider is not provider.H2o
- else models.falcon_7b.name
- )
+ if _provider.supports_gpt_35_turbo:
+ model = models.gpt_35_turbo.name
+ elif _provider.supports_gpt_4:
+ model = models.gpt_4.name  # pass the name string, matching the gpt-3.5 branch
+ elif hasattr(_provider, "model"):
+ model = _provider.model
+ else:
+ model = None
response = _provider.create_completion(
model=model,
- messages=[{"role": "user", "content": "Hello world!, plz yourself"}],
+ messages=[{"role": "user", "content": "Hello"}],
stream=False,
)
return "".join(response)
-
-def judge(_provider: type[BaseProvider]) -> bool:
- if _provider.needs_auth:
- return _provider.working
-
+
+def test(_provider: type[BaseProvider]) -> bool:
try:
response = create_response(_provider)
assert type(response) is str
- return len(response) > 1
+ assert len(response) > 0
+ return response
except Exception as e:
- print(e)
+ if logging:
+ print(e)
return False
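
Since `test` now returns the response text on success and `False` otherwise, the helpers can also be reused ad hoc, for example to smoke-test every anonymous provider. A sketch, not part of the commit:

```py
from testing.test_providers import get_providers, test

for provider in get_providers():
    if provider.needs_auth or not provider.working:
        continue  # skip providers that need credentials or are marked broken
    print(provider.__name__, "->", test(provider) or "failed")
```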
diff --git a/tool/provider_init.py b/tool/provider_init.py
index 22f21d4d..cd7f9333 100644
--- a/tool/provider_init.py
+++ b/tool/provider_init.py
@@ -30,4 +30,4 @@ def create_content():
if __name__ == "__main__":
- main()
+ main()
\ No newline at end of file
diff --git a/tool/readme_table.py b/tool/readme_table.py
index 10735ba0..522c66a7 100644
--- a/tool/readme_table.py
+++ b/tool/readme_table.py
@@ -6,14 +6,35 @@ from urllib.parse import urlparse
sys.path.append(str(Path(__file__).parent.parent))
from g4f import models, Provider
-from g4f.Provider.base_provider import BaseProvider
-
-
-def main():
- print_providers()
- print("\n", "-" * 50, "\n")
- print_models()
-
+from g4f.Provider.base_provider import BaseProvider, AsyncProvider
+from testing.test_providers import test
+
+
+def print_imports():
+ print("##### Providers:")
+ print("```py")
+ print("from g4f.Provider import (")
+ for _provider in get_providers():
+ if _provider.working:
+ print(f" {_provider.__name__},")
+ print(")")
+ print("# Usage:")
+ print("response = g4f.ChatCompletion.create(..., provider=ProviderName)")
+ print("```")
+ print()
+ print()
+
+def print_async():
+ print("##### Async support:")
+ print("```py")
+ print("from g4f.Provider import (")
+ for _provider in get_providers():
+ if issubclass(_provider, AsyncProvider):
+ print(f" {_provider.__name__},")
+ print(")")
+ print("```")
+ print()
+ print()
def print_providers():
lines = [
@@ -21,40 +42,50 @@ def print_providers():
"| ------ | ------- | ------- | ----- | --------- | ------ | ---- |",
]
providers = get_providers()
- for _provider in providers:
- netloc = urlparse(_provider.url).netloc
- website = f"[{netloc}]({_provider.url})"
-
- provider_name = f"g4f.provider.{_provider.__name__}"
-
- has_gpt_35 = "✔️" if _provider.supports_gpt_35_turbo else "❌"
- has_gpt_4 = "✔️" if _provider.supports_gpt_4 else "❌"
- stream = "✔️" if _provider.supports_stream else "❌"
- status = (
- "![Active](https://img.shields.io/badge/Active-brightgreen)"
- if _provider.working
- else "![Inactive](https://img.shields.io/badge/Inactive-red)"
- )
- auth = "✔️" if _provider.needs_auth else "❌"
-
- lines.append(
- f"| {website} | {provider_name} | {has_gpt_35} | {has_gpt_4} | {stream} | {status} | {auth} |"
- )
+ for is_working in (True, False):
+ for _provider in providers:
+ if is_working != _provider.working:
+ continue
+ netloc = urlparse(_provider.url).netloc
+ website = f"[{netloc}]({_provider.url})"
+
+ provider_name = f"g4f.provider.{_provider.__name__}"
+
+ has_gpt_35 = "✔️" if _provider.supports_gpt_35_turbo else "❌"
+ has_gpt_4 = "✔️" if _provider.supports_gpt_4 else "❌"
+ stream = "✔️" if _provider.supports_stream else "❌"
+ if _provider.working:
+ if test(_provider):
+ status = '![Active](https://img.shields.io/badge/Active-brightgreen)'
+ else:
+ status = '![Unknown](https://img.shields.io/badge/Unknown-grey)'
+ else:
+ status = '![Inactive](https://img.shields.io/badge/Inactive-red)'
+ auth = "✔️" if _provider.needs_auth else "❌"
+
+ lines.append(
+ f"| {website} | {provider_name} | {has_gpt_35} | {has_gpt_4} | {stream} | {status} | {auth} |"
+ )
print("\n".join(lines))
-def get_providers() -> list[type[BaseProvider]]:
+def get_provider_names() -> list[str]:
provider_names = dir(Provider)
ignore_names = [
"base_provider",
"BaseProvider",
+ "AsyncProvider",
+ "AsyncGeneratorProvider"
]
- provider_names = [
+ return [
provider_name
for provider_name in provider_names
if not provider_name.startswith("__") and provider_name not in ignore_names
]
- return [getattr(Provider, provider_name) for provider_name in provider_names]
+
+
+def get_providers() -> list[type[BaseProvider]]:
+ return [getattr(Provider, provider_name) for provider_name in get_provider_names()]
def print_models():
@@ -79,6 +110,8 @@ def print_models():
_models = get_models()
for model in _models:
+ if model.best_provider.__name__ not in provider_urls:
+ continue
split_name = re.split(r":|/", model.name)
name = split_name[-1]
@@ -100,4 +133,8 @@ def get_models():
if __name__ == "__main__":
- main()
+ print_imports()
+ print_async()
+ print_providers()
+ print("\n", "-" * 50, "\n")
+ print_models()
\ No newline at end of file
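
With `main()` replaced by individual printers, regenerating the README sections means calling them from the repository root (so `g4f` and `testing` are importable). Note that `print_providers()` live-tests each working provider via `test`, so it needs network access and can be slow:

```py
# Run from the repository root.
from tool.readme_table import (
    print_async,
    print_imports,
    print_models,
    print_providers,
)

print_imports()    # "##### Providers:" import block
print_async()      # "##### Async support:" import block
print_providers()  # provider status table (live-tests providers)
print_models()     # model table
```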