author     H Lohaus <hlohaus@users.noreply.github.com>  2024-03-12 02:06:06 +0100
committer  GitHub <noreply@github.com>  2024-03-12 02:06:06 +0100
commit     6ef282de3a3245acbfecd08ae48dba85ff91d031 (patch)
tree       0236c9678eea8f9c78ed7c09f3d86eaf3d7c691c
parent     Update .gitignore (diff)
-rw-r--r--  README.md  77
-rw-r--r--  g4f/Provider/Aura.py  21
-rw-r--r--  g4f/Provider/Bing.py  177
-rw-r--r--  g4f/Provider/ChatForAi.py  31
-rw-r--r--  g4f/Provider/Chatgpt4Online.py  2
-rw-r--r--  g4f/Provider/ChatgptAi.py  22
-rw-r--r--  g4f/Provider/ChatgptFree.py  10
-rw-r--r--  g4f/Provider/ChatgptNext.py  20
-rw-r--r--  g4f/Provider/ChatgptX.py  6
-rw-r--r--  g4f/Provider/FlowGpt.py  7
-rw-r--r--  g4f/Provider/FreeChatgpt.py  7
-rw-r--r--  g4f/Provider/FreeGpt.py  16
-rw-r--r--  g4f/Provider/GeminiProChat.py  15
-rw-r--r--  g4f/Provider/GptTalkRu.py  50
-rw-r--r--  g4f/Provider/Koala.py  26
-rw-r--r--  g4f/Provider/Liaobots.py  13
-rw-r--r--  g4f/Provider/PerplexityLabs.py  2
-rw-r--r--  g4f/Provider/Pi.py  21
-rw-r--r--  g4f/Provider/Vercel.py  22
-rw-r--r--  g4f/Provider/You.py  60
-rw-r--r--  g4f/Provider/__init__.py  70
-rw-r--r--  g4f/Provider/bing/conversation.py  39
-rw-r--r--  g4f/Provider/deprecated/AiAsk.py (renamed from g4f/Provider/AiAsk.py)  4
-rw-r--r--  g4f/Provider/deprecated/AiChatOnline.py (renamed from g4f/Provider/AiChatOnline.py)  6
-rw-r--r--  g4f/Provider/deprecated/ChatAnywhere.py (renamed from g4f/Provider/ChatAnywhere.py)  4
-rw-r--r--  g4f/Provider/deprecated/FakeGpt.py (renamed from g4f/Provider/FakeGpt.py)  6
-rw-r--r--  g4f/Provider/deprecated/GPTalk.py (renamed from g4f/Provider/GPTalk.py)  8
-rw-r--r--  g4f/Provider/deprecated/GeekGpt.py (renamed from g4f/Provider/GeekGpt.py)  4
-rw-r--r--  g4f/Provider/deprecated/Hashnode.py (renamed from g4f/Provider/Hashnode.py)  6
-rw-r--r--  g4f/Provider/deprecated/Ylokh.py (renamed from g4f/Provider/Ylokh.py)  6
-rw-r--r--  g4f/Provider/deprecated/__init__.py  10
-rw-r--r--  g4f/Provider/helper.py  3
-rw-r--r--  g4f/Provider/not_working/AItianhu.py (renamed from g4f/Provider/AItianhu.py)  6
-rw-r--r--  g4f/Provider/not_working/Bestim.py (renamed from g4f/Provider/Bestim.py)  112
-rw-r--r--  g4f/Provider/not_working/ChatBase.py (renamed from g4f/Provider/ChatBase.py)  6
-rw-r--r--  g4f/Provider/not_working/ChatgptDemo.py (renamed from g4f/Provider/ChatgptDemo.py)  50
-rw-r--r--  g4f/Provider/not_working/ChatgptDemoAi.py (renamed from g4f/Provider/ChatgptDemoAi.py)  7
-rw-r--r--  g4f/Provider/not_working/ChatgptLogin.py (renamed from g4f/Provider/ChatgptLogin.py)  8
-rw-r--r--  g4f/Provider/not_working/Chatxyz.py (renamed from g4f/Provider/Chatxyz.py)  4
-rw-r--r--  g4f/Provider/not_working/Gpt6.py (renamed from g4f/Provider/Gpt6.py)  9
-rw-r--r--  g4f/Provider/not_working/GptChatly.py (renamed from g4f/Provider/GptChatly.py)  8
-rw-r--r--  g4f/Provider/not_working/GptForLove.py (renamed from g4f/Provider/GptForLove.py)  10
-rw-r--r--  g4f/Provider/not_working/GptGo.py (renamed from g4f/Provider/GptGo.py)  10
-rw-r--r--  g4f/Provider/not_working/GptGod.py (renamed from g4f/Provider/GptGod.py)  8
-rw-r--r--  g4f/Provider/not_working/OnlineGpt.py (renamed from g4f/Provider/OnlineGpt.py)  9
-rw-r--r--  g4f/Provider/not_working/__init__.py  14
-rw-r--r--  g4f/client.py  6
-rw-r--r--  g4f/errors.py  3
-rw-r--r--  g4f/gui/client/css/style.css  1
-rw-r--r--  g4f/models.py  7
-rw-r--r--  g4f/providers/helper.py  18
-rw-r--r--  g4f/requests/__init__.py  75
-rw-r--r--  g4f/requests/aiohttp.py  26
-rw-r--r--  g4f/requests/defaults.py  28
-rw-r--r--  g4f/webdriver.py  37
-rw-r--r--  requirements.txt  1
-rw-r--r--  setup.py  4
57 files changed, 696 insertions, 542 deletions
diff --git a/README.md b/README.md
index 06434094..9f99a2e4 100644
--- a/README.md
+++ b/README.md
@@ -230,71 +230,64 @@ set G4F_PROXY=http://host:port
| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
| ------ | ------- | ------- | ----- | ------ | ------ | ---- |
-| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chat.openai.com](https://chat.openai.com) | `g4f.Provider.OpenaiChat` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
-| [you.com](https://you.com) | `g4f.Provider.You` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chatgpt.ai](https://chatgpt.ai) | `g4f.Provider.ChatgptAi` | ❌ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [liaobots.site](https://liaobots.site) | `g4f.Provider.Liaobots` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat.openai.com](https://chat.openai.com) | `g4f.Provider.OpenaiChat` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
| [raycast.com](https://raycast.com) | `g4f.Provider.Raycast` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
-| [chat.geekgpt.org](https://chat.geekgpt.org) | `g4f.Provider.GeekGpt` | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [beta.theb.ai](https://beta.theb.ai) | `g4f.Provider.Theb` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [you.com](https://you.com) | `g4f.Provider.You` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
### GPT-3.5
| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
| ------ | ------- | ------- | ----- | ------ | ------ | ---- |
| [chat3.aiyunos.top](https://chat3.aiyunos.top/) | `g4f.Provider.AItianhuSpace` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [aichatonline.org](https://aichatonline.org) | `g4f.Provider.AiChatOnline` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [openchat.team](https://openchat.team) | `g4f.Provider.Aura` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chatbase.co](https://www.chatbase.co) | `g4f.Provider.ChatBase` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chatforai.store](https://chatforai.store) | `g4f.Provider.ChatForAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chatgpt.ai](https://chatgpt.ai) | `g4f.Provider.ChatgptAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chat.chatgptdemo.net](https://chat.chatgptdemo.net) | `g4f.Provider.ChatgptDemo` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chatgpt-free.cc](https://www.chatgpt-free.cc) | `g4f.Provider.ChatgptNext` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chat.3211000.xyz](https://chat.3211000.xyz) | `g4f.Provider.Chatxyz` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [gptalk.net](https://gptalk.net) | `g4f.Provider.GPTalk` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [gpt6.ai](https://gpt6.ai) | `g4f.Provider.Gpt6` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [gptchatly.com](https://gptchatly.com) | `g4f.Provider.GptChatly` | ✔️ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [ai18.gptforlove.com](https://ai18.gptforlove.com) | `g4f.Provider.GptForLove` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [gptgo.ai](https://gptgo.ai) | `g4f.Provider.GptGo` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [gpttalk.ru](https://gpttalk.ru) | `g4f.Provider.GptTalkRu` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chatforai.store](https://chatforai.store) | `g4f.Provider.ChatForAi` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chatgpt4online.org](https://chatgpt4online.org) | `g4f.Provider.Chatgpt4Online` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chatgpt-free.cc](https://www.chatgpt-free.cc) | `g4f.Provider.ChatgptNext` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chatgptx.de](https://chatgptx.de) | `g4f.Provider.ChatgptX` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [flowgpt.com](https://flowgpt.com/chat) | `g4f.Provider.FlowGpt` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [freegptsnav.aifree.site](https://freegptsnav.aifree.site) | `g4f.Provider.FreeGpt` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [gpttalk.ru](https://gpttalk.ru) | `g4f.Provider.GptTalkRu` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [koala.sh](https://koala.sh) | `g4f.Provider.Koala` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [app.myshell.ai](https://app.myshell.ai/chat) | `g4f.Provider.MyShell` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [onlinegpt.org](https://onlinegpt.org) | `g4f.Provider.OnlineGpt` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [perplexity.ai](https://www.perplexity.ai) | `g4f.Provider.PerplexityAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [poe.com](https://poe.com) | `g4f.Provider.Poe` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
| [talkai.info](https://talkai.info) | `g4f.Provider.TalkAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chat.vercel.ai](https://chat.vercel.ai) | `g4f.Provider.Vercel` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [aitianhu.com](https://www.aitianhu.com) | `g4f.Provider.AItianhu` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [e.aiask.me](https://e.aiask.me) | `g4f.Provider.AiAsk` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [chatgpt.bestim.org](https://chatgpt.bestim.org) | `g4f.Provider.Bestim` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatanywhere.cn](https://chatanywhere.cn) | `g4f.Provider.ChatAnywhere` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatgpt4online.org](https://chatgpt4online.org) | `g4f.Provider.Chatgpt4Online` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatbase.co](https://www.chatbase.co) | `g4f.Provider.ChatBase` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatgptdemo.info](https://chatgptdemo.info/chat) | `g4f.Provider.ChatgptDemo` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [chat.chatgptdemo.ai](https://chat.chatgptdemo.ai) | `g4f.Provider.ChatgptDemoAi` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [chatgptfree.ai](https://chatgptfree.ai) | `g4f.Provider.ChatgptFree` | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [chatgptlogin.ai](https://chatgptlogin.ai) | `g4f.Provider.ChatgptLogin` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatgptx.de](https://chatgptx.de) | `g4f.Provider.ChatgptX` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chat-shared2.zhile.io](https://chat-shared2.zhile.io) | `g4f.Provider.FakeGpt` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [freegpts1.aifree.site](https://freegpts1.aifree.site/) | `g4f.Provider.FreeGpt` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chat.3211000.xyz](https://chat.3211000.xyz) | `g4f.Provider.Chatxyz` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [gpt6.ai](https://gpt6.ai) | `g4f.Provider.Gpt6` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [gptchatly.com](https://gptchatly.com) | `g4f.Provider.GptChatly` | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [ai18.gptforlove.com](https://ai18.gptforlove.com) | `g4f.Provider.GptForLove` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [gptgo.ai](https://gptgo.ai) | `g4f.Provider.GptGo` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [gptgod.site](https://gptgod.site) | `g4f.Provider.GptGod` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [hashnode.com](https://hashnode.com) | `g4f.Provider.Hashnode` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [sdk.vercel.ai](https://sdk.vercel.ai) | `g4f.Provider.Vercel` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chat.ylokh.xyz](https://chat.ylokh.xyz) | `g4f.Provider.Ylokh` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [onlinegpt.org](https://onlinegpt.org) | `g4f.Provider.OnlineGpt` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
### Other
| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
| ------ | ------- | ------- | ----- | ------ | ------ | ---- |
+| [openchat.team](https://openchat.team) | `g4f.Provider.Aura` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [bard.google.com](https://bard.google.com) | `g4f.Provider.Bard` | ❌ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
| [deepinfra.com](https://deepinfra.com) | `g4f.Provider.DeepInfra` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [free.chatgpt.org.uk](https://free.chatgpt.org.uk) | `g4f.Provider.FreeChatgpt` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [gemini.google.com](https://gemini.google.com) | `g4f.Provider.Gemini` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
-| [ai.google.dev](https://ai.google.dev) | `g4f.Provider.GeminiPro` | ❌ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [gemini-chatbot-sigma.vercel.app](https://gemini-chatbot-sigma.vercel.app) | `g4f.Provider.GeminiProChat` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingChat` | ❌ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [ai.google.dev](https://ai.google.dev) | `g4f.Provider.GeminiPro` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
+| [gemini-chatbot-sigma.vercel.app](https://gemini-chatbot-sigma.vercel.app) | `g4f.Provider.GeminiProChat` | ❌ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingChat` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingFace` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [llama2.ai](https://www.llama2.ai) | `g4f.Provider.Llama2` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [labs.perplexity.ai](https://labs.perplexity.ai) | `g4f.Provider.PerplexityLabs` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [pi.ai](https://pi.ai/talk) | `g4f.Provider.Pi` | ❌ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [beta.theb.ai](https://beta.theb.ai) | `g4f.Provider.Theb` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [free.chatgpt.org.uk](https://free.chatgpt.org.uk) | `g4f.Provider.FreeChatgpt` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [pi.ai](https://pi.ai/talk) | `g4f.Provider.Pi` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [theb.ai](https://theb.ai) | `g4f.Provider.ThebApi` | ❌ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
-| [phind.com](https://www.phind.com) | `g4f.Provider.Phind` | ❌ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌
| [open-assistant.io](https://open-assistant.io/chat) | `g4f.Provider.OpenAssistant` | ❌ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ✔️ |
### Models
@@ -306,11 +299,11 @@ set G4F_PROXY=http://host:port
| gpt-4-turbo | OpenAI | g4f.Provider.Bing | [openai.com](https://openai.com/) |
| Llama-2-7b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
| Llama-2-13b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Llama-2-70b-chat-hf | Meta | 4+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| CodeLlama-34b-Instruct-hf | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| CodeLlama-70b-Instruct-hf | Meta | g4f.Provider.DeepInfra | [llama.meta.com](https://llama.meta.com/) |
-| Mixtral-8x7B-Instruct-v0.1 | Huggingface | 3+ Providers | [huggingface.co](https://huggingface.co/) |
-| Mistral-7B-Instruct-v0.1 | Huggingface | 3+ Providers | [huggingface.co](https://huggingface.co/) |
+| Llama-2-70b-chat-hf | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| CodeLlama-34b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| CodeLlama-70b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Mixtral-8x7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) |
+| Mistral-7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) |
| dolphin-2.6-mixtral-8x7b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
| lzlv_70b_fp16_hf | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
| airoboros-70b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
@@ -318,7 +311,9 @@ set G4F_PROXY=http://host:port
| openchat_3.5 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) |
| gemini | Google | g4f.Provider.Gemini | [gemini.google.com](https://gemini.google.com/) |
| gemini-pro | Google | 2+ Providers | [gemini.google.com](https://gemini.google.com/) |
-| claude-v2 | Anthropic | 2+ Providers | [anthropic.com](https://www.anthropic.com/) |
+| claude-v2 | Anthropic | 1+ Providers | [anthropic.com](https://www.anthropic.com/) |
+| claude-3-opus | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
+| claude-3-sonnet | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
| pi | Inflection | g4f.Provider.Pi | [inflection.ai](https://inflection.ai/) |
## 🔗 Related GPT4Free Projects
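Note on the provider tables above: each entry maps directly to the `provider` argument of the public API. A minimal sketch of selecting one of the listed providers (the model name and prompt are illustrative, not part of this commit):

import g4f

response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=g4f.Provider.ChatForAi,  # any provider from the tables above
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)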
diff --git a/g4f/Provider/Aura.py b/g4f/Provider/Aura.py
index d8f3471c..877b7fef 100644
--- a/g4f/Provider/Aura.py
+++ b/g4f/Provider/Aura.py
@@ -4,6 +4,8 @@ from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
+from ..requests import get_args_from_browser
+from ..webdriver import WebDriver
class Aura(AsyncGeneratorProvider):
url = "https://openchat.team"
@@ -15,24 +17,11 @@ class Aura(AsyncGeneratorProvider):
model: str,
messages: Messages,
proxy: str = None,
+ webdriver: WebDriver = None,
**kwargs
) -> AsyncResult:
- headers = {
- "Accept": "*/*",
- "Accept-Encoding": "gzip, deflate, br",
- "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
- "Content-Type": "application/json",
- "Origin": f"{cls.url}",
- "Referer": f"{cls.url}/",
- "Sec-Ch-Ua": '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
- "Sec-Ch-Ua-Mobile": "?0",
- "Sec-Ch-Ua-Platform": '"Linux"',
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
- }
- async with ClientSession(headers=headers) as session:
+ args = get_args_from_browser(cls.url, webdriver, proxy)
+ async with ClientSession(**args) as session:
new_messages = []
system_message = []
for message in messages:
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index 5bc89479..77178686 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -2,13 +2,12 @@ from __future__ import annotations
import random
import json
-import os
import uuid
import time
from urllib import parse
from aiohttp import ClientSession, ClientTimeout, BaseConnector
-from ..typing import AsyncResult, Messages, ImageType
+from ..typing import AsyncResult, Messages, ImageType, Cookies
from ..image import ImageResponse, ImageRequest
from .base_provider import AsyncGeneratorProvider
from .helper import get_connector
@@ -39,7 +38,7 @@ class Bing(AsyncGeneratorProvider):
messages: Messages,
proxy: str = None,
timeout: int = 900,
- cookies: dict = None,
+ cookies: Cookies = None,
connector: BaseConnector = None,
tone: str = Tones.balanced,
image: ImageType = None,
@@ -65,7 +64,7 @@ class Bing(AsyncGeneratorProvider):
else:
prompt = messages[-1]["content"]
context = create_context(messages[:-1])
-
+
cookies = {**get_default_cookies(), **cookies} if cookies else get_default_cookies()
gpt4_turbo = True if model.startswith("gpt-4-turbo") else False
@@ -79,32 +78,88 @@ def create_context(messages: Messages) -> str:
:param messages: A list of message dictionaries.
:return: A string representing the context created from the messages.
"""
- return "".join(
- f"[{message['role']}]" + ("(#message)" if message['role'] != "system" else "(#additional_instructions)") + f"\n{message['content']}\n\n"
+ return "\n\n".join(
+ f"[{message['role']}]" + ("(#message)" if message['role'] != "system" else "(#additional_instructions)") + f"\n{message['content']}"
for message in messages
)
+def get_ip_address() -> str:
+ return f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
+
class Defaults:
"""
Default settings and configurations for the Bing provider.
"""
delimiter = "\x1e"
- ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
# List of allowed message types for Bing responses
allowedMessageTypes = [
- "ActionRequest", "Chat", "Context", "Progress", "SemanticSerp",
- "GenerateContentQuery", "SearchQuery", "RenderCardRequest"
+ "ActionRequest","Chat",
+ "ConfirmationCard", "Context",
+ "InternalSearchQuery", #"InternalSearchResult",
+ "Disengaged", #"InternalLoaderMessage",
+ "Progress", "RenderCardRequest",
+ "RenderContentRequest", "AdsQuery",
+ "SemanticSerp", "GenerateContentQuery",
+ "SearchQuery", "GeneratedCode",
+ "InternalTasksMessage"
]
- sliceIds = [
- 'abv2', 'srdicton', 'convcssclick', 'stylewv2', 'contctxp2tf',
- '802fluxv1pc_a', '806log2sphs0', '727savemem', '277teditgnds0', '207hlthgrds0'
- ]
+ sliceIds = {
+ "Balanced": [
+ "supllmnfe","archnewtf",
+ "stpstream", "stpsig", "vnextvoicecf", "scmcbase", "cmcpupsalltf", "sydtransctrl",
+ "thdnsrch", "220dcl1s0", "0215wcrwips0", "0305hrthrots0", "0130gpt4t",
+ "bingfc", "0225unsticky1", "0228scss0",
+ "defquerycf", "defcontrol", "3022tphpv"
+ ],
+ "Creative": [
+ "bgstream", "fltltst2c",
+ "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl",
+ "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t",
+ "bingfccf", "0225unsticky1", "0228scss0",
+ "3022tpvs0"
+ ],
+ "Precise": [
+ "bgstream", "fltltst2c",
+ "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl",
+ "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t",
+ "bingfccf", "0225unsticky1", "0228scss0",
+ "defquerycf", "3022tpvs0"
+ ],
+ }
+
+ optionsSets = {
+ "Balanced": [
+ "nlu_direct_response_filter", "deepleo",
+ "disable_emoji_spoken_text", "responsible_ai_policy_235",
+ "enablemm", "dv3sugg", "autosave",
+ "iyxapbing", "iycapbing",
+ "galileo", "saharagenconv5", "gldcl1p",
+ "gpt4tmncnp"
+ ],
+ "Creative": [
+ "nlu_direct_response_filter", "deepleo",
+ "disable_emoji_spoken_text", "responsible_ai_policy_235",
+ "enablemm", "dv3sugg",
+ "iyxapbing", "iycapbing",
+ "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3",
+ "gpt4tmncnp"
+ ],
+ "Precise": [
+ "nlu_direct_response_filter", "deepleo",
+ "disable_emoji_spoken_text", "responsible_ai_policy_235",
+ "enablemm", "dv3sugg",
+ "iyxapbing", "iycapbing",
+ "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot",
+ "clgalileo", "gencontentv3"
+ ],
+ }
# Default location settings
location = {
"locale": "en-US", "market": "en-US", "region": "US",
+ "location":"lat:34.0536909;long:-118.242766;re=1000m;",
"locationHints": [{
"country": "United States", "state": "California", "city": "Los Angeles",
"timezoneoffset": 8, "countryConfidence": 8,
@@ -134,17 +189,8 @@ class Defaults:
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
'x-edge-shopping-flag': '1',
- 'x-forwarded-for': ip_address,
+ 'x-forwarded-for': get_ip_address(),
}
-
- optionsSets = [
- 'nlu_direct_response_filter', 'deepleo', 'disable_emoji_spoken_text',
- 'responsible_ai_policy_235', 'enablemm', 'iyxapbing', 'iycapbing',
- 'gencontentv3', 'fluxsrtrunc', 'fluxtrunc', 'fluxv1', 'rai278',
- 'replaceurl', 'eredirecturl', 'nojbfedge', "fluxcopilot", "nojbf",
- "dgencontentv3", "nointernalsugg", "disable_telemetry", "machine_affinity",
- "streamf", "codeint", "langdtwb", "fdwtlst", "fluxprod", "deuct3"
- ]
def get_default_cookies():
return {
@@ -156,11 +202,6 @@ def get_default_cookies():
'SRCHHPGUSR' : f'HV={int(time.time())}',
}
-class ConversationStyleOptionSets():
- CREATIVE = ["h3imaginative", "clgalileo", "gencontentv3"]
- BALANCED = ["galileo", "gldcl1p"]
- PRECISE = ["h3precise", "clgalileo"]
-
def format_message(msg: dict) -> str:
"""
Formats a message dictionary into a JSON string with a delimiter.
@@ -191,18 +232,8 @@ def create_message(
:param gpt4_turbo: Flag to enable GPT-4 Turbo.
:return: A formatted string message for the Bing API.
"""
- options_sets = Defaults.optionsSets.copy()
- # Append tone-specific options
- if tone == Tones.creative:
- options_sets.extend(ConversationStyleOptionSets.CREATIVE)
- elif tone == Tones.precise:
- options_sets.extend(ConversationStyleOptionSets.PRECISE)
- elif tone == Tones.balanced:
- options_sets.extend(ConversationStyleOptionSets.BALANCED)
- else:
- options_sets.append("harmonyv3")
- # Additional configurations based on parameters
+ options_sets = []
if not web_search:
options_sets.append("nosearchall")
if gpt4_turbo:
@@ -210,34 +241,38 @@ def create_message(
request_id = str(uuid.uuid4())
struct = {
- 'arguments': [{
- 'source': 'cib',
- 'optionsSets': options_sets,
- 'allowedMessageTypes': Defaults.allowedMessageTypes,
- 'sliceIds': Defaults.sliceIds,
- 'traceId': os.urandom(16).hex(),
- 'isStartOfSession': True,
- 'requestId': request_id,
- 'message': {
- **Defaults.location,
- 'author': 'user',
- 'inputMethod': 'Keyboard',
- 'text': prompt,
- 'messageType': 'Chat',
- 'requestId': request_id,
- 'messageId': request_id
- },
+ "arguments":[{
+ "source": "cib",
+ "optionsSets": [*Defaults.optionsSets[tone], *options_sets],
+ "allowedMessageTypes": Defaults.allowedMessageTypes,
+ "sliceIds": Defaults.sliceIds[tone],
"verbosity": "verbose",
"scenario": "SERP",
"plugins": [{"id": "c310c353-b9f0-4d76-ab0d-1dd5e979cf68", "category": 1}] if web_search else [],
- 'tone': tone,
- 'spokenTextMode': 'None',
- 'conversationId': conversation.conversationId,
- 'participant': {'id': conversation.clientId},
+ "traceId": str(uuid.uuid4()),
+ "conversationHistoryOptionsSets": ["autosave","savemem","uprofupd","uprofgen"],
+ "gptId": "copilot",
+ "isStartOfSession": True,
+ "requestId": request_id,
+ "message":{
+ **Defaults.location,
+ "userIpAddress": get_ip_address(),
+ "timestamp": "2024-03-11T22:40:36+01:00",
+ "author": "user",
+ "inputMethod": "Keyboard",
+ "text": prompt,
+ "messageType": "Chat",
+ "requestId": request_id,
+ "messageId": request_id
+ },
+ "tone": tone,
+ "spokenTextMode": "None",
+ "conversationId": conversation.conversationId,
+ "participant": {"id": conversation.clientId}
}],
- 'invocationId': '1',
- 'target': 'chat',
- 'type': 4
+ "invocationId": "0",
+ "target": "chat",
+ "type": 4
}
if image_request and image_request.get('imageUrl') and image_request.get('originalImageUrl'):
@@ -283,14 +318,13 @@ async def stream_generate(
"""
headers = Defaults.headers
if cookies:
- headers["Cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items())
-
+ headers["cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items())
async with ClientSession(
- timeout=ClientTimeout(total=timeout), headers=headers, connector=connector
+ headers=headers, cookies=cookies,
+ timeout=ClientTimeout(total=timeout), connector=connector
) as session:
conversation = await create_conversation(session)
image_request = await upload_image(session, image, tone) if image else None
-
try:
async with session.ws_connect(
'wss://sydney.bing.com/sydney/ChatHub',
@@ -298,12 +332,13 @@ async def stream_generate(
params={'sec_access_token': conversation.conversationSignature}
) as wss:
await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
+ await wss.send_str(format_message({"type": 6}))
await wss.receive(timeout=timeout)
await wss.send_str(create_message(conversation, prompt, tone, context, image_request, web_search, gpt4_turbo))
-
response_txt = ''
returned_text = ''
final = False
+ message_id = None
while not final:
msg = await wss.receive(timeout=timeout)
if not msg.data:
@@ -315,13 +350,17 @@ async def stream_generate(
response = json.loads(obj)
if response and response.get('type') == 1 and response['arguments'][0].get('messages'):
message = response['arguments'][0]['messages'][0]
+ # Reset memory, if we have a new message
+ if message_id is not None and message_id != message["messageId"]:
+ returned_text = ''
+ message_id = message["messageId"]
image_response = None
if (message['contentOrigin'] != 'Apology'):
if 'adaptiveCards' in message:
card = message['adaptiveCards'][0]['body'][0]
if "text" in card:
response_txt = card.get('text')
- if message.get('messageType'):
+ if message.get('messageType') and "inlines" in card:
inline_txt = card['inlines'][0].get('text')
response_txt += inline_txt + '\n'
elif message.get('contentType') == "IMAGE":
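Note on the Bing changes above: the static `ip_address` class attribute is replaced by `get_ip_address()`, so a fresh random 13.x.x.x address is generated per call for the `x-forwarded-for` header and the new `userIpAddress` field, and `create_context` now joins messages with blank lines instead of appending trailing newlines. A quick illustration of the new context format, derived from the hunk above (the sample messages are made up):

messages = [
    {"role": "system", "content": "Answer briefly."},
    {"role": "user", "content": "Hi"},
]
context = "\n\n".join(
    f"[{m['role']}]"
    + ("(#message)" if m["role"] != "system" else "(#additional_instructions)")
    + f"\n{m['content']}"
    for m in messages
)
# context == '[system](#additional_instructions)\nAnswer briefly.\n\n[user](#message)\nHi'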
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py
index afab034b..5aa728a1 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/ChatForAi.py
@@ -2,15 +2,17 @@ from __future__ import annotations
import time
import hashlib
+import uuid
from ..typing import AsyncResult, Messages
from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider
+from ..errors import RateLimitError
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-
-class ChatForAi(AsyncGeneratorProvider):
+class ChatForAi(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chatforai.store"
working = True
+ default_model = "gpt-3.5-turbo"
supports_message_history = True
supports_gpt_35_turbo = True
@@ -21,36 +23,39 @@ class ChatForAi(AsyncGeneratorProvider):
messages: Messages,
proxy: str = None,
timeout: int = 120,
+ temperature: float = 0.7,
+ top_p: float = 1,
**kwargs
) -> AsyncResult:
+ model = cls.get_model(model)
headers = {
"Content-Type": "text/plain;charset=UTF-8",
"Origin": cls.url,
"Referer": f"{cls.url}/?r=b",
}
- async with StreamSession(impersonate="chrome107", headers=headers, proxies={"https": proxy}, timeout=timeout) as session:
- prompt = messages[-1]["content"]
+ async with StreamSession(impersonate="chrome", headers=headers, proxies={"https": proxy}, timeout=timeout) as session:
timestamp = int(time.time() * 1e3)
- conversation_id = f"id_{timestamp-123}"
+ conversation_id = str(uuid.uuid4())
data = {
"conversationId": conversation_id,
"conversationType": "chat_continuous",
"botId": "chat_continuous",
"globalSettings":{
"baseUrl": "https://api.openai.com",
- "model": model if model else "gpt-3.5-turbo",
+ "model": model,
"messageHistorySize": 5,
- "temperature": 0.7,
- "top_p": 1,
+ "temperature": temperature,
+ "top_p": top_p,
**kwargs
},
- "botSettings": {},
- "prompt": prompt,
+ "prompt": "",
"messages": messages,
"timestamp": timestamp,
- "sign": generate_signature(timestamp, prompt, conversation_id)
+ "sign": generate_signature(timestamp, "", conversation_id)
}
async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
+ if response.status == 429:
+ raise RateLimitError("Rate limit reached")
response.raise_for_status()
async for chunk in response.iter_content():
if b"https://chatforai.store" in chunk:
@@ -59,5 +64,5 @@ class ChatForAi(AsyncGeneratorProvider):
def generate_signature(timestamp: int, message: str, id: str):
- buffer = f"{timestamp}:{id}:{message}:7YN8z6d6"
+ buffer = f"{id}:{timestamp}:{message}:h496Jd6b"
return hashlib.sha256(buffer.encode()).hexdigest()
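Note on the ChatForAi signing change: the request signature is still a SHA-256 digest, but the field order moved to id:timestamp:message, the salt changed from "7YN8z6d6" to "h496Jd6b", and the message field is now sent empty. A self-contained sketch of computing the new `sign` value:

import hashlib
import time
import uuid

conversation_id = str(uuid.uuid4())
timestamp = int(time.time() * 1e3)
# message is "" in the updated call: generate_signature(timestamp, "", conversation_id)
sign = hashlib.sha256(f"{conversation_id}:{timestamp}::h496Jd6b".encode()).hexdigest()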
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py
index e923a8b1..169c936d 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -13,7 +13,7 @@ class Chatgpt4Online(AsyncGeneratorProvider):
url = "https://chatgpt4online.org"
supports_message_history = True
supports_gpt_35_turbo = True
- working = False
+ working = True
_wpnonce = None
_context_id = None
diff --git a/g4f/Provider/ChatgptAi.py b/g4f/Provider/ChatgptAi.py
index a38aea5e..d15140d7 100644
--- a/g4f/Provider/ChatgptAi.py
+++ b/g4f/Provider/ChatgptAi.py
@@ -4,14 +4,16 @@ import re, html, json, string, random
from aiohttp import ClientSession
from ..typing import Messages, AsyncResult
+from ..errors import RateLimitError
from .base_provider import AsyncGeneratorProvider
-
+from .helper import get_random_string
class ChatgptAi(AsyncGeneratorProvider):
url = "https://chatgpt.ai"
- working = False
+ working = True
supports_message_history = True
- supports_gpt_35_turbo = True
+ supports_system_message = True,
+ supports_gpt_4 = True,
_system = None
@classmethod
@@ -45,7 +47,6 @@ class ChatgptAi(AsyncGeneratorProvider):
async with session.get(cls.url, proxy=proxy) as response:
response.raise_for_status()
text = await response.text()
-
result = re.search(r"data-system='(.*?)'", text)
if result :
cls._system = json.loads(html.unescape(result.group(1)))
@@ -56,14 +57,15 @@ class ChatgptAi(AsyncGeneratorProvider):
"botId": cls._system["botId"],
"customId": cls._system["customId"],
"session": cls._system["sessionId"],
- "chatId": "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=11)),
+ "chatId": get_random_string(),
"contextId": cls._system["contextId"],
- "messages": messages,
+ "messages": messages[:-1],
"newMessage": messages[-1]["content"],
- "stream": True
+ "newFileId": None,
+ "stream":True
}
async with session.post(
- f"{cls.url}/wp-json/mwai-ui/v1/chats/submit",
+ "https://chatgate.ai/wp-json/mwai-ui/v1/chats/submit",
proxy=proxy,
json=data,
headers={"X-Wp-Nonce": cls._system["restNonce"]}
@@ -76,6 +78,10 @@ class ChatgptAi(AsyncGeneratorProvider):
assert "type" in line
except:
raise RuntimeError(f"Broken line: {line.decode()}")
+ if line["type"] == "error":
+ if "https://chatgate.ai/login" in line["data"]:
+ raise RateLimitError("Rate limit reached")
+ raise RuntimeError(line["data"])
if line["type"] == "live":
yield line["data"]
elif line["type"] == "end":
diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py
index b9b25447..b345b48a 100644
--- a/g4f/Provider/ChatgptFree.py
+++ b/g4f/Provider/ChatgptFree.py
@@ -5,8 +5,7 @@ import re
from ..requests import StreamSession
from ..typing import Messages
from .base_provider import AsyncProvider
-from .helper import format_prompt, get_cookies
-
+from .helper import format_prompt
class ChatgptFree(AsyncProvider):
url = "https://chatgptfree.ai"
@@ -25,12 +24,6 @@ class ChatgptFree(AsyncProvider):
cookies: dict = None,
**kwargs
) -> str:
-
- if not cookies:
- cookies = get_cookies('chatgptfree.ai')
- if not cookies:
- raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://chatgptfree.ai on chrome]")
-
headers = {
'authority': 'chatgptfree.ai',
'accept': '*/*',
@@ -82,6 +75,5 @@ class ChatgptFree(AsyncProvider):
"bot_id": "0"
}
async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
-
response.raise_for_status()
return (await response.json())["data"]
\ No newline at end of file
diff --git a/g4f/Provider/ChatgptNext.py b/g4f/Provider/ChatgptNext.py
index 1ae37bd5..2d6f7487 100644
--- a/g4f/Provider/ChatgptNext.py
+++ b/g4f/Provider/ChatgptNext.py
@@ -4,13 +4,14 @@ import json
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
-from ..providers.base_provider import AsyncGeneratorProvider
-
+from .base_provider import AsyncGeneratorProvider
class ChatgptNext(AsyncGeneratorProvider):
url = "https://www.chatgpt-free.cc"
working = True
supports_gpt_35_turbo = True
+ supports_message_history = True
+ supports_system_message = True
@classmethod
async def create_async_generator(
@@ -18,6 +19,11 @@ class ChatgptNext(AsyncGeneratorProvider):
model: str,
messages: Messages,
proxy: str = None,
+ max_tokens: int = None,
+ temperature: float = 0.7,
+ top_p: float = 1,
+ presence_penalty: float = 0,
+ frequency_penalty: float = 0,
**kwargs
) -> AsyncResult:
if not model:
@@ -43,11 +49,11 @@ class ChatgptNext(AsyncGeneratorProvider):
"messages": messages,
"stream": True,
"model": model,
- "temperature": 0.5,
- "presence_penalty": 0,
- "frequency_penalty": 0,
- "top_p": 1,
- **kwargs
+ "temperature": temperature,
+ "presence_penalty": presence_penalty,
+ "frequency_penalty": frequency_penalty,
+ "top_p": top_p,
+ "max_tokens": max_tokens,
}
async with session.post(f"https://chat.fstha.com/api/openai/v1/chat/completions", json=data, proxy=proxy) as response:
response.raise_for_status()
diff --git a/g4f/Provider/ChatgptX.py b/g4f/Provider/ChatgptX.py
index c8b9375a..9be0d89b 100644
--- a/g4f/Provider/ChatgptX.py
+++ b/g4f/Provider/ChatgptX.py
@@ -7,12 +7,12 @@ from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
-
+from ..errors import RateLimitError
class ChatgptX(AsyncGeneratorProvider):
url = "https://chatgptx.de"
supports_gpt_35_turbo = True
- working = False
+ working = True
@classmethod
async def create_async_generator(
@@ -73,6 +73,8 @@ class ChatgptX(AsyncGeneratorProvider):
async with session.post(f'{cls.url}/sendchat', data=data, headers=headers, proxy=proxy) as response:
response.raise_for_status()
chat = await response.json()
+ if "messages" in chat and "Anfragelimit" in chat["messages"]:
+ raise RateLimitError("Rate limit reached")
if "response" not in chat or not chat["response"]:
raise RuntimeError(f'Response: {chat}')
headers = {
diff --git a/g4f/Provider/FlowGpt.py b/g4f/Provider/FlowGpt.py
index 93e7955c..d84bd81d 100644
--- a/g4f/Provider/FlowGpt.py
+++ b/g4f/Provider/FlowGpt.py
@@ -5,12 +5,14 @@ from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..errors import RateLimitError
class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://flowgpt.com/chat"
working = True
supports_gpt_35_turbo = True
supports_message_history = True
+ supports_system_message = True
default_model = "gpt-3.5-turbo"
models = [
"gpt-3.5-turbo",
@@ -30,6 +32,7 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
+ temperature: float = 0.7,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
@@ -59,7 +62,7 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
"question": messages[-1]["content"],
"history": [{"role": "assistant", "content": "Hello, how can I help you today?"}, *history],
"system": system_message,
- "temperature": kwargs.get("temperature", 0.7),
+ "temperature": temperature,
"promptId": f"model-{model}",
"documentIds": [],
"chatFileDocumentIds": [],
@@ -67,6 +70,8 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
"generateAudio": False
}
async with session.post("https://backend-k8s.flowgpt.com/v2/chat-anonymous", json=data, proxy=proxy) as response:
+ if response.status == 429:
+ raise RateLimitError("Rate limit reached")
response.raise_for_status()
async for chunk in response.content:
if chunk.strip():
diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py
index 8981ef79..c20c85d2 100644
--- a/g4f/Provider/FreeChatgpt.py
+++ b/g4f/Provider/FreeChatgpt.py
@@ -1,7 +1,7 @@
from __future__ import annotations
-import json, random
-from aiohttp import ClientSession
+import json
+from aiohttp import ClientSession, ClientTimeout
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
@@ -18,6 +18,7 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
+ timeout: int = 120,
**kwargs
) -> AsyncResult:
headers = {
@@ -33,7 +34,7 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
"Sec-Fetch-Site": "same-origin",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
}
- async with ClientSession(headers=headers) as session:
+ async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
data = {
"messages": messages,
"stream": True,
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 15232c8d..9c210f0b 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -5,15 +5,18 @@ import time, hashlib, random
from ..typing import AsyncResult, Messages
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
+from ..errors import RateLimitError
domains = [
- 'https://s.aifree.site'
+ "https://s.aifree.site",
+ "https://v.aifree.site/"
]
class FreeGpt(AsyncGeneratorProvider):
- url = "https://freegpts1.aifree.site/"
- working = False
+ url = "https://freegptsnav.aifree.site"
+ working = True
supports_message_history = True
+ supports_system_message = True
supports_gpt_35_turbo = True
@classmethod
@@ -38,15 +41,14 @@ class FreeGpt(AsyncGeneratorProvider):
"pass": None,
"sign": generate_signature(timestamp, prompt)
}
- url = random.choice(domains)
- async with session.post(f"{url}/api/generate", json=data) as response:
+ domain = random.choice(domains)
+ async with session.post(f"{domain}/api/generate", json=data) as response:
response.raise_for_status()
async for chunk in response.iter_content():
chunk = chunk.decode()
if chunk == "当前地区当日额度已消耗完":
- raise RuntimeError("Rate limit reached")
+ raise RateLimitError("Rate limit reached")
yield chunk
-
def generate_signature(timestamp: int, message: str, secret: str = ""):
data = f"{timestamp}:{message}:{secret}"
diff --git a/g4f/Provider/GeminiProChat.py b/g4f/Provider/GeminiProChat.py
index 8b8fc5dc..9c2d1fb2 100644
--- a/g4f/Provider/GeminiProChat.py
+++ b/g4f/Provider/GeminiProChat.py
@@ -2,15 +2,18 @@ from __future__ import annotations
import time
from hashlib import sha256
-from aiohttp import ClientSession
+from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
-
+from ..errors import RateLimitError
+from ..requests import raise_for_status
+from ..requests.aiohttp import get_connector
class GeminiProChat(AsyncGeneratorProvider):
url = "https://gemini-chatbot-sigma.vercel.app"
working = True
+ supports_message_history = True
@classmethod
async def create_async_generator(
@@ -18,6 +21,7 @@ class GeminiProChat(AsyncGeneratorProvider):
model: str,
messages: Messages,
proxy: str = None,
+ connector: BaseConnector = None,
**kwargs
) -> AsyncResult:
headers = {
@@ -34,7 +38,7 @@ class GeminiProChat(AsyncGeneratorProvider):
"Connection": "keep-alive",
"TE": "trailers",
}
- async with ClientSession(headers=headers) as session:
+ async with ClientSession(connector=get_connector(connector, proxy), headers=headers) as session:
timestamp = int(time.time() * 1e3)
data = {
"messages":[{
@@ -46,7 +50,10 @@ class GeminiProChat(AsyncGeneratorProvider):
"sign": generate_signature(timestamp, messages[-1]["content"]),
}
async with session.post(f"{cls.url}/api/generate", json=data, proxy=proxy) as response:
- response.raise_for_status()
+ if response.status == 500:
+ if "Quota exceeded" in await response.text():
+ raise RateLimitError(f"Response {response.status}: Rate limit reached")
+ await raise_for_status(response)
async for chunk in response.content.iter_any():
yield chunk.decode()
diff --git a/g4f/Provider/GptTalkRu.py b/g4f/Provider/GptTalkRu.py
index 16d69f3c..e8c2ffa2 100644
--- a/g4f/Provider/GptTalkRu.py
+++ b/g4f/Provider/GptTalkRu.py
@@ -1,10 +1,13 @@
from __future__ import annotations
-from aiohttp import ClientSession
+from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
-
+from .helper import get_random_string, get_connector
+from ..requests import raise_for_status, get_args_from_browser, WebDriver
+from ..webdriver import has_seleniumwire
+from ..errors import MissingRequirementsError
class GptTalkRu(AsyncGeneratorProvider):
url = "https://gpttalk.ru"
@@ -17,33 +20,40 @@ class GptTalkRu(AsyncGeneratorProvider):
model: str,
messages: Messages,
proxy: str = None,
+ connector: BaseConnector = None,
+ webdriver: WebDriver = None,
**kwargs
) -> AsyncResult:
if not model:
model = "gpt-3.5-turbo"
- headers = {
- "Accept": "application/json, text/plain, */*",
- "Accept-Language": "en-US",
- "Connection": "keep-alive",
- "Content-Type": "application/json",
- "Origin": "https://gpttalk.ru",
- "Referer": "https://gpttalk.ru/",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
- "sec-ch-ua": '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- }
- async with ClientSession(headers=headers) as session:
+ if not has_seleniumwire:
+ raise MissingRequirementsError('Install "selenium-wire" package')
+ args = get_args_from_browser(f"{cls.url}", webdriver)
+ args["headers"]["accept"] = "application/json, text/plain, */*"
+ async with ClientSession(connector=get_connector(connector, proxy), **args) as session:
+ async with session.get("https://gpttalk.ru/getToken") as response:
+ await raise_for_status(response)
+ public_key = (await response.json())["response"]["key"]["publicKey"]
+ random_string = get_random_string(8)
data = {
"model": model,
"modelType": 1,
"prompt": messages,
"responseType": "stream",
+ "security": {
+ "randomMessage": random_string,
+ "shifrText": encrypt(public_key, random_string)
+ }
}
async with session.post(f"{cls.url}/gpt2", json=data, proxy=proxy) as response:
- response.raise_for_status()
+ await raise_for_status(response)
async for chunk in response.content.iter_any():
-            yield chunk.decode()
\ No newline at end of file
+ yield chunk.decode()
+
+def encrypt(public_key: str, value: str) -> str:
+ from Crypto.Cipher import PKCS1_v1_5
+ from Crypto.PublicKey import RSA
+ import base64
+ rsa_key = RSA.importKey(public_key)
+ cipher = PKCS1_v1_5.new(rsa_key)
+    return base64.b64encode(cipher.encrypt(value.encode())).decode()
\ No newline at end of file
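Note on the new GptTalkRu handshake: the provider first fetches an RSA public key from /getToken, then sends a random 8-character string both in plain text and in PKCS#1 v1.5-encrypted form. A self-contained round-trip of the `encrypt` helper, using a locally generated key pair in place of the server-supplied key (PyCryptodome):

import base64
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA

key = RSA.generate(2048)                      # stand-in for the server's key
public_pem = key.publickey().export_key().decode()

rsa_key = RSA.importKey(public_pem)
shifr_text = base64.b64encode(PKCS1_v1_5.new(rsa_key).encrypt(b"abc12345")).decode()

# The server side would decrypt with the matching private key:
plain = PKCS1_v1_5.new(key).decrypt(base64.b64decode(shifr_text), sentinel=None)
assert plain == b"abc12345"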
diff --git a/g4f/Provider/Koala.py b/g4f/Provider/Koala.py
index 0e3ba13d..849bcdbe 100644
--- a/g4f/Provider/Koala.py
+++ b/g4f/Provider/Koala.py
@@ -1,17 +1,18 @@
from __future__ import annotations
import json
-from aiohttp import ClientSession
+from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from .helper import get_random_string, get_connector
+from ..requests import raise_for_status
class Koala(AsyncGeneratorProvider):
url = "https://koala.sh"
+ working = True
supports_gpt_35_turbo = True
supports_message_history = True
- working = True
@classmethod
async def create_async_generator(
@@ -19,35 +20,36 @@ class Koala(AsyncGeneratorProvider):
model: str,
messages: Messages,
proxy: str = None,
+ connector: BaseConnector = None,
**kwargs
) -> AsyncResult:
if not model:
model = "gpt-3.5-turbo"
headers = {
- "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "text/event-stream",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Referer": f"{cls.url}/chat",
- "Content-Type": "application/json",
"Flag-Real-Time-Data": "false",
"Visitor-ID": get_random_string(20),
"Origin": cls.url,
"Alt-Used": "koala.sh",
- "Connection": "keep-alive",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
- "Pragma": "no-cache",
- "Cache-Control": "no-cache",
"TE": "trailers",
}
- async with ClientSession(headers=headers) as session:
+ async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
+ input = messages[-1]["content"]
+ system_messages = [message["content"] for message in messages if message["role"] == "system"]
+ if system_messages:
+ input += " ".join(system_messages)
data = {
- "input": messages[-1]["content"],
+ "input": input,
"inputHistory": [
message["content"]
- for message in messages
+ for message in messages[:-1]
if message["role"] == "user"
],
"outputHistory": [
@@ -58,7 +60,7 @@ class Koala(AsyncGeneratorProvider):
"model": model,
}
async with session.post(f"{cls.url}/api/gpt/", json=data, proxy=proxy) as response:
- response.raise_for_status()
+ await raise_for_status(response)
async for chunk in response.content:
if chunk.startswith(b"data: "):
yield json.loads(chunk[6:])
\ No newline at end of file
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 92154d7d..b5e7cbe7 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -7,7 +7,7 @@ from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector
-from ..errors import RateLimitError
+from ..requests import raise_for_status
models = {
"gpt-4": {
@@ -76,6 +76,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://liaobots.site"
working = True
supports_message_history = True
+ supports_system_message = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
default_model = "gpt-3.5-turbo"
@@ -116,19 +117,17 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
data={"token": "abcdefghijklmnopqrst"},
verify_ssl=False
) as response:
- response.raise_for_status()
+ await raise_for_status(response)
async with session.post(
"https://liaobots.work/api/user",
proxy=proxy,
json={"authcode": ""},
verify_ssl=False
) as response:
- if response.status == 401:
- raise RateLimitError("Rate limit reached. Use a other provider or ip address")
- response.raise_for_status()
+ await raise_for_status(response)
cls._auth_code = (await response.json(content_type=None))["authCode"]
cls._cookie_jar = session.cookie_jar
-
+
data = {
"conversationId": str(uuid.uuid4()),
"model": models[cls.get_model(model)],
@@ -143,7 +142,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
headers={"x-auth-code": cls._auth_code},
verify_ssl=False
) as response:
- response.raise_for_status()
+ await raise_for_status(response)
async for chunk in response.content.iter_any():
if b"<html coupert-item=" in chunk:
raise RuntimeError("Invalid session")
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index de2d1b71..6c80efee 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -14,7 +14,7 @@ WS_URL = "wss://labs-api.perplexity.ai/socket.io/"
class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://labs.perplexity.ai"
working = True
- default_model = "sonar-medium-online"
+ default_model = "mixtral-8x7b-instruct"
models = [
"sonar-small-online", "sonar-medium-online", "sonar-small-chat", "sonar-medium-chat", "mistral-7b-instruct",
"codellama-70b-instruct", "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct",
diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py
index 2f7dc436..5a1e9f0e 100644
--- a/g4f/Provider/Pi.py
+++ b/g4f/Provider/Pi.py
@@ -4,12 +4,13 @@ import json
from ..typing import CreateResult, Messages
from .base_provider import AbstractProvider, format_prompt
-from ..requests import Session, get_session_from_browser
+from ..requests import Session, get_session_from_browser, raise_for_status
class Pi(AbstractProvider):
url = "https://pi.ai/talk"
working = True
supports_stream = True
+ _session = None
@classmethod
def create_completion(
@@ -17,20 +18,19 @@ class Pi(AbstractProvider):
model: str,
messages: Messages,
stream: bool,
- session: Session = None,
proxy: str = None,
timeout: int = 180,
conversation_id: str = None,
**kwargs
) -> CreateResult:
- if not session:
- session = get_session_from_browser(url=cls.url, proxy=proxy, timeout=timeout)
+ if cls._session is None:
+ cls._session = get_session_from_browser(url=cls.url, proxy=proxy, timeout=timeout)
if not conversation_id:
- conversation_id = cls.start_conversation(session)
+ conversation_id = cls.start_conversation(cls._session)
prompt = format_prompt(messages)
else:
prompt = messages[-1]["content"]
- answer = cls.ask(session, prompt, conversation_id)
+ answer = cls.ask(cls._session, prompt, conversation_id)
for line in answer:
if "text" in line:
yield line["text"]
@@ -41,8 +41,7 @@ class Pi(AbstractProvider):
'accept': 'application/json',
'x-api-version': '3'
})
- if 'Just a moment' in response.text:
- raise RuntimeError('Error: Cloudflare detected')
+ raise_for_status(response)
return response.json()['conversations'][0]['sid']
def get_chat_history(session: Session, conversation_id: str):
@@ -50,8 +49,7 @@ class Pi(AbstractProvider):
'conversation': conversation_id,
}
response = session.get('https://pi.ai/api/chat/history', params=params)
- if 'Just a moment' in response.text:
- raise RuntimeError('Error: Cloudflare detected')
+ raise_for_status(response)
return response.json()
def ask(session: Session, prompt: str, conversation_id: str):
@@ -61,9 +59,8 @@ class Pi(AbstractProvider):
'mode': 'BASE',
}
response = session.post('https://pi.ai/api/chat', json=json_data, stream=True)
+ raise_for_status(response)
for line in response.iter_lines():
- if b'Just a moment' in line:
- raise RuntimeError('Error: Cloudflare detected')
if line.startswith(b'data: {"text":'):
yield json.loads(line.split(b'data: ')[1])
elif line.startswith(b'data: {"title":'):
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index e10aa232..f8faeeaf 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -8,17 +8,18 @@ try:
except ImportError:
has_requirements = False
-from ..typing import Messages, TypedDict, CreateResult, Any
+from ..typing import Messages, CreateResult
from .base_provider import AbstractProvider
-from ..errors import MissingRequirementsError
+from ..requests import raise_for_status
+from ..errors import MissingRequirementsError, RateLimitError, ResponseStatusError
class Vercel(AbstractProvider):
url = 'https://chat.vercel.ai'
working = True
- supports_message_history = True
+ supports_message_history = True
+ supports_system_message = True
supports_gpt_35_turbo = True
supports_stream = True
- supports_gpt_4 = False
@staticmethod
def create_completion(
@@ -26,6 +27,7 @@ class Vercel(AbstractProvider):
messages: Messages,
stream: bool,
proxy: str = None,
+ max_retries: int = 6,
**kwargs
) -> CreateResult:
if not has_requirements:
@@ -54,19 +56,17 @@ class Vercel(AbstractProvider):
'messages': messages,
'id' : f'{os.urandom(3).hex()}a',
}
-
- max_retries = kwargs.get('max_retries', 6)
+ response = None
for _ in range(max_retries):
response = requests.post('https://chat.vercel.ai/api/chat',
headers=headers, json=json_data, stream=True, proxies={"https": proxy})
- try:
- response.raise_for_status()
- except:
+ if not response.ok:
continue
for token in response.iter_content(chunk_size=None):
yield token.decode()
break
-
+ raise_for_status(response)
+
def get_anti_bot_token() -> str:
headers = {
'authority': 'sdk.vercel.ai',
@@ -92,7 +92,7 @@ def get_anti_bot_token() -> str:
js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
return (%s)(%s)''' % (raw_data['c'], raw_data['a'])
-
+
sec_list = [execjs.compile(js_script).call('')[0], [], "sentinel"]
raw_token = json.dumps({'r': sec_list, 't': raw_data['t']},
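The rewritten Vercel loop retries the streaming POST up to max_retries times and defers the error to a final raise_for_status, so a transient failure no longer aborts the whole call. A condensed sketch of that control flow (standalone, using requests' own raise_for_status in place of g4f's helper):

    from typing import Iterator
    import requests

    def stream_with_retries(url: str, max_retries: int = 6) -> Iterator[str]:
        response = None
        for _ in range(max_retries):
            response = requests.post(url, stream=True)
            if not response.ok:
                continue                 # transient error: try again
            for token in response.iter_content(chunk_size=None):
                yield token.decode()
            break                        # streamed successfully: stop retrying
        else:
            if response is not None:
                # every attempt failed: surface the last HTTP error
                response.raise_for_status()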
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 1fdaf06d..85b60452 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -4,13 +4,14 @@ import re
import json
import base64
import uuid
-from aiohttp import ClientSession, FormData, BaseConnector
+from asyncio import get_running_loop
+from aiohttp import ClientSession, FormData, BaseConnector, CookieJar
from ..typing import AsyncResult, Messages, ImageType, Cookies
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..providers.helper import get_connector, format_prompt
+from .helper import format_prompt, get_connector
from ..image import to_bytes, ImageResponse
-from ..requests.defaults import DEFAULT_HEADERS
+from ..requests import WebDriver, raise_for_status, get_args_from_browser
class You(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://you.com"
@@ -32,6 +33,8 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
model_aliases = {
"claude-v2": "claude-2"
}
+ _args: dict = None
+ _cookie_jar: CookieJar = None
_cookies = None
_cookies_used = 0
@@ -43,25 +46,34 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
image: ImageType = None,
image_name: str = None,
connector: BaseConnector = None,
+ webdriver: WebDriver = None,
proxy: str = None,
chat_mode: str = "default",
**kwargs,
) -> AsyncResult:
+ if cls._args is None:
+ cls._args = get_args_from_browser(cls.url, webdriver, proxy)
+ cls._cookie_jar = CookieJar(loop=get_running_loop())
+ else:
+ if "cookies" in cls._args:
+ del cls._args["cookies"]
+ cls._cookie_jar._loop = get_running_loop()
+ if image is not None:
+ chat_mode = "agent"
+ elif not model or model == cls.default_model:
+ chat_mode = "default"
+ elif model.startswith("dall-e"):
+ chat_mode = "create"
+ else:
+ chat_mode = "custom"
+ model = cls.get_model(model)
async with ClientSession(
connector=get_connector(connector, proxy),
- headers=DEFAULT_HEADERS
- ) as client:
- if image is not None:
- chat_mode = "agent"
- elif not model or model == cls.default_model:
- chat_mode = "default"
- elif model.startswith("dall-e"):
- chat_mode = "create"
- else:
- chat_mode = "custom"
- model = cls.get_model(model)
- cookies = await cls.get_cookies(client) if chat_mode != "default" else None
- upload = json.dumps([await cls.upload_file(client, cookies, to_bytes(image), image_name)]) if image else ""
+ cookie_jar=cls._cookie_jar,
+ **cls._args
+ ) as session:
+ cookies = await cls.get_cookies(session) if chat_mode != "default" else None
+ upload = json.dumps([await cls.upload_file(session, cookies, to_bytes(image), image_name)]) if image else ""
#questions = [message["content"] for message in messages if message["role"] == "user"]
# chat = [
# {"question": questions[idx-1], "answer": message["content"]}
@@ -70,8 +82,8 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
# and idx < len(questions)
# ]
headers = {
- "Accept": "text/event-stream",
- "Referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
+ "accept": "text/event-stream",
+ "referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
}
data = {
"userFiles": upload,
@@ -86,14 +98,14 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
}
if chat_mode == "custom":
params["selectedAIModel"] = model.replace("-", "_")
- async with (client.post if chat_mode == "default" else client.get)(
+ async with (session.post if chat_mode == "default" else session.get)(
f"{cls.url}/api/streamingSearch",
data=data,
params=params,
headers=headers,
cookies=cookies
) as response:
- response.raise_for_status()
+ await raise_for_status(response)
async for line in response.content:
if line.startswith(b'event: '):
event = line[7:-1].decode()
@@ -115,7 +127,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
f"{cls.url}/api/get_nonce",
cookies=cookies,
) as response:
- response.raise_for_status()
+ await raise_for_status(response)
upload_nonce = await response.text()
data = FormData()
data.add_field('file', file, filename=filename)
@@ -127,8 +139,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
},
cookies=cookies
) as response:
- if not response.ok:
- raise RuntimeError(f"Response: {await response.text()}")
+ await raise_for_status(response)
result = await response.json()
result["user_filename"] = filename
result["size"] = len(file)
@@ -177,8 +188,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
"session_duration_minutes": 129600
}
) as response:
- if not response.ok:
- raise RuntimeError(f"Response: {await response.text()}")
+ await raise_for_status(response)
session = (await response.json())["data"]
return {
"stytch_session": session["session_token"],
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 462fc249..8db3c0d4 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -6,56 +6,36 @@ from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
from ..providers.create_images import CreateImagesProvider
from .deprecated import *
+from .not_working import *
from .selenium import *
from .needs_auth import *
from .unfinished import *
-from .AiAsk import AiAsk
-from .AiChatOnline import AiChatOnline
-from .AItianhu import AItianhu
-from .Aura import Aura
-from .Bestim import Bestim
-from .Bing import Bing
+from .Aura import Aura
+from .Bing import Bing
from .BingCreateImages import BingCreateImages
-from .ChatAnywhere import ChatAnywhere
-from .ChatBase import ChatBase
-from .ChatForAi import ChatForAi
-from .Chatgpt4Online import Chatgpt4Online
-from .ChatgptAi import ChatgptAi
-from .ChatgptDemo import ChatgptDemo
-from .ChatgptDemoAi import ChatgptDemoAi
-from .ChatgptFree import ChatgptFree
-from .ChatgptLogin import ChatgptLogin
-from .ChatgptNext import ChatgptNext
-from .ChatgptX import ChatgptX
-from .Chatxyz import Chatxyz
-from .DeepInfra import DeepInfra
-from .FakeGpt import FakeGpt
-from .FlowGpt import FlowGpt
-from .FreeChatgpt import FreeChatgpt
-from .FreeGpt import FreeGpt
-from .GeekGpt import GeekGpt
-from .GeminiPro import GeminiPro
-from .GeminiProChat import GeminiProChat
-from .Gpt6 import Gpt6
-from .GPTalk import GPTalk
-from .GptChatly import GptChatly
-from .GptForLove import GptForLove
-from .GptGo import GptGo
-from .GptGod import GptGod
-from .GptTalkRu import GptTalkRu
-from .Hashnode import Hashnode
-from .HuggingChat import HuggingChat
-from .HuggingFace import HuggingFace
-from .Koala import Koala
-from .Liaobots import Liaobots
-from .Llama2 import Llama2
-from .OnlineGpt import OnlineGpt
-from .PerplexityLabs import PerplexityLabs
-from .Pi import Pi
-from .Vercel import Vercel
-from .Ylokh import Ylokh
-from .You import You
+from .ChatForAi import ChatForAi
+from .Chatgpt4Online import Chatgpt4Online
+from .ChatgptAi import ChatgptAi
+from .ChatgptFree import ChatgptFree
+from .ChatgptNext import ChatgptNext
+from .ChatgptX import ChatgptX
+from .DeepInfra import DeepInfra
+from .FlowGpt import FlowGpt
+from .FreeChatgpt import FreeChatgpt
+from .FreeGpt import FreeGpt
+from .GeminiPro import GeminiPro
+from .GeminiProChat import GeminiProChat
+from .GptTalkRu import GptTalkRu
+from .HuggingChat import HuggingChat
+from .HuggingFace import HuggingFace
+from .Koala import Koala
+from .Liaobots import Liaobots
+from .Llama2 import Llama2
+from .PerplexityLabs import PerplexityLabs
+from .Pi import Pi
+from .Vercel import Vercel
+from .You import You
import sys
diff --git a/g4f/Provider/bing/conversation.py b/g4f/Provider/bing/conversation.py
index 4af9e5fe..03f17ee7 100644
--- a/g4f/Provider/bing/conversation.py
+++ b/g4f/Provider/bing/conversation.py
@@ -2,6 +2,8 @@ from __future__ import annotations
import uuid
from aiohttp import ClientSession
+from ...errors import ResponseStatusError
+from ...requests import raise_for_status
class Conversation:
"""
@@ -32,8 +34,11 @@ async def create_conversation(session: ClientSession, proxy: str = None) -> Conv
Conversation: An instance representing the created conversation.
"""
url = 'https://www.bing.com/search?toncp=0&FORM=hpcodx&q=Bing+AI&showconv=1&cc=en'
- async with session.get(url, proxy=proxy) as response:
- response.raise_for_status()
+ headers = {
+ "cookie": "; ".join(f"{c.key}={c.value}" for c in session.cookie_jar)
+ }
+ async with session.get(url, headers=headers) as response:
+ await raise_for_status(response)
headers = {
"accept": "application/json",
"sec-fetch-dest": "empty",
@@ -41,25 +46,21 @@ async def create_conversation(session: ClientSession, proxy: str = None) -> Conv
"sec-fetch-site": "same-origin",
"x-ms-client-request-id": str(uuid.uuid4()),
"x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.12.3 OS/Windows",
- "referer": url,
- "Cookie": "; ".join(f"{c.key}={c.value}" for c in session.cookie_jar)
+ "referer": "https://www.bing.com/search?toncp=0&FORM=hpcodx&q=Bing+AI&showconv=1&cc=en",
+ "cookie": "; ".join(f"{c.key}={c.value}" for c in session.cookie_jar)
}
- for k, v in headers.items():
- session.headers[k] = v
- url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1'
+ url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1634.0-service-contracts"
async with session.get(url, headers=headers, proxy=proxy) as response:
- try:
- data = await response.json()
- except:
- raise RuntimeError(f"Response: {await response.text()}")
-
- conversationId = data.get('conversationId')
- clientId = data.get('clientId')
- conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
-
- if not conversationId or not clientId or not conversationSignature:
- raise Exception('Failed to create conversation.')
- return Conversation(conversationId, clientId, conversationSignature)
+ if response.status == 404:
+ raise ResponseStatusError(f"Response {response.status}: Can't create a new chat")
+ await raise_for_status(response)
+ data = await response.json()
+ conversationId = data.get('conversationId')
+ clientId = data.get('clientId')
+ conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
+ if not conversationId or not clientId or not conversationSignature:
+ raise Exception('Failed to create conversation.')
+ return Conversation(conversationId, clientId, conversationSignature)
async def list_conversations(session: ClientSession) -> list:
"""
diff --git a/g4f/Provider/AiAsk.py b/g4f/Provider/deprecated/AiAsk.py
index 094ef076..6ea5f3e0 100644
--- a/g4f/Provider/AiAsk.py
+++ b/g4f/Provider/deprecated/AiAsk.py
@@ -1,8 +1,8 @@
from __future__ import annotations
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
class AiAsk(AsyncGeneratorProvider):
url = "https://e.aiask.me"
diff --git a/g4f/Provider/AiChatOnline.py b/g4f/Provider/deprecated/AiChatOnline.py
index cc3b5b8e..e690f28e 100644
--- a/g4f/Provider/AiChatOnline.py
+++ b/g4f/Provider/deprecated/AiChatOnline.py
@@ -3,9 +3,9 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string
class AiChatOnline(AsyncGeneratorProvider):
url = "https://aichatonline.org"
diff --git a/g4f/Provider/ChatAnywhere.py b/g4f/Provider/deprecated/ChatAnywhere.py
index 5f5f15de..d035eaf0 100644
--- a/g4f/Provider/ChatAnywhere.py
+++ b/g4f/Provider/deprecated/ChatAnywhere.py
@@ -2,8 +2,8 @@ from __future__ import annotations
from aiohttp import ClientSession, ClientTimeout
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
class ChatAnywhere(AsyncGeneratorProvider):
diff --git a/g4f/Provider/FakeGpt.py b/g4f/Provider/deprecated/FakeGpt.py
index ee14abf4..99b6bb1a 100644
--- a/g4f/Provider/FakeGpt.py
+++ b/g4f/Provider/deprecated/FakeGpt.py
@@ -3,9 +3,9 @@ from __future__ import annotations
import uuid, time, random, json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt, get_random_string
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_random_string
class FakeGpt(AsyncGeneratorProvider):
diff --git a/g4f/Provider/GPTalk.py b/g4f/Provider/deprecated/GPTalk.py
index 5749ff2e..5b36d37b 100644
--- a/g4f/Provider/GPTalk.py
+++ b/g4f/Provider/deprecated/GPTalk.py
@@ -3,14 +3,14 @@ from __future__ import annotations
import secrets, time, json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
class GPTalk(AsyncGeneratorProvider):
url = "https://gptalk.net"
- working = True
+ working = False
supports_gpt_35_turbo = True
_auth = None
used_times = 0
diff --git a/g4f/Provider/GeekGpt.py b/g4f/Provider/deprecated/GeekGpt.py
index f1dea9b1..7a460083 100644
--- a/g4f/Provider/GeekGpt.py
+++ b/g4f/Provider/deprecated/GeekGpt.py
@@ -1,8 +1,8 @@
from __future__ import annotations
import requests, json
-from .base_provider import AbstractProvider
-from ..typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+from ...typing import CreateResult, Messages
from json import dumps
diff --git a/g4f/Provider/Hashnode.py b/g4f/Provider/deprecated/Hashnode.py
index 7a0c2903..c2c0ffb7 100644
--- a/g4f/Provider/Hashnode.py
+++ b/g4f/Provider/deprecated/Hashnode.py
@@ -2,9 +2,9 @@ from __future__ import annotations
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_hex
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_hex
class SearchTypes():
quick = "quick"
diff --git a/g4f/Provider/Ylokh.py b/g4f/Provider/deprecated/Ylokh.py
index 11fe497f..dbff4602 100644
--- a/g4f/Provider/Ylokh.py
+++ b/g4f/Provider/deprecated/Ylokh.py
@@ -2,9 +2,9 @@ from __future__ import annotations
import json
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider
-from ..typing import AsyncResult, Messages
+from ...requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
class Ylokh(AsyncGeneratorProvider):
url = "https://chat.ylokh.xyz"
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index 8ec5f2fc..f6b4a1d9 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -23,4 +23,12 @@ from .Opchatgpts import Opchatgpts
from .Yqcloud import Yqcloud
from .Aichat import Aichat
from .Berlin import Berlin
-from .Phind import Phind
\ No newline at end of file
+from .Phind import Phind
+from .AiAsk import AiAsk
+from .AiChatOnline import AiChatOnline
+from .ChatAnywhere import ChatAnywhere
+from .FakeGpt import FakeGpt
+from .GeekGpt import GeekGpt
+from .GPTalk import GPTalk
+from .Hashnode import Hashnode
+from .Ylokh import Ylokh
\ No newline at end of file
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index da5b99f6..338e0966 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -1,2 +1,3 @@
from ..providers.helper import *
-from ..cookies import get_cookies
\ No newline at end of file
+from ..cookies import get_cookies
+from ..requests.aiohttp import get_connector
\ No newline at end of file
diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/not_working/AItianhu.py
index 34187694..501b334e 100644
--- a/g4f/Provider/AItianhu.py
+++ b/g4f/Provider/not_working/AItianhu.py
@@ -2,9 +2,9 @@ from __future__ import annotations
import json
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
class AItianhu(AsyncGeneratorProvider):
diff --git a/g4f/Provider/Bestim.py b/g4f/Provider/not_working/Bestim.py
index 323bd713..94a4d32b 100644
--- a/g4f/Provider/Bestim.py
+++ b/g4f/Provider/not_working/Bestim.py
@@ -1,56 +1,56 @@
-from __future__ import annotations
-
-from ..typing import Messages
-from .base_provider import BaseProvider, CreateResult
-from ..requests import get_session_from_browser
-from uuid import uuid4
-
-class Bestim(BaseProvider):
- url = "https://chatgpt.bestim.org"
- supports_gpt_35_turbo = True
- supports_message_history = True
- working = False
- supports_stream = True
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool,
- proxy: str = None,
- **kwargs
- ) -> CreateResult:
- session = get_session_from_browser(cls.url, proxy=proxy)
- headers = {
- 'Accept': 'application/json, text/event-stream',
- }
- data = {
- "messagesHistory": [{
- "id": str(uuid4()),
- "content": m["content"],
- "from": "you" if m["role"] == "user" else "bot"
- } for m in messages],
- "type": "chat",
- }
- response = session.post(
- url="https://chatgpt.bestim.org/chat/send2/",
- json=data,
- headers=headers,
- stream=True
- )
- response.raise_for_status()
- for line in response.iter_lines():
- if not line.startswith(b"event: trylimit"):
- yield line.decode().removeprefix("data: ")
+from __future__ import annotations
+
+from ...typing import Messages
+from ..base_provider import BaseProvider, CreateResult
+from ...requests import get_session_from_browser
+from uuid import uuid4
+
+class Bestim(BaseProvider):
+ url = "https://chatgpt.bestim.org"
+ working = False
+ supports_gpt_35_turbo = True
+ supports_message_history = True
+ supports_stream = True
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ **kwargs
+ ) -> CreateResult:
+ session = get_session_from_browser(cls.url, proxy=proxy)
+ headers = {
+ 'Accept': 'application/json, text/event-stream',
+ }
+ data = {
+ "messagesHistory": [{
+ "id": str(uuid4()),
+ "content": m["content"],
+ "from": "you" if m["role"] == "user" else "bot"
+ } for m in messages],
+ "type": "chat",
+ }
+ response = session.post(
+ url="https://chatgpt.bestim.org/chat/send2/",
+ json=data,
+ headers=headers,
+ stream=True
+ )
+ response.raise_for_status()
+ for line in response.iter_lines():
+ if not line.startswith(b"event: trylimit"):
+ yield line.decode().removeprefix("data: ")
diff --git a/g4f/Provider/ChatBase.py b/g4f/Provider/not_working/ChatBase.py
index 996ca39a..ef1c8f99 100644
--- a/g4f/Provider/ChatBase.py
+++ b/g4f/Provider/not_working/ChatBase.py
@@ -2,15 +2,15 @@ from __future__ import annotations
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
class ChatBase(AsyncGeneratorProvider):
url = "https://www.chatbase.co"
+ working = False
supports_gpt_35_turbo = True
supports_message_history = True
- working = True
jailbreak = True
list_incorrect_responses = ["support@chatbase",
"about Chatbase"]
diff --git a/g4f/Provider/ChatgptDemo.py b/g4f/Provider/not_working/ChatgptDemo.py
index 666b5753..593a2d29 100644
--- a/g4f/Provider/ChatgptDemo.py
+++ b/g4f/Provider/not_working/ChatgptDemo.py
@@ -1,16 +1,17 @@
from __future__ import annotations
-import time, json, re
+import time, json, re, asyncio
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ...errors import RateLimitError
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
class ChatgptDemo(AsyncGeneratorProvider):
- url = "https://chat.chatgptdemo.net"
- supports_gpt_35_turbo = True
+ url = "https://chatgptdemo.info/chat"
working = False
+ supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
@@ -21,10 +22,10 @@ class ChatgptDemo(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
headers = {
- "authority": "chat.chatgptdemo.net",
- "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US",
- "origin": "https://chat.chatgptdemo.net",
- "referer": "https://chat.chatgptdemo.net/",
+ "authority": "chatgptdemo.info",
+ "accept-language": "en-US",
+ "origin": "https://chatgptdemo.info",
+ "referer": "https://chatgptdemo.info/chat/",
"sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
@@ -36,28 +37,29 @@ class ChatgptDemo(AsyncGeneratorProvider):
async with ClientSession(headers=headers) as session:
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response.raise_for_status()
- response = await response.text()
-
- result = re.search(
- r'<div id="USERID" style="display: none">(.*?)<\/div>',
- response,
- )
-
- if result:
- user_id = result.group(1)
- else:
- raise RuntimeError("No user id found")
- async with session.post(f"{cls.url}/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
+ text = await response.text()
+ result = re.search(
+ r'<div id="USERID" style="display: none">(.*?)<\/div>',
+ text,
+ )
+ if result:
+ user_id = result.group(1)
+ else:
+ raise RuntimeError("No user id found")
+ async with session.post(f"https://chatgptdemo.info/chat/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
response.raise_for_status()
chat_id = (await response.json())["id_"]
if not chat_id:
raise RuntimeError("Could not create new chat")
+ await asyncio.sleep(10)
data = {
"question": format_prompt(messages),
"chat_id": chat_id,
- "timestamp": int(time.time()*1000),
+ "timestamp": int((time.time())*1e3),
}
- async with session.post(f"{cls.url}/chat_api_stream", json=data, proxy=proxy) as response:
+ async with session.post(f"https://chatgptdemo.info/chat/chat_api_stream", json=data, proxy=proxy) as response:
+ if response.status == 429:
+ raise RateLimitError("Rate limit reached")
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
diff --git a/g4f/Provider/ChatgptDemoAi.py b/g4f/Provider/not_working/ChatgptDemoAi.py
index a8c98b65..6cdd0c7a 100644
--- a/g4f/Provider/ChatgptDemoAi.py
+++ b/g4f/Provider/not_working/ChatgptDemoAi.py
@@ -3,9 +3,9 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string
class ChatgptDemoAi(AsyncGeneratorProvider):
url = "https://chat.chatgptdemo.ai"
@@ -49,6 +49,7 @@ class ChatgptDemoAi(AsyncGeneratorProvider):
async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
+ response.raise_for_status()
if chunk.startswith(b"data: "):
data = json.loads(chunk[6:])
if data["type"] == "live":
diff --git a/g4f/Provider/ChatgptLogin.py b/g4f/Provider/not_working/ChatgptLogin.py
index 037e0a6e..6e9d57c4 100644
--- a/g4f/Provider/ChatgptLogin.py
+++ b/g4f/Provider/not_working/ChatgptLogin.py
@@ -5,15 +5,15 @@ import time
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
class ChatgptLogin(AsyncGeneratorProvider):
url = "https://chatgptlogin.ai"
- supports_gpt_35_turbo = True
working = False
+ supports_gpt_35_turbo = True
_user_id = None
@classmethod
diff --git a/g4f/Provider/Chatxyz.py b/g4f/Provider/not_working/Chatxyz.py
index dd1216aa..a1b3638e 100644
--- a/g4f/Provider/Chatxyz.py
+++ b/g4f/Provider/not_working/Chatxyz.py
@@ -3,8 +3,8 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
class Chatxyz(AsyncGeneratorProvider):
url = "https://chat.3211000.xyz"
diff --git a/g4f/Provider/Gpt6.py b/g4f/Provider/not_working/Gpt6.py
index b8a294e2..0c1bdcc5 100644
--- a/g4f/Provider/Gpt6.py
+++ b/g4f/Provider/not_working/Gpt6.py
@@ -3,14 +3,12 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
-
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
class Gpt6(AsyncGeneratorProvider):
url = "https://gpt6.ai"
- working = True
+ working = False
supports_gpt_35_turbo = True
@classmethod
@@ -45,6 +43,7 @@ class Gpt6(AsyncGeneratorProvider):
async with session.post(f"https://seahorse-app-d29hu.ondigitalocean.app/api/v1/query", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: [DONE]"):
break
elif line.startswith(b"data: "):
diff --git a/g4f/Provider/GptChatly.py b/g4f/Provider/not_working/GptChatly.py
index 9fb739a8..a1e3dd74 100644
--- a/g4f/Provider/GptChatly.py
+++ b/g4f/Provider/not_working/GptChatly.py
@@ -1,13 +1,13 @@
from __future__ import annotations
-from ..requests import Session, get_session_from_browser
-from ..typing import Messages
-from .base_provider import AsyncProvider
+from ...requests import Session, get_session_from_browser
+from ...typing import Messages
+from ..base_provider import AsyncProvider
class GptChatly(AsyncProvider):
url = "https://gptchatly.com"
- working = True
+ working = False
supports_message_history = True
supports_gpt_35_turbo = True
diff --git a/g4f/Provider/GptForLove.py b/g4f/Provider/not_working/GptForLove.py
index cc82da21..4c578227 100644
--- a/g4f/Provider/GptForLove.py
+++ b/g4f/Provider/not_working/GptForLove.py
@@ -9,14 +9,14 @@ try:
except ImportError:
has_requirements = False
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
-from ..errors import MissingRequirementsError
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+from ...errors import MissingRequirementsError
class GptForLove(AsyncGeneratorProvider):
url = "https://ai18.gptforlove.com"
- working = True
+ working = False
supports_gpt_35_turbo = True
@classmethod
diff --git a/g4f/Provider/GptGo.py b/g4f/Provider/not_working/GptGo.py
index 538bb7b6..363aabea 100644
--- a/g4f/Provider/GptGo.py
+++ b/g4f/Provider/not_working/GptGo.py
@@ -4,14 +4,14 @@ from aiohttp import ClientSession
import json
import base64
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, format_prompt
class GptGo(AsyncGeneratorProvider):
url = "https://gptgo.ai"
+ working = False
supports_gpt_35_turbo = True
- working = True
@classmethod
async def create_async_generator(
@@ -44,6 +44,8 @@ class GptGo(AsyncGeneratorProvider):
) as response:
response.raise_for_status()
token = await response.text();
+ if token == "error token":
+ raise RuntimeError(f"Response: {token}")
token = base64.b64decode(token[10:-20]).decode()
async with session.get(
@@ -57,6 +59,8 @@ class GptGo(AsyncGeneratorProvider):
break
if line.startswith(b"data: "):
line = json.loads(line[6:])
+ if "choices" not in line:
+ raise RuntimeError(f"Response: {line}")
content = line["choices"][0]["delta"].get("content")
if content and content != "\n#GPTGO ":
yield content
diff --git a/g4f/Provider/GptGod.py b/g4f/Provider/not_working/GptGod.py
index 08d9269e..46b40645 100644
--- a/g4f/Provider/GptGod.py
+++ b/g4f/Provider/not_working/GptGod.py
@@ -4,14 +4,14 @@ import secrets
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
class GptGod(AsyncGeneratorProvider):
url = "https://gptgod.site"
- supports_gpt_35_turbo = True
working = False
+ supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/OnlineGpt.py b/g4f/Provider/not_working/OnlineGpt.py
index 9f0d11c4..f4f3a846 100644
--- a/g4f/Provider/OnlineGpt.py
+++ b/g4f/Provider/not_working/OnlineGpt.py
@@ -3,14 +3,13 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
-
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string
class OnlineGpt(AsyncGeneratorProvider):
url = "https://onlinegpt.org"
- working = True
+ working = False
supports_gpt_35_turbo = True
supports_message_history = False
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
new file mode 100644
index 00000000..4778c968
--- /dev/null
+++ b/g4f/Provider/not_working/__init__.py
@@ -0,0 +1,14 @@
+from .AItianhu import AItianhu
+from .Bestim import Bestim
+from .ChatBase import ChatBase
+from .ChatgptDemo import ChatgptDemo
+from .ChatgptDemoAi import ChatgptDemoAi
+from .ChatgptLogin import ChatgptLogin
+from .Chatxyz import Chatxyz
+from .Gpt6 import Gpt6
+from .GptChatly import GptChatly
+from .GptForLove import GptForLove
+from .GptGo import GptGo
+from .GptGod import GptGod
+from .OnlineGpt import OnlineGpt
\ No newline at end of file
diff --git a/g4f/client.py b/g4f/client.py
index c4319872..d7ceb009 100644
--- a/g4f/client.py
+++ b/g4f/client.py
@@ -133,12 +133,18 @@ class Completions():
max_tokens: int = None,
stop: Union[list[str], str] = None,
api_key: str = None,
+ ignored : list[str] = None,
+ ignore_working: bool = False,
+ ignore_stream: bool = False,
**kwargs
) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
model, provider = get_model_and_provider(
model,
self.provider if provider is None else provider,
stream,
+ ignored,
+ ignore_working,
+ ignore_stream,
**kwargs
)
stop = [stop] if isinstance(stop, str) else stop
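The new ignored / ignore_working / ignore_stream keywords are forwarded straight to get_model_and_provider. A hedged sketch of the kind of provider filtering they suggest (illustrative only, not the actual g4f internals):

    def filter_providers(providers: list, ignored: list = None,
                         ignore_working: bool = False) -> list:
        # Drop providers the caller black-listed by name and, unless
        # overridden, anything whose `working` flag is False.
        ignored = ignored or []
        return [
            p for p in providers
            if p.__name__ not in ignored and (ignore_working or p.working)
        ]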
diff --git a/g4f/errors.py b/g4f/errors.py
index 00cf8cdb..0cb12884 100644
--- a/g4f/errors.py
+++ b/g4f/errors.py
@@ -38,4 +38,7 @@ class NoImageResponseError(Exception):
...
class RateLimitError(Exception):
+ ...
+
+class ResponseStatusError(Exception):
...
\ No newline at end of file
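With RateLimitError and the new ResponseStatusError both exported from g4f.errors, callers can treat throttling and hard failures differently, e.g.:

    from g4f.errors import RateLimitError, ResponseStatusError

    def call_with_handling(fn):
        try:
            return fn()
        except RateLimitError:
            return None   # worth backing off and retrying later
        except ResponseStatusError as error:
            raise RuntimeError(f"provider failed: {error}") from error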
diff --git a/g4f/gui/client/css/style.css b/g4f/gui/client/css/style.css
index 6a54db51..17f3e4b3 100644
--- a/g4f/gui/client/css/style.css
+++ b/g4f/gui/client/css/style.css
@@ -1013,6 +1013,7 @@ a:-webkit-any-link {
font-size: 15px;
width: 100%;
color: var(--colour-3);
+ min-height: 59px;
height: 59px;
outline: none;
padding: var(--inner-gap) var(--section-gap);
diff --git a/g4f/models.py b/g4f/models.py
index b3e0d0ee..ae2ef54b 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -6,13 +6,11 @@ from .Provider import RetryProvider, ProviderType
from .Provider import (
Chatgpt4Online,
PerplexityLabs,
- ChatgptDemoAi,
GeminiProChat,
ChatgptNext,
HuggingChat,
HuggingFace,
ChatgptDemo,
- FreeChatgpt,
GptForLove,
ChatgptAi,
DeepInfra,
@@ -66,7 +64,6 @@ gpt_35_long = Model(
best_provider = RetryProvider([
FreeGpt, You,
Chatgpt4Online,
- ChatgptDemoAi,
ChatgptNext,
ChatgptDemo,
Gpt6,
@@ -182,7 +179,7 @@ gemini = bard = palm = Model(
claude_v2 = Model(
name = 'claude-v2',
base_provider = 'anthropic',
- best_provider = RetryProvider([FreeChatgpt, Vercel])
+ best_provider = RetryProvider([Vercel])
)
claude_3_opus = Model(
@@ -236,7 +233,7 @@ gpt_4_32k_0613 = Model(
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'google',
- best_provider = RetryProvider([FreeChatgpt, GeminiProChat, You])
+ best_provider = RetryProvider([GeminiProChat, You])
)
pi = Model(
diff --git a/g4f/providers/helper.py b/g4f/providers/helper.py
index c027216b..df6767a4 100644
--- a/g4f/providers/helper.py
+++ b/g4f/providers/helper.py
@@ -3,10 +3,8 @@ from __future__ import annotations
import random
import secrets
import string
-from aiohttp import BaseConnector
-from ..typing import Messages, Optional
-from ..errors import MissingRequirementsError
+from ..typing import Messages
def format_prompt(messages: Messages, add_special_tokens=False) -> str:
"""
@@ -49,16 +47,4 @@ def get_random_hex() -> str:
Returns:
str: A random hexadecimal string of 32 characters (16 bytes).
"""
- return secrets.token_hex(16).zfill(32)
-
-def get_connector(connector: BaseConnector = None, proxy: str = None, rdns: bool = False) -> Optional[BaseConnector]:
- if proxy and not connector:
- try:
- from aiohttp_socks import ProxyConnector
- if proxy.startswith("socks5h://"):
- proxy = proxy.replace("socks5h://", "socks5://")
- rdns = True
- connector = ProxyConnector.from_url(proxy, rdns=rdns)
- except ImportError:
- raise MissingRequirementsError('Install "aiohttp_socks" package for proxy support')
- return connector \ No newline at end of file
+ return secrets.token_hex(16).zfill(32) \ No newline at end of file
diff --git a/g4f/requests/__init__.py b/g4f/requests/__init__.py
index 83176557..d4ef9cec 100644
--- a/g4f/requests/__init__.py
+++ b/g4f/requests/__init__.py
@@ -1,18 +1,22 @@
from __future__ import annotations
from urllib.parse import urlparse
+from typing import Union
+from aiohttp import ClientResponse
+from requests import Response as RequestsResponse
try:
- from curl_cffi.requests import Session
+ from curl_cffi.requests import Session, Response
from .curl_cffi import StreamResponse, StreamSession
has_curl_cffi = True
except ImportError:
- from typing import Type as Session
+ from typing import Type as Session, Type as Response
from .aiohttp import StreamResponse, StreamSession
has_curl_cffi = False
-from ..webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies
-from ..errors import MissingRequirementsError
+from ..webdriver import WebDriver, WebDriverSession
+from ..webdriver import user_config_dir, bypass_cloudflare, get_driver_cookies
+from ..errors import MissingRequirementsError, RateLimitError, ResponseStatusError
from .defaults import DEFAULT_HEADERS
def get_args_from_browser(
@@ -20,7 +24,8 @@ def get_args_from_browser(
webdriver: WebDriver = None,
proxy: str = None,
timeout: int = 120,
- do_bypass_cloudflare: bool = True
+ do_bypass_cloudflare: bool = True,
+ virtual_display: bool = False
) -> dict:
"""
Create a Session object using a WebDriver to handle cookies and headers.
@@ -34,21 +39,37 @@ def get_args_from_browser(
Returns:
Session: A Session object configured with cookies and headers from the WebDriver.
"""
- with WebDriverSession(webdriver, "", proxy=proxy, virtual_display=False) as driver:
+ user_data_dir = "" #user_config_dir(f"g4f-{urlparse(url).hostname}")
+ with WebDriverSession(webdriver, user_data_dir, proxy=proxy, virtual_display=virtual_display) as driver:
if do_bypass_cloudflare:
bypass_cloudflare(driver, url, timeout)
- cookies = get_driver_cookies(driver)
user_agent = driver.execute_script("return navigator.userAgent")
- parse = urlparse(url)
+ headers = {
+ **DEFAULT_HEADERS,
+ 'referer': url,
+ 'user-agent': user_agent,
+ }
+ if hasattr(driver, "requests"):
+ for request in driver.requests:
+ if request.url.startswith(url):
+ for key, value in request.headers.items():
+ if key in (
+ "accept-encoding",
+ "accept-language",
+ "user-agent",
+ "sec-ch-ua",
+ "sec-ch-ua-platform",
+ "sec-ch-ua-arch",
+ "sec-ch-ua-full-version",
+ "sec-ch-ua-platform-version",
+ "sec-ch-ua-bitness"
+ ):
+ headers[key] = value
+ break
+ cookies = get_driver_cookies(driver)
return {
'cookies': cookies,
- 'headers': {
- **DEFAULT_HEADERS,
- 'Authority': parse.netloc,
- 'Origin': f'{parse.scheme}://{parse.netloc}',
- 'Referer': url,
- 'User-Agent': user_agent,
- },
+ 'headers': headers,
}
def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120) -> Session:
@@ -59,5 +80,25 @@ def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str =
**args,
proxies={"https": proxy, "http": proxy},
timeout=timeout,
- impersonate="chrome110"
- )
\ No newline at end of file
+ impersonate="chrome"
+ )
+
+async def raise_for_status_async(response: Union[StreamResponse, ClientResponse]):
+ if response.status in (429, 402):
+ raise RateLimitError(f"Response {response.status}: Rate limit reached")
+ text = await response.text() if not response.ok else None
+ if response.status == 403 and "<title>Just a moment...</title>" in text:
+ raise ResponseStatusError(f"Response {response.status}: Cloudflare detected")
+ elif not response.ok:
+ raise ResponseStatusError(f"Response {response.status}: {text}")
+
+def raise_for_status(response: Union[StreamResponse, ClientResponse, Response, RequestsResponse]):
+ if isinstance(response, StreamResponse) or isinstance(response, ClientResponse):
+ return raise_for_status_async(response)
+
+ if response.status_code in (429, 402):
+ raise RateLimitError(f"Response {response.status_code}: Rate limit reached")
+ elif response.status_code == 403 and "<title>Just a moment...</title>" in response.text:
+ raise ResponseStatusError(f"Response {response.status_code}: Cloudflare detected")
+ elif not response.ok:
+ raise ResponseStatusError(f"Response {response.status_code}: {response.text}")
\ No newline at end of file
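Because the synchronous raise_for_status returns the coroutine from raise_for_status_async when handed an aiohttp-style response, call sites stay uniform: await it in async code, call it plainly for requests/curl_cffi responses. Roughly:

    import aiohttp
    import requests
    from g4f.requests import raise_for_status

    def sync_example():
        response = requests.get("https://example.com")
        raise_for_status(response)             # raises on 429/402, Cloudflare, any !ok

    async def async_example():
        async with aiohttp.ClientSession() as session:
            async with session.get("https://example.com") as response:
                await raise_for_status(response)   # same helper, awaited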
diff --git a/g4f/requests/aiohttp.py b/g4f/requests/aiohttp.py
index d9bd6541..6979b20a 100644
--- a/g4f/requests/aiohttp.py
+++ b/g4f/requests/aiohttp.py
@@ -1,16 +1,20 @@
from __future__ import annotations
-from aiohttp import ClientSession, ClientResponse, ClientTimeout
-from typing import AsyncGenerator, Any
+from aiohttp import ClientSession, ClientResponse, ClientTimeout, BaseConnector
+from typing import AsyncIterator, Any, Optional
-from ..providers.helper import get_connector
from .defaults import DEFAULT_HEADERS
+from ..errors import MissingRequirementsError
class StreamResponse(ClientResponse):
- async def iter_lines(self) -> AsyncGenerator[bytes, None]:
+ async def iter_lines(self) -> AsyncIterator[bytes]:
async for line in self.content:
yield line.rstrip(b"\r\n")
+ async def iter_content(self) -> AsyncIterator[bytes]:
+ async for chunk in self.content.iter_any():
+ yield chunk
+
async def json(self) -> Any:
return await super().json(content_type=None)
@@ -27,4 +31,16 @@ class StreamSession(ClientSession):
response_class=StreamResponse,
connector=get_connector(kwargs.get("connector"), proxies.get("https")),
headers=headers
- )
\ No newline at end of file
+ )
+
+def get_connector(connector: BaseConnector = None, proxy: str = None, rdns: bool = False) -> Optional[BaseConnector]:
+ if proxy and not connector:
+ try:
+ from aiohttp_socks import ProxyConnector
+ if proxy.startswith("socks5h://"):
+ proxy = proxy.replace("socks5h://", "socks5://")
+ rdns = True
+ connector = ProxyConnector.from_url(proxy, rdns=rdns)
+ except ImportError:
+ raise MissingRequirementsError('Install "aiohttp_socks" package for proxy support')
+ return connector
\ No newline at end of file
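get_connector now lives beside StreamSession and only builds an aiohttp_socks ProxyConnector when a proxy is actually given (upgrading socks5h:// URLs to rDNS mode). Assuming StreamSession keeps the proxies keyword shown above and aiohttp_socks is installed, a SOCKS-proxied request is a sketch like:

    from g4f.requests.aiohttp import StreamSession

    async def fetch(url: str, proxy: str = None) -> str:
        # StreamSession routes proxies["https"] through get_connector(),
        # so passing a socks5:// URL is all that is needed.
        async with StreamSession(proxies={"https": proxy}) as session:
            async with session.get(url) as response:
                return await response.text()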
diff --git a/g4f/requests/defaults.py b/g4f/requests/defaults.py
index 6ae6d7eb..2457f046 100644
--- a/g4f/requests/defaults.py
+++ b/g4f/requests/defaults.py
@@ -1,13 +1,19 @@
DEFAULT_HEADERS = {
- 'Accept': '*/*',
- 'Accept-Encoding': 'gzip, deflate, br',
- 'Accept-Language': 'en-US',
- 'Connection': 'keep-alive',
- 'Sec-Ch-Ua': '"Not A(Brand";v="99", "Google Chrome";v="121", "Chromium";v="121"',
- 'Sec-Ch-Ua-Mobile': '?0',
- 'Sec-Ch-Ua-Platform': '"Windows"',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-site',
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36'
+ "sec-ch-ua": '"Chromium";v="122", "Not(A:Brand";v="24", "Google Chrome";v="122"',
+ "sec-ch-ua-mobile": "?0",
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
+ "sec-ch-ua-arch": '"x86"',
+ "sec-ch-ua-full-version": '"122.0.6261.69"',
+ "accept": "*/*",
+ "sec-ch-ua-platform-version": '"6.5.0"',
+ "sec-ch-ua-full-version-list": '"Chromium";v="122.0.6261.69", "Not(A:Brand";v="24.0.0.0", "Google Chrome";v="122.0.6261.69"',
+ "sec-ch-ua-bitness": '"64"',
+ "sec-ch-ua-model": '""',
+ "sec-ch-ua-platform": '"Windows"',
+ "sec-fetch-site": "same-site",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-dest": "empty",
+ "referer": "",
+ "accept-encoding": "gzip, deflate, br",
+ "accept-language": "en-US",
}
\ No newline at end of file
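The lowercase client-hint keys make per-request overrides a plain dict merge, since later keys win:

    from g4f.requests.defaults import DEFAULT_HEADERS

    # Request-specific values such as referer simply shadow the defaults.
    headers = {**DEFAULT_HEADERS, "referer": "https://you.com/", "accept": "text/event-stream"}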
diff --git a/g4f/webdriver.py b/g4f/webdriver.py
index d9028a63..47962d96 100644
--- a/g4f/webdriver.py
+++ b/g4f/webdriver.py
@@ -2,9 +2,9 @@ from __future__ import annotations
try:
from platformdirs import user_config_dir
+ from undetected_chromedriver import Chrome, ChromeOptions
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
- from undetected_chromedriver import Chrome, ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@@ -12,10 +12,10 @@ try:
from selenium.common.exceptions import NoSuchElementException
has_requirements = True
except ImportError:
- from typing import Type as WebDriver
+ from typing import Type as WebDriver, Callable as user_config_dir
has_requirements = False
-import time
+import time
from shutil import which
from os import path
from os import access, R_OK
@@ -29,6 +29,24 @@ try:
except ImportError:
has_pyvirtualdisplay = False
+try:
+ from undetected_chromedriver import Chrome as _Chrome, ChromeOptions
+ from seleniumwire.webdriver import InspectRequestsMixin, DriverCommonMixin
+
+ class Chrome(InspectRequestsMixin, DriverCommonMixin, _Chrome):
+ def __init__(self, *args, options=None, seleniumwire_options={}, **kwargs):
+ if options is None:
+ options = ChromeOptions()
+ options.add_argument('--proxy-bypass-list=<-loopback>')
+ config = self._setup_backend(seleniumwire_options)
+ options.add_argument(f"--proxy-server={config['proxy']['httpProxy']}")
+ options.add_argument("--ignore-certificate-errors")
+ super().__init__(*args, options=options, **kwargs)
+ has_seleniumwire = True
+except:
+ from undetected_chromedriver import Chrome, ChromeOptions
+ has_seleniumwire = False
+
def get_browser(
user_data_dir: str = None,
headless: bool = False,
@@ -106,7 +124,7 @@ def bypass_cloudflare(driver: WebDriver, url: str, timeout: int) -> None:
}});
""", element, url)
element.click()
- time.sleep(3)
+ time.sleep(5)
# Switch to the new tab and close the old tab
original_window = driver.current_window_handle
@@ -126,9 +144,10 @@ def bypass_cloudflare(driver: WebDriver, url: str, timeout: int) -> None:
...
except Exception as e:
if debug.logging:
- print(f"Error bypassing Cloudflare: {e}")
- finally:
- driver.switch_to.default_content()
+ print(f"Error bypassing Cloudflare: {str(e).splitlines()[0]}")
+ #driver.switch_to.default_content()
+ driver.switch_to.window(window_handle)
+ driver.execute_script("window.location.href = window.location.href;")
WebDriverWait(driver, timeout).until(
EC.presence_of_element_located((By.CSS_SELECTOR, "body:not(.no-js)"))
)
@@ -223,13 +242,13 @@ class WebDriverSession:
self.default_driver.close()
except Exception as e:
if debug.logging:
- print(f"Error closing WebDriver: {e}")
+ print(f"Error closing WebDriver: {str(e).splitlines()[0]}")
finally:
self.default_driver.quit()
if self.virtual_display:
self.virtual_display.stop()
def element_send_text(element: WebElement, text: str) -> None:
- script = "arguments[0].innerText = arguments[1]"
+ script = "arguments[0].innerText = arguments[1];"
element.parent.execute_script(script, element, text)
element.send_keys(Keys.ENTER)
\ No newline at end of file
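When selenium-wire is installed, the Chrome subclass above records traffic on driver.requests, which get_args_from_browser mines for real client-hint headers; without it, the plain undetected Chrome is used. A hedged usage sketch (assuming WebDriverSession's defaults allow no-argument construction):

    from g4f.webdriver import WebDriverSession

    with WebDriverSession() as driver:
        driver.get("https://example.com")
        if hasattr(driver, "requests"):       # selenium-wire present
            for request in driver.requests:   # captured HTTP traffic
                print(request.url, dict(request.headers))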
diff --git a/requirements.txt b/requirements.txt
index 36126722..1a7ded34 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,4 +21,5 @@ brotli
beautifulsoup4
setuptools
aiohttp_socks
+selenium-wire
gpt4all
\ No newline at end of file
diff --git a/setup.py b/setup.py
index a81770f1..e653620a 100644
--- a/setup.py
+++ b/setup.py
@@ -33,6 +33,7 @@ EXTRA_REQUIRE = {
"werkzeug", "flask", # gui
"loguru", "fastapi",
"uvicorn", "nest_asyncio", # api
+ "selenium-wire"
],
"image": [
"pillow",
@@ -42,7 +43,8 @@ EXTRA_REQUIRE = {
"webdriver": [
"platformdirs",
"undetected-chromedriver",
- "setuptools"
+ "setuptools",
+ "selenium-wire"
],
"openai": [
"async-property",