-rw-r--r--  CONTRIBUTING.md                     |  2
-rw-r--r--  README.md                           |  8
-rw-r--r--  g4f/Provider/Providers/AItianhu.py  | 36
-rw-r--r--  g4f/Provider/Providers/Acytoo.py    | 41
-rw-r--r--  g4f/Provider/Providers/DFEHub.py    | 44
-rw-r--r--  g4f/Provider/Providers/EasyChat.py  | 43
-rw-r--r--  g4f/Provider/__init__.py            |  6
7 files changed, 176 insertions, 4 deletions
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 932dc30f..67aa60da 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,6 +3,6 @@
### Please, follow these steps to contribute:
1. Reverse a website from this list: [sites-to-reverse](https://github.com/xtekky/gpt4free/issues/40)
2. Add it to [./testing](https://github.com/xtekky/gpt4free/tree/main/testing)
-3. Refractor it and add it to [./gpt4free](https://github.com/xtekky/gpt4free/tree/main/gpt4free)
+3. Refactor it and add it to [./g4f](https://github.com/xtekky/gpt4free/tree/main/g4f)
### We will be grateful to see you as a contributor!
diff --git a/README.md b/README.md
index bdca945c..bc1e9dd9 100644
--- a/README.md
+++ b/README.md
@@ -152,17 +152,21 @@ for token in chat_completion:
| --- | --- | --- | --- | --- | --- | --- |
| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [super.lockchat.app](http://super.lockchat.app) | `g4f.Provider.Lockchat` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [liaobots.com](https://liaobots.com) | `g4f.Provider.Liaobots` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
+| [liaobots.com](https://liaobots.com) | `g4f.Provider.Liaobots` | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ✔️ |
| [ai.ls](https://ai.ls) | `g4f.Provider.Ails` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [you.com](https://you.com) | `g4f.Provider.You` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [chat9.yqcloud.top](https://chat9.yqcloud.top/) | `g4f.Provider.Yqcloud` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [theb.ai](https://theb.ai) | `g4f.Provider.Theb` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [play.vercel.ai](https://play.vercel.ai) | `g4f.Provider.Vercel` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [play.vercel.ai](https://play.vercel.ai) | `g4f.Provider.Vercel` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [forefront.com](https://forefront.com) | `g4f.Provider.Forefront` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [deepai.org](https://deepai.org) | `g4f.Provider.DeepAi` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [chat.getgpt.world](https://chat.getgpt.world/) | `g4f.Provider.GetGpt` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [chatgptlogin.ac](https://chatgptlogin.ac) | `g4f.Provider.ChatgptLogin` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [chat-gpt.org](https://chat-gpt.org/chat) | `g4f.Provider.Aichat` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat.acytoo.com](https://chat.acytoo.com) | `g4f.Provider.Acytoo` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [aitianhu.com](https://aitianhu.com) | `g4f.Provider.AItianhu` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat.dfehub.com](https://chat.dfehub.com) | `g4f.Provider.DFEHub` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [free.easychat.work](https://free.easychat.work) | `g4f.Provider.EasyChat` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
### Other Models
diff --git a/g4f/Provider/Providers/AItianhu.py b/g4f/Provider/Providers/AItianhu.py
new file mode 100644
index 00000000..d3e6a45f
--- /dev/null
+++ b/g4f/Provider/Providers/AItianhu.py
@@ -0,0 +1,36 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+
+url = "https://www.aitianhu.com/api/chat-process"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    base = ''
+    for message in messages:
+        base += '%s: %s\n' % (message['role'], message['content'])
+    base += 'assistant:'
+
+    headers = {
+        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+    }
+    data = {
+        "prompt": base,
+        "options": {},
+        "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+        "temperature": 0.8,
+        "top_p": 1
+    }
+    response = requests.post(url, headers=headers, json=data)
+    if response.status_code == 200:
+        lines = response.text.strip().split('\n')
+        res = json.loads(lines[-1])
+        yield res['text']
+    else:
+        print(f"Error occurred: {response.status_code}")
+        return None
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
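Note: the trailing "params" expression (repeated in each new provider below) is plain runtime introspection. A rough standalone illustration of what it evaluates to; the hard-coded module name here is only for illustration, the real code derives it from the file name:

    from typing import get_type_hints

    def _create_completion(model: str, messages: list, stream: bool, **kwargs): ...

    # Positional parameter names, excluding **kwargs, paired with their annotations.
    names = _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]
    hints = get_type_hints(_create_completion)
    print('g4f.Providers.AItianhu supports: (%s)' %
          ', '.join(f'{name}: {hints[name].__name__}' for name in names))
    # -> g4f.Providers.AItianhu supports: (model: str, messages: list, stream: bool)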
diff --git a/g4f/Provider/Providers/Acytoo.py b/g4f/Provider/Providers/Acytoo.py
new file mode 100644
index 00000000..06083eb5
--- /dev/null
+++ b/g4f/Provider/Providers/Acytoo.py
@@ -0,0 +1,41 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+
+url = "https://chat.acytoo.com/api/completions"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    base = ''
+    for message in messages:
+        base += '%s: %s\n' % (message['role'], message['content'])
+    base += 'assistant:'
+
+    headers = {
+        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+    }
+    data = {
+        "key": "",
+        "model": "gpt-3.5-turbo",
+        "messages": [
+            {
+                "role": "user",
+                "content": base,
+                "createdAt": 1688518523500
+            }
+        ],
+        "temperature": 1,
+        "password": ""
+    }
+
+    response = requests.post(url, headers=headers, data=json.dumps(data))
+    if response.status_code == 200:
+        yield response.text
+    else:
+        print(f"Error occurred: {response.status_code}")
+        return None
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/DFEHub.py b/g4f/Provider/Providers/DFEHub.py
new file mode 100644
index 00000000..1bbdd01e
--- /dev/null
+++ b/g4f/Provider/Providers/DFEHub.py
@@ -0,0 +1,44 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+
+url = "https://chat.dfehub.com/api/chat"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    base = ''
+    for message in messages:
+        base += '%s: %s\n' % (message['role'], message['content'])
+    base += 'assistant:'
+
+    headers = {
+        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+    }
+    data = {
+        "model": {
+            "id": "gpt-3.5-turbo",
+            "name": "GPT-3.5",
+            "maxLength": 12000,
+            "tokenLimit": 4000
+        },
+        "messages": [
+            {
+                "role": "user",
+                "content": base
+            }
+        ],
+        "key": "",
+        "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+        "temperature": 1
+    }
+    response = requests.post(url, headers=headers, data=json.dumps(data))
+    if response.status_code == 200:
+        yield response.text
+    else:
+        print(f"Error occurred: {response.status_code}")
+        return None
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/EasyChat.py b/g4f/Provider/Providers/EasyChat.py
new file mode 100644
index 00000000..9f4aa7b2
--- /dev/null
+++ b/g4f/Provider/Providers/EasyChat.py
@@ -0,0 +1,43 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+
+url = "https://free.easychat.work/api/openai/v1/chat/completions"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    '''limited to 240 messages/hour'''
+    base = ''
+    for message in messages:
+        base += '%s: %s\n' % (message['role'], message['content'])
+    base += 'assistant:'
+
+    headers = {
+        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+    }
+
+    data = {
+        "messages": [
+            {"role": "system", "content": "You are ChatGPT, a large language model trained by OpenAI."},
+            {"role": "user", "content": base}
+        ],
+        "stream": False,
+        "model": "gpt-3.5-turbo",
+        "temperature": 0.5,
+        "presence_penalty": 0,
+        "frequency_penalty": 0,
+        "top_p": 1
+    }
+
+    response = requests.post(url, headers=headers, json=data)
+    if response.status_code == 200:
+        response = response.json()
+        yield response['choices'][0]['message']['content']
+    else:
+        print(f"Error occurred: {response.status_code}")
+        return None
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
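All four new providers expose _create_completion as a generator that yields the whole reply in a single chunk (each sets supports_stream = False). A minimal sketch of calling one of them directly; the import path follows the g4f/Provider/Providers package layout added above:

    from g4f.Provider.Providers import EasyChat

    # The generator yields exactly one string containing the full completion.
    for chunk in EasyChat._create_completion(
            model='gpt-3.5-turbo',
            messages=[{'role': 'user', 'content': 'Say hello'}],
            stream=False):
        print(chunk, end='')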
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 269fa17e..3a86291d 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -14,7 +14,11 @@ from .Providers import (
     H2o,
     ChatgptLogin,
     DeepAi,
-    GetGpt
+    GetGpt,
+    AItianhu,
+    EasyChat,
+    Acytoo,
+    DFEHub,
 )
 
 Palm = Bard
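With the registrations above, the new providers become selectable through the library's high-level interface. A minimal usage sketch, assuming the g4f.ChatCompletion.create(...) call shape shown in the README examples:

    import g4f

    # Non-streaming request routed through one of the providers added in this change.
    response = g4f.ChatCompletion.create(
        model='gpt-3.5-turbo',
        provider=g4f.Provider.Acytoo,   # or AItianhu, DFEHub, EasyChat
        messages=[{'role': 'user', 'content': 'Hello, how are you?'}],
        stream=False)                   # these providers set supports_stream = False
    print(response)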