-rw-r--r--  .github/workflows/close-inactive-issues.yml  31
-rw-r--r--  .gitignore  10
-rw-r--r--  .vscode/settings.json  7
-rw-r--r--  CONTRIBUTING.md  2
-rw-r--r--  README.md  72
-rw-r--r--  g4f/.v1/requirements.txt  8
-rw-r--r--  g4f/Provider/Provider.py  1
-rw-r--r--  g4f/Provider/Providers/AItianhu.py  38
-rw-r--r--  g4f/Provider/Providers/Acytoo.py  42
-rw-r--r--  g4f/Provider/Providers/AiService.py  41
-rw-r--r--  g4f/Provider/Providers/Aichat.py  1
-rw-r--r--  g4f/Provider/Providers/Ails.py  12
-rw-r--r--  g4f/Provider/Providers/Bard.py  2
-rw-r--r--  g4f/Provider/Providers/Bing.py  1
-rw-r--r--  g4f/Provider/Providers/BingHuan.py  28
-rw-r--r--  g4f/Provider/Providers/ChatgptAi.py  5
-rw-r--r--  g4f/Provider/Providers/ChatgptLogin.py  4
-rw-r--r--  g4f/Provider/Providers/DeepAi.py  80
-rw-r--r--  g4f/Provider/Providers/DfeHub.py  56
-rw-r--r--  g4f/Provider/Providers/EasyChat.py  52
-rw-r--r--  g4f/Provider/Providers/Forefront.py  2
-rw-r--r--  g4f/Provider/Providers/GetGpt.py  2
-rw-r--r--  g4f/Provider/Providers/H2o.py  198
-rw-r--r--  g4f/Provider/Providers/Liaobots.py  1
-rw-r--r--  g4f/Provider/Providers/Lockchat.py  7
-rw-r--r--  g4f/Provider/Providers/Theb.py  3
-rw-r--r--  g4f/Provider/Providers/Vercel.py  118
-rw-r--r--  g4f/Provider/Providers/Wewordle.py  73
-rw-r--r--  g4f/Provider/Providers/You.py  1
-rw-r--r--  g4f/Provider/Providers/Yqcloud.py  1
-rw-r--r--  g4f/Provider/Providers/__init__.py  0
-rw-r--r--  g4f/Provider/Providers/helpers/binghuan.py  221
-rw-r--r--  g4f/Provider/Providers/opchatgpts.py  42
-rw-r--r--  g4f/Provider/__init__.py  11
-rw-r--r--  g4f/__init__.py  11
-rw-r--r--  g4f/models.py  437
-rw-r--r--  pyproject.toml  21
-rw-r--r--  requirements.txt  27
-rw-r--r--  setup.py  36
-rw-r--r--  testing/readme_table.py  27
-rw-r--r--  testing/test.py  12
41 files changed, 1219 insertions(+), 525 deletions(-)
diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml
new file mode 100644
index 00000000..d81b727a
--- /dev/null
+++ b/.github/workflows/close-inactive-issues.yml
@@ -0,0 +1,31 @@
+name: Close inactive issues
+
+on:
+ schedule:
+ - cron: "5 0 * * *"
+
+jobs:
+ close-issues:
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ pull-requests: write
+ steps:
+ - uses: actions/stale@v5
+ with:
+ days-before-issue-stale: 7
+ days-before-issue-close: 7
+
+ days-before-pr-stale: 7
+ days-before-pr-close: 7
+
+ stale-issue-label: "stale"
+ stale-pr-label: "stale"
+
+ stale-issue-message: "Bumping this issue because it has been open for 7 days with no activity. Closing automatically in 7 days unless it becomes active again."
+ close-issue-message: "Closing due to inactivity."
+
+ stale-pr-message: "Bumping this pull request because it has been open for 7 days with no activity. Closing automatically in 7 days unless it becomes active again."
+ close-pr-message: "Closing due to inactivity."
+
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.gitignore b/.gitignore
index 8ce2a38b..03f73c6d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,4 +29,12 @@ cookie.json
*.pyc
-dist/
\ No newline at end of file
+dist/
+*.egg-info/
+*.egg
+*.egg-info
+build
+test.py
+
+# Emacs crap
+*~
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 00000000..ae2a0b0e
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,7 @@
+{
+ "[python]": {
+ "editor.defaultFormatter": "ms-python.black-formatter",
+ "editor.formatOnSave": true,
+ },
+ "python.formatting.provider": "none"
+}
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 932dc30f..67aa60da 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,6 +3,6 @@
### Please, follow these steps to contribute:
1. Reverse a website from this list: [sites-to-reverse](https://github.com/xtekky/gpt4free/issues/40)
2. Add it to [./testing](https://github.com/xtekky/gpt4free/tree/main/testing)
-3. Refractor it and add it to [./gpt4free](https://github.com/xtekky/gpt4free/tree/main/gpt4free)
+3. Refactor it and add it to [./g4f](https://github.com/xtekky/gpt4free/tree/main/g4f)
### We will be grateful to see you as a contributor!
diff --git a/README.md b/README.md
index c31dc88a..d167f3df 100644
--- a/README.md
+++ b/README.md
@@ -1,26 +1,33 @@
-![image](https://github.com/onlpx/gpt4free-v2/assets/98614666/7886223b-c1d1-4260-82aa-da5741f303bb)
+
+![248433934-7886223b-c1d1-4260-82aa-da5741f303bb](https://github.com/xtekky/gpt4free/assets/98614666/ea012c87-76e0-496a-8ac4-e2de090cc6c9)
By using this repository or any code related to it, you agree to the [legal notice](./LEGAL_NOTICE.md). The author is not responsible for any copies, forks, or reuploads made by other users. This is the author's only account and repository. To prevent impersonation or irresponsible actions, you must comply with the GNU GPL license this repository uses.
This new version of gpt4free (quite censored) was just released; it may still contain bugs, so open an issue or contribute a PR when you encounter one. Some features were disabled.
Docker is for now not available but I would be happy if someone contributes a PR. The g4f GUI will be uploaded soon enough.
+### New
+- pypi package:
+```
+pip install -U g4f
+```
+
## Table of Contents:
- [Getting Started](#getting-started)
- + [Prerequisites](#prerequisites-)
- + [Setting up the project](#setting-up-the-project-)
+ + [Prerequisites](#prerequisites)
+ + [Setting up the project](#setting-up-the-project)
- [Usage](#usage)
- * [The `g4f` Package](#the--g4f--package)
- * [interference openai-proxy api](#interference-openai-proxy-api--use-with-openai-python-package-)
+ * [The `g4f` Package](#the-g4f-package)
+ * [interference openai-proxy api](#interference-openai-proxy-api-use-with-openai-python-package)
- [Models](#models)
- * [gpt-3.5 / gpt-4](#gpt-35---gpt-4)
+ * [gpt-3.5 / gpt-4](#gpt-35--gpt-4)
* [Other Models](#other-models)
- [Related gpt4free projects](#related-gpt4free-projects)
- [Contribute](#contribute)
- [ChatGPT clone](#chatgpt-clone)
-- [Copyright](#copyright-)
-- [Copyright Notice](#copyright-notice-)
+- [Copyright](#copyright)
+- [Copyright Notice](#copyright-notice)
- [Star History](#star-history)
## Getting Started
@@ -29,6 +36,13 @@ Docker is for now not available but I would be happy if someone contributes a PR
1. [Download and install Python](https://www.python.org/downloads/) (Version 3.x is recommended).
#### Setting up the project:
+##### Install using pypi
+```
+pip install -U g4f
+```
+
+##### or
+
1. Clone the GitHub repository:
```
git clone https://github.com/xtekky/gpt4free.git
@@ -81,7 +95,7 @@ for message in response:
print(message)
# normal response
-response = g4f.ChatCompletion.create(model=g4f.Model.gpt_4, messages=[
+response = g4f.ChatCompletion.create(model=g4f.models.gpt_4, messages=[
{"role": "user", "content": "hi"}]) # alterative model setting
print(response)
@@ -148,21 +162,32 @@ for token in chat_completion:
### gpt-3.5 / gpt-4
-| Website| Provider| gpt-3.5 | gpt-4 | Streaming | Status | Auth |
+| Website| Provider| gpt-3.5 | gpt-4 | Stream | Status | Auth |
| --- | --- | --- | --- | --- | --- | --- |
-| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [super.lockchat.app](http://super.lockchat.app) | `g4f.Provider.Lockchat` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [liaobots.com](https://liaobots.com) | `g4f.Provider.Liaobots` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
| [ai.ls](https://ai.ls) | `g4f.Provider.Ails` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [you.com](https://you.com) | `g4f.Provider.You` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chat9.yqcloud.top](https://chat9.yqcloud.top/) | `g4f.Provider.Yqcloud` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [theb.ai](https://theb.ai) | `g4f.Provider.Theb` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [you.com](https://you.com) | `g4f.Provider.You` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat9.yqcloud.top](https://chat9.yqcloud.top/) | `g4f.Provider.Yqcloud` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [theb.ai](https://theb.ai) | `g4f.Provider.Theb` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chat-gpt.org](https://chat-gpt.org/chat) | `g4f.Provider.Aichat` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [bard.google.com](https://bard.google.com) | `g4f.Provider.Bard` | ❌ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
| [play.vercel.ai](https://play.vercel.ai) | `g4f.Provider.Vercel` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [forefront.com](https://forefront.com) | `g4f.Provider.Forefront` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [forefront.com](https://forefront.com) | `g4f.Provider.Forefront` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [supertest.lockchat.app](http://supertest.lockchat.app) | `g4f.Provider.Lockchat` | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [liaobots.com](https://liaobots.com) | `g4f.Provider.Liaobots` | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ✔️ |
+| [gpt-gm.h2o.ai](https://gpt-gm.h2o.ai) | `g4f.Provider.H2o` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chatgptlogin.ac](https://chatgptlogin.ac) | `g4f.Provider.ChatgptLogin` | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [deepai.org](https://deepai.org) | `g4f.Provider.DeepAi` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chat.getgpt.world](https://chat.getgpt.world/) | `g4f.Provider.GetGpt` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chatgptlogin.ac](https://chatgptlogin.ac) | `g4f.Provider.ChatgptLogin` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chat-gpt.org](https://chat-gpt.org/chat) | `g4f.Provider.Aichat` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat.getgpt.world](https://chat.getgpt.world/) | `g4f.Provider.GetGpt` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [www.aitianhu.com](https://www.aitianhu.com/api/chat-process) | `g4f.Provider.AItianhu` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [free.easychat.work](https://free.easychat.work) | `g4f.Provider.EasyChat` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat.acytoo.com](https://chat.acytoo.com/api/completions) | `g4f.Provider.Acytoo` | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chat.dfehub.com](https://chat.dfehub.com/api/chat) | `g4f.Provider.DfeHub` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [aiservice.vercel.app](https://aiservice.vercel.app/api/chat/answer) | `g4f.Provider.AiService` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [b.ai-huan.xyz](https://b.ai-huan.xyz) | `g4f.Provider.BingHuan` | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [wewordle.org](https://wewordle.org/gptapi/v1/android/turbo) | `g4f.Provider.Wewordle` | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatgpt.ai](https://chatgpt.ai/gpt-4/) | `g4f.Provider.ChatgptAi` | ❌ | ✔️ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [opchatgpts.net](https://opchatgpts.net) | `g4f.Provider.opchatgpts` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
### Other Models
@@ -237,6 +262,13 @@ for token in chat_completion:
<td><a href="https://github.com/mishalhossin/Discord-Chatbot-Gpt4Free/issues"><img alt="Issues" src="https://img.shields.io/github/issues/mishalhossin/Discord-Chatbot-Gpt4Free?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/mishalhossin/Coding-Chatbot-Gpt4Free/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/mishalhossin/Discord-Chatbot-Gpt4Free?style=flat-square&labelColor=343b41"/></a></td>
</tr>
+ <tr>
+ <td><a href="https://github.com/MIDORIBIN/langchain-gpt4free"><b>LangChain gpt4free</b></a></td>
+ <td><a href="https://github.com/MIDORIBIN/langchain-gpt4free/stargazers"><img alt="Stars" src="https://img.shields.io/github/stars/MIDORIBIN/langchain-gpt4free?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/MIDORIBIN/langchain-gpt4free/network/members"><img alt="Forks" src="https://img.shields.io/github/forks/MIDORIBIN/langchain-gpt4free?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/MIDORIBIN/langchain-gpt4free/issues"><img alt="Issues" src="https://img.shields.io/github/issues/MIDORIBIN/langchain-gpt4free?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/MIDORIBIN/langchain-gpt4free/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/MIDORIBIN/langchain-gpt4free?style=flat-square&labelColor=343b41"/></a></td>
+ </tr>
</tbody>
</table>
diff --git a/g4f/.v1/requirements.txt b/g4f/.v1/requirements.txt
index 3a1f815b..4cbabf17 100644
--- a/g4f/.v1/requirements.txt
+++ b/g4f/.v1/requirements.txt
@@ -5,11 +5,13 @@ pypasser
names
colorama
curl_cffi
+aiohttp
+flask
+flask_cors
streamlit
selenium
fake-useragent
twocaptcha
-https://github.com/AI-Yash/st-chat/archive/refs/pull/24/head.zip
pydantic
pymailtm
Levenshtein
@@ -18,4 +20,6 @@ mailgw_temporary_email
pycryptodome
random-password-generator
numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability
-tornado>=6.3.2 # not directly required, pinned by Snyk to avoid a vulnerability \ No newline at end of file
+tornado>=6.3.2 # not directly required, pinned by Snyk to avoid a vulnerability
+PyExecJS
+browser_cookie3
\ No newline at end of file
diff --git a/g4f/Provider/Provider.py b/g4f/Provider/Provider.py
index 12c23333..f123becd 100644
--- a/g4f/Provider/Provider.py
+++ b/g4f/Provider/Provider.py
@@ -5,6 +5,7 @@ url = None
model = None
supports_stream = False
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
return
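The `working = False` default added to `Provider.py` gives every provider module a uniform flag next to `url`, `model`, `supports_stream`, and `needs_auth`, so callers can filter providers before dispatching. A sketch of how such flags can be queried (the helper below is hypothetical, not part of the package):

```
import importlib
import pkgutil

import g4f.Provider.Providers as providers

def working_providers():
    """Hypothetical helper: names of provider modules whose `working` flag is True."""
    for info in pkgutil.iter_modules(providers.__path__):
        mod = importlib.import_module(f"g4f.Provider.Providers.{info.name}")
        if getattr(mod, "working", False):
            yield info.name

print(sorted(working_providers()))
```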
diff --git a/g4f/Provider/Providers/AItianhu.py b/g4f/Provider/Providers/AItianhu.py
new file mode 100644
index 00000000..0bdaa09a
--- /dev/null
+++ b/g4f/Provider/Providers/AItianhu.py
@@ -0,0 +1,38 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+
+url = "https://www.aitianhu.com/api/chat-process"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+working = True
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ base = ''
+ for message in messages:
+ base += '%s: %s\n' % (message['role'], message['content'])
+ base += 'assistant:'
+
+ headers = {
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+ }
+ data = {
+ "prompt": base,
+ "options": {},
+ "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+ "temperature": kwargs.get("temperature", 0.8),
+ "top_p": kwargs.get("top_p", 1)
+ }
+ response = requests.post(url, headers=headers, json=data)
+ if response.status_code == 200:
+ lines = response.text.strip().split('\n')
+ res = json.loads(lines[-1])
+ yield res['text']
+ else:
+ print(f"Error Occurred::{response.status_code}")
+ return None
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
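AItianhu, like most providers in this commit, flattens the chat history into a single role-prefixed prompt before posting it. The transform in isolation:

```
messages = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "hi"},
]

base = ""
for message in messages:
    base += "%s: %s\n" % (message["role"], message["content"])
base += "assistant:"

assert base == "system: You are helpful.\nuser: hi\nassistant:"
```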
diff --git a/g4f/Provider/Providers/Acytoo.py b/g4f/Provider/Providers/Acytoo.py
new file mode 100644
index 00000000..4f40eac2
--- /dev/null
+++ b/g4f/Provider/Providers/Acytoo.py
@@ -0,0 +1,42 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+
+url = "https://chat.acytoo.com/api/completions"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+working = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ base = ''
+ for message in messages:
+ base += '%s: %s\n' % (message['role'], message['content'])
+ base += 'assistant:'
+
+ headers = {
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+ }
+ data = {
+ "key": "",
+ "model": "gpt-3.5-turbo",
+ "messages": [
+ {
+ "role": "user",
+ "content": base,
+ "createdAt": 1688518523500
+ }
+ ],
+ "temperature": 1,
+ "password": ""
+ }
+
+ response = requests.post(url, headers=headers, data=json.dumps(data))
+ if response.status_code == 200:
+ yield response.text
+ else:
+ print(f"Error Occurred::{response.status_code}")
+ return None
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/AiService.py b/g4f/Provider/Providers/AiService.py
new file mode 100644
index 00000000..0f9d9c47
--- /dev/null
+++ b/g4f/Provider/Providers/AiService.py
@@ -0,0 +1,41 @@
+import os,sys
+import requests
+from ...typing import get_type_hints
+
+url = "https://aiservice.vercel.app/api/chat/answer"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+working = True
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ base = ''
+ for message in messages:
+ base += '%s: %s\n' % (message['role'], message['content'])
+ base += 'assistant:'
+
+ headers = {
+ "accept": "*/*",
+ "content-type": "text/plain;charset=UTF-8",
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "Referer": "https://aiservice.vercel.app/chat",
+ }
+ data = {
+ "input": base
+ }
+ response = requests.post(url, headers=headers, json=data)
+ if response.status_code == 200:
+ _json = response.json()
+ yield _json['data']
+ else:
+ print(f"Error Occurred::{response.status_code}")
+ return None
+
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join(
+ [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/Aichat.py b/g4f/Provider/Providers/Aichat.py
index e4fde8c3..919486f2 100644
--- a/g4f/Provider/Providers/Aichat.py
+++ b/g4f/Provider/Providers/Aichat.py
@@ -5,6 +5,7 @@ url = 'https://chat-gpt.org/chat'
model = ['gpt-3.5-turbo']
supports_stream = False
needs_auth = False
+working = True
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
base = ''
diff --git a/g4f/Provider/Providers/Ails.py b/g4f/Provider/Providers/Ails.py
index 1a14b2e9..1433c4a3 100644
--- a/g4f/Provider/Providers/Ails.py
+++ b/g4f/Provider/Providers/Ails.py
@@ -13,6 +13,8 @@ url: str = 'https://ai.ls'
model: str = 'gpt-3.5-turbo'
supports_stream = True
needs_auth = False
+working = True
+
class Utils:
def hash(json_data: Dict[str, str]) -> sha256:
@@ -35,7 +37,9 @@ class Utils:
n = e % 10
r = n + 1 if n % 2 == 0 else n
return str(e - n + r)
-
+ def getV():
+ crossref = requests.get("https://ai.ls"+ requests.get("https://ai.ls/?chat=1").text.split('crossorigin href="')[1].split('"')[0]).text.split('G4="')[1].split('"')[0]
+ return crossref
def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False, **kwargs):
@@ -45,7 +49,7 @@ def _create_completion(model: str, messages: list, temperature: float = 0.6, str
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'authorization': 'Bearer free',
'client-id': str(uuid.uuid4()),
- 'client-v': '0.1.217',
+ 'client-v': Utils.getV(),
'content-type': 'application/json',
'origin': 'https://ai.ls',
'referer': 'https://ai.ls/',
@@ -73,7 +77,7 @@ def _create_completion(model: str, messages: list, temperature: float = 0.6, str
json_data = json.dumps(separators=(',', ':'), obj={
'model': 'gpt-3.5-turbo',
- 'temperature': 0.6,
+ 'temperature': temperature,
'stream': True,
'messages': messages} | sig)
@@ -88,4 +92,4 @@ def _create_completion(model: str, messages: list, temperature: float = 0.6, str
yield token
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
+ '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
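The new `Utils.getV()` replaces the hard-coded `client-v` header by scraping the current version out of the site's JS bundle on every call. The same logic, unpacked into steps (assuming the page structure is unchanged):

```
import requests

page = requests.get("https://ai.ls/?chat=1").text
bundle_path = page.split('crossorigin href="')[1].split('"')[0]  # relative path of the JS bundle
bundle = requests.get("https://ai.ls" + bundle_path).text
client_v = bundle.split('G4="')[1].split('"')[0]                 # a version string like the old "0.1.217"
```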
diff --git a/g4f/Provider/Providers/Bard.py b/g4f/Provider/Providers/Bard.py
index 4c37c4b7..0d007a10 100644
--- a/g4f/Provider/Providers/Bard.py
+++ b/g4f/Provider/Providers/Bard.py
@@ -5,6 +5,8 @@ url = 'https://bard.google.com'
model = ['Palm2']
supports_stream = False
needs_auth = True
+working = True
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
diff --git a/g4f/Provider/Providers/Bing.py b/g4f/Provider/Providers/Bing.py
index 2ec2cf05..5e290f91 100644
--- a/g4f/Provider/Providers/Bing.py
+++ b/g4f/Provider/Providers/Bing.py
@@ -16,6 +16,7 @@ url = 'https://bing.com/chat'
model = ['gpt-4']
supports_stream = True
needs_auth = False
+working = True
ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(certifi.where())
diff --git a/g4f/Provider/Providers/BingHuan.py b/g4f/Provider/Providers/BingHuan.py
new file mode 100644
index 00000000..64b67e4b
--- /dev/null
+++ b/g4f/Provider/Providers/BingHuan.py
@@ -0,0 +1,28 @@
+import os,sys
+import json
+import subprocess
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://b.ai-huan.xyz'
+model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = True
+needs_auth = False
+working = False
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ path = os.path.dirname(os.path.realpath(__file__))
+ config = json.dumps({
+ 'messages': messages,
+ 'model': model}, separators=(',', ':'))
+ cmd = ['python', f'{path}/helpers/binghuan.py', config]
+
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+ for line in iter(p.stdout.readline, b''):
+ yield line.decode('cp1252')
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join(
+ [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/ChatgptAi.py b/g4f/Provider/Providers/ChatgptAi.py
index 00d4cf6f..1f9ead0e 100644
--- a/g4f/Provider/Providers/ChatgptAi.py
+++ b/g4f/Provider/Providers/ChatgptAi.py
@@ -6,6 +6,8 @@ url = 'https://chatgpt.ai/gpt-4/'
model = ['gpt-4']
supports_stream = False
needs_auth = False
+working = True
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
chat = ''
@@ -13,8 +15,7 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
chat += '%s: %s\n' % (message['role'], message['content'])
chat += 'assistant: '
- response = requests.get('https://chatgpt.ai/gpt-4/')
-
+ response = requests.get('https://chatgpt.ai/')
nonce, post_id, _, bot_id = re.findall(r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width', response.text)[0]
headers = {
diff --git a/g4f/Provider/Providers/ChatgptLogin.py b/g4f/Provider/Providers/ChatgptLogin.py
index 9551d15d..0fdbab8e 100644
--- a/g4f/Provider/Providers/ChatgptLogin.py
+++ b/g4f/Provider/Providers/ChatgptLogin.py
@@ -8,7 +8,7 @@ url = 'https://chatgptlogin.ac'
model = ['gpt-3.5-turbo']
supports_stream = False
needs_auth = False
-
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
def get_nonce():
@@ -75,7 +75,7 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
'userName': '<div class="mwai-name-text">User:</div>',
'aiName': '<div class="mwai-name-text">AI:</div>',
'model': 'gpt-3.5-turbo',
- 'temperature': 0.8,
+ 'temperature': kwargs.get('temperature', 0.8),
'maxTokens': 1024,
'maxResults': 1,
'apiKey': '',
diff --git a/g4f/Provider/Providers/DeepAi.py b/g4f/Provider/Providers/DeepAi.py
index 02b08120..27618cbb 100644
--- a/g4f/Provider/Providers/DeepAi.py
+++ b/g4f/Provider/Providers/DeepAi.py
@@ -1,45 +1,73 @@
-import os
import json
-import random
-import hashlib
+import os
import requests
-
+import js2py
from ...typing import sha256, Dict, get_type_hints
-url = 'https://deepai.org'
+
+url = "https://api.deepai.org/"
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = True
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- def md5(text: str) -> str:
- return hashlib.md5(text.encode()).hexdigest()[::-1]
+token_js = """
+var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
+var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;
+h = Math.round(1E11 * Math.random()) + "";
+f = function () {
+ for (var p = [], q = 0; 64 > q;) p[q] = 0 | 4294967296 * Math.sin(++q % Math.PI);
+
+ return function (t) {
+ var v, y, H, ea = [v = 1732584193, y = 4023233417, ~v, ~y],
+ Z = [],
+ A = unescape(encodeURI(t)) + "\u0080",
+ z = A.length;
+ t = --z / 4 + 2 | 15;
+ for (Z[--t] = 8 * z; ~z;) Z[z >> 2] |= A.charCodeAt(z) << 8 * z--;
+ for (q = A = 0; q < t; q += 16) {
+ for (z = ea; 64 > A; z = [H = z[3], v + ((H = z[0] + [v & y | ~v & H, H & v | ~H & y, v ^ y ^ H, y ^ (v | ~H)][z = A >> 4] + p[A] + ~~Z[q | [A, 5 * A + 1, 3 * A + 5, 7 * A][z] & 15]) << (z = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * z + A++ % 4]) | H >>> -z), v, y]) v = z[1] | 0, y = z[2];
+ for (A = 4; A;) ea[--A] += z[A]
+ }
+ for (t = ""; 32 > A;) t += (ea[A >> 3] >> 4 * (1 ^ A++) & 15).toString(16);
+ return t.split("").reverse().join("")
+ }
+}();
+"tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x")));
+"""
- def get_api_key(user_agent: str) -> str:
- part1 = str(random.randint(0, 10**11))
- part2 = md5(user_agent + md5(user_agent + md5(user_agent + part1 + "x")))
-
- return f"tryit-{part1}-{part2}"
+uuid4_js = """
+function uuidv4() {
+ for (var a = [], b = 0; 36 > b; b++) a[b] = "0123456789abcdef".substr(Math.floor(16 * Math.random()), 1);
+ a[14] = "4";
+ a[19] = "0123456789abcdef".substr(a[19] & 3 | 8, 1);
+ a[8] = a[13] = a[18] = a[23] = "-";
+ return a.join("")
+}
+uuidv4();"""
- user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
+def create_session():
+ url = "https://api.deepai.org/save_chat_session"
- headers = {
- "api-key": get_api_key(user_agent),
- "user-agent": user_agent
- }
+ payload = {'uuid': js2py.eval_js(uuid4_js), "title":"", "chat_style": "chat", "messages": '[]'}
+ headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"}
- files = {
- "chat_style": (None, "chat"),
- "chatHistory": (None, json.dumps(messages))
- }
+ response = requests.request("POST", url, headers=headers, data=payload)
+ return response
- r = requests.post("https://api.deepai.org/chat_response", headers=headers, files=files, stream=True)
+def _create_completion(model: str, messages:list, stream: bool = True, **kwargs):
+ create_session()
+ url = "https://api.deepai.org/make_me_a_pizza"
- for chunk in r.iter_content(chunk_size=None):
- r.raise_for_status()
- yield chunk.decode()
+    payload = {'chat_style': "chat", "chatHistory": json.dumps(messages)}
+ api_key = js2py.eval_js(token_js)
+ headers = {"api-key": api_key, "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"}
+ response = requests.request("POST", url, headers=headers, data=payload, stream=True)
+ for chunk in response.iter_content(chunk_size=None):
+ response.raise_for_status()
+ yield chunk.decode()
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
'(%s)' % ', '.join(
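The `token_js` blob above is an obfuscated MD5 routine; the Python code it replaces computed the same `tryit-` key directly. A pure-Python sketch of the key, assuming the scheme is unchanged, which would avoid the `js2py` dependency:

```
import hashlib
import random

def md5_rev(text: str) -> str:
    # MD5 hex digest, reversed, as in the removed implementation
    return hashlib.md5(text.encode()).hexdigest()[::-1]

agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"
part1 = str(random.randint(0, 10**11))
api_key = "tryit-%s-%s" % (part1, md5_rev(agent + md5_rev(agent + md5_rev(agent + part1 + "x"))))
```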
diff --git a/g4f/Provider/Providers/DfeHub.py b/g4f/Provider/Providers/DfeHub.py
new file mode 100644
index 00000000..e3ff8045
--- /dev/null
+++ b/g4f/Provider/Providers/DfeHub.py
@@ -0,0 +1,56 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+import re
+import time
+
+url = "https://chat.dfehub.com/api/chat"
+model = ['gpt-3.5-turbo']
+supports_stream = True
+needs_auth = False
+working = True
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ headers = {
+ 'authority': 'chat.dfehub.com',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat.dfehub.com',
+ 'referer': 'https://chat.dfehub.com/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+ 'x-requested-with': 'XMLHttpRequest',
+ }
+
+ json_data = {
+ 'messages': messages,
+ 'model': 'gpt-3.5-turbo',
+ 'temperature': kwargs.get('temperature', 0.5),
+ 'presence_penalty': kwargs.get('presence_penalty', 0),
+ 'frequency_penalty': kwargs.get('frequency_penalty', 0),
+ 'top_p': kwargs.get('top_p', 1),
+ "stream": True,
+ }
+ response = requests.post('https://chat.dfehub.com/api/openai/v1/chat/completions',
+ headers=headers, json=json_data)
+
+ for chunk in response.iter_lines():
+ if b'detail' in chunk:
+ delay = re.findall(r"\d+\.\d+", chunk.decode())
+ delay = float(delay[-1])
+ print(f"Provider.DfeHub::Rate Limit Reached::Waiting {delay} seconds")
+ time.sleep(delay)
+ yield from _create_completion(model, messages, stream, **kwargs)
+ if b'content' in chunk:
+ data = json.loads(chunk.decode().split('data: ')[1])
+ yield (data['choices'][0]['delta']['content'])
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
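The streaming loop above parses OpenAI-style SSE lines, and on a `detail` (rate-limit) chunk it sleeps for the advertised delay and re-enters itself via `yield from`. For a single content chunk the extraction is:

```
import json

chunk = b'data: {"choices": [{"delta": {"content": "Hello"}}]}'
data = json.loads(chunk.decode().split("data: ")[1])
print(data["choices"][0]["delta"]["content"])  # -> Hello
```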
diff --git a/g4f/Provider/Providers/EasyChat.py b/g4f/Provider/Providers/EasyChat.py
new file mode 100644
index 00000000..909428fa
--- /dev/null
+++ b/g4f/Provider/Providers/EasyChat.py
@@ -0,0 +1,52 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+
+url = "https://free.easychat.work"
+model = ['gpt-3.5-turbo']
+supports_stream = True
+needs_auth = False
+working = True
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ headers = {
+ 'authority': 'free.easychat.work',
+ 'accept': 'text/event-stream',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'content-type': 'application/json',
+ 'endpoint': '',
+ 'origin': 'https://free.easychat.work',
+ 'plugins': '0',
+ 'referer': 'https://free.easychat.work/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+ 'usesearch': 'false',
+ 'x-requested-with': 'XMLHttpRequest',
+ }
+
+ json_data = {
+ 'messages': messages,
+ 'stream': True,
+ 'model': model,
+ 'temperature': kwargs.get('temperature', 0.5),
+ 'presence_penalty': kwargs.get('presence_penalty', 0),
+ 'frequency_penalty': kwargs.get('frequency_penalty', 0),
+ 'top_p': kwargs.get('top_p', 1),
+ }
+
+ response = requests.post('https://free.easychat.work/api/openai/v1/chat/completions',
+ headers=headers, json=json_data)
+
+ for chunk in response.iter_lines():
+ if b'content' in chunk:
+ data = json.loads(chunk.decode().split('data: ')[1])
+ yield (data['choices'][0]['delta']['content'])
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Forefront.py b/g4f/Provider/Providers/Forefront.py
index e7e89831..70ea6725 100644
--- a/g4f/Provider/Providers/Forefront.py
+++ b/g4f/Provider/Providers/Forefront.py
@@ -7,6 +7,8 @@ url = 'https://forefront.com'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = False
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
json_data = {
diff --git a/g4f/Provider/Providers/GetGpt.py b/g4f/Provider/Providers/GetGpt.py
index 56a121f6..bafc0ce8 100644
--- a/g4f/Provider/Providers/GetGpt.py
+++ b/g4f/Provider/Providers/GetGpt.py
@@ -9,6 +9,8 @@ url = 'https://chat.getgpt.world/'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = True
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
def encrypt(e):
diff --git a/g4f/Provider/Providers/H2o.py b/g4f/Provider/Providers/H2o.py
index eabf94e2..92043026 100644
--- a/g4f/Provider/Providers/H2o.py
+++ b/g4f/Provider/Providers/H2o.py
@@ -1,106 +1,94 @@
-from requests import Session
-from uuid import uuid4
-from json import loads
-import os
-import json
-import requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://gpt-gm.h2o.ai'
-model = ['falcon-40b', 'falcon-7b', 'llama-13b']
-supports_stream = True
-needs_auth = False
-
-models = {
- 'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
- 'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
- 'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b'
-}
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- conversation = 'instruction: this is a conversation beween, a user and an AI assistant, respond to the latest message, referring to the conversation if needed\n'
- for message in messages:
- conversation += '%s: %s\n' % (message['role'], message['content'])
- conversation += 'assistant:'
-
- client = Session()
- client.headers = {
- 'authority': 'gpt-gm.h2o.ai',
- 'origin': 'https://gpt-gm.h2o.ai',
- 'referer': 'https://gpt-gm.h2o.ai/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'document',
- 'sec-fetch-mode': 'navigate',
- 'sec-fetch-site': 'same-origin',
- 'sec-fetch-user': '?1',
- 'upgrade-insecure-requests': '1',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- }
-
- client.get('https://gpt-gm.h2o.ai/')
- response = client.post('https://gpt-gm.h2o.ai/settings', data={
- 'ethicsModalAccepted': 'true',
- 'shareConversationsWithModelAuthors': 'true',
- 'ethicsModalAcceptedAt': '',
- 'activeModel': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
- 'searchEnabled': 'true',
- })
-
- headers = {
- 'authority': 'gpt-gm.h2o.ai',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'origin': 'https://gpt-gm.h2o.ai',
- 'referer': 'https://gpt-gm.h2o.ai/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- }
-
- json_data = {
- 'model': models[model]
- }
-
- response = client.post('https://gpt-gm.h2o.ai/conversation',
- headers=headers, json=json_data)
- conversationId = response.json()['conversationId']
-
-
- completion = client.post(f'https://gpt-gm.h2o.ai/conversation/{conversationId}', stream=True, json = {
- 'inputs': conversation,
- 'parameters': {
- 'temperature': kwargs.get('temperature', 0.4),
- 'truncate': kwargs.get('truncate', 2048),
- 'max_new_tokens': kwargs.get('max_new_tokens', 1024),
- 'do_sample': kwargs.get('do_sample', True),
- 'repetition_penalty': kwargs.get('repetition_penalty', 1.2),
- 'return_full_text': kwargs.get('return_full_text', False)
- },
- 'stream': True,
- 'options': {
- 'id': kwargs.get('id', str(uuid4())),
- 'response_id': kwargs.get('response_id', str(uuid4())),
- 'is_retry': False,
- 'use_cache': False,
- 'web_search_id': ''
- }
- })
-
- for line in completion.iter_lines():
- if b'data' in line:
- line = loads(line.decode('utf-8').replace('data:', ''))
- token = line['token']['text']
-
- if token == '<|endoftext|>':
- break
- else:
- yield (token)
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+from requests import Session
+from uuid import uuid4
+from json import loads
+import os
+import json
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://gpt-gm.h2o.ai'
+model = ['falcon-40b', 'falcon-7b', 'llama-13b']
+supports_stream = True
+needs_auth = False
+working = True
+
+models = {
+ 'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
+ 'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
+ 'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b'
+}
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+ conversation = ''
+ for message in messages:
+ conversation += '%s: %s\n' % (message['role'], message['content'])
+
+ conversation += 'assistant: '
+ session = requests.Session()
+
+ response = session.get("https://gpt-gm.h2o.ai/")
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
+ "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
+ "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Upgrade-Insecure-Requests": "1",
+ "Sec-Fetch-Dest": "document",
+ "Sec-Fetch-Mode": "navigate",
+ "Sec-Fetch-Site": "same-origin",
+ "Sec-Fetch-User": "?1",
+ "Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"
+ }
+ data = {
+ "ethicsModalAccepted": "true",
+ "shareConversationsWithModelAuthors": "true",
+ "ethicsModalAcceptedAt": "",
+ "activeModel": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
+ "searchEnabled": "true"
+ }
+ response = session.post("https://gpt-gm.h2o.ai/settings", headers=headers, data=data)
+
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
+ "Accept": "*/*",
+ "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
+ "Content-Type": "application/json",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Referer": "https://gpt-gm.h2o.ai/"
+ }
+ data = {
+ "model": models[model]
+ }
+
+ conversation_id = session.post("https://gpt-gm.h2o.ai/conversation", headers=headers, json=data)
+ data = {
+ "inputs": conversation,
+ "parameters": {
+ "temperature": kwargs.get('temperature', 0.4),
+ "truncate": kwargs.get('truncate', 2048),
+ "max_new_tokens": kwargs.get('max_new_tokens', 1024),
+ "do_sample": kwargs.get('do_sample', True),
+ "repetition_penalty": kwargs.get('repetition_penalty', 1.2),
+ "return_full_text": kwargs.get('return_full_text', False)
+ },
+ "stream": True,
+ "options": {
+ "id": kwargs.get('id', str(uuid4())),
+ "response_id": kwargs.get('response_id', str(uuid4())),
+ "is_retry": False,
+ "use_cache": False,
+ "web_search_id": ""
+ }
+ }
+
+ response = session.post(f"https://gpt-gm.h2o.ai/conversation/{conversation_id.json()['conversationId']}", headers=headers, json=data)
+ generated_text = response.text.replace("\n", "").split("data:")
+ generated_text = json.loads(generated_text[-1])
+
+ return generated_text["generated_text"]
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
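Note that the rewritten `H2o._create_completion` returns only the final `generated_text` even though `supports_stream` stays `True`. A sketch of consuming the same endpoint incrementally, based on the removed streaming loop and assuming the event format is unchanged:

```
from json import loads

# assumes `session`, `headers`, `data`, and `conversation_id` from the code above
chat_id = conversation_id.json()["conversationId"]
completion = session.post(f"https://gpt-gm.h2o.ai/conversation/{chat_id}",
                          headers=headers, json=data, stream=True)

for line in completion.iter_lines():
    if b"data" in line:
        payload = loads(line.decode("utf-8").replace("data:", ""))
        token = payload["token"]["text"]
        if token == "<|endoftext|>":
            break
        print(token, end="")
```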
diff --git a/g4f/Provider/Providers/Liaobots.py b/g4f/Provider/Providers/Liaobots.py
index 76b13c31..75746c03 100644
--- a/g4f/Provider/Providers/Liaobots.py
+++ b/g4f/Provider/Providers/Liaobots.py
@@ -5,6 +5,7 @@ url = 'https://liaobots.com'
model = ['gpt-3.5-turbo', 'gpt-4']
supports_stream = True
needs_auth = True
+working = False
models = {
'gpt-4': {
diff --git a/g4f/Provider/Providers/Lockchat.py b/g4f/Provider/Providers/Lockchat.py
index d97bc67b..dd1edb84 100644
--- a/g4f/Provider/Providers/Lockchat.py
+++ b/g4f/Provider/Providers/Lockchat.py
@@ -2,15 +2,16 @@ import requests
import os
import json
from ...typing import sha256, Dict, get_type_hints
-url = 'http://super.lockchat.app'
+url = 'http://supertest.lockchat.app'
model = ['gpt-4', 'gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
payload = {
- "temperature": 0.7,
+ "temperature": temperature,
"messages": messages,
"model": model,
"stream": True,
@@ -18,7 +19,7 @@ def _create_completion(model: str, messages: list, stream: bool, temperature: fl
headers = {
"user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
}
- response = requests.post("http://super.lockchat.app/v1/chat/completions?auth=FnMNPlwZEnGFqvEc9470Vw==",
+ response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
json=payload, headers=headers, stream=True)
for token in response.iter_lines():
if b'The model: `gpt-4` does not exist' in token:
diff --git a/g4f/Provider/Providers/Theb.py b/g4f/Provider/Providers/Theb.py
index aa43ebc5..a78fb51f 100644
--- a/g4f/Provider/Providers/Theb.py
+++ b/g4f/Provider/Providers/Theb.py
@@ -9,6 +9,7 @@ url = 'https://theb.ai'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
@@ -20,7 +21,7 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
cmd = ['python3', f'{path}/helpers/theb.py', config]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
+
for line in iter(p.stdout.readline, b''):
yield line.decode('utf-8')
diff --git a/g4f/Provider/Providers/Vercel.py b/g4f/Provider/Providers/Vercel.py
index e5df9cf0..03d9be17 100644
--- a/g4f/Provider/Providers/Vercel.py
+++ b/g4f/Provider/Providers/Vercel.py
@@ -11,6 +11,7 @@ from ...typing import sha256, Dict, get_type_hints
url = 'https://play.vercel.ai'
supports_stream = True
needs_auth = False
+working = False
models = {
'claude-instant-v1': 'anthropic:claude-instant-v1',
@@ -41,122 +42,19 @@ vercel_models = {'anthropic:claude-instant-v1': {'id': 'anthropic:claude-instant
'id': 'huggingface:bigcode/santacoder', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigCode', 'instructions': 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. # the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'santacoder'}, 'cohere:command-medium-nightly': {'id': 'cohere:command-medium-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-medium-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'cohere:command-xlarge-nightly': {'id': 'cohere:command-xlarge-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-xlarge-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:gpt-4': {'id': 'openai:gpt-4', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'gpt-4', 'minBillingTier': 'pro', 'parameters': {'temperature': {'value': 0.7, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:code-cushman-001': {'id': 'openai:code-cushman-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-cushman-001'}, 'openai:code-davinci-002': {'id': 'openai:code-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-davinci-002'}, 'openai:gpt-3.5-turbo': {'id': 'openai:gpt-3.5-turbo', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.7, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 
1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-3.5-turbo'}, 'openai:text-ada-001': {'id': 'openai:text-ada-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-ada-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-babbage-001': {'id': 'openai:text-babbage-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-babbage-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-curie-001': {'id': 'openai:text-curie-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-curie-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-002': {'id': 'openai:text-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-002', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-003': {'id': 'openai:text-davinci-003', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-003', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}}
-# based on https://github.com/ading2210/vercel-llm-api // modified
-class Client:
- def __init__(self):
- self.session = requests.Session()
- self.headers = {
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110 Safari/537.36',
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
- 'Accept-Encoding': 'gzip, deflate, br',
- 'Accept-Language': 'en-US,en;q=0.5',
- 'Te': 'trailers',
- 'Upgrade-Insecure-Requests': '1'
- }
- self.session.headers.update(self.headers)
-
- def get_token(self):
- b64 = self.session.get('https://sdk.vercel.ai/openai.jpeg').text
- data = json.loads(base64.b64decode(b64))
-
- code = 'const globalThis = {data: `sentinel`}; function token() {return (%s)(%s)}' % (
- data['c'], data['a'])
-
- token_string = json.dumps(separators=(',', ':'),
- obj={'r': execjs.compile(code).call('token'), 't': data['t']})
-
- return base64.b64encode(token_string.encode()).decode()
-
- def get_default_params(self, model_id):
- return {key: param['value'] for key, param in vercel_models[model_id]['parameters'].items()}
-
- def generate(self, model_id: str, prompt: str, params: dict = {}):
- if not ':' in model_id:
- model_id = models[model_id]
-
- defaults = self.get_default_params(model_id)
-
- payload = defaults | params | {
- 'prompt': prompt,
- 'model': model_id,
- }
-
- headers = self.headers | {
- 'Accept-Encoding': 'gzip, deflate, br',
- 'Custom-Encoding': self.get_token(),
- 'Host': 'sdk.vercel.ai',
- 'Origin': 'https://sdk.vercel.ai',
- 'Referrer': 'https://sdk.vercel.ai',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-origin',
- }
-
- chunks_queue = queue.Queue()
- error = None
- response = None
-
- def callback(data):
- chunks_queue.put(data.decode())
-
- def request_thread():
- nonlocal response, error
- for _ in range(3):
- try:
- response = self.session.post('https://sdk.vercel.ai/api/generate',
- json=payload, headers=headers, content_callback=callback)
- response.raise_for_status()
-
- except Exception as e:
- if _ == 2:
- error = e
-
- else:
- continue
-
- thread = threading.Thread(target=request_thread, daemon=True)
- thread.start()
-
- text = ''
- index = 0
- while True:
- try:
- chunk = chunks_queue.get(block=True, timeout=0.1)
-
- except queue.Empty:
- if error:
- raise error
-
- elif response:
- break
-
- else:
- continue
-
- text += chunk
- lines = text.split('\n')
-
- if len(lines) - 1 > index:
- new = lines[index:-1]
- for word in new:
- yield json.loads(word)
- index = len(lines) - 1
-
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- yield 'Vercel is currently not working.'
return
+ # conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
- conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
-
- for message in messages:
- conversation += '%s: %s\n' % (message['role'], message['content'])
+ # for message in messages:
+ # conversation += '%s: %s\n' % (message['role'], message['content'])
- conversation += 'assistant: '
+ # conversation += 'assistant: '
- completion = Client().generate(model, conversation)
+ # completion = Client().generate(model, conversation)
- for token in completion:
- yield token
+ # for token in completion:
+ # yield token
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
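For reference, the removed `Client.get_token()` solved Vercel's anti-bot challenge by evaluating JavaScript served as `openai.jpeg`. Condensed, and assuming the endpoint still behaves this way (PyExecJS required, as before):

```
import base64
import json

import execjs
import requests

b64 = requests.get("https://sdk.vercel.ai/openai.jpeg").text
data = json.loads(base64.b64decode(b64))
code = 'const globalThis = {data: `sentinel`}; function token() {return (%s)(%s)}' % (data["c"], data["a"])
token_string = json.dumps({"r": execjs.compile(code).call("token"), "t": data["t"]},
                          separators=(",", ":"))
custom_encoding = base64.b64encode(token_string.encode()).decode()  # sent as the Custom-Encoding header
```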
diff --git a/g4f/Provider/Providers/Wewordle.py b/g4f/Provider/Providers/Wewordle.py
new file mode 100644
index 00000000..116ebb85
--- /dev/null
+++ b/g4f/Provider/Providers/Wewordle.py
@@ -0,0 +1,73 @@
+import os,sys
+import requests
+import json
+import random
+import time
+import string
+from ...typing import sha256, Dict, get_type_hints
+
+url = "https://wewordle.org/gptapi/v1/android/turbo"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+working = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ base = ''
+ for message in messages:
+ base += '%s: %s\n' % (message['role'], message['content'])
+ base += 'assistant:'
+ # randomize user id and app id
+ _user_id = ''.join(random.choices(f'{string.ascii_lowercase}{string.digits}', k=16))
+ _app_id = ''.join(random.choices(f'{string.ascii_lowercase}{string.digits}', k=31))
+ # make current date with format utc
+ _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
+ headers = {
+ 'accept': '*/*',
+ 'pragma': 'no-cache',
+ 'Content-Type': 'application/json',
+ 'Connection':'keep-alive'
+ # user agent android client
+ # 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 10; SM-G975F Build/QP1A.190711.020)',
+
+ }
+ data = {
+ "user": _user_id,
+ "messages": [
+ {"role": "user", "content": base}
+ ],
+ "subscriber": {
+ "originalPurchaseDate": None,
+ "originalApplicationVersion": None,
+ "allPurchaseDatesMillis": {},
+ "entitlements": {
+ "active": {},
+ "all": {}
+ },
+ "allPurchaseDates": {},
+ "allExpirationDatesMillis": {},
+ "allExpirationDates": {},
+ "originalAppUserId": f"$RCAnonymousID:{_app_id}",
+ "latestExpirationDate": None,
+ "requestDate": _request_date,
+ "latestExpirationDateMillis": None,
+ "nonSubscriptionTransactions": [],
+ "originalPurchaseDateMillis": None,
+ "managementURL": None,
+ "allPurchasedProductIdentifiers": [],
+ "firstSeen": _request_date,
+ "activeSubscriptions": []
+ }
+ }
+ response = requests.post(url, headers=headers, data=json.dumps(data))
+ if response.status_code == 200:
+ _json = response.json()
+ if 'message' in _json:
+ yield _json['message']['content']
+ else:
+ print(f"Error Occurred::{response.status_code}")
+ return None
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join(
+ [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/You.py b/g4f/Provider/Providers/You.py
index 02a2774c..3c321118 100644
--- a/g4f/Provider/Providers/You.py
+++ b/g4f/Provider/Providers/You.py
@@ -9,6 +9,7 @@ url = 'https://you.com'
model = 'gpt-3.5-turbo'
supports_stream = True
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
diff --git a/g4f/Provider/Providers/Yqcloud.py b/g4f/Provider/Providers/Yqcloud.py
index 488951dd..fae44682 100644
--- a/g4f/Provider/Providers/Yqcloud.py
+++ b/g4f/Provider/Providers/Yqcloud.py
@@ -9,6 +9,7 @@ model = [
]
supports_stream = True
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
diff --git a/g4f/Provider/Providers/__init__.py b/g4f/Provider/Providers/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/g4f/Provider/Providers/__init__.py
diff --git a/g4f/Provider/Providers/helpers/binghuan.py b/g4f/Provider/Providers/helpers/binghuan.py
new file mode 100644
index 00000000..203bbe45
--- /dev/null
+++ b/g4f/Provider/Providers/helpers/binghuan.py
@@ -0,0 +1,221 @@
+# Original Code From : https://gitler.moe/g4f/gpt4free
+# https://gitler.moe/g4f/gpt4free/src/branch/main/g4f/Provider/Providers/helpers/bing.py
+import sys
+import ssl
+import uuid
+import json
+import time
+import random
+import asyncio
+import certifi
+# import requests
+from curl_cffi import requests
+import websockets
+import browser_cookie3
+
+config = json.loads(sys.argv[1])
+
+ssl_context = ssl.create_default_context()
+ssl_context.load_verify_locations(certifi.where())
+
+
+conversationstyles = {
+    'gpt-4': [  # 'precise'
+ "nlu_direct_response_filter",
+ "deepleo",
+ "disable_emoji_spoken_text",
+ "responsible_ai_policy_235",
+ "enablemm",
+ "h3precise",
+ "rcsprtsalwlst",
+ "dv3sugg",
+ "autosave",
+ "clgalileo",
+ "gencontentv3"
+ ],
+ 'balanced': [
+ "nlu_direct_response_filter",
+ "deepleo",
+ "disable_emoji_spoken_text",
+ "responsible_ai_policy_235",
+ "enablemm",
+ "harmonyv3",
+ "rcsprtsalwlst",
+ "dv3sugg",
+ "autosave"
+ ],
+    'gpt-3.5-turbo': [  # 'creative' (uses h3imaginative, not h3precise)
+ "nlu_direct_response_filter",
+ "deepleo",
+ "disable_emoji_spoken_text",
+ "responsible_ai_policy_235",
+ "enablemm",
+ "h3imaginative",
+ "rcsprtsalwlst",
+ "dv3sugg",
+ "autosave",
+ "gencontentv3"
+ ]
+}
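+
+# Each list above is a set of Bing 'optionsSets' flags selecting a conversation
+# style; 'h3precise' and 'h3imaginative' appear to map to the precise and
+# creative modes respectively (an observation, not a documented API).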
+
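+# Frames on the ChatHub websocket are delimited by '\x1e' (the ASCII record
+# separator), which the helper below appends to each JSON message.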
+def format_message(msg: dict) -> str:  # renamed to avoid shadowing the builtin format()
+ return json.dumps(msg) + '\x1e'
+
+def get_token():
+    # Auth is no longer required for this endpoint, so bail out early; the
+    # cookie lookup below is intentionally unreachable, kept only for reference.
+    return
+
+ try:
+ cookies = {c.name: c.value for c in browser_cookie3.edge(domain_name='bing.com')}
+ return cookies['_U']
+ except:
+ print('Error: could not find bing _U cookie in edge browser.')
+ exit(1)
+
+class AsyncCompletion:
+ async def create(
+ prompt : str = None,
+ optionSets : list = None,
+ token : str = None): # No auth required anymore
+
+ create = None
+ for _ in range(5):
+ try:
+ create = requests.get('https://b.ai-huan.xyz/turing/conversation/create',
+ headers = {
+ 'host': 'b.ai-huan.xyz',
+ 'accept-encoding': 'gzip, deflate, br',
+ 'connection': 'keep-alive',
+ 'authority': 'b.ai-huan.xyz',
+ 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'max-age=0',
+ 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+ 'sec-ch-ua-arch': '"x86"',
+ 'sec-ch-ua-bitness': '"64"',
+ 'sec-ch-ua-full-version': '"110.0.1587.69"',
+ 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-model': '""',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-ch-ua-platform-version': '"15.0.0"',
+ 'sec-fetch-dest': 'document',
+ 'sec-fetch-mode': 'navigate',
+ 'sec-fetch-site': 'none',
+ 'sec-fetch-user': '?1',
+ 'upgrade-insecure-requests': '1',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
+ 'x-edge-shopping-flag': '1',
+ 'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
+ }
+ )
+
+                conversation = create.json()
+                conversationId = conversation['conversationId']
+                clientId = conversation['clientId']
+                conversationSignature = conversation['conversationSignature']
+                break  # success; without this the loop would retry all 5 times
+
+            except Exception:
+                time.sleep(0.5)
+                continue
+
+    if create is None:
+        raise Exception('Failed to create conversation.')
+
+    wss: websockets.WebSocketClientProtocol | None = None
+
+ wss = await websockets.connect('wss://sydney.vcanbb.chat/sydney/ChatHub', max_size = None, ssl = ssl_context,
+ extra_headers = {
+ 'accept': 'application/json',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'content-type': 'application/json',
+ 'sec-ch-ua': '"Not_A Brand";v="99", Microsoft Edge";v="110", "Chromium";v="110"',
+ 'sec-ch-ua-arch': '"x86"',
+ 'sec-ch-ua-bitness': '"64"',
+ 'sec-ch-ua-full-version': '"109.0.1518.78"',
+ 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-model': "",
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-ch-ua-platform-version': '"15.0.0"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'x-ms-client-request-id': str(uuid.uuid4()),
+ 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
+ 'Referer': 'https://b.ai-huan.xyz/search?q=Bing+AI&showconv=1&FORM=hpcodx',
+ 'Referrer-Policy': 'origin-when-cross-origin',
+ 'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
+ }
+ )
+
+    await wss.send(format_message({'protocol': 'json', 'version': 1}))
+ await wss.recv()
+
+ struct = {
+ 'arguments': [
+ {
+ 'source': 'cib',
+ 'optionsSets': optionSets,
+ 'isStartOfSession': True,
+ 'message': {
+ 'author': 'user',
+ 'inputMethod': 'Keyboard',
+ 'text': prompt,
+ 'messageType': 'Chat'
+ },
+ 'conversationSignature': conversationSignature,
+ 'participant': {
+ 'id': clientId
+ },
+ 'conversationId': conversationId
+ }
+ ],
+ 'invocationId': '0',
+ 'target': 'chat',
+ 'type': 4
+ }
+
+    await wss.send(format_message(struct))
+
+ base_string = ''
+
+ final = False
+ while not final:
+ objects = str(await wss.recv()).split('\x1e')
+ for obj in objects:
+ if obj is None or obj == '':
+ continue
+
+ response = json.loads(obj)
+ #print(response, flush=True, end='')
+            if response.get('type') == 1 and response['arguments'][0].get('messages'):
+ response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')
+
+ yield (response_text.replace(base_string, ''))
+ base_string = response_text
+
+ elif response.get('type') == 2:
+ final = True
+
+ await wss.close()
+
+# I think Bing doesn't really understand multi-message chats (based on the
+# prompt template), so flatten them into a single prompt.
+def convert(messages):
+ context = ""
+ for message in messages:
+ context += "[%s](#message)\n%s\n\n" % (message['role'],
+ message['content'])
+ return context
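+# Example (from the code above): convert([{'role': 'user', 'content': 'hi'}])
+# returns "[user](#message)\nhi\n\n"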
+
+async def run(optionSets, messages):
+ prompt = messages[-1]['content']
+    if len(messages) > 1:
+ prompt = convert(messages)
+ async for value in AsyncCompletion.create(prompt=prompt, optionSets=optionSets):
+ try:
+ print(value, flush=True, end='')
+ except UnicodeEncodeError as e:
+ # emoji encoding problem
+ print(value.encode('utf-8'), flush=True, end='')
+
+optionSet = conversationstyles[config['model']]
+asyncio.run(run(optionSet, config['messages'])) \ No newline at end of file
diff --git a/g4f/Provider/Providers/opchatgpts.py b/g4f/Provider/Providers/opchatgpts.py
new file mode 100644
index 00000000..0ff652fb
--- /dev/null
+++ b/g4f/Provider/Providers/opchatgpts.py
@@ -0,0 +1,42 @@
+import os
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://opchatgpts.net'
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+working = True
+
+def _create_completion(model: str, messages: list, stream: bool = False,
+                       temperature: float = 0.8, max_tokens: int = 1024,
+                       system_prompt: str = "Converse as if you were an AI assistant. Be friendly, creative.",
+                       **kwargs):
+
+ data = {
+ 'env': 'chatbot',
+ 'session': 'N/A',
+ 'prompt': "\n",
+ 'context': system_prompt,
+ 'messages': messages,
+        'newMessage': messages[-1]["content"],  # content of the latest message
+ 'userName': '<div class="mwai-name-text">User:</div>',
+ 'aiName': '<div class="mwai-name-text">AI:</div>',
+ 'model': 'gpt-3.5-turbo',
+ 'temperature': temperature,
+ 'maxTokens': max_tokens,
+ 'maxResults': 1,
+ 'apiKey': '',
+ 'service': 'openai',
+ 'embeddingsIndex': '',
+ 'stop': ''
+ }
+
+ response = requests.post('https://opchatgpts.net/wp-json/ai-chatbot/v1/chat', json=data).json()
+
+ if response["success"]:
+
+ return response["reply"] # `yield (response["reply"])` doesn't work
+
+ raise Exception("Request failed: " + response)
+
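+# A minimal usage sketch (illustrative only, not part of the original file):
+# unlike streaming providers, _create_completion here returns the reply string
+# directly instead of yielding chunks.
+#
+#   reply = _create_completion('gpt-3.5-turbo',
+#                              [{'role': 'user', 'content': 'Hello'}])
+#   print(reply)
+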
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join(
+ [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 269fa17e..ee434400 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -14,7 +14,16 @@ from .Providers import (
H2o,
ChatgptLogin,
DeepAi,
- GetGpt
+ GetGpt,
+ AItianhu,
+ EasyChat,
+ Acytoo,
+ DfeHub,
+ AiService,
+ BingHuan,
+ Wewordle,
+ ChatgptAi,
+ opchatgpts,
)
Palm = Bard
diff --git a/g4f/__init__.py b/g4f/__init__.py
index a0b4bac6..e5d3d4bf 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -1,12 +1,15 @@
import sys
from . import Provider
-from g4f.models import Model, ModelUtils
+from g4f import models
+logging = False
class ChatCompletion:
@staticmethod
- def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs):
+ def create(model: models.Model | str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs):
kwargs['auth'] = auth
+        if provider and not provider.working:
+ return f'{provider.__name__} is not working'
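+        # Each provider module sets a `working` flag (see the provider diffs
+        # above); known-broken providers are short-circuited here.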
if provider and provider.needs_auth and not auth:
print(
@@ -16,7 +19,7 @@ class ChatCompletion:
try:
if isinstance(model, str):
try:
- model = ModelUtils.convert[model]
+ model = models.ModelUtils.convert[model]
except KeyError:
raise Exception(f'The model: {model} does not exist')
@@ -27,7 +30,7 @@ class ChatCompletion:
f"ValueError: {engine.__name__} does not support 'stream' argument", file=sys.stderr)
sys.exit(1)
- print(f'Using {engine.__name__} provider')
+ if logging: print(f'Using {engine.__name__} provider')
return (engine._create_completion(model.name, messages, stream, **kwargs)
if stream else ''.join(engine._create_completion(model.name, messages, stream, **kwargs)))
diff --git a/g4f/models.py b/g4f/models.py
index ecf18e6d..3a049614 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -1,201 +1,244 @@
-from g4f import Provider
+from types import ModuleType
+from . import Provider
+from dataclasses import dataclass
+@dataclass
class Model:
- class model:
- name: str
- base_provider: str
- best_provider: str
-
- class gpt_35_turbo:
- name: str = 'gpt-3.5-turbo'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Forefront
-
- class gpt_4:
- name: str = 'gpt-4'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Bing
- best_providers: list = [Provider.Bing, Provider.Lockchat]
-
- class claude_instant_v1_100k:
- name: str = 'claude-instant-v1-100k'
- base_provider: str = 'anthropic'
- best_provider: Provider.Provider = Provider.Vercel
-
- class claude_instant_v1:
- name: str = 'claude-instant-v1'
- base_provider: str = 'anthropic'
- best_provider: Provider.Provider = Provider.Vercel
-
- class claude_v1_100k:
- name: str = 'claude-v1-100k'
- base_provider: str = 'anthropic'
- best_provider: Provider.Provider = Provider.Vercel
-
- class claude_v1:
- name: str = 'claude-v1'
- base_provider: str = 'anthropic'
- best_provider: Provider.Provider = Provider.Vercel
-
- class alpaca_7b:
- name: str = 'alpaca-7b'
- base_provider: str = 'replicate'
- best_provider: Provider.Provider = Provider.Vercel
-
- class stablelm_tuned_alpha_7b:
- name: str = 'stablelm-tuned-alpha-7b'
- base_provider: str = 'replicate'
- best_provider: Provider.Provider = Provider.Vercel
-
- class bloom:
- name: str = 'bloom'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class bloomz:
- name: str = 'bloomz'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class flan_t5_xxl:
- name: str = 'flan-t5-xxl'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class flan_ul2:
- name: str = 'flan-ul2'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class gpt_neox_20b:
- name: str = 'gpt-neox-20b'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class oasst_sft_4_pythia_12b_epoch_35:
- name: str = 'oasst-sft-4-pythia-12b-epoch-3.5'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class santacoder:
- name: str = 'santacoder'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.Vercel
-
- class command_medium_nightly:
- name: str = 'command-medium-nightly'
- base_provider: str = 'cohere'
- best_provider: Provider.Provider = Provider.Vercel
-
- class command_xlarge_nightly:
- name: str = 'command-xlarge-nightly'
- base_provider: str = 'cohere'
- best_provider: Provider.Provider = Provider.Vercel
-
- class code_cushman_001:
- name: str = 'code-cushman-001'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class code_davinci_002:
- name: str = 'code-davinci-002'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_ada_001:
- name: str = 'text-ada-001'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_babbage_001:
- name: str = 'text-babbage-001'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_curie_001:
- name: str = 'text-curie-001'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_davinci_002:
- name: str = 'text-davinci-002'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class text_davinci_003:
- name: str = 'text-davinci-003'
- base_provider: str = 'openai'
- best_provider: Provider.Provider = Provider.Vercel
-
- class palm:
- name: str = 'palm'
- base_provider: str = 'google'
- best_provider: Provider.Provider = Provider.Bard
-
-
- """ 'falcon-40b': Model.falcon_40b,
- 'falcon-7b': Model.falcon_7b,
- 'llama-13b': Model.llama_13b,"""
-
- class falcon_40b:
- name: str = 'falcon-40b'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.H2o
-
- class falcon_7b:
- name: str = 'falcon-7b'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.H2o
-
- class llama_13b:
- name: str = 'llama-13b'
- base_provider: str = 'huggingface'
- best_provider: Provider.Provider = Provider.H2o
-
+ name: str
+ base_provider: str
+ best_provider: ModuleType | None
+
+
+gpt_35_turbo = Model(
+ name="gpt-3.5-turbo",
+ base_provider="openai",
+ best_provider=Provider.Forefront,
+)
+
+gpt_4 = Model(
+ name="gpt-4",
+ base_provider="openai",
+ best_provider=Provider.Bing,
+)
+
+claude_instant_v1_100k = Model(
+ name="claude-instant-v1-100k",
+ base_provider="anthropic",
+ best_provider=Provider.Vercel,
+)
+
+claude_instant_v1 = Model(
+ name="claude-instant-v1",
+ base_provider="anthropic",
+ best_provider=Provider.Vercel,
+)
+
+claude_v1_100k = Model(
+ name="claude-v1-100k",
+ base_provider="anthropic",
+ best_provider=Provider.Vercel,
+)
+
+claude_v1 = Model(
+ name="claude-v1",
+ base_provider="anthropic",
+ best_provider=Provider.Vercel,
+)
+
+alpaca_7b = Model(
+ name="alpaca-7b",
+ base_provider="replicate",
+ best_provider=Provider.Vercel,
+)
+
+stablelm_tuned_alpha_7b = Model(
+ name="stablelm-tuned-alpha-7b",
+ base_provider="replicate",
+ best_provider=Provider.Vercel,
+)
+
+bloom = Model(
+ name="bloom",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+bloomz = Model(
+ name="bloomz",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+flan_t5_xxl = Model(
+ name="flan-t5-xxl",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+flan_ul2 = Model(
+ name="flan-ul2",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+gpt_neox_20b = Model(
+ name="gpt-neox-20b",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+oasst_sft_4_pythia_12b_epoch_35 = Model(
+ name="oasst-sft-4-pythia-12b-epoch-3.5",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+santacoder = Model(
+ name="santacoder",
+ base_provider="huggingface",
+ best_provider=Provider.Vercel,
+)
+
+command_medium_nightly = Model(
+ name="command-medium-nightly",
+ base_provider="cohere",
+ best_provider=Provider.Vercel,
+)
+
+command_xlarge_nightly = Model(
+ name="command-xlarge-nightly",
+ base_provider="cohere",
+ best_provider=Provider.Vercel,
+)
+
+code_cushman_001 = Model(
+ name="code-cushman-001",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+code_davinci_002 = Model(
+ name="code-davinci-002",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_ada_001 = Model(
+ name="text-ada-001",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_babbage_001 = Model(
+ name="text-babbage-001",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_curie_001 = Model(
+ name="text-curie-001",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_davinci_002 = Model(
+ name="text-davinci-002",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+text_davinci_003 = Model(
+ name="text-davinci-003",
+ base_provider="openai",
+ best_provider=Provider.Vercel,
+)
+
+palm = Model(
+ name="palm",
+ base_provider="google",
+ best_provider=Provider.Bard,
+)
+
+falcon_40b = Model(
+ name="falcon-40b",
+ base_provider="huggingface",
+ best_provider=Provider.H2o,
+)
+
+falcon_7b = Model(
+ name="falcon-7b",
+ base_provider="huggingface",
+ best_provider=Provider.H2o,
+)
+
+llama_13b = Model(
+ name="llama-13b",
+ base_provider="huggingface",
+ best_provider=Provider.H2o,
+)
+
+gpt_35_turbo_16k = Model(
+ name="gpt-3.5-turbo-16k",
+ base_provider="openai",
+ best_provider=Provider.EasyChat,
+)
+
+gpt_35_turbo_0613 = Model(
+ name="gpt-3.5-turbo-0613",
+ base_provider="openai",
+ best_provider=Provider.EasyChat,
+)
+
+gpt_35_turbo_16k_0613 = Model(
+ name="gpt-3.5-turbo-16k-0613",
+ base_provider="openai",
+ best_provider=Provider.EasyChat,
+)
+
+gpt_4_32k = Model(name="gpt-4-32k", base_provider="openai", best_provider=None)
+
+gpt_4_0613 = Model(name="gpt-4-0613", base_provider="openai", best_provider=None)
+
+
class ModelUtils:
- convert: dict = {
- 'gpt-3.5-turbo': Model.gpt_35_turbo,
- 'gpt-4': Model.gpt_4,
-
- 'claude-instant-v1-100k': Model.claude_instant_v1_100k,
- 'claude-v1-100k': Model.claude_v1_100k,
- 'claude-instant-v1': Model.claude_instant_v1,
- 'claude-v1': Model.claude_v1,
-
- 'alpaca-7b': Model.alpaca_7b,
- 'stablelm-tuned-alpha-7b': Model.stablelm_tuned_alpha_7b,
-
- 'bloom': Model.bloom,
- 'bloomz': Model.bloomz,
-
- 'flan-t5-xxl': Model.flan_t5_xxl,
- 'flan-ul2': Model.flan_ul2,
-
- 'gpt-neox-20b': Model.gpt_neox_20b,
- 'oasst-sft-4-pythia-12b-epoch-3.5': Model.oasst_sft_4_pythia_12b_epoch_35,
- 'santacoder': Model.santacoder,
-
- 'command-medium-nightly': Model.command_medium_nightly,
- 'command-xlarge-nightly': Model.command_xlarge_nightly,
-
- 'code-cushman-001': Model.code_cushman_001,
- 'code-davinci-002': Model.code_davinci_002,
-
- 'text-ada-001': Model.text_ada_001,
- 'text-babbage-001': Model.text_babbage_001,
- 'text-curie-001': Model.text_curie_001,
- 'text-davinci-002': Model.text_davinci_002,
- 'text-davinci-003': Model.text_davinci_003,
-
- 'palm2': Model.palm,
- 'palm': Model.palm,
- 'google': Model.palm,
- 'google-bard': Model.palm,
- 'google-palm': Model.palm,
- 'bard': Model.palm,
-
- 'falcon-40b': Model.falcon_40b,
- 'falcon-7b': Model.falcon_7b,
- 'llama-13b': Model.llama_13b,
- } \ No newline at end of file
+ convert: dict[str, Model] = {
+ "gpt-3.5-turbo": gpt_35_turbo,
+ "gpt-3.5-turbo-16k": gpt_35_turbo_16k,
+ "gpt-3.5-turbo-0613": gpt_35_turbo_0613,
+ "gpt-3.5-turbo-16k-0613": gpt_35_turbo_16k_0613,
+ "gpt-4": gpt_4,
+ "gpt-4-32k": gpt_4_32k,
+ "gpt-4-0613": gpt_4_0613,
+ "claude-instant-v1-100k": claude_instant_v1_100k,
+ "claude-v1-100k": claude_v1_100k,
+ "claude-instant-v1": claude_instant_v1,
+ "claude-v1": claude_v1,
+ "alpaca-7b": alpaca_7b,
+ "stablelm-tuned-alpha-7b": stablelm_tuned_alpha_7b,
+ "bloom": bloom,
+ "bloomz": bloomz,
+ "flan-t5-xxl": flan_t5_xxl,
+ "flan-ul2": flan_ul2,
+ "gpt-neox-20b": gpt_neox_20b,
+ "oasst-sft-4-pythia-12b-epoch-3.5": oasst_sft_4_pythia_12b_epoch_35,
+ "santacoder": santacoder,
+ "command-medium-nightly": command_medium_nightly,
+ "command-xlarge-nightly": command_xlarge_nightly,
+ "code-cushman-001": code_cushman_001,
+ "code-davinci-002": code_davinci_002,
+ "text-ada-001": text_ada_001,
+ "text-babbage-001": text_babbage_001,
+ "text-curie-001": text_curie_001,
+ "text-davinci-002": text_davinci_002,
+ "text-davinci-003": text_davinci_003,
+ "palm2": palm,
+ "palm": palm,
+ "google": palm,
+ "google-bard": palm,
+ "google-palm": palm,
+ "bard": palm,
+ "falcon-40b": falcon_40b,
+ "falcon-7b": falcon_7b,
+ "llama-13b": llama_13b,
+ }
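+
+# A minimal lookup sketch (illustrative only, using names defined above):
+#
+#   model = ModelUtils.convert['bard']  # alias resolution
+#   model.name                          # 'palm'
+#   model.best_provider                 # the g4f.Provider.Bard module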
diff --git a/pyproject.toml b/pyproject.toml
deleted file mode 100644
index 10718ffa..00000000
--- a/pyproject.toml
+++ /dev/null
@@ -1,21 +0,0 @@
-[build-system]
-requires = ["setuptools", "wheel"]
-build-backend = "setuptools.build_meta"
-
-[project]
-name = "gpt4free"
-version = "0.2.0"
-description = ""
-authors = []
-license = { text = "GPL-3.0" }
-readme = "README.md"
-requires-python = ">=3.10"
-dynamic = ["dependencies"]
-
-[tool.setuptools.packages.find]
-exclude = ["**/*.txt", "/.v1/*"]
-include = ["g4f"]
-
-
-[tool.setuptools.dynamic]
-dependencies = {file = ["requirements.txt"]} \ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 1ec8ad45..7f51ed28 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,23 +1,8 @@
-websocket-client
requests
-tls-client
-pypasser
-names
-colorama
-curl_cffi
-streamlit
-selenium
-fake-useragent
-twocaptcha
-streamlit-chat@https://github.com/AI-Yash/st-chat/archive/refs/pull/24/head.zip
-pydantic
-pymailtm
-Levenshtein
-retrying
-mailgw_temporary_email
pycryptodome
-random-password-generator
-numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability
-tornado>=6.3.2 # not directly required, pinned by Snyk to avoid a vulnerability
-PyExecJS
-browser_cookie3 \ No newline at end of file
+curl_cffi
+aiohttp
+certifi
+browser_cookie3
+websockets
+pyexecjs
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..a1cf37b2
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,36 @@
+from setuptools import setup, find_packages
+import codecs
+import os
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
+ long_description = "\n" + fh.read()
+
+with open('requirements.txt') as f:
+ required = f.read().splitlines()
+
+VERSION = '0.0.1.3'
+DESCRIPTION = 'The official gpt4free repository | various collection of powerful language models'
+
+# Setting up
+setup(
+ name="g4f",
+ version=VERSION,
+ author="Tekky",
+ author_email="<support@g4f.ai>",
+ description=DESCRIPTION,
+ long_description_content_type="text/markdown",
+ long_description=long_description,
+ packages=find_packages(),
+ install_requires=required,
+ keywords=['python', 'chatbot', 'reverse-engineering', 'openai', 'chatbots', 'gpt', 'language-model', 'gpt-3', 'gpt3', 'openai-api', 'gpt-4', 'gpt4', 'chatgpt', 'chatgpt-api', 'openai-chatgpt', 'chatgpt-free', 'chatgpt-4', 'chatgpt4','chatgpt4-api', 'free', 'free-gpt', 'gpt4free', 'g4f'],
+ classifiers=[
+ "Development Status :: 2 - Pre-Alpha",
+ "Intended Audience :: Developers",
+ "Programming Language :: Python :: 3",
+ "Operating System :: Unix",
+ "Operating System :: MacOS :: MacOS X",
+ "Operating System :: Microsoft :: Windows",
+ ]
+) \ No newline at end of file
diff --git a/testing/readme_table.py b/testing/readme_table.py
index d798ae9d..4d078034 100644
--- a/testing/readme_table.py
+++ b/testing/readme_table.py
@@ -13,7 +13,15 @@ from g4f.Provider import (
H2o,
ChatgptLogin,
DeepAi,
- GetGpt
+ GetGpt,
+ AItianhu,
+ EasyChat,
+ Acytoo,
+ DfeHub,
+ AiService,
+ BingHuan,
+ Wewordle,
+ ChatgptAi,
)
from urllib.parse import urlparse
@@ -33,7 +41,15 @@ providers = [
H2o,
ChatgptLogin,
DeepAi,
- GetGpt
+ GetGpt,
+ AItianhu,
+ EasyChat,
+ Acytoo,
+ DfeHub,
+ AiService,
+ BingHuan,
+ Wewordle,
+ ChatgptAi,
]
# | Website| Provider| gpt-3.5-turbo | gpt-4 | Supports Stream | Status | Needs Auth |
@@ -41,12 +57,15 @@ print('| Website| Provider| gpt-3.5 | gpt-4 | Streaming | Status | Auth |')
print('| --- | --- | --- | --- | --- | --- | --- |')
for provider in providers:
+
parsed_url = urlparse(provider.url)
- name = f"`g4f.Provider{provider.__name__.split('.')[-1]}`"
+ name = f"`g4f.Provider.{provider.__name__.split('.')[-1]}`"
url = f'[{parsed_url.netloc}]({provider.url})'
has_gpt4 = '✔️' if 'gpt-4' in provider.model else '❌'
has_gpt3_5 = '✔️' if 'gpt-3.5-turbo' in provider.model else '❌'
streaming = '✔️' if provider.supports_stream else '❌'
needs_auth = '✔️' if provider.needs_auth else '❌'
- print(f'| {url} | {name} | {has_gpt3_5} | {has_gpt4} | {streaming} | ![Active](https://img.shields.io/badge/Active-brightgreen) | {needs_auth} |') \ No newline at end of file
+ working = '![Active](https://img.shields.io/badge/Active-brightgreen)' if provider.working else '![Inactive](https://img.shields.io/badge/Inactive-red)'
+
+ print(f'| {url} | {name} | {has_gpt3_5} | {has_gpt4} | {streaming} | {working} | {needs_auth} |') \ No newline at end of file
diff --git a/testing/test.py b/testing/test.py
new file mode 100644
index 00000000..ebb2b16d
--- /dev/null
+++ b/testing/test.py
@@ -0,0 +1,12 @@
+import g4f
+
+# Explicitly select a provider
+stream = False
+response = g4f.ChatCompletion.create(
+    model='gpt-3.5-turbo',
+    provider=g4f.Provider.Yqcloud,
+    messages=[{"role": "user", "content": "hello"}],
+    stream=stream)
+
+if stream:
+ for message in response:
+ print(message)
+else:
+ print(response) \ No newline at end of file