From 7158e3232bb005cab6c16b069a792e2a0d6af012 Mon Sep 17 00:00:00 2001
From: najam-tariq <103676132+najam-tariq@users.noreply.github.com>
Date: Tue, 4 Jul 2023 21:29:19 -0400
Subject: some more providers

---
 README.md                          |  4 ++++
 g4f/Provider/Providers/AItianhu.py | 36 ++++++++++++++++++++++++++++++
 g4f/Provider/Providers/Acytoo.py   | 41 +++++++++++++++++++++++++++++++++
 g4f/Provider/Providers/DFEHub.py   | 44 ++++++++++++++++++++++++++++++++++++
 g4f/Provider/Providers/EasyChat.py | 43 +++++++++++++++++++++++++++++++++++
 g4f/Provider/__init__.py           |  6 +++++-
 6 files changed, 173 insertions(+), 1 deletion(-)
 create mode 100644 g4f/Provider/Providers/AItianhu.py
 create mode 100644 g4f/Provider/Providers/Acytoo.py
 create mode 100644 g4f/Provider/Providers/DFEHub.py
 create mode 100644 g4f/Provider/Providers/EasyChat.py

diff --git a/README.md b/README.md
index c31dc88a..b2a53a8d 100644
--- a/README.md
+++ b/README.md
@@ -163,6 +163,10 @@ for token in chat_completion:
 | [chat.getgpt.world](https://chat.getgpt.world/) | `g4f.Provider.GetGpt` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
 | [chatgptlogin.ac](https://chatgptlogin.ac) | `g4f.Provider.ChatgptLogin` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
 | [chat-gpt.org](https://chat-gpt.org/chat) | `g4f.Provider.Aichat` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat.acytoo.com](https://chat.acytoo.com) | `g4f.Provider.Acytoo` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [aitianhu.com](https://aitianhu.com) | `g4f.Provider.AItianhu` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat.dfehub.com](https://chat.dfehub.com) | `g4f.Provider.DFEHub` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [free.easychat.work](https://free.easychat.work) | `g4f.Provider.EasyChat` | ✔️ | ❌ | ❌ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
 
 ### Other Models
 
diff --git a/g4f/Provider/Providers/AItianhu.py b/g4f/Provider/Providers/AItianhu.py
new file mode 100644
index 00000000..d3e6a45f
--- /dev/null
+++ b/g4f/Provider/Providers/AItianhu.py
@@ -0,0 +1,36 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+
+url = "https://www.aitianhu.com/api/chat-process"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    base = ''
+    for message in messages:
+        base += '%s: %s\n' % (message['role'], message['content'])
+    base += 'assistant:'
+
+    headers = {
+        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+    }
+    data = {
+        "prompt": base,
+        "options": {},
+        "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+        "temperature": 0.8,
+        "top_p": 1
+    }
+    response = requests.post(url, headers=headers, json=data)
+    if response.status_code == 200:
+        lines = response.text.strip().split('\n')
+        res = json.loads(lines[-1])
+        yield res['text']
+    else:
+        print(f"Error Occurred::{response.status_code}")
+        return None
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Acytoo.py b/g4f/Provider/Providers/Acytoo.py
new file mode 100644
index 00000000..06083eb5
--- /dev/null
+++ b/g4f/Provider/Providers/Acytoo.py
@@ -0,0 +1,41 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+
+url = "https://chat.acytoo.com/api/completions"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    base = ''
+    for message in messages:
+        base += '%s: %s\n' % (message['role'], message['content'])
+    base += 'assistant:'
+
+    headers = {
+        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+    }
+    data = {
+        "key": "",
+        "model": "gpt-3.5-turbo",
+        "messages": [
+            {
+                "role": "user",
+                "content": base,
+                "createdAt": 1688518523500
+            }
+        ],
+        "temperature": 1,
+        "password": ""
+    }
+
+    response = requests.post(url, headers=headers, data=json.dumps(data))
+    if response.status_code == 200:
+        yield response.text
+    else:
+        print(f"Error Occurred::{response.status_code}")
+        return None
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/DFEHub.py b/g4f/Provider/Providers/DFEHub.py
new file mode 100644
index 00000000..1bbdd01e
--- /dev/null
+++ b/g4f/Provider/Providers/DFEHub.py
@@ -0,0 +1,44 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+
+url = "https://chat.dfehub.com/api/chat"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    base = ''
+    for message in messages:
+        base += '%s: %s\n' % (message['role'], message['content'])
+    base += 'assistant:'
+
+    headers = {
+        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+    }
+    data = {
+        "model": {
+            "id": "gpt-3.5-turbo",
+            "name": "GPT-3.5",
+            "maxLength": 12000,
+            "tokenLimit": 4000
+        },
+        "messages": [
+            {
+                "role": "user",
+                "content": base
+            }
+        ],
+        "key": "",
+        "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+        "temperature": 1
+    }
+    response = requests.post(url, headers=headers, data=json.dumps(data))
+    if response.status_code == 200:
+        yield response.text
+    else:
+        print(f"Error Occurred::{response.status_code}")
+        return None
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/EasyChat.py b/g4f/Provider/Providers/EasyChat.py
new file mode 100644
index 00000000..9f4aa7b2
--- /dev/null
+++ b/g4f/Provider/Providers/EasyChat.py
@@ -0,0 +1,43 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+import json
+
+url = "https://free.easychat.work/api/openai/v1/chat/completions"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    ''' limited to 240 messages/hour'''
+    base = ''
+    for message in messages:
+        base += '%s: %s\n' % (message['role'], message['content'])
+    base += 'assistant:'
+
+    headers = {
+        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+    }
+
+    data = {
+        "messages": [
+            {"role": "system", "content": "You are ChatGPT, a large language model trained by OpenAI."},
+            {"role": "user", "content": base}
+        ],
+        "stream": False,
+        "model": "gpt-3.5-turbo",
+        "temperature": 0.5,
+        "presence_penalty": 0,
+        "frequency_penalty": 0,
+        "top_p": 1
+    }
+
+    response = requests.post(url, headers=headers, json=data)
+    if response.status_code == 200:
+        response = response.json()
+        yield response['choices'][0]['message']['content']
+    else:
+        print(f"Error Occurred::{response.status_code}")
+        return None
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 269fa17e..3a86291d 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -14,7 +14,11 @@ from .Providers import (
     H2o,
     ChatgptLogin,
     DeepAi,
-    GetGpt
+    GetGpt,
+    AItianhu,
+    EasyChat,
+    Acytoo,
+    DFEHub,
 )
 
 Palm = Bard

--
cgit v1.2.3
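
Not part of the patch, but for context: a minimal sketch of how one of the providers added above could be exercised through the `g4f.ChatCompletion.create` interface described in the project's README. The model name, prompt text, and the choice of `Acytoo` are only illustrative; all four new providers declare `supports_stream = False`, so the call is made without streaming.

```python
import g4f

# Non-streaming request routed through one of the providers added in this patch.
# Acytoo, AItianhu, DFEHub and EasyChat all set supports_stream = False, so the
# reply is returned in one piece rather than token by token.
response = g4f.ChatCompletion.create(
    model='gpt-3.5-turbo',
    provider=g4f.Provider.Acytoo,  # or AItianhu / DFEHub / EasyChat
    messages=[{'role': 'user', 'content': 'Hello, who are you?'}],
    stream=False,
)
print(response)
```

The same pattern applies to the other three providers by swapping the `provider` argument, since each exposes the common `_create_completion(model, messages, stream)` entry point shown in the diff.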