-rw-r--r-- | README.md                    | 10
-rw-r--r-- | g4f/Provider/Acytoo.py       |  7
-rw-r--r-- | g4f/Provider/Aichat.py       |  6
-rw-r--r-- | g4f/Provider/EasyChat.py     | 39
-rw-r--r-- | g4f/Provider/Equing.py       |  3
-rw-r--r-- | g4f/Provider/H2o.py          |  2
-rw-r--r-- | g4f/Provider/Theb.py         | 87
-rw-r--r-- | setup.py                     |  7
-rw-r--r-- | testing/test_interference.py |  2
9 files changed, 127 insertions, 36 deletions
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -1,10 +1,10 @@
 ![248433934-7886223b-c1d1-4260-82aa-da5741f303bb](https://github.com/xtekky/gpt4free/assets/98614666/ea012c87-76e0-496a-8ac4-e2de090cc6c9)
 
-By using this repository or any code related to it, you agree to the [legal notice](./LEGAL_NOTICE.md). The author is not responsible for any copies, forks, or reuploads made by other users. This is the author's only account and repository. To prevent impersonation or irresponsible actions, you may comply with the GNU GPL license this Repository uses.
+By using this repository or any code related to it, you agree to the [legal notice](./LEGAL_NOTICE.md). The author is not responsible for any copies, forks, reuploads made by other users, or anything else related to gpt4free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses.
 
-This (quite censored) New Version of gpt4free, was just released, it may contain bugs, open an issue or contribute a PR when encountering one, some features were disabled.
-Docker is for now not available but I would be happy if someone contributes a PR. The g4f GUI will be uploaded soon enough.
+This (quite censored) New Version of gpt4free, was just released so it may contain bugs. Please open an issue or contribute a PR when encountering one.
+P.S: Docker is for now not available but I would be happy if someone contributes a PR. The g4f GUI will be uploaded soon enough.
 
 ### New
 - pypi package:
@@ -351,7 +351,7 @@ for message in response:
 
 ## ChatGPT clone
 
-> Currently implementing new features and trying to scale it, please be patient it may be unstable
+> We are currently implementing new features and trying to scale it, but please be patient as it may be unstable
 > https://chat.g4f.ai/chat
 > This site was developed by me and includes **gpt-4/3.5**, **internet access** and **gpt-jailbreak's** like DAN
 > Run locally here: https://github.com/xtekky/chatgpt-clone
@@ -384,4 +384,4 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 <a href="https://github.com/xtekky/gpt4free/stargazers">
 <img width="500" alt="Star History Chart" src="https://api.star-history.com/svg?repos=xtekky/gpt4free&type=Date">
-</a>
+</a>
\ No newline at end of file
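The hunk context above (`for message in response:`) points at the README's streaming example. A minimal sketch of that usage, assuming the `g4f` pypi package and its `ChatCompletion.create` API described in the README rather than shown in this diff:

```python
# Minimal sketch, assuming the g4f package exposes ChatCompletion.create
# as the README describes; not taken from this diff.
import g4f

response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)
for message in response:
    print(message, end="")
```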
diff --git a/g4f/Provider/Acytoo.py b/g4f/Provider/Acytoo.py
index 32c67c0c..2edd9efd 100644
--- a/g4f/Provider/Acytoo.py
+++ b/g4f/Provider/Acytoo.py
@@ -19,11 +19,12 @@ class Acytoo(BaseProvider):
         **kwargs: Any,
     ) -> CreateResult:
         headers = _create_header()
-        payload = _create_payload(messages)
+        payload = _create_payload(messages, kwargs.get('temperature', 0.5))
 
         url = "https://chat.acytoo.com/api/completions"
         response = requests.post(url=url, headers=headers, json=payload)
         response.raise_for_status()
+        response.encoding = "utf-8"
         yield response.text
 
 
@@ -34,7 +35,7 @@ def _create_header():
     }
 
 
-def _create_payload(messages: list[dict[str, str]]):
+def _create_payload(messages: list[dict[str, str]], temperature):
     payload_messages = [
         message | {"createdAt": int(time.time()) * 1000} for message in messages
     ]
@@ -42,6 +43,6 @@ def _create_payload(messages: list[dict[str, str]]):
         "key": "",
         "model": "gpt-3.5-turbo",
         "messages": payload_messages,
-        "temperature": 1,
+        "temperature": temperature,
         "password": "",
     }
diff --git a/g4f/Provider/Aichat.py b/g4f/Provider/Aichat.py
index 6992d071..a1d90db7 100644
--- a/g4f/Provider/Aichat.py
+++ b/g4f/Provider/Aichat.py
@@ -40,9 +40,9 @@ class Aichat(BaseProvider):
 
         json_data = {
             "message": base,
-            "temperature": 1,
+            "temperature": kwargs.get('temperature', 0.5),
             "presence_penalty": 0,
-            "top_p": 1,
+            "top_p": kwargs.get('top_p', 1),
             "frequency_penalty": 0,
         }
 
@@ -52,4 +52,6 @@ class Aichat(BaseProvider):
             json=json_data,
         )
         response.raise_for_status()
+        if not response.json()['response']:
+            raise Exception("Error Response: " + response.json())
        yield response.json()["message"]
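A minimal sketch of how the sampling kwargs threaded through above might be passed in. The `create_completion` signatures and the `0.5`/`1` defaults come from the diff; the import path and the calling code around them are assumptions.

```python
# Sketch only: assumes these providers are importable from g4f.Provider,
# matching the repository layout; not a verified end-to-end example.
from g4f.Provider import Acytoo, Aichat

messages = [{"role": "user", "content": "Say hello in one short sentence."}]

# Acytoo: temperature is now read from kwargs (default 0.5 instead of a fixed 1).
for text in Acytoo.create_completion(
        model="gpt-3.5-turbo", messages=messages, stream=False, temperature=0.2):
    print(text)

# Aichat: both temperature (default 0.5) and top_p (default 1) are forwarded.
for text in Aichat.create_completion(
        model="gpt-3.5-turbo", messages=messages, stream=False,
        temperature=0.7, top_p=0.9):
    print(text)
```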
diff --git a/g4f/Provider/EasyChat.py b/g4f/Provider/EasyChat.py
index 59c46ffa..2a61346c 100644
--- a/g4f/Provider/EasyChat.py
+++ b/g4f/Provider/EasyChat.py
@@ -10,6 +10,7 @@ class EasyChat(BaseProvider):
     url = "https://free.easychat.work"
     supports_stream = True
     supports_gpt_35_turbo = True
+    working = True
 
     @staticmethod
     def create_completion(
@@ -25,6 +26,7 @@ class EasyChat(BaseProvider):
             "https://chat2.fastgpt.me",
             "https://chat3.fastgpt.me",
             "https://chat4.fastgpt.me",
+            "https://gxos1h1ddt.fastgpt.me"
         ]
         server = active_servers[kwargs.get("active_server", 0)]
         headers = {
@@ -34,9 +36,17 @@ class EasyChat(BaseProvider):
             "content-type": "application/json",
             "origin": f"{server}",
             "referer": f"{server}/",
-            "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
-            "x-requested-with": "XMLHttpRequest",
+            'plugins': '0',
+            'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Windows"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
+            'usesearch': 'false',
+            'x-requested-with': 'XMLHttpRequest'
         }
 
         json_data = {
@@ -57,14 +67,25 @@ class EasyChat(BaseProvider):
             f"{server}/api/openai/v1/chat/completions",
             headers=headers,
             json=json_data,
+            stream=stream,
         )
-
-        response.raise_for_status()
-        print(response.text)
-        for chunk in response.iter_lines():
-            if b"content" in chunk:
-                data = json.loads(chunk.decode().split("data: ")[1])
-                yield data["choices"][0]["delta"]["content"]
+        if response.status_code == 200:
+            if stream == False:
+                json_data = response.json()
+                if "choices" in json_data:
+                    yield json_data["choices"][0]["message"]["content"]
+                else:
+                    yield Exception("No response from server")
+            else:
+
+                for chunk in response.iter_lines():
+                    if b"content" in chunk:
+                        splitData = chunk.decode().split("data: ")
+                        if len(splitData) > 1:
+                            yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
+        else:
+            yield Exception(f"Error {response.status_code} from server")
+
 
     @classmethod
     @property
diff --git a/g4f/Provider/Equing.py b/g4f/Provider/Equing.py
index bcf6cff9..90c865d9 100644
--- a/g4f/Provider/Equing.py
+++ b/g4f/Provider/Equing.py
@@ -53,6 +53,9 @@ class Equing(ABC):
         response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
             headers=headers, json=json_data, stream=stream)
 
+        if not stream:
+            yield response.json()["choices"][0]["message"]["content"]
+            return
         for line in response.iter_content(chunk_size=1024):
             if line:
diff --git a/g4f/Provider/H2o.py b/g4f/Provider/H2o.py
index c2492e59..f9b799bb 100644
--- a/g4f/Provider/H2o.py
+++ b/g4f/Provider/H2o.py
@@ -75,6 +75,8 @@ class H2o(BaseProvider):
             headers=headers,
             json=data,
         )
+        response.raise_for_status()
+        response.encoding = "utf-8"
         generated_text = response.text.replace("\n", "").split("data:")
         generated_text = json.loads(generated_text[-1])
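A sketch of exercising EasyChat's new stream/non-stream branches; Equing and H2o receive similar treatment (a non-stream early return and explicit UTF-8 decoding, respectively). The `stream` flag and the `active_server` index come from the diff; the import path and surrounding code are illustrative.

```python
# Sketch only: assumes the provider is importable from g4f.Provider.
from g4f.Provider import EasyChat

messages = [{"role": "user", "content": "ping"}]

# Non-streaming: choices[0].message.content is yielded in one piece.
for text in EasyChat.create_completion(
        model="gpt-3.5-turbo", messages=messages, stream=False, active_server=0):
    print(text)

# Streaming: delta chunks are yielded as they arrive from the event stream.
for delta in EasyChat.create_completion(
        model="gpt-3.5-turbo", messages=messages, stream=True):
    print(delta, end="", flush=True)
```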
diff --git a/g4f/Provider/Theb.py b/g4f/Provider/Theb.py
index c6fd6f19..09c94c24 100644
--- a/g4f/Provider/Theb.py
+++ b/g4f/Provider/Theb.py
@@ -1,16 +1,15 @@
-import json
-
-from curl_cffi import requests
-
+import json,random,requests
+# from curl_cffi import requests
 from ..typing import Any, CreateResult
 from .base_provider import BaseProvider
 
 
 class Theb(BaseProvider):
     url = "https://theb.ai"
-    working = False
+    working = True
     supports_stream = True
     supports_gpt_35_turbo = True
+    needs_auth = True
 
     @staticmethod
     def create_completion(
@@ -19,21 +18,79 @@ class Theb(BaseProvider):
         stream: bool,
         **kwargs: Any,
     ) -> CreateResult:
-        prompt = messages[-1]["content"]
-
+        conversation = ''
+        for message in messages:
+            conversation += '%s: %s\n' % (message['role'], message['content'])
+
+        conversation += 'assistant: '
+        auth = kwargs.get("auth", {
+            "bearer_token":"free",
+            "org_id":"theb",
+        })
+        bearer_token = auth["bearer_token"]
+        org_id = auth["org_id"]
         headers = {
-            "accept": "application/json, text/plain, */*",
-            "content-type": "application/json",
+            'authority': 'beta.theb.ai',
+            'accept': 'text/event-stream',
+            'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+            'authorization': 'Bearer '+bearer_token,
+            'content-type': 'application/json',
+            'origin': 'https://beta.theb.ai',
+            'referer': 'https://beta.theb.ai/home',
+            'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Windows"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
+            'x-ai-model': 'ee8d4f29cb7047f78cbe84313ed6ace8',
         }
+        # generate 10 random number
+        # 0.1 - 0.9
+        req_rand = random.randint(100000000, 9999999999)
 
-        json_data: dict[str, Any] = {"prompt": prompt, "options": {}}
+        json_data: dict[str, Any] = {
+            "text": conversation,
+            "category": "04f58f64a4aa4191a957b47290fee864",
+            "model": "ee8d4f29cb7047f78cbe84313ed6ace8",
+            "model_params": {
+                "system_prompt": "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}",
+                "temperature": kwargs.get("temperature", 1),
+                "top_p": kwargs.get("top_p", 1),
+                "frequency_penalty": kwargs.get("frequency_penalty", 0),
+                "presence_penalty": kwargs.get("presence_penalty", 0),
+                "long_term_memory": "auto"
+            }
+        }
 
         response = requests.post(
-            "https://chatbot.theb.ai/api/chat-process",
+            "https://beta.theb.ai/api/conversation?org_id="+org_id+"&req_rand="+str(req_rand),
             headers=headers,
             json=json_data,
-            impersonate="chrome110",
+            stream=True,
         )
         response.raise_for_status()
-        line = response.text.splitlines()[-1]
-        text = json.loads(line)["text"]
-        yield text
+        content = ""
+        next_content = ""
+        for chunk in response.iter_lines():
+            if b"content" in chunk:
+                next_content = content
+                data = json.loads(chunk.decode().split("data: ")[1])
+                content = data["content"]
+                yield data["content"].replace(next_content, "")
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("auth", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+            ("presence_penalty", "int"),
+            ("frequency_penalty", "int"),
+            ("top_p", "int")
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
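Theb now requires auth: a bearer token and org id are read from `kwargs["auth"]`, falling back to the `"free"`/`"theb"` defaults shown above. A hedged usage sketch; the auth dict shape and sampling kwargs come from the diff, the credentials and calling code are placeholders.

```python
# Sketch only: the values below are placeholders, not working credentials.
from g4f.Provider import Theb

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize the GPL in one sentence."},
]

auth = {"bearer_token": "free", "org_id": "theb"}  # defaults used when omitted

for token in Theb.create_completion(
        model="gpt-3.5-turbo", messages=messages, stream=True,
        auth=auth, temperature=0.7, top_p=1):
    print(token, end="", flush=True)
```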
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
 with open("requirements.txt") as f:
     required = f.read().splitlines()
 
-VERSION = '0.0.2.2'
+VERSION = '0.0.2.3'
 DESCRIPTION = (
     "The official gpt4free repository | various collection of powerful language models"
 )
@@ -27,6 +27,11 @@ setup(
     long_description=long_description,
     packages=find_packages(),
     install_requires=required,
+    url='https://github.com/xtekky/gpt4free',  # Link to your GitHub repository
+    project_urls={
+        'Source Code': 'https://github.com/xtekky/gpt4free',  # GitHub link
+        'Bug Tracker': 'https://github.com/xtekky/gpt4free/issues',  # Link to issue tracker
+    },
     keywords=[
         "python",
         "chatbot",
diff --git a/testing/test_interference.py b/testing/test_interference.py
index 31717ea7..d8e85a6c 100644
--- a/testing/test_interference.py
+++ b/testing/test_interference.py
@@ -24,4 +24,4 @@ def main():
 
 
 if __name__ == "__main__":
-    main()
+    main()
\ No newline at end of file
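For context, testing/test_interference.py drives the repository's local OpenAI-compatible "interference" server. A hedged sketch of the kind of request it makes; the host, port, and request body below are assumptions for illustration, not taken from this diff.

```python
# Assumed setup: the interference server from this repo running locally.
# Port 1337 and the openai<1.0 client API are assumptions, not from the diff.
import openai

openai.api_key = ""                        # the local server ignores the key
openai.api_base = "http://localhost:1337"  # assumed interference API address

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "write a poem about a tree"}],
)
print(response.choices[0].message.content)
```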