From 79cf039a888eef117ef514a9ad30a6cb4dbf81b9 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Fri, 27 Oct 2023 22:59:14 +0200
Subject: Update config supports_message_history

---
 g4f/Provider/Acytoo.py         |  1 +
 g4f/Provider/AiAsk.py          |  1 +
 g4f/Provider/Aibn.py           |  5 ++-
 g4f/Provider/Ails.py           |  5 ++-
 g4f/Provider/Bing.py           |  6 +--
 g4f/Provider/ChatBase.py       |  7 ++--
 g4f/Provider/ChatForAi.py      |  4 +-
 g4f/Provider/Chatgpt4Online.py |  5 ++-
 g4f/Provider/ChatgptAi.py      |  3 +-
 g4f/Provider/ChatgptX.py       |  1 -
 g4f/Provider/DeepInfra.py      |  1 +
 g4f/Provider/FakeGpt.py        |  3 +-
 g4f/Provider/FreeGpt.py        |  4 +-
 g4f/Provider/GPTalk.py         |  3 +-
 g4f/Provider/GeekGpt.py        | 85 ++++++++++++++++++++++++++++++++++++++++++
 g4f/Provider/Geekgpt.py        | 84 -----------------------------------------
 g4f/Provider/GptChatly.py      |  7 ++--
 g4f/Provider/GptForLove.py     |  3 +-
 g4f/Provider/Hashnode.py       |  3 +-
 g4f/Provider/Liaobots.py       |  1 +
 g4f/Provider/Llama2.py         |  5 ++-
 g4f/Provider/NoowAi.py         |  5 ++-
 g4f/Provider/Opchatgpts.py     |  5 ++-
 g4f/Provider/Vercel.py         |  5 ++-
 g4f/Provider/Ylokh.py          |  5 ++-
 g4f/Provider/You.py            |  1 -
 g4f/Provider/Yqcloud.py        |  1 -
 g4f/Provider/__init__.py       |  4 +-
 28 files changed, 136 insertions(+), 127 deletions(-)
 create mode 100644 g4f/Provider/GeekGpt.py
 delete mode 100644 g4f/Provider/Geekgpt.py

(limited to 'g4f/Provider')

diff --git a/g4f/Provider/Acytoo.py b/g4f/Provider/Acytoo.py
index 830d59bc..4dee176a 100644
--- a/g4f/Provider/Acytoo.py
+++ b/g4f/Provider/Acytoo.py
@@ -9,6 +9,7 @@ from .base_provider import AsyncGeneratorProvider
 class Acytoo(AsyncGeneratorProvider):
     url = 'https://chat.acytoo.com'
     working = False
+    supports_message_history = True
     supports_gpt_35_turbo = True
 
     @classmethod
diff --git a/g4f/Provider/AiAsk.py b/g4f/Provider/AiAsk.py
index f10be389..ac123fc9 100644
--- a/g4f/Provider/AiAsk.py
+++ b/g4f/Provider/AiAsk.py
@@ -6,6 +6,7 @@ from .base_provider import AsyncGeneratorProvider
 
 class AiAsk(AsyncGeneratorProvider):
     url = "https://e.aiask.me"
+    supports_message_history = True
     supports_gpt_35_turbo = True
     working = True
 
diff --git a/g4f/Provider/Aibn.py b/g4f/Provider/Aibn.py
index 14935888..1f81a61e 100644
--- a/g4f/Provider/Aibn.py
+++ b/g4f/Provider/Aibn.py
@@ -9,9 +9,10 @@ from .base_provider import AsyncGeneratorProvider
 
 class Aibn(AsyncGeneratorProvider):
-    url = "https://aibn.cc"
+    url = "https://aibn.cc"
+    working = False
+    supports_message_history = True
     supports_gpt_35_turbo = True
-    working = False
 
     @classmethod
diff --git a/g4f/Provider/Ails.py b/g4f/Provider/Ails.py
index fae3e62a..58010756 100644
--- a/g4f/Provider/Ails.py
+++ b/g4f/Provider/Ails.py
@@ -12,8 +12,9 @@ from .base_provider import AsyncGeneratorProvider
 
 class Ails(AsyncGeneratorProvider):
-    url: str = "https://ai.ls"
-    working = False
+    url = "https://ai.ls"
+    working = False
+    supports_message_history = True
     supports_gpt_35_turbo = True
 
     @staticmethod
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index ca14510c..726faa2b 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -30,10 +30,10 @@ default_cookies = {
 }
 
 class Bing(AsyncGeneratorProvider):
-    url = "https://bing.com/chat"
-    working = True
+    url = "https://bing.com/chat"
+    working = True
     supports_message_history = True
-    supports_gpt_4 = True
+    supports_gpt_4 = True
 
     @staticmethod
diff --git a/g4f/Provider/ChatBase.py b/g4f/Provider/ChatBase.py
index ada51fed..ba3d4ea5 100644
--- a/g4f/Provider/ChatBase.py
+++ b/g4f/Provider/ChatBase.py
@@ -7,12 +7,11 @@ from .base_provider import AsyncGeneratorProvider
 
 class ChatBase(AsyncGeneratorProvider):
-    url = "https://www.chatbase.co"
+    url = "https://www.chatbase.co"
     supports_gpt_35_turbo = True
     supports_message_history = True
-    working = True
-    list_incorrect_responses = ["support@chatbase",
-        "about Chatbase"]
+    working = True
+    list_incorrect_responses = ["support@chatbase", "about Chatbase"]
 
     @classmethod
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py
index 7a4e9264..7a123f0f 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/ChatForAi.py
@@ -9,8 +9,8 @@ from .base_provider import AsyncGeneratorProvider
 
 class ChatForAi(AsyncGeneratorProvider):
-    url = "https://chatforai.store"
-    working = True
+    url = "https://chatforai.store"
+    working = True
     supports_message_history = True
     supports_gpt_35_turbo = True
 
     @classmethod
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py
index e81c8d4c..d7509639 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -8,9 +8,10 @@ from .base_provider import AsyncGeneratorProvider
 
 class Chatgpt4Online(AsyncGeneratorProvider):
-    url = "https://chatgpt4online.org"
+    url = "https://chatgpt4online.org"
+    supports_message_history = True
     supports_gpt_35_turbo = True
-    working = False
+    working = False
 
     @classmethod
diff --git a/g4f/Provider/ChatgptAi.py b/g4f/Provider/ChatgptAi.py
index 9783d868..40ad9481 100644
--- a/g4f/Provider/ChatgptAi.py
+++ b/g4f/Provider/ChatgptAi.py
@@ -8,8 +8,9 @@ from .base_provider import AsyncGeneratorProvider
 
 class ChatgptAi(AsyncGeneratorProvider):
-    url: str = "https://chatgpt.ai"
+    url = "https://chatgpt.ai"
     working = True
+    supports_message_history = True
     supports_gpt_35_turbo = True
     _system = None
diff --git a/g4f/Provider/ChatgptX.py b/g4f/Provider/ChatgptX.py
index 75ff0da5..3101c05d 100644
--- a/g4f/Provider/ChatgptX.py
+++ b/g4f/Provider/ChatgptX.py
@@ -12,7 +12,6 @@ from .helper import format_prompt
 class ChatgptX(AsyncGeneratorProvider):
     url = "https://chatgptx.de"
     supports_gpt_35_turbo = True
-    supports_message_history = True
     working = True
 
     @classmethod
diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/DeepInfra.py
index 70045cae..da6333ad 100644
--- a/g4f/Provider/DeepInfra.py
+++ b/g4f/Provider/DeepInfra.py
@@ -9,6 +9,7 @@ from .base_provider import AsyncGeneratorProvider
 
 class DeepInfra(AsyncGeneratorProvider):
     url = "https://deepinfra.com"
+    supports_message_history = True
     working = True
 
     @classmethod
diff --git a/g4f/Provider/FakeGpt.py b/g4f/Provider/FakeGpt.py
index a89425d3..daa96737 100644
--- a/g4f/Provider/FakeGpt.py
+++ b/g4f/Provider/FakeGpt.py
@@ -10,11 +10,10 @@ from .helper import format_prompt
 
 class FakeGpt(AsyncGeneratorProvider):
     url = "https://chat-shared2.zhile.io"
-    supports_message_history = True
     supports_gpt_35_turbo = True
     working = True
     _access_token = None
-    _cookie_jar = None
+    _cookie_jar = None
 
     @classmethod
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 758e411b..a3a26fe6 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -11,10 +11,10 @@ domains = [
 ]
 
 class FreeGpt(AsyncGeneratorProvider):
-    url = "https://freegpts1.aifree.site/"
+    url = "https://freegpts1.aifree.site/"
+    working = True
     supports_message_history = True
     supports_gpt_35_turbo = True
-    working = True
 
     @classmethod
diff --git a/g4f/Provider/GPTalk.py b/g4f/Provider/GPTalk.py
index a5644fc4..b5881e5d 100644
--- a/g4f/Provider/GPTalk.py
+++ b/g4f/Provider/GPTalk.py
@@ -10,9 +10,8 @@ from .helper import format_prompt
 
 class GPTalk(AsyncGeneratorProvider):
     url = "https://gptalk.net"
-    supports_gpt_35_turbo = True
-    supports_message_history = True
     working = True
+    supports_gpt_35_turbo = True
     _auth = None
 
     @classmethod
diff --git a/g4f/Provider/GeekGpt.py b/g4f/Provider/GeekGpt.py
new file mode 100644
index 00000000..8c449745
--- /dev/null
+++ b/g4f/Provider/GeekGpt.py
@@ -0,0 +1,85 @@
+from __future__ import annotations
+import requests, json
+
+from .base_provider import BaseProvider
+from ..typing import CreateResult, Messages
+from json import dumps
+
+
+class GeekGpt(BaseProvider):
+    url = 'https://chat.geekgpt.org'
+    working = True
+    supports_message_history = True
+    supports_stream = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        **kwargs
+    ) -> CreateResult:
+        if not model:
+            model = "gpt-3.5-turbo"
+        json_data = {
+            'messages': messages,
+            'model': model,
+            'temperature': kwargs.get('temperature', 0.9),
+            'presence_penalty': kwargs.get('presence_penalty', 0),
+            'top_p': kwargs.get('top_p', 1),
+            'frequency_penalty': kwargs.get('frequency_penalty', 0),
+            'stream': True
+        }
+
+        data = dumps(json_data, separators=(',', ':'))
+
+        headers = {
+            'authority': 'ai.fakeopen.com',
+            'accept': '*/*',
+            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'authorization': 'Bearer pk-this-is-a-real-free-pool-token-for-everyone',
+            'content-type': 'application/json',
+            'origin': 'https://chat.geekgpt.org',
+            'referer': 'https://chat.geekgpt.org/',
+            'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'cross-site',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+        }
+
+        response = requests.post("https://ai.fakeopen.com/v1/chat/completions",
+            headers=headers, data=data, stream=True)
+        response.raise_for_status()
+
+        for chunk in response.iter_lines():
+            if b'content' in chunk:
+                json_data = chunk.decode().replace("data: ", "")
+
+                if json_data == "[DONE]":
+                    break
+
+                try:
+                    content = json.loads(json_data)["choices"][0]["delta"].get("content")
+                except Exception as e:
+                    raise RuntimeError(f'error | {e} :', json_data)
+
+                if content:
+                    yield content
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ('model', 'str'),
+            ('messages', 'list[dict[str, str]]'),
+            ('stream', 'bool'),
+            ('temperature', 'float'),
+        ]
+        param = ', '.join([': '.join(p) for p in params])
+        return f'g4f.provider.{cls.__name__} supports: ({param})'
diff --git a/g4f/Provider/Geekgpt.py b/g4f/Provider/Geekgpt.py
deleted file mode 100644
index 3c577cf8..00000000
--- a/g4f/Provider/Geekgpt.py
+++ /dev/null
@@ -1,84 +0,0 @@
-from __future__ import annotations
-import requests, json
-
-from .base_provider import BaseProvider
-from ..typing import CreateResult, Messages
-from json import dumps
-
-
-class GeekGpt(BaseProvider):
-    url = 'https://chat.geekgpt.org'
-    supports_stream = True
-    working = True
-    supports_gpt_35_turbo = True
-    supports_gpt_4 = True
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool,
-        **kwargs
-    ) -> CreateResult:
-        if not model:
-            model = "gpt-3.5-turbo"
-        json_data = {
-            'messages': messages,
-            'model': model,
-            'temperature': kwargs.get('temperature', 0.9),
-            'presence_penalty': kwargs.get('presence_penalty', 0),
-            'top_p': kwargs.get('top_p', 1),
-            'frequency_penalty': kwargs.get('frequency_penalty', 0),
-            'stream': True
-        }
-
-        data = dumps(json_data, separators=(',', ':'))
-
-        headers = {
-            'authority': 'ai.fakeopen.com',
-            'accept': '*/*',
-            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'authorization': 'Bearer pk-this-is-a-real-free-pool-token-for-everyone',
-            'content-type': 'application/json',
-            'origin': 'https://chat.geekgpt.org',
-            'referer': 'https://chat.geekgpt.org/',
-            'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'cross-site',
-            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
-        }
-
-        response = requests.post("https://ai.fakeopen.com/v1/chat/completions",
-            headers=headers, data=data, stream=True)
-        response.raise_for_status()
-
-        for chunk in response.iter_lines():
-            if b'content' in chunk:
-                json_data = chunk.decode().replace("data: ", "")
-
-                if json_data == "[DONE]":
-                    break
-
-                try:
-                    content = json.loads(json_data)["choices"][0]["delta"].get("content")
-                except Exception as e:
-                    raise RuntimeError(f'error | {e} :', json_data)
-
-                if content:
-                    yield content
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ('model', 'str'),
-            ('messages', 'list[dict[str, str]]'),
-            ('stream', 'bool'),
-            ('temperature', 'float'),
-        ]
-        param = ', '.join([': '.join(p) for p in params])
-        return f'g4f.provider.{cls.__name__} supports: ({param})'
diff --git a/g4f/Provider/GptChatly.py b/g4f/Provider/GptChatly.py
index 465d2527..a493c745 100644
--- a/g4f/Provider/GptChatly.py
+++ b/g4f/Provider/GptChatly.py
@@ -9,10 +9,11 @@ from .helper import get_cookies
 
 class GptChatly(AsyncProvider):
-    url = "https://gptchatly.com"
+    url = "https://gptchatly.com"
+    working = True
+    supports_message_history = True
     supports_gpt_35_turbo = True
-    supports_gpt_4 = True
-    working = True
+    supports_gpt_4 = True
 
     @classmethod
diff --git a/g4f/Provider/GptForLove.py b/g4f/Provider/GptForLove.py
index 4b31809c..e4787e5d 100644
--- a/g4f/Provider/GptForLove.py
+++ b/g4f/Provider/GptForLove.py
@@ -9,9 +9,8 @@ from .helper import format_prompt
 
 class GptForLove(AsyncGeneratorProvider):
     url = "https://ai18.gptforlove.com"
-    supports_message_history = True
-    supports_gpt_35_turbo = True
     working = True
+    supports_gpt_35_turbo = True
 
     @classmethod
diff --git a/g4f/Provider/Hashnode.py b/g4f/Provider/Hashnode.py
index 7f308d7e..f562787b 100644
--- a/g4f/Provider/Hashnode.py
+++ b/g4f/Provider/Hashnode.py
@@ -13,8 +13,9 @@ class SearchTypes():
 
 class Hashnode(AsyncGeneratorProvider):
     url = "https://hashnode.com"
-    supports_gpt_35_turbo = True
     working = True
+    supports_message_history = True
+    supports_gpt_35_turbo = True
     _sources = []
 
     @classmethod
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 9dc52aae..b56d08ab 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -31,6 +31,7 @@ models = {
 class Liaobots(AsyncGeneratorProvider):
     url = "https://liaobots.site"
     working = True
+    supports_message_history = True
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
     _auth_code = None
diff --git a/g4f/Provider/Llama2.py b/g4f/Provider/Llama2.py
index 1b332f86..efe5bdc6 100644
--- a/g4f/Provider/Llama2.py
+++ b/g4f/Provider/Llama2.py
@@ -13,8 +13,9 @@ models = {
 }
 
 class Llama2(AsyncGeneratorProvider):
-    url = "https://www.llama2.ai"
-    working = True
+    url = "https://www.llama2.ai"
+    working = True
+    supports_message_history = True
 
     @classmethod
diff --git a/g4f/Provider/NoowAi.py b/g4f/Provider/NoowAi.py
index 9dc26d35..c337514a 100644
--- a/g4f/Provider/NoowAi.py
+++ b/g4f/Provider/NoowAi.py
@@ -8,9 +8,10 @@ from .base_provider import AsyncGeneratorProvider
 
 class NoowAi(AsyncGeneratorProvider):
-    url = "https://noowai.com"
+    url = "https://noowai.com"
+    supports_message_history = True
     supports_gpt_35_turbo = True
-    working = True
+    working = True
 
     @classmethod
diff --git a/g4f/Provider/Opchatgpts.py b/g4f/Provider/Opchatgpts.py
index e56f5b40..c77acb22 100644
--- a/g4f/Provider/Opchatgpts.py
+++ b/g4f/Provider/Opchatgpts.py
@@ -8,9 +8,10 @@ from .base_provider import AsyncGeneratorProvider
 
 class Opchatgpts(AsyncGeneratorProvider):
-    url = "https://opchatgpts.net"
+    url = "https://opchatgpts.net"
+    working = False
+    supports_message_history = True
     supports_gpt_35_turbo = True
-    working = False
 
     @classmethod
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index eb5aa20a..a7bbc496 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -9,9 +9,10 @@ from ..debug import logging
 
 class Vercel(BaseProvider):
     url = 'https://sdk.vercel.ai'
-    working = True
+    working = True
+    supports_message_history = True
     supports_gpt_35_turbo = True
-    supports_stream = True
+    supports_stream = True
 
     @staticmethod
diff --git a/g4f/Provider/Ylokh.py b/g4f/Provider/Ylokh.py
index dce76f22..abf4d9c1 100644
--- a/g4f/Provider/Ylokh.py
+++ b/g4f/Provider/Ylokh.py
@@ -7,8 +7,9 @@ from .base_provider import AsyncGeneratorProvider
 from ..typing import AsyncResult, Messages
 
 class Ylokh(AsyncGeneratorProvider):
-    url = "https://chat.ylokh.xyz"
-    working = False
+    url = "https://chat.ylokh.xyz"
+    working = False
+    supports_message_history = True
     supports_gpt_35_turbo = True
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 34972586..91a195cf 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -10,7 +10,6 @@ from .base_provider import AsyncGeneratorProvider, format_prompt
 
 class You(AsyncGeneratorProvider):
     url = "https://you.com"
     working = True
-    supports_message_history = True
     supports_gpt_35_turbo = True
diff --git a/g4f/Provider/Yqcloud.py b/g4f/Provider/Yqcloud.py
index 12eb7bbb..2829c5bf 100644
--- a/g4f/Provider/Yqcloud.py
+++ b/g4f/Provider/Yqcloud.py
@@ -10,7 +10,6 @@ from .base_provider import AsyncGeneratorProvider, format_prompt
 
 class Yqcloud(AsyncGeneratorProvider):
     url = "https://chat9.yqcloud.top/"
     working = True
-    supports_message_history = True
     supports_gpt_35_turbo = True
 
     @staticmethod
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 60d3bd25..1dd603b1 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -36,7 +36,7 @@ from .Vercel import Vercel
 from .Ylokh import Ylokh
 from .You import You
 from .Yqcloud import Yqcloud
-from .Geekgpt import GeekGpt
+from .GeekGpt import GeekGpt
 from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
 from .retry_provider import RetryProvider
 
@@ -111,7 +111,7 @@ class ProviderUtils:
         'Ylokh': Ylokh,
         'You': You,
         'Yqcloud': Yqcloud,
-        'Geekgpt': GeekGpt,
+        'GeekGpt': GeekGpt,
         'BaseProvider': BaseProvider,
         'AsyncProvider': AsyncProvider,
--
cgit v1.2.3
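
For callers, the practical effect of the Geekgpt.py -> GeekGpt.py rename and of the updated re-export in g4f/Provider/__init__.py is that the provider is now addressed as GeekGpt. The sketch below is not part of the patch; it is a minimal usage example assuming g4f's top-level ChatCompletion.create API as it existed around this commit, with a placeholder model name and prompt.

    import g4f
    from g4f.Provider import GeekGpt  # resolves via g4f/Provider/__init__.py after this commit

    # Stream a completion through the GeekGpt provider; the class declares
    # supports_stream = True, so the result can be consumed chunk by chunk.
    response = g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",  # placeholder model name
        provider=GeekGpt,
        messages=[{"role": "user", "content": "Hello!"}],
        stream=True,
    )

    for chunk in response:
        print(chunk, end="", flush=True)

The supports_message_history flags touched throughout the patch signal that a provider accepts the full messages list (a multi-turn history) rather than only the latest user prompt, which is why keeping them accurate per provider is the point of the change.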