Diffstat
-rw-r--r-- | g4f/Provider/ChatBase.py    | 62
-rw-r--r-- | g4f/Provider/CodeLinkAva.py | 63
-rw-r--r-- | g4f/Provider/HuggingChat.py |  2
-rw-r--r-- | g4f/Provider/OpenaiChat.py  |  2
-rw-r--r-- | g4f/Provider/__init__.py    |  4
-rw-r--r-- | testing/test_async.py       | 24
6 files changed, 142 insertions, 15 deletions
diff --git a/g4f/Provider/ChatBase.py b/g4f/Provider/ChatBase.py
new file mode 100644
index 00000000..7d73fd2f
--- /dev/null
+++ b/g4f/Provider/ChatBase.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider
+
+
+class ChatBase(AsyncGeneratorProvider):
+    url = "https://www.chatbase.co"
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> AsyncGenerator:
+        if model == "gpt-4":
+            chat_id = "quran---tafseer-saadi-pdf-wbgknt7zn"
+        elif model == "gpt-3.5-turbo" or True:
+            chat_id = "chatbase--1--pdf-p680fxvnm"
+        headers = {
+            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "Accept"          : "*/*",
+            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "Origin"          : cls.url,
+            "Referer"         : cls.url + "/",
+            "Sec-Fetch-Dest"  : "empty",
+            "Sec-Fetch-Mode"  : "cors",
+            "Sec-Fetch-Site"  : "same-origin",
+        }
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            data = {
+                "messages": messages,
+                "captchaCode": "hadsa",
+                "chatId": chat_id,
+                "conversationId": f"kcXpqEnqUie3dnJlsRi_O-{chat_id}"
+            }
+            async with session.post("https://www.chatbase.co/api/fe/chat", json=data) as response:
+                response.raise_for_status()
+                async for stream in response.content.iter_any():
+                    stream = stream.decode()
+                    if stream:
+                        yield stream
+
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
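
For orientation, a minimal sketch of how the new ChatBase provider could be driven on its own, assuming the package layout added in this commit; the prompt text is only a placeholder:

import asyncio

from g4f.Provider import ChatBase

async def demo():
    # create_async_generator yields the reply as decoded text chunks
    async for chunk in ChatBase.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello Assistant!"}],
    ):
        print(chunk, end="", flush=True)

asyncio.run(demo())
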
diff --git a/g4f/Provider/CodeLinkAva.py b/g4f/Provider/CodeLinkAva.py
new file mode 100644
index 00000000..3ab4e264
--- /dev/null
+++ b/g4f/Provider/CodeLinkAva.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider
+
+
+class CodeLinkAva(AsyncGeneratorProvider):
+    url = "https://ava-ai-ef611.web.app"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> AsyncGenerator:
+        headers = {
+            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "Accept"          : "*/*",
+            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "Origin"          : cls.url,
+            "Referer"         : cls.url + "/",
+            "Sec-Fetch-Dest"  : "empty",
+            "Sec-Fetch-Mode"  : "cors",
+            "Sec-Fetch-Site"  : "same-origin",
+        }
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            data = {
+                "messages": messages,
+                "temperature": 0.6,
+                "stream": True,
+                **kwargs
+            }
+            async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
+                response.raise_for_status()
+                start = "data: "
+                async for line in response.content:
+                    line = line.decode()
+                    if line.startswith("data: ") and not line.startswith("data: [DONE]"):
+                        line = json.loads(line[len(start):-1])
+                        content = line["choices"][0]["delta"].get("content")
+                        if content:
+                            yield content
+
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
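
CodeLinkAva reads a server-sent-events style stream: each payload line starts with "data: ", ends with a newline (hence the [:-1] slice before json.loads), carries an OpenAI-style chunk, and a literal "data: [DONE]" line closes the stream. A small offline sketch of that parsing step, using made-up sample lines:

import json

# Hypothetical sample of the lines the provider iterates over
sample_lines = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}\n',
    'data: {"choices": [{"delta": {"content": "lo"}}]}\n',
    'data: [DONE]\n',
]

start = "data: "
for line in sample_lines:
    if line.startswith(start) and not line.startswith("data: [DONE]"):
        chunk = json.loads(line[len(start):-1])  # drop prefix and trailing newline
        content = chunk["choices"][0]["delta"].get("content")
        if content:
            print(content, end="")
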
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 11310a69..85f879f3 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -68,7 +68,7 @@ class HuggingChat(AsyncGeneratorProvider):
                     if "error" in data:
                         raise RuntimeError(data["error"])
                     elif isinstance(data, list):
-                        yield data[0]["generated_text"]
+                        yield data[0]["generated_text"].strip()
                     else:
                         raise RuntimeError(f"Response: {data}")
                 else:
diff --git a/g4f/Provider/OpenaiChat.py b/g4f/Provider/OpenaiChat.py
index c93977ec..cbe886f0 100644
--- a/g4f/Provider/OpenaiChat.py
+++ b/g4f/Provider/OpenaiChat.py
@@ -34,7 +34,7 @@ class OpenaiChat(AsyncProvider):
                 "https": proxy
             }
         if not access_token:
-            access_token = await cls.get_access_token(cookies)
+            access_token = await cls.get_access_token(cookies, proxies)
         headers = {
             "Accept": "text/event-stream",
             "Authorization": f"Bearer {access_token}",
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index fa1bdb87..a1abe452 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -6,8 +6,10 @@ from .AiService import AiService
 from .AItianhu import AItianhu
 from .Bard import Bard
 from .Bing import Bing
+from .ChatBase import ChatBase
 from .ChatgptAi import ChatgptAi
 from .ChatgptLogin import ChatgptLogin
+from .CodeLinkAva import CodeLinkAva
 from .DeepAi import DeepAi
 from .DfeHub import DfeHub
 from .EasyChat import EasyChat
@@ -42,8 +44,10 @@ __all__ = [
     'AItianhu',
     'Bard',
     'Bing',
+    'ChatBase',
     'ChatgptAi',
     'ChatgptLogin',
+    'CodeLinkAva',
     'DeepAi',
     'DfeHub',
     'EasyChat',
diff --git a/testing/test_async.py b/testing/test_async.py
index 692946ea..bef2c75f 100644
--- a/testing/test_async.py
+++ b/testing/test_async.py
@@ -7,31 +7,29 @@ sys.path.append(str(Path(__file__).parent.parent))
 import g4f
 from g4f.Provider import AsyncProvider
 from testing.test_providers import get_providers
-from testing.log_time import log_time_async
+from testing.log_time import log_time_async
 
-async def create_async(provider: AsyncProvider):
+async def create_async(provider):
     model = g4f.models.gpt_35_turbo.name if provider.supports_gpt_35_turbo else g4f.models.default.name
     try:
-        response = await log_time_async(
+        response = await log_time_async(
             provider.create_async,
             model=model,
             messages=[{"role": "user", "content": "Hello Assistant!"}]
         )
-        assert type(response) is str
-        assert len(response) > 0
-        return response
+        print(f"{provider.__name__}:", response)
     except Exception as e:
-        return e
+        return f"{provider.__name__}: {e.__class__.__name__}: {e}"
 
 async def run_async():
-    _providers: list[AsyncProvider] = [
-        _provider
+    responses: list = [
+        create_async(_provider)
         for _provider in get_providers()
-        if _provider.working and hasattr(_provider, "create_async")
+        if _provider.working and issubclass(_provider, AsyncProvider)
     ]
-    responses = [create_async(_provider) for _provider in _providers]
     responses = await asyncio.gather(*responses)
-    for idx, provider in enumerate(_providers):
-        print(f"{provider.__name__}:", responses[idx])
+    for error in responses:
+        if error:
+            print(error)
 
 print("Total:", asyncio.run(log_time_async(run_async)))
\ No newline at end of file
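
The reworked test harness now builds one coroutine per working AsyncProvider subclass, runs them concurrently with asyncio.gather, prints successful responses from inside create_async, and hands back only formatted error strings. A stand-alone sketch of that pattern with made-up provider names, independent of g4f:

import asyncio

async def check(name):
    # Stand-in for provider.create_async(...): successes print immediately,
    # failures come back as formatted error strings (None otherwise).
    try:
        await asyncio.sleep(0)
        print(f"{name}: ok")
    except Exception as e:
        return f"{name}: {e.__class__.__name__}: {e}"

async def run_all():
    responses = await asyncio.gather(*[check(n) for n in ("ProviderA", "ProviderB")])
    for error in responses:
        if error:
            print(error)

asyncio.run(run_all())
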