author | kqlio67 <kqlio67@users.noreply.github.com> | 2024-10-16 20:00:08 +0200 |
---|---|---|
committer | kqlio67 <kqlio67@users.noreply.github.com> | 2024-10-16 20:00:08 +0200 |
commit | 9f394f9613469c7ca56ae3cdc9a198f5c196fc13 (patch) | |
tree | 92ed3d2093a62b41710bc19e38ea0db964eac205 /g4f/Provider/AiMathGPT.py | |
parent | feat(g4f/Provider/HuggingChat.py): add new model support for Nemotron (diff) | |
Diffstat
-rw-r--r-- | g4f/Provider/AiMathGPT.py | 78
1 file changed, 78 insertions, 0 deletions
diff --git a/g4f/Provider/AiMathGPT.py b/g4f/Provider/AiMathGPT.py
new file mode 100644
index 00000000..4399320a
--- /dev/null
+++ b/g4f/Provider/AiMathGPT.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class AiMathGPT(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://aimathgpt.forit.ai"
+    api_endpoint = "https://aimathgpt.forit.ai/api/ai"
+    working = True
+    supports_stream = False
+    supports_system_message = True
+    supports_message_history = True
+
+    default_model = 'llama3'
+    models = ['llama3']
+
+    model_aliases = {"llama-3.1-70b": "llama3",}
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+
+        headers = {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'no-cache',
+            'content-type': 'application/json',
+            'origin': cls.url,
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'referer': f'{cls.url}/',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36'
+        }
+
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "messages": [
+                    {
+                        "role": "system",
+                        "content": ""
+                    },
+                    {
+                        "role": "user",
+                        "content": format_prompt(messages)
+                    }
+                ],
+                "model": model
+            }
+
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                response_data = await response.json()
+                filtered_response = response_data['result']['response']
+                yield filtered_response
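The new provider is a thin, non-streaming JSON POST wrapper around aimathgpt.forit.ai. Below is a minimal usage sketch that calls the provider's `create_async_generator` classmethod directly; it assumes g4f is installed with this commit applied so the class is importable from `g4f.Provider.AiMathGPT`, and the prompt is purely illustrative, not part of the committed change.

```python
# Illustrative only (assumption: g4f is installed with this commit applied,
# so the class is importable from g4f.Provider.AiMathGPT).
import asyncio

from g4f.Provider.AiMathGPT import AiMathGPT


async def main() -> None:
    messages = [{"role": "user", "content": "Solve 2x + 3 = 11 for x."}]
    # create_async_generator is an async generator; because the provider sets
    # supports_stream = False, it yields the complete response text in one chunk.
    async for chunk in AiMathGPT.create_async_generator(model="llama3", messages=messages):
        print(chunk)


if __name__ == "__main__":
    asyncio.run(main())
```

Per the diff, unrecognized model names fall back to `default_model` via `get_model`, and the alias `llama-3.1-70b` maps to `llama3`.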