summaryrefslogtreecommitdiffstats
path: root/g4f/Provider/Nexra.py
diff options
context:
space:
mode:
author: kqlio67 <kqlio67@users.noreply.github.com> 2024-09-04 01:09:29 +0200
committer: kqlio67 <kqlio67@users.noreply.github.com> 2024-09-04 01:09:29 +0200
commit: 7483a7c310d581c6012ed51607b6b57b3cab8018 (patch)
tree: 5a7607911a4a7ad64f3e8d543faf3229443beea8 /g4f/Provider/Nexra.py
parent: New TwitterBio provider with support for gpt-3.5-turbo and mixtral-8x7b models (diff)
downloadgpt4free-7483a7c310d581c6012ed51607b6b57b3cab8018.tar
gpt4free-7483a7c310d581c6012ed51607b6b57b3cab8018.tar.gz
gpt4free-7483a7c310d581c6012ed51607b6b57b3cab8018.tar.bz2
gpt4free-7483a7c310d581c6012ed51607b6b57b3cab8018.tar.lz
gpt4free-7483a7c310d581c6012ed51607b6b57b3cab8018.tar.xz
gpt4free-7483a7c310d581c6012ed51607b6b57b3cab8018.tar.zst
gpt4free-7483a7c310d581c6012ed51607b6b57b3cab8018.zip
Diffstat (limited to 'g4f/Provider/Nexra.py')
-rw-r--r-- g4f/Provider/Nexra.py 111
1 file changed, 111 insertions, 0 deletions
diff --git a/g4f/Provider/Nexra.py b/g4f/Provider/Nexra.py
new file mode 100644
index 00000000..4914b930
--- /dev/null
+++ b/g4f/Provider/Nexra.py
@@ -0,0 +1,111 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
    """Text-generation provider backed by the Nexra chat API.

    Sends the full message history (plus a flattened prompt) to the
    ``/api/chat/gpt`` endpoint and yields the single, non-streamed
    completion returned in the response's ``"gpt"`` field.
    """
    url = "https://nexra.aryahcr.cc"
    api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt"
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'gpt-3.5-turbo'
    # Models accepted verbatim by the endpoint (text generation only).
    models = [
        'gpt-4',
        'gpt-4-0613',
        'gpt-4-32k',
        'gpt-4-0314',
        'gpt-4-32k-0314',

        'gpt-3.5-turbo',
        'gpt-3.5-turbo-16k',
        'gpt-3.5-turbo-0613',
        'gpt-3.5-turbo-16k-0613',
        'gpt-3.5-turbo-0301',

        'gpt-3',
        'text-davinci-003',
        'text-davinci-002',
        'code-davinci-002',
        'text-curie-001',
        'text-babbage-001',
        'text-ada-001',
        'davinci',
        'curie',
        'babbage',
        'ada',
        'babbage-002',
        'davinci-002',
    ]

    # Alias -> canonical model family.
    #
    # BUG FIX: the original literal was written family -> variant with
    # repeated keys ("gpt-4" x4, "gpt-3.5-turbo" x4, "gpt-3" x13, plus a
    # duplicated "text-ada-001" entry). Python dict literals silently keep
    # only the LAST value per key, so all but three entries were dead.
    # The mapping is inverted here so every key is unique; because each
    # variant name is also listed in `models`, get_model() behavior for
    # existing callers is unchanged.
    model_aliases = {
        "gpt-4-0613": "gpt-4",
        "gpt-4-32k": "gpt-4",
        "gpt-4-0314": "gpt-4",
        "gpt-4-32k-0314": "gpt-4",

        "gpt-3.5-turbo-16k": "gpt-3.5-turbo",
        "gpt-3.5-turbo-0613": "gpt-3.5-turbo",
        "gpt-3.5-turbo-16k-0613": "gpt-3.5-turbo",
        "gpt-3.5-turbo-0301": "gpt-3.5-turbo",

        "text-davinci-003": "gpt-3",
        "text-davinci-002": "gpt-3",
        "code-davinci-002": "gpt-3",
        "text-curie-001": "gpt-3",
        "text-babbage-001": "gpt-3",
        "text-ada-001": "gpt-3",
        "davinci": "gpt-3",
        "curie": "gpt-3",
        "babbage": "gpt-3",
        "ada": "gpt-3",
        "babbage-002": "gpt-3",
        "davinci-002": "gpt-3",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve *model* to a supported model name.

        Known models pass through unchanged, known aliases are resolved,
        and anything else falls back to ``default_model``.
        """
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield the completion for *messages* from the Nexra API.

        Args:
            model: Requested model name; resolved via ``get_model``.
            messages: Conversation history in OpenAI message format.
            proxy: Optional HTTP proxy URL passed to aiohttp.

        Raises:
            aiohttp.ClientResponseError: On a non-2xx API response.
        """
        model = cls.get_model(model)

        headers = {
            "Content-Type": "application/json",
        }
        async with ClientSession(headers=headers) as session:
            # The API is sent both the structured history and a flattened
            # prompt; streaming is disabled, so one complete reply comes back.
            data = {
                "messages": messages,
                "prompt": format_prompt(messages),
                "model": model,
                "markdown": False,
                "stream": False,
            }
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                # Parse manually: the endpoint may not declare an
                # application/json content type, which response.json() rejects.
                result = await response.text()
                json_result = json.loads(result)
                yield json_result["gpt"]