author    | Tekky <98614666+xtekky@users.noreply.github.com> | 2024-09-29 22:41:08 +0200
committer | GitHub <noreply@github.com> | 2024-09-29 22:41:08 +0200
commit    | 0deb0f60dd4985017d3fcb946e108be8d1f63846 (patch)
tree      | e926d6f5551b4eb069e35b41479275056999e6c9 /g4f/Provider/nexra/NexraGeminiPro.py
parent    | Added gpt-4o provider (diff)
parent    | feat(g4f/Provider/Nexra.py): enhance model handling and add new providers (diff)
Diffstat (limited to 'g4f/Provider/nexra/NexraGeminiPro.py')
-rw-r--r-- | g4f/Provider/nexra/NexraGeminiPro.py | 52 |
1 file changed, 52 insertions, 0 deletions
diff --git a/g4f/Provider/nexra/NexraGeminiPro.py b/g4f/Provider/nexra/NexraGeminiPro.py
new file mode 100644
index 00000000..a57daed4
--- /dev/null
+++ b/g4f/Provider/nexra/NexraGeminiPro.py
@@ -0,0 +1,52 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class NexraGeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Nexra Gemini PRO"
+    api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
+    models = ['gemini-pro']
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "Content-Type": "application/json"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "messages": [
+                    {'role': 'assistant', 'content': ''},
+                    {'role': 'user', 'content': format_prompt(messages)}
+                ],
+                "markdown": False,
+                "stream": True,
+                "model": model
+            }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                full_response = ''
+                async for line in response.content:
+                    if line:
+                        messages = line.decode('utf-8').split('\x1e')
+                        for message_str in messages:
+                            try:
+                                message = json.loads(message_str)
+                                if message.get('message'):
+                                    full_response = message['message']
+                                if message.get('finish'):
+                                    yield full_response.strip()
+                                    return
+                            except json.JSONDecodeError:
+                                pass
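
For context, a minimal sketch of how the new provider's async generator could be consumed, assuming the g4f package layout implied by the file path (`g4f/Provider/nexra/NexraGeminiPro.py` importable as a module). Calling `create_async_generator` directly is illustrative only and not part of this commit; in practice g4f routes requests through its higher-level ChatCompletion/client interfaces.

```python
import asyncio

# Assumed import path based on the file location added in this commit.
from g4f.Provider.nexra.NexraGeminiPro import NexraGeminiPro


async def main():
    # A simple single-turn conversation; format_prompt() inside the provider
    # flattens this into one user message for the Nexra endpoint.
    messages = [{"role": "user", "content": "Hello, Gemini!"}]

    # create_async_generator is an async generator function, so calling it
    # returns an async generator that can be iterated directly.
    async for chunk in NexraGeminiPro.create_async_generator(
        model="gemini-pro",  # the only entry in NexraGeminiPro.models
        messages=messages,
    ):
        # The provider accumulates the streamed 'message' fields and yields
        # the final text once a chunk with 'finish' is received.
        print(chunk)


asyncio.run(main())
```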