diff options
author | kqlio67 <kqlio67@users.noreply.github.com> | 2024-10-11 08:33:30 +0200 |
---|---|---|
committer | kqlio67 <kqlio67@users.noreply.github.com> | 2024-10-11 08:33:30 +0200 |
commit | a9bc67362f2be529fe9165ebb13347195ba1ddcf (patch) | |
tree | 1a91836eaa94f14c18ad5d55f687ff8a2118c357 /g4f/Provider/nexra/NexraQwen.py | |
parent | feat(g4f/Provider/__init__.py): add new providers and update imports (diff) | |
download | gpt4free-a9bc67362f2be529fe9165ebb13347195ba1ddcf.tar gpt4free-a9bc67362f2be529fe9165ebb13347195ba1ddcf.tar.gz gpt4free-a9bc67362f2be529fe9165ebb13347195ba1ddcf.tar.bz2 gpt4free-a9bc67362f2be529fe9165ebb13347195ba1ddcf.tar.lz gpt4free-a9bc67362f2be529fe9165ebb13347195ba1ddcf.tar.xz gpt4free-a9bc67362f2be529fe9165ebb13347195ba1ddcf.tar.zst gpt4free-a9bc67362f2be529fe9165ebb13347195ba1ddcf.zip |
Diffstat (limited to 'g4f/Provider/nexra/NexraQwen.py')
-rw-r--r-- | g4f/Provider/nexra/NexraQwen.py | 72 |
1 file changed, 53 insertions, 19 deletions
diff --git a/g4f/Provider/nexra/NexraQwen.py b/g4f/Provider/nexra/NexraQwen.py index ae8e9a0e..8bdf5475 100644 --- a/g4f/Provider/nexra/NexraQwen.py +++ b/g4f/Provider/nexra/NexraQwen.py @@ -1,7 +1,7 @@ from __future__ import annotations -import json from aiohttp import ClientSession +import json from ...typing import AsyncResult, Messages from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin @@ -10,8 +10,17 @@ from ..helper import format_prompt class NexraQwen(AsyncGeneratorProvider, ProviderModelMixin): label = "Nexra Qwen" + url = "https://nexra.aryahcr.cc/documentation/qwen/en" api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements" - models = ['qwen'] + working = True + supports_stream = True + + default_model = 'qwen' + models = [default_model] + + @classmethod + def get_model(cls, model: str) -> str: + return cls.default_model @classmethod async def create_async_generator( @@ -19,34 +28,59 @@ class NexraQwen(AsyncGeneratorProvider, ProviderModelMixin): model: str, messages: Messages, proxy: str = None, + stream: bool = False, + markdown: bool = False, **kwargs ) -> AsyncResult: + model = cls.get_model(model) + headers = { - "Content-Type": "application/json" + "Content-Type": "application/json", + "accept": "application/json", + "origin": cls.url, + "referer": f"{cls.url}/chat", } async with ClientSession(headers=headers) as session: + prompt = format_prompt(messages) data = { "messages": [ - {'role': 'assistant', 'content': ''}, - {'role': 'user', 'content': format_prompt(messages)} + { + "role": "user", + "content": prompt + } ], - "markdown": False, - "stream": True, + "markdown": markdown, + "stream": stream, "model": model } async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: response.raise_for_status() - full_response = '' - async for line in response.content: - if line: - messages = line.decode('utf-8').split('\x1e') - for message_str in messages: + + complete_message = "" + + # If streaming, process 
each chunk separately + if stream: + async for chunk in response.content.iter_any(): + if chunk: try: - message = json.loads(message_str) - if message.get('message'): - full_response = message['message'] - if message.get('finish'): - yield full_response.strip() - return + # Decode the chunk and split by the delimiter + parts = chunk.decode('utf-8').split('\x1e') + for part in parts: + if part.strip(): # Ensure the part is not empty + response_data = json.loads(part) + message_part = response_data.get('message') + if message_part: + complete_message = message_part except json.JSONDecodeError: - pass + continue + + # Yield the final complete message + if complete_message: + yield complete_message + else: + # Handle non-streaming response + text_response = await response.text() + response_data = json.loads(text_response) + message = response_data.get('message') + if message: + yield message |