diff options
author | Tekky <98614666+xtekky@users.noreply.github.com> | 2024-10-15 11:51:53 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-10-15 11:51:53 +0200 |
commit | 5ed3467d07181e876d957984c16782d687abd3b5 (patch) | |
tree | 23bd0fd3481d81fca70ac3c7842cb7ffa8f6497f /g4f/Provider/nexra/NexraFluxPro.py | |
parent | Merge pull request #2268 from yjg30737/patch-1 (diff) | |
parent | Updated(docs/client.md) (diff) | |
download | gpt4free-5ed3467d07181e876d957984c16782d687abd3b5.tar gpt4free-5ed3467d07181e876d957984c16782d687abd3b5.tar.gz gpt4free-5ed3467d07181e876d957984c16782d687abd3b5.tar.bz2 gpt4free-5ed3467d07181e876d957984c16782d687abd3b5.tar.lz gpt4free-5ed3467d07181e876d957984c16782d687abd3b5.tar.xz gpt4free-5ed3467d07181e876d957984c16782d687abd3b5.tar.zst gpt4free-5ed3467d07181e876d957984c16782d687abd3b5.zip |
Diffstat (limited to 'g4f/Provider/nexra/NexraFluxPro.py')
-rw-r--r-- | g4f/Provider/nexra/NexraFluxPro.py | 74 |
1 file changed, 74 insertions, 0 deletions
diff --git a/g4f/Provider/nexra/NexraFluxPro.py b/g4f/Provider/nexra/NexraFluxPro.py new file mode 100644 index 00000000..1dbab633 --- /dev/null +++ b/g4f/Provider/nexra/NexraFluxPro.py @@ -0,0 +1,74 @@ +from __future__ import annotations + +from aiohttp import ClientSession +import json + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...image import ImageResponse + + +class NexraFluxPro(AsyncGeneratorProvider, ProviderModelMixin): + label = "Nexra Flux PRO" + url = "https://nexra.aryahcr.cc/documentation/flux-pro/en" + api_endpoint = "https://nexra.aryahcr.cc/api/image/complements" + working = True + + default_model = 'flux' + models = [default_model] + model_aliases = { + "flux-pro": "flux", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + response: str = "url", # base64 or url + **kwargs + ) -> AsyncResult: + # Retrieve the correct model to use + model = cls.get_model(model) + + # Format the prompt from the messages + prompt = messages[0]['content'] + + headers = { + "Content-Type": "application/json" + } + payload = { + "prompt": prompt, + "model": model, + "response": response + } + + async with ClientSession(headers=headers) as session: + async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response: + response.raise_for_status() + text_data = await response.text() + + try: + # Parse the JSON response + json_start = text_data.find('{') + json_data = text_data[json_start:] + data = json.loads(json_data) + + # Check if the response contains images + if 'images' in data and len(data['images']) > 0: + image_url = data['images'][0] + yield ImageResponse(image_url, prompt) + else: + yield 
ImageResponse("No images found in the response.", prompt) + except json.JSONDecodeError: + yield ImageResponse("Failed to parse JSON. Response might not be in JSON format.", prompt) |