From e0f5e837299aab9a6b398aa498dee5baeb177d61 Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Sat, 28 Dec 2024 19:11:48 +0000
Subject: Update docs/. etc/. g4f/. README.md (#2515)

Co-authored-by: kqlio67 <>
---
 g4f/Provider/Airforce.py                         |  51 +++-
 g4f/Provider/Blackbox.py                         | 340 +++++++++++----------
 g4f/Provider/BlackboxCreateAgent.py              | 257 ++++++++++++++++
 g4f/Provider/ClaudeSon.py                        |  46 +++
 g4f/Provider/DeepInfraChat.py                    |  54 +++-
 g4f/Provider/Flux.py                             |  58 ----
 g4f/Provider/PollinationsAI.py                   | 227 ++++++++++----
 g4f/Provider/Prodia.py                           |  11 +-
 g4f/Provider/__init__.py                         |  53 ++--
 g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py |  59 ++++
 .../hf_space/BlackForestLabsFlux1Schnell.py      |  81 +++++
 g4f/Provider/hf_space/VoodoohopFlux1Schnell.py   |  81 +++++
 g4f/Provider/hf_space/__init__.py                |   3 +
 g4f/models.py                                    |  26 +-
 14 files changed, 1025 insertions(+), 322 deletions(-)
 create mode 100644 g4f/Provider/BlackboxCreateAgent.py
 create mode 100644 g4f/Provider/ClaudeSon.py
 delete mode 100644 g4f/Provider/Flux.py
 create mode 100644 g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py
 create mode 100644 g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py
 create mode 100644 g4f/Provider/hf_space/VoodoohopFlux1Schnell.py
 create mode 100644 g4f/Provider/hf_space/__init__.py

(limited to 'g4f')

diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 6f55834c..3862b10b 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -4,7 +4,6 @@ import re
 import requests
 from aiohttp import ClientSession
 from typing import List
-from requests.packages.urllib3.exceptions import InsecureRequestWarning
 
 from ..typing import AsyncResult, Messages
 from ..image import ImageResponse
@@ -12,8 +11,6 @@ from ..requests.raise_for_status import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .. import debug
 
-requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
-
 def split_message(message: str, max_length: int = 1000) -> List[str]:
     """Splits the message into parts up to (max_length)."""
@@ -29,7 +26,7 @@ def split_message(message: str, max_length: int = 1000) -> List[str]:
     return chunks
 
 class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://llmplayground.net"
+    url = "https://api.airforce"
     api_endpoint_completions = "https://api.airforce/chat/completions"
     api_endpoint_imagine2 = "https://api.airforce/imagine2"
 
@@ -41,6 +38,9 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
     default_model = "gpt-4o-mini"
     default_image_model = "flux"
 
+    models = []
+    image_models = []
+
     hidden_models = {"Flux-1.1-Pro"}
     additional_models_imagine = ["flux-1.1-pro", "midjourney", "dall-e-3"]
     model_aliases = {
@@ -54,7 +54,8 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
         "lfm-40b": "lfm-40b-moe",
         "german-7b": "discolm-german-7b-v1",
         "llama-2-7b": "llama-2-7b-chat-int8",
         "llama-3.1-70b": "llama-3.1-70b-turbo",
+        "llama-3.1-8b": "llama-3.1-8b-turbo",
         "neural-7b": "neural-chat-7b-v3-1",
         "zephyr-7b": "zephyr-7b-beta",
         "evil": "any-uncensored",
@@ -66,29 +69,51 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
 
     @classmethod
     def get_models(cls):
+        """Get available models with error handling"""
         if not cls.image_models:
             try:
-                url = "https://api.airforce/imagine2/models"
-                response = requests.get(url, verify=False)
+                response = requests.get(
+                    f"{cls.url}/imagine2/models",
+                    headers={
+                        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
+                    }
+                )
                 response.raise_for_status()
                 cls.image_models = response.json()
-                cls.image_models.extend(cls.additional_models_imagine)
+                if isinstance(cls.image_models, list):
+                    cls.image_models.extend(cls.additional_models_imagine)
+                else:
+                    cls.image_models = cls.additional_models_imagine.copy()
             except Exception as e:
                 debug.log(f"Error fetching image models: {e}")
+                cls.image_models = cls.additional_models_imagine.copy()
 
         if not cls.models:
             try:
-                url = "https://api.airforce/models"
-                response = requests.get(url, verify=False)
+                response = requests.get(
+                    f"{cls.url}/models",
+                    headers={
+                        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
+                    }
+                )
                 response.raise_for_status()
                 data = response.json()
-                cls.models = [model['id'] for model in data['data']]
-                cls.models.extend(cls.image_models)
-                cls.models = [model for model in cls.models if model not in cls.hidden_models]
+                if isinstance(data, dict) and 'data' in data:
+                    cls.models = [model['id'] for model in data['data']]
+                    cls.models.extend(cls.image_models)
+                    cls.models = [model for model in cls.models if model not in cls.hidden_models]
+                else:
+                    cls.models = list(cls.model_aliases.keys())
             except Exception as e:
                 debug.log(f"Error fetching text models: {e}")
+                cls.models = list(cls.model_aliases.keys())
+
+        return cls.models or list(cls.model_aliases.keys())
 
-        return cls.models
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        """Get the actual model name from alias"""
+        return cls.model_aliases.get(model, model)
 
     @classmethod
     async def check_api_key(cls, api_key: str) -> bool:
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index fd788576..e4d4cb96 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -6,16 +6,18 @@ import string
 import json
 import re
 import aiohttp
-
-import json
+import asyncio
 from pathlib import Path
 
 from ..typing import AsyncResult, Messages, ImagesType
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..image import ImageResponse, to_data_uri
 from ..cookies import get_cookies_dir
+from ..web_search import get_search_message
 from .helper import format_prompt
 
+from .. import debug
+
 class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Blackbox AI"
     url = "https://www.blackbox.ai"
@@ -30,12 +32,15 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     default_vision_model = default_model
     default_image_model = 'flux'
     image_models = ['ImageGeneration', 'repomap']
-    vision_models = [default_model, 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
+    vision_models = [default_vision_model, 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
+
+    web_search_models = ['blackboxai', 'meta-llama/Llama-3.3-70B-Instruct-Turbo', 'meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro']
 
     userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']
 
     agentMode = {
         'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
+        # 'meta-llama/Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
         'mistralai/Mistral-7B-Instruct-v0.2': {'mode': True, 'id': "mistralai/Mistral-7B-Instruct-v0.2", 'name': "Mistral-(7B)-Instruct-v0.2"},
         'deepseek-ai/deepseek-llm-67b-chat': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
@@ -88,20 +93,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'builder Agent': {'mode': True, 'id': "builder Agent"},
     }
 
-    additional_prefixes = {
-        'gpt-4o': '@GPT-4o',
-        'gemini-pro': '@Gemini-PRO',
-        'claude-sonnet-3.5': '@Claude-Sonnet-3.5'
-    }
-
-    model_prefixes = {
-        **{
-            mode: f"@{value['id']}" for mode, value in trendingAgentMode.items()
-            if mode not in ["gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-70b", "llama-3.1-405b", "repomap"]
-        },
-        **additional_prefixes
-    }
-
     models = list(dict.fromkeys([default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))
 
     model_aliases = {
@@ -120,7 +111,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         ### image ###
         "flux": "ImageGeneration",
     }
-    
+
     @classmethod
     def _get_cache_file(cls) -> Path:
         dir = Path(get_cookies_dir())
@@ -136,7 +127,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 data = json.load(f)
                 return data.get('validated_value')
             except Exception as e:
-                print(f"Error reading cache file: {e}")
+                debug.log(f"Error reading cache file: {e}")
         return None
 
     @classmethod
@@ -146,68 +137,69 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
             with open(cache_file, 'w') as f:
                 json.dump({'validated_value': value}, f)
         except Exception as e:
-            print(f"Error writing to cache file: {e}")
+            debug.log(f"Error writing to cache file: {e}")
 
     @classmethod
     async def fetch_validated(cls):
         cached_value = cls._load_cached_value()
+
+        async with aiohttp.ClientSession() as session:
+            # Let's try both URLs
+            urls_to_try = [
+                "https://www.blackbox.ai",
+                "https://api.blackbox.ai"
+            ]
+
+            for base_url in urls_to_try:
+                try:
+                    async with session.get(base_url) as response:
+                        if response.status != 200:
+                            continue
+
+                        page_content = await response.text()
+                        js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content)
+
+                        if not js_files:
+                            js_files = re.findall(r'static/js/[a-zA-Z0-9-]+\.js', page_content)
+
+                        uuid_format = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']'
+
+                        def is_valid_context(text_around):
+                            return any(char + '=' in text_around for char in 'abcdefghijklmnopqrstuvwxyz')
+
+                        for js_file in js_files:
+                            js_url = f"{base_url}/_next/{js_file}"
+                            try:
+                                async with session.get(js_url) as js_response:
+                                    if js_response.status == 200:
+                                        js_content = await js_response.text()
+                                        for match in re.finditer(uuid_format, js_content):
+                                            start = max(0, match.start() - 10)
+                                            end = min(len(js_content), match.end() + 10)
+                                            context = js_content[start:end]
+
+                                            if is_valid_context(context):
+                                                validated_value = match.group(1)
+                                                cls._save_cached_value(validated_value)
+                                                return validated_value
+                            except Exception:
+                                continue
+
+                except Exception as e:
+                    debug.log(f"Error trying {base_url}: {e}")
+                    continue
+
+        # If we failed to get a new validated_value, we return the cached one
         if cached_value:
             return cached_value
-
-        async with aiohttp.ClientSession() as session:
-            try:
-                async with session.get(cls.url) as response:
-                    if response.status != 200:
-                        print("Failed to load the page.")
-                        return cached_value
-
-                    page_content = await response.text()
-                    js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content)
-
-                    uuid_format = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']'
-
-                    def is_valid_context(text_around):
-                        return any(char + '=' in text_around for char in 'abcdefghijklmnopqrstuvwxyz')
-
-                    for js_file in js_files:
-                        js_url = f"{cls.url}/_next/{js_file}"
-                        async with session.get(js_url) as js_response:
-                            if js_response.status == 200:
-                                js_content = await js_response.text()
-                                for match in re.finditer(uuid_format, js_content):
-                                    start = max(0, match.start() - 10)
-                                    end = min(len(js_content), match.end() + 10)
-                                    context = js_content[start:end]
-
-                                    if is_valid_context(context):
-                                        validated_value = match.group(1)
-                                        cls._save_cached_value(validated_value)
-                                        return validated_value
-            except Exception as e:
-                print(f"Error fetching validated value: {e}")
-
-        return cached_value
+
+        raise RuntimeError("Failed to get validated value from both URLs")
 
     @staticmethod
     def generate_id(length=7):
         characters = string.ascii_letters + string.digits
         return ''.join(random.choice(characters) for _ in range(length))
 
-    @classmethod
-    def add_prefix_to_messages(cls, messages: Messages, model: str) -> Messages:
-        prefix = cls.model_prefixes.get(model, "")
-        if not prefix:
-            return messages
-
-        new_messages = []
-        for message in messages:
-            new_message = message.copy()
-            if message['role'] == 'user':
-                new_message['content'] = (prefix + " " + message['content']).strip()
-            new_messages.append(new_message)
-
-        return new_messages
-
     @classmethod
     async def create_async_generator(
         cls,
@@ -217,93 +209,135 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         proxy: str = None,
         web_search: bool = False,
         images: ImagesType = None,
-        top_p: float = None,
-        temperature: float = None,
+        top_p: float = 0.9,
+        temperature: float = 0.5,
         max_tokens: int = None,
+        max_retries: int = 3,
+        delay: int = 1,
         **kwargs
     ) -> AsyncResult:
-        message_id = cls.generate_id()
-        messages = cls.add_prefix_to_messages(messages, model)
-        validated_value = await cls.fetch_validated()
-        formatted_message = format_prompt(messages)
-        model = cls.get_model(model)
+
+        use_internal_search = web_search and model in cls.web_search_models
 
-        messages = [{"id": message_id, "content": formatted_message, "role": "user"}]
-
-        if images is not None:
-            messages[-1]['data'] = {
-                "imagesData": [
-                    {
-                        "filePath": f"MultipleFiles/{image_name}",
-                        "contents": to_data_uri(image)
-                    }
-                    for image, image_name in images
-                ],
-                "fileText": "",
-                "title": ""
+        if web_search and not use_internal_search:
+
+            def run_search():
+                return get_search_message(messages[-1]["content"])
+
+            import concurrent.futures
+            with concurrent.futures.ThreadPoolExecutor() as executor:
+                messages[-1]["content"] = await asyncio.get_event_loop().run_in_executor(
+                    executor, run_search
+                )
+            web_search = False
+
+        async def process_request():
+            validated_value = await cls.fetch_validated()
+
+            if not validated_value:
+                raise RuntimeError("Failed to get validated value")
+
+            formatted_message = format_prompt(messages)
+            current_model = cls.get_model(model)
+
+            chat_id = cls.generate_id()
+            current_messages = [{"id": chat_id, "content": formatted_message, "role": "user"}]
+
+            if images is not None:
+                current_messages[-1]['data'] = {
+                    "imagesData": [
+                        {
+                            "filePath": f"/{image_name}",
+                            "contents": to_data_uri(image)
+                        }
+                        for image, image_name in images
+                    ],
+                    "fileText": "",
+                    "title": ""
+                }
+
+            headers = {
+                'accept': '*/*',
+                'accept-language': 'en-US,en;q=0.9',
+                'content-type': 'application/json',
+                'origin': 'https://www.blackbox.ai',
+                'referer': 'https://www.blackbox.ai/',
+                'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
+            }
+
+            data = {
+                "agentMode": cls.agentMode.get(current_model, {}) if current_model in cls.agentMode else {},
+                "clickedAnswer2": False,
+                "clickedAnswer3": False,
+                "clickedForceWebSearch": False,
+                "codeModelMode": True,
+                "deepSearchMode": False,
+                "domains": None,
+                "githubToken": None,
+                "id": chat_id,
+                "imageGenerationMode": False,
+                "isChromeExt": False,
+                "isMicMode": False,
+                "maxTokens": max_tokens,
+                "messages": current_messages,
+                "mobileClient": False,
+                "playgroundTemperature": temperature,
+                "playgroundTopP": top_p,
+                "previewToken": None,
+                "trendingAgentMode": cls.trendingAgentMode.get(current_model, {}) if current_model in cls.trendingAgentMode else {},
+                "userId": None,
+                "userSelectedModel": current_model if current_model in cls.userSelectedModel else None,
+                "userSystemPrompt": None,
+                "validated": validated_value,
+                "visitFromDelta": False,
+                "webSearchModePrompt": False,
+                "webSearchMode": use_internal_search
             }
 
-        headers = {
-            'accept': '*/*',
-            'accept-language': 'en-US,en;q=0.9',
-            'content-type': 'application/json',
-            'origin': cls.url,
-            'referer': f'{cls.url}/',
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
-        }
-
-        data = {
-            "agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {},
-            "clickedAnswer2": False,
-            "clickedAnswer3": False,
-            "clickedForceWebSearch": False,
-            "codeModelMode": True,
-            "deepSearchMode": False,
-            "githubToken": None,
-            "id": message_id,
-            "imageGenerationMode": False,
-            "isChromeExt": False,
-            "isMicMode": False,
-            "maxTokens": max_tokens,
-            "messages": messages,
-            "mobileClient": False,
-            "playgroundTemperature": temperature,
-            "playgroundTopP": top_p,
-            "previewToken": None,
-            "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {},
-            "userId": None,
-            "userSelectedModel": model if model in cls.userSelectedModel else None,
-            "userSystemPrompt": None,
-            "validated": validated_value,
-            "visitFromDelta": False,
-            "webSearchModePrompt": False,
-            "webSearchMode": web_search
-        }
-
-        async with ClientSession(headers=headers) as session:
-            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                response_text = await response.text()
-
-                if model in cls.image_models:
-                    image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text)
-                    if image_matches:
-                        image_url = image_matches[0]
-                        yield ImageResponse(image_url, prompt)
-                        return
-
-                response_text = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', response_text, flags=re.DOTALL)
-                response_text = re.sub(r'and for API requests replace https://www.blackbox.ai with https://api.blackbox.ai', '', response_text, flags=re.DOTALL)
-
-                json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL)
-                if json_match:
-                    search_results = json.loads(json_match.group(1))
-                    answer = response_text.split('$~~~$')[-1].strip()
-
-                    formatted_response = f"{answer}\n\n**Source:**"
-                    for i, result in enumerate(search_results, 1):
-                        formatted_response += f"\n{i}. {result['title']}: {result['link']}"
-
-                    yield formatted_response
-                else:
-                    yield response_text.strip()
+            for attempt in range(max_retries):
+                try:
+                    async with ClientSession(headers=headers) as session:
+                        async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                            response.raise_for_status()
+                            response_text = await response.text()
+
+                            if current_model in cls.image_models:
+                                image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text)
+                                if image_matches:
+                                    yield ImageResponse(image_matches[0], prompt)
+                                    return
+
+                            response_text = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', response_text, flags=re.DOTALL)
+                            response_text = re.sub(r'and for API requests replace https://www.blackbox.ai with https://api.blackbox.ai', '', response_text, flags=re.DOTALL)
+
+                            response_text = response_text.strip()
+
+                            if not response_text:
+                                raise ValueError("Empty response received")
+
+                            json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL)
+                            if json_match:
+                                search_results = json.loads(json_match.group(1))
+                                answer = response_text.split('$~~~$')[-1].strip()
+
+                                formatted_response = f"{answer}\n\n**Source:**"
+                                for i, result in enumerate(search_results, 1):
+                                    formatted_response += f"\n{i}. {result['title']}: {result['link']}"
+
+                                yield formatted_response
+                            else:
+                                yield response_text
+                            return
+
+                except Exception as e:
+                    debug.log(f"Error: {str(e)}")
+                    if attempt == max_retries - 1:
+                        raise RuntimeError("Failed after all retries")
+                    else:
+                        wait_time = delay * (2 ** attempt) + random.uniform(0, 1)
+                        debug.log(f"Attempt {attempt + 1} failed. Retrying in {wait_time:.2f} seconds...")
+                        await asyncio.sleep(wait_time)
+
+        async for chunk in process_request():
+            yield chunk
diff --git a/g4f/Provider/BlackboxCreateAgent.py b/g4f/Provider/BlackboxCreateAgent.py
new file mode 100644
index 00000000..d329ea0e
--- /dev/null
+++ b/g4f/Provider/BlackboxCreateAgent.py
@@ -0,0 +1,257 @@
+from __future__ import annotations
+
+import random
+import asyncio
+import re
+import json
+from pathlib import Path
+from aiohttp import ClientSession
+from typing import AsyncIterator, Optional
+
+from ..typing import AsyncResult, Messages
+from ..image import ImageResponse
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..cookies import get_cookies_dir
+
+from .. import debug
+
+
+class BlackboxCreateAgent(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://www.blackbox.ai"
+    api_endpoints = {
+        "llama-3.1-70b": "https://www.blackbox.ai/api/improve-prompt",
+        "flux": "https://www.blackbox.ai/api/image-generator"
+    }
+
+    working = True
+    supports_system_message = True
+    supports_message_history = True
+
+    default_model = 'llama-3.1-70b'
+    chat_models = [default_model]
+    image_models = ['flux']
+    models = [*chat_models, *image_models]
+
+    @classmethod
+    def _get_cache_file(cls) -> Path:
+        """Returns the path to the cache file."""
+        dir = Path(get_cookies_dir())
+        dir.mkdir(exist_ok=True)
+        return dir / 'blackbox2.json'
+
+    @classmethod
+    def _load_cached_value(cls) -> str | None:
+        cache_file = cls._get_cache_file()
+        if cache_file.exists():
+            try:
+                with open(cache_file, 'r') as f:
+                    data = json.load(f)
+                    return data.get('validated_value')
+            except Exception as e:
+                debug.log(f"Error reading cache file: {e}")
+        return None
+
+    @classmethod
+    def _save_cached_value(cls, value: str):
+        cache_file = cls._get_cache_file()
+        try:
+            with open(cache_file, 'w') as f:
+                json.dump({'validated_value': value}, f)
+        except Exception as e:
+            debug.log(f"Error writing to cache file: {e}")
+
+    @classmethod
+    async def fetch_validated(cls) -> Optional[str]:
+        """
+        Asynchronously retrieves the validated value from cache or website.
+
+        :return: The validated value or None if retrieval fails.
+        """
+        cached_value = cls._load_cached_value()
+        if cached_value:
+            return cached_value
+
+        js_file_pattern = r'static/chunks/\d{4}-[a-fA-F0-9]+\.js'
+        v_pattern = r'j\s*=\s*[\'"]([0-9a-fA-F-]{36})[\'"]'
+
+        def is_valid_context(text: str) -> bool:
+            """Checks if the context is valid."""
+            return any(char + '=' in text for char in 'abcdefghijklmnopqrstuvwxyz')
+
+        async with ClientSession() as session:
+            try:
+                async with session.get(cls.url) as response:
+                    if response.status != 200:
+                        debug.log("Failed to download the page.")
+                        return cached_value
+
+                    page_content = await response.text()
+                    js_files = re.findall(js_file_pattern, page_content)
+
+                    for js_file in js_files:
+                        js_url = f"{cls.url}/_next/{js_file}"
+                        async with session.get(js_url) as js_response:
+                            if js_response.status == 200:
+                                js_content = await js_response.text()
+                                for match in re.finditer(v_pattern, js_content):
+                                    start = max(0, match.start() - 50)
+                                    end = min(len(js_content), match.end() + 50)
+                                    context = js_content[start:end]
+
+                                    if is_valid_context(context):
+                                        validated_value = match.group(1)
+                                        cls._save_cached_value(validated_value)
+                                        return validated_value
+            except Exception as e:
+                debug.log(f"Error while retrieving validated_value: {e}")
+
+        return cached_value
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        prompt: str = None,
+        **kwargs
+    ) -> AsyncIterator[str | ImageResponse]:
+        """
+        Creates an async generator for text or image generation.
+        """
+        if model in cls.chat_models:
+            async for text in cls._generate_text(model, messages, proxy=proxy, **kwargs):
+                yield text
+        elif model in cls.image_models:
+            prompt = messages[-1]['content']
+            async for image in cls._generate_image(model, prompt, proxy=proxy, **kwargs):
+                yield image
+        else:
+            raise ValueError(f"Model {model} not supported")
+
+    @classmethod
+    async def _generate_text(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        max_retries: int = 3,
+        delay: int = 1,
+        max_tokens: int = None,
+        **kwargs
+    ) -> AsyncIterator[str]:
+        headers = cls._get_headers()
+
+        for outer_attempt in range(2):  # Add outer loop for retrying with a new key
+            validated_value = await cls.fetch_validated()
+            if not validated_value:
+                raise RuntimeError("Failed to get validated value")
+
+            async with ClientSession(headers=headers) as session:
+                api_endpoint = cls.api_endpoints[model]
+
+                data = {
+                    "messages": messages,
+                    "max_tokens": max_tokens,
+                    "validated": validated_value
+                }
+
+                for attempt in range(max_retries):
+                    try:
+                        async with session.post(api_endpoint, json=data, proxy=proxy) as response:
+                            response.raise_for_status()
+                            response_data = await response.json()
+
+                            if response_data.get('status') == 200 and 'prompt' in response_data:
+                                yield response_data['prompt']
+                                return  # Successful execution
+                            else:
+                                raise KeyError("Invalid response format or missing 'prompt' key")
+                    except Exception as e:
+                        if attempt == max_retries - 1:
+                            if outer_attempt == 0:  # If this is the first attempt with this key
+                                # Remove the cached key and try to get a new one
+                                cls._save_cached_value("")
+                                debug.log("Invalid key, trying to get a new one...")
+                                break  # Exit the inner loop to get a new key
+                            else:
+                                raise RuntimeError(f"Error after all attempts: {str(e)}")
+                        else:
+                            wait_time = delay * (2 ** attempt) + random.uniform(0, 1)
+                            debug.log(f"Attempt {attempt + 1} failed. Retrying in {wait_time:.2f} seconds...")
+                            await asyncio.sleep(wait_time)
+
+    @classmethod
+    async def _generate_image(
+        cls,
+        model: str,
+        prompt: str,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncIterator[ImageResponse]:
+        headers = {
+            **cls._get_headers()
+        }
+
+        api_endpoint = cls.api_endpoints[model]
+
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "query": prompt
+            }
+
+            async with session.post(api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                response_data = await response.json()
+
+                if 'markdown' in response_data:
+                    # Extract URL from markdown format: ![](url)
+                    image_url = re.search(r'\!\[\]\((.*?)\)', response_data['markdown'])
+                    if image_url:
+                        yield ImageResponse(images=[image_url.group(1)], alt=prompt)
+                    else:
+                        raise ValueError("Could not extract image URL from markdown")
+                else:
+                    raise KeyError("'markdown' key not found in response")
+
+    @staticmethod
+    def _get_headers() -> dict:
+        return {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'authorization': 'Bearer 56c8eeff9971269d7a7e625ff88e8a83a34a556003a5c87c289ebe9a3d8a3d2c',
+            'content-type': 'application/json',
+            'origin': 'https://www.blackbox.ai',
+            'referer': 'https://www.blackbox.ai',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
+        }
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        """
+        Creates an async response for the provider.
+
+        Args:
+            model: The model to use
+            messages: The messages to process
+            proxy: Optional proxy to use
+            **kwargs: Additional arguments
+
+        Returns:
+            AsyncResult: The response from the provider
+        """
+        if model in cls.chat_models:
+            async for text in cls._generate_text(model, messages, proxy=proxy, **kwargs):
+                return text
+        elif model in cls.image_models:
+            prompt = messages[-1]['content']
+            async for image in cls._generate_image(model, prompt, proxy=proxy, **kwargs):
+                return image
+        else:
+            raise ValueError(f"Model {model} not supported")
diff --git a/g4f/Provider/ClaudeSon.py b/g4f/Provider/ClaudeSon.py
new file mode 100644
index 00000000..5adc4f38
--- /dev/null
+++ b/g4f/Provider/ClaudeSon.py
@@ -0,0 +1,46 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class ClaudeSon(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://claudeson.net"
+    api_endpoint = "https://claudeson.net/api/coze/chat"
+    working = True
+
+    supports_system_message = True
+    supports_message_history = True
+
+    default_model = 'claude-3.5-sonnet'
+    models = [default_model]
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": "https://claudeson.net",
+            "referer": "https://claudeson.net/",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "textStr": format_prompt(messages),
+                "type": "company"
+            }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content:
+                    if chunk:
+                        yield chunk.decode()
diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py
index 6453d167..48b87b9b 100644
--- a/g4f/Provider/DeepInfraChat.py
+++ b/g4f/Provider/DeepInfraChat.py
@@ -1,17 +1,25 @@
 from __future__ import annotations
 
+import json
+
+from aiohttp import ClientSession
 from ..typing import AsyncResult, Messages
-from .needs_auth import OpenaiAPI
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
-class DeepInfraChat(OpenaiAPI):
-    label = "DeepInfra Chat"
+class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://deepinfra.com/chat"
+    api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
     working = True
-    api_base = "https://api.deepinfra.com/v1/openai"
-
+    needs_auth = False
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+
     default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
     models = [
+        'meta-llama/Llama-3.3-70B-Instruct',
         'meta-llama/Meta-Llama-3.1-8B-Instruct',
+        'meta-llama/Llama-3.3-70B-Instruct-Turbo',
         default_model,
         'Qwen/QwQ-32B-Preview',
         'microsoft/WizardLM-2-8x22B',
@@ -20,7 +28,8 @@ class DeepInfraChat(OpenaiAPI):
         'nvidia/Llama-3.1-Nemotron-70B-Instruct',
     ]
     model_aliases = {
+        "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
         "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "qwq-32b": "Qwen/QwQ-32B-Preview",
         "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
@@ -30,20 +40,48 @@ class DeepInfraChat(OpenaiAPI):
     }
 
     @classmethod
-    def create_async_generator(
+    async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
         proxy: str = None,
         **kwargs
     ) -> AsyncResult:
+        model = cls.get_model(model)
+
         headers = {
             'Accept-Language': 'en-US,en;q=0.9',
             'Content-Type': 'application/json',
             'Origin': 'https://deepinfra.com',
             'Referer': 'https://deepinfra.com/',
-            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
             'X-Deepinfra-Source': 'web-page',
             'accept': 'text/event-stream',
         }
-        return super().create_async_generator(model, messages, proxy, headers=headers, **kwargs)
\ No newline at end of file
+
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "model": model,
+                "messages": messages,
+                "stream": True
+            }
+
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content:
+                    if chunk:
+                        chunk_text = chunk.decode()
+                        try:
+                            # Handle streaming response
+                            if chunk_text.startswith("data: "):
+                                if chunk_text.strip() == "data: [DONE]":
+                                    continue
+                                chunk_data = json.loads(chunk_text[6:])
+                                if content := chunk_data["choices"][0]["delta"].get("content"):
+                                    yield content
+                            # Handle non-streaming response
+                            else:
+                                chunk_data = json.loads(chunk_text)
+                                if content := chunk_data["choices"][0]["message"].get("content"):
+                                    yield content
+                        except (json.JSONDecodeError, KeyError):
+                            continue
diff --git a/g4f/Provider/Flux.py b/g4f/Provider/Flux.py
deleted file mode 100644
index d3949153..00000000
--- a/g4f/Provider/Flux.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from ..image import ImageResponse, ImagePreview
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-
-class Flux(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "Flux (HuggingSpace)"
-    url = "https://black-forest-labs-flux-1-dev.hf.space"
-    api_endpoint = "/gradio_api/call/infer"
-    working = True
-    default_model = 'flux-dev'
-    models = [default_model]
-    image_models = [default_model]
-
-    @classmethod
-    async def create_async_generator(
-        cls, model: str, messages: Messages, prompt: str = None, api_key: str = None, proxy: str = None, **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "Content-Type": "application/json",
-            "Accept": "application/json",
-        }
-        if api_key is not None:
-            headers["Authorization"] = f"Bearer {api_key}"
-        async with ClientSession(headers=headers) as session:
-            prompt = messages[-1]["content"] if prompt is None else prompt
-            data = {
-                "data": [prompt, 0, True, 1024, 1024, 3.5, 28]
-            }
-            async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                event_id = (await response.json()).get("event_id")
-                async with session.get(f"{cls.url}{cls.api_endpoint}/{event_id}") as event_response:
-                    event_response.raise_for_status()
-                    event = None
-                    async for chunk in event_response.content:
-                        if chunk.startswith(b"event: "):
-                            event = chunk[7:].decode(errors="replace").strip()
-                        if chunk.startswith(b"data: "):
-                            if event == "error":
-                                raise RuntimeError(f"GPU token limit exceeded: {chunk.decode(errors='replace')}")
-                            if event in ("complete", "generating"):
-                                try:
-                                    data = json.loads(chunk[6:])
-                                    if data is None:
-                                        continue
-                                    url = data[0]["url"]
-                                except (json.JSONDecodeError, KeyError, TypeError) as e:
-                                    raise RuntimeError(f"Failed to parse image URL: {chunk.decode(errors='replace')}", e)
-                                if event == "generating":
-                                    yield ImagePreview(url, prompt)
-                                else:
-                                    yield ImageResponse(url, prompt)
-                                    break
diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index dece4c39..f1ad0031 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -1,109 +1,228 @@
 from __future__ import annotations
 
 from urllib.parse import quote
+import json
 import random
 import requests
+from typing import Optional
 from aiohttp import ClientSession
 
+from ..requests.raise_for_status import raise_for_status
 from ..typing import AsyncResult, Messages
 from ..image import ImageResponse
-from ..requests.raise_for_status import raise_for_status
-from ..requests.aiohttp import get_connector
 from .needs_auth.OpenaiAPI import OpenaiAPI
-from .helper import format_prompt_max_length
 
 class PollinationsAI(OpenaiAPI):
     label = "Pollinations AI"
     url = "https://pollinations.ai"
+
     working = True
-    needs_auth = False
+    needs_auth = True
     supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+
+    # API endpoints base
     api_base = "https://text.pollinations.ai/openai"
-    
+
+    # API endpoints
+    text_api_endpoint = "https://text.pollinations.ai"
+    image_api_endpoint = "https://image.pollinations.ai"
+
+    # Models configuration
     default_model = "openai"
+    default_image_model = "flux"
+
+    image_models = []
+    models = []
+
     additional_models_image = ["midjourney", "dall-e-3"]
     additional_models_text = ["sur", "sur-mistral", "claude"]
     model_aliases = {
         "gpt-4o": "openai",
         "mistral-nemo": "mistral",
-        "llama-3.1-70b": "llama", #
+        "llama-3.1-70b": "llama",
         "gpt-4": "searchgpt",
         "gpt-4": "claude",
-        "qwen-2.5-coder-32b": "qwen-coder", 
-        "claude-3.5-sonnet": "sur", 
+        "qwen-2.5-coder-32b": "qwen-coder",
+        "claude-3.5-sonnet": "sur",
     }
-    
-    headers = {
-        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
-    }
-    
+
     @classmethod
     def get_models(cls, **kwargs):
+        # Initialize model lists if not exists
         if not hasattr(cls, 'image_models'):
             cls.image_models = []
+        if not hasattr(cls, 'text_models'):
+            cls.text_models = []
+
+        # Fetch image models if not cached
         if not cls.image_models:
             url = "https://image.pollinations.ai/models"
-            response = requests.get(url, headers=cls.headers)
+            response = requests.get(url)
             raise_for_status(response)
             cls.image_models = response.json()
             cls.image_models.extend(cls.additional_models_image)
-        if not hasattr(cls, 'models'):
-            cls.models = []
-        if not cls.models:
+
+        # Fetch text models if not cached
+        if not cls.text_models:
             url = "https://text.pollinations.ai/models"
-            response = requests.get(url, headers=cls.headers)
+            response = requests.get(url)
             raise_for_status(response)
-            cls.models = [model.get("name") for model in response.json()]
-            cls.models.extend(cls.image_models)
-            cls.models.extend(cls.additional_models_text)
-        return cls.models
+            cls.text_models = [model.get("name") for model in response.json()]
+            cls.text_models.extend(cls.additional_models_text)
+
+        # Return combined models
+        return cls.text_models + cls.image_models
 
     @classmethod
     async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
-        prompt: str = None,
-        api_key: str = None,
         proxy: str = None,
-        seed: str = None,
+        # Image specific parameters
+        prompt: str = None,
         width: int = 1024,
         height: int = 1024,
+        seed: Optional[int] = None,
+        nologo: bool = True,
+        private: bool = False,
+        enhance: bool = False,
+        safe: bool = False,
+        # Text specific parameters
+        api_key: str = None,
+        temperature: float = 0.5,
+        presence_penalty: float = 0,
+        top_p: float = 1,
+        frequency_penalty: float = 0,
+        stream: bool = True,
         **kwargs
     ) -> AsyncResult:
         model = cls.get_model(model)
-        if cls.get_models() and model in cls.image_models:
-            async for response in cls._generate_image(model, messages, prompt, proxy, seed, width, height):
-                yield response
-        elif model in cls.models:
-            async for response in cls._generate_text(model, messages, api_key, proxy, **kwargs):
-                yield response
+
+        # Check if models
+        # Image generation
+        if model in cls.image_models:
+            async for result in cls._generate_image(
+                model=model,
+                messages=messages,
+                prompt=prompt,
+                proxy=proxy,
+                width=width,
+                height=height,
+                seed=seed,
+                nologo=nologo,
+                private=private,
+                enhance=enhance,
+                safe=safe
+            ):
+                yield result
         else:
-            raise ValueError(f"Unknown model: {model}")
+            # Text generation
+            async for result in cls._generate_text(
+                model=model,
+                messages=messages,
+                proxy=proxy,
+                api_key=api_key,
+                temperature=temperature,
+                presence_penalty=presence_penalty,
+                top_p=top_p,
+                frequency_penalty=frequency_penalty,
+                stream=stream
+            ):
+                yield result
 
     @classmethod
-    async def _generate_image(cls, model: str, messages: Messages, prompt: str = None, proxy: str = None, seed: str = None, width: int = 1024, height: int = 1024):
-        if prompt is None:
-            prompt = messages[-1]["content"]
+    async def _generate_image(
+        cls,
+        model: str,
+        messages: Messages,
+        prompt: str,
+        proxy: str,
+        width: int,
+        height: int,
+        seed: Optional[int],
+        nologo: bool,
+        private: bool,
+        enhance: bool,
+        safe: bool
+    ) -> AsyncResult:
         if seed is None:
-            seed = random.randint(0, 100000)
-        image = f"https://image.pollinations.ai/prompt/{quote(prompt)}?width={width}&height={height}&seed={int(seed)}&nofeed=true&nologo=true&model={quote(model)}"
-        async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session:
-            async with session.get(image) as response:
-                await raise_for_status(response)
-                yield ImageResponse(image, prompt)
+            seed = random.randint(0, 10000)
+
+        headers = {
+            'Accept': '*/*',
+            'Accept-Language': 'en-US,en;q=0.9',
+            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+        }
+
+        params = {
+            "seed": seed,
+            "width": width,
+            "height": height,
+            "model": model,
+            "nologo": nologo,
+            "private": private,
+            "enhance": enhance,
+            "safe": safe
+        }
+        params = {k: v for k, v in params.items() if v is not None}
+
+        async with ClientSession(headers=headers) as session:
+            prompt = quote(messages[-1]["content"])
+            param_string = "&".join(f"{k}={v}" for k, v in params.items())
+            url = f"{cls.image_api_endpoint}/prompt/{prompt}?{param_string}"
+
+            async with session.head(url, proxy=proxy) as response:
+                if response.status == 200:
+                    image_response = ImageResponse(images=url, alt=messages[-1]["content"])
+                    yield image_response
 
     @classmethod
-    async def _generate_text(cls, model: str, messages: Messages, api_key: str = None, proxy: str = None, **kwargs):
+    async def _generate_text(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str,
+        api_key: str,
+        temperature: float,
+        presence_penalty: float,
+        top_p: float,
+        frequency_penalty: float,
+        stream: bool
+    ) -> AsyncResult:
         if api_key is None:
-            async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session:
-                prompt = format_prompt_max_length(messages, 5000)
-                async with session.get(f"https://text.pollinations.ai/{quote(prompt)}?model={quote(model)}") as response:
-                    await raise_for_status(response)
-                    async for line in response.content.iter_any():
-                        yield line.decode(errors="ignore")
-        else:
-            async for chunk in super().create_async_generator(
-                model, messages, proxy=proxy, **kwargs
-            ):
-                yield chunk
\ No newline at end of file
+            api_key = "dummy"  # Default value if api_key is not provided
+
+        headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "authorization": f"Bearer {api_key}",
+            "content-type": "application/json",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
+        }
+
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "messages": messages,
+                "model": model,
+                "temperature": temperature,
+                "presence_penalty": presence_penalty,
+                "top_p": top_p,
+                "frequency_penalty": frequency_penalty,
+                "jsonMode": False,
+                "stream": stream
+            }
+
+            async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content:
+                    if chunk:
+                        decoded_chunk = chunk.decode()
+                        try:
+                            json_response = json.loads(decoded_chunk)
+                            content = json_response['choices'][0]['message']['content']
+                            yield content
+                        except json.JSONDecodeError:
+                            yield decoded_chunk
diff --git a/g4f/Provider/Prodia.py b/g4f/Provider/Prodia.py
index 847da6d7..76516e2f 100644
--- a/g4f/Provider/Prodia.py
+++ b/g4f/Provider/Prodia.py
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 from aiohttp import ClientSession
 import asyncio
+import random
+from typing import Optional
 
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
@@ -13,10 +14,11 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
 
     default_model = 'absolutereality_v181.safetensors [3d9d4d2b]'
+    default_image_model = default_model
     image_models = [
         '3Guofeng3_v34.safetensors [50f420de]',
         'absolutereality_V16.safetensors [37db0fc3]',
-        default_model,
+        default_image_model,
         'amIReal_V41.safetensors [0a8a2e61]',
         'analog-diffusion-1.0.ckpt [9ca13f02]',
         'aniverse_v30.safetensors [579e6f85]',
@@ -78,7 +80,7 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
         'shoninsBeautiful_v10.safetensors [25d8c546]',
         'theallys-mix-ii-churned.safetensors [5d9225a4]',
         'timeless-1.0.ckpt [7c4971d4]',
-        'toonyou_beta6.safetensors [980f6b15]',
+        'toonyou_beta6.safetensors [980f6b15]'
     ]
     models = [*image_models]
 
@@ -100,13 +102,16 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
         negative_prompt: str = "",
         steps: str = 20, # 1-25
         cfg: str = 7, # 0-20
-        seed: str = "-1",
+        seed: Optional[int] = None,
         sampler: str = "DPM++ 2M Karras", # "Euler", "Euler a", "Heun", "DPM++ 2M Karras", "DPM++ SDE Karras", "DDIM"
         aspect_ratio: str = "square", # "square", "portrait", "landscape"
         **kwargs
     ) -> AsyncResult:
         model = cls.get_model(model)
 
+        if seed is None:
+            seed = random.randint(0, 10000)
+
         headers = {
             "accept": "*/*",
             "accept-language": "en-US,en;q=0.9",
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 04ff8396..82e8da06 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -10,33 +10,34 @@ from .selenium import *
 from .needs_auth import *
 from .not_working import *
 from .local import *
+from .hf_space import *
 
-from .Airforce import Airforce
-from .AmigoChat import AmigoChat
-from .Blackbox import Blackbox
-from .Blackbox2 import Blackbox2
-from .ChatGpt import ChatGpt
-from .ChatGptEs import ChatGptEs
-from .Cloudflare import Cloudflare
-from .Copilot import Copilot
-from .DarkAI import DarkAI
-from .DDG import DDG
-from .DeepInfraChat import DeepInfraChat
-from .Flux import Flux
-from .Free2GPT import Free2GPT
-from .FreeGpt import FreeGpt
-from .GizAI import GizAI
-from .Liaobots import Liaobots
-from .Mhystical import Mhystical
-from .PerplexityLabs import PerplexityLabs
-from .Pi import Pi
-from .Pizzagpt import Pizzagpt
-from .PollinationsAI import PollinationsAI
-from .Prodia import Prodia
-from .ReplicateHome import ReplicateHome
-from .RubiksAI import RubiksAI
-from .TeachAnything import TeachAnything
-from .You import You
+from .Airforce import Airforce
+from .AmigoChat import AmigoChat
+from .Blackbox import Blackbox
+from .BlackboxCreateAgent import BlackboxCreateAgent
+from .ChatGpt import ChatGpt
+from .ChatGptEs import ChatGptEs
+from .ClaudeSon import ClaudeSon
+from .Cloudflare import Cloudflare
+from .Copilot import Copilot
+from .DarkAI import DarkAI
+from .DDG import DDG
+from .DeepInfraChat import DeepInfraChat
+from .Free2GPT import Free2GPT
+from .FreeGpt import FreeGpt
+from .GizAI import GizAI
+from .Liaobots import Liaobots
+from .Mhystical import Mhystical
+from .PerplexityLabs import PerplexityLabs
+from .Pi import Pi
+from .Pizzagpt import Pizzagpt
+from .PollinationsAI import PollinationsAI
+from .Prodia import Prodia
+from .ReplicateHome import ReplicateHome
+from .RubiksAI import RubiksAI
+from .TeachAnything import TeachAnything
+from .You import You
 
 import sys
diff --git a/g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py b/g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py
new file mode 100644
index 00000000..7987cc1b
--- /dev/null
+++ b/g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ...image import ImageResponse, ImagePreview
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+class BlackForestLabsFlux1Dev(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://black-forest-labs-flux-1-dev.hf.space"
+    api_endpoint = "/gradio_api/call/infer"
+
+    working = True
+
+    default_model = 'flux-dev'
+    models = [default_model]
+    image_models = [default_model]
+
+    @classmethod
+    async def create_async_generator(
+        cls, model: str, messages: Messages, prompt: str = None, api_key: str = None, proxy: str = None, **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+        }
+        if api_key is not None:
+            headers["Authorization"] = f"Bearer {api_key}"
+        async with ClientSession(headers=headers) as session:
+            prompt = messages[-1]["content"] if prompt is None else prompt
+            data = {
+                "data": [prompt, 0, True, 1024, 1024, 3.5, 28]
+            }
+            async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                event_id = (await response.json()).get("event_id")
+                async with session.get(f"{cls.url}{cls.api_endpoint}/{event_id}") as event_response:
+                    event_response.raise_for_status()
+                    event = None
+                    async for chunk in event_response.content:
+                        if chunk.startswith(b"event: "):
+                            event = chunk[7:].decode(errors="replace").strip()
+                        if chunk.startswith(b"data: "):
+                            if event == "error":
+                                raise RuntimeError(f"GPU token limit exceeded: {chunk.decode(errors='replace')}")
+                            if event in ("complete", "generating"):
+                                try:
+                                    data = json.loads(chunk[6:])
+                                    if data is None:
+                                        continue
+                                    url = data[0]["url"]
+                                except (json.JSONDecodeError, KeyError, TypeError) as e:
+                                    raise RuntimeError(f"Failed to parse image URL: {chunk.decode(errors='replace')}", e)
+                                if event == "generating":
+                                    yield ImagePreview(url, prompt)
+                                else:
+                                    yield ImageResponse(url, prompt)
+                                    break
diff --git a/g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py b/g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py
new file mode 100644
index 00000000..7b29b7af
--- /dev/null
+++ b/g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+import random
+from typing import Optional
+
+from ...typing import AsyncResult, Messages
+from ...image import ImageResponse
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+class BlackForestLabsFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://black-forest-labs-flux-1-schnell.hf.space"
+    api_endpoint = "https://black-forest-labs-flux-1-schnell.hf.space/call/infer"
+
+    working = True
+
+    default_model = "flux-schnell"
+    default_image_model = default_model
+    image_models = [default_image_model]
+    models = [*image_models]
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        width: int = 768,
+        height: int = 768,
+        num_inference_steps: int = 2,
+        seed: Optional[int] = None,
+        randomize_seed: bool = False,
+        **kwargs
+    ) -> AsyncResult:
+        if seed is None:
+            seed = random.randint(0, 10000)
+
+        width = max(32, width - (width % 8))
+        height = max(32, height - (height % 8))
+
+        prompt = messages[-1]["content"]
+
+        payload = {
+            "data": [
+                prompt,
+                seed,
+                randomize_seed,
+                width,
+                height,
+                num_inference_steps
+            ]
+        }
+
+        async with ClientSession() as session:
+            async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+                response.raise_for_status()
+                response_data = await response.json()
+                event_id = response_data['event_id']
+
+                while True:
+                    async with session.get(f"{cls.api_endpoint}/{event_id}", proxy=proxy) as status_response:
+                        status_response.raise_for_status()
+                        events = (await status_response.text()).split('\n\n')
+
+                        for event in events:
+                            if event.startswith('event:'):
+                                event_parts = event.split('\ndata: ')
+                                if len(event_parts) < 2:
+                                    continue
+
+                                event_type = event_parts[0].split(': ')[1]
+                                data = event_parts[1]
+
+                                if event_type == 'error':
+                                    raise Exception(f"Error generating image: {data}")
+                                elif event_type == 'complete':
+                                    json_data = json.loads(data)
+                                    image_url = json_data[0]['url']
+                                    yield ImageResponse(images=[image_url], alt=prompt)
+                                    return
diff --git a/g4f/Provider/hf_space/VoodoohopFlux1Schnell.py b/g4f/Provider/hf_space/VoodoohopFlux1Schnell.py
new file mode 100644
index 00000000..bd55b20b
--- /dev/null
+++ b/g4f/Provider/hf_space/VoodoohopFlux1Schnell.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+import random
+from typing import Optional
+
+from ...typing import AsyncResult, Messages
+from ...image import ImageResponse
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+class VoodoohopFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://voodoohop-flux-1-schnell.hf.space"
+    api_endpoint = "https://voodoohop-flux-1-schnell.hf.space/call/infer"
+
+    working = True
+
+    default_model = "flux-schnell"
+    default_image_model = default_model
+    image_models = [default_image_model]
+    models = [*image_models]
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        width: int = 768,
+        height: int = 768,
+        num_inference_steps: int = 2,
+        seed: Optional[int] = None,
+        randomize_seed: bool = False,
+        **kwargs
+    ) -> AsyncResult:
+        if seed is None:
+            seed = random.randint(0, 10000)
+
+        width = max(32, width - (width % 8))
+        height = max(32, height - (height % 8))
+
+        prompt = messages[-1]["content"]
+
+        payload = {
+            "data": [
+                prompt,
+                seed,
+                randomize_seed,
+                width,
+                height,
+                num_inference_steps
+            ]
+        }
+
+        async with ClientSession() as session:
+            async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+                response.raise_for_status()
+                response_data = await response.json()
+                event_id = response_data['event_id']
+
+                while True:
+                    async with session.get(f"{cls.api_endpoint}/{event_id}", proxy=proxy) as status_response:
+                        status_response.raise_for_status()
+                        events = (await status_response.text()).split('\n\n')
+
+                        for event in events:
+                            if event.startswith('event:'):
+                                event_parts = event.split('\ndata: ')
+                                if len(event_parts) < 2:
+                                    continue
+
+                                event_type = event_parts[0].split(': ')[1]
+                                data = event_parts[1]
+
+                                if event_type == 'error':
+                                    raise Exception(f"Error generating image: {data}")
+                                elif event_type == 'complete':
+                                    json_data = json.loads(data)
+                                    image_url = json_data[0]['url']
+                                    yield ImageResponse(images=[image_url], alt=prompt)
+                                    return
diff --git a/g4f/Provider/hf_space/__init__.py b/g4f/Provider/hf_space/__init__.py
new file mode 100644
index 00000000..94524e35
--- /dev/null
+++ b/g4f/Provider/hf_space/__init__.py
@@ -0,0 +1,3 @@
+from .BlackForestLabsFlux1Dev import BlackForestLabsFlux1Dev
+from .BlackForestLabsFlux1Schnell import BlackForestLabsFlux1Schnell
+from .VoodoohopFlux1Schnell import VoodoohopFlux1Schnell
diff --git a/g4f/models.py b/g4f/models.py
index 19354a85..0d2b5dbc 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -5,17 +5,17 @@ from dataclasses import dataclass
 from .Provider import IterListProvider, ProviderType
 from .Provider import (
     Blackbox,
-    Blackbox2,
+    BlackboxCreateAgent,
     BingCreateImages,
     ChatGpt,
     ChatGptEs,
+    ClaudeSon,
     Cloudflare,
     Copilot,
     CopilotAccount,
     DarkAI,
     DDG,
     DeepInfraChat,
-    Flux,
     GigaChat,
     Gemini,
     GeminiPro,
@@ -36,6 +36,11 @@ from .Provider import (
     ReplicateHome,
     RubiksAI,
     TeachAnything,
+
+    ## HuggingSpace ##
+    BlackForestLabsFlux1Dev,
+    BlackForestLabsFlux1Schnell,
+    VoodoohopFlux1Schnell,
 )
 
 @dataclass(unsafe_hash=True)
@@ -67,7 +72,7 @@ default = Model(
     best_provider = IterListProvider([
         DDG,
         Pizzagpt,
-        Blackbox2,
+        BlackboxCreateAgent,
         Blackbox,
         Copilot,
         DeepInfraChat,
@@ -175,7 +180,7 @@ llama_3_1_8b = Model(
 llama_3_1_70b = Model(
     name = "llama-3.1-70b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, Blackbox2, TeachAnything, PollinationsAI, DarkAI, Airforce, RubiksAI, PerplexityLabs])
+    best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, BlackboxCreateAgent, TeachAnything, PollinationsAI, DarkAI, Airforce, RubiksAI, PerplexityLabs])
 )
 
 llama_3_1_405b = Model(
@@ -313,7 +318,7 @@ claude_3_haiku = Model(
 claude_3_5_sonnet = Model(
     name = 'claude-3.5-sonnet',
     base_provider = 'Anthropic',
-    best_provider = IterListProvider([Blackbox, PollinationsAI, Liaobots])
+    best_provider = IterListProvider([Blackbox, PollinationsAI, ClaudeSon, Liaobots])
 )
 
 ### Reka AI ###
@@ -555,7 +560,7 @@ playground_v2_5 = ImageModel(
 flux = ImageModel(
     name = 'flux',
     base_provider = 'Flux AI',
-    best_provider = IterListProvider([Blackbox, Blackbox2, PollinationsAI, Airforce])
+    best_provider = IterListProvider([Blackbox, BlackboxCreateAgent, PollinationsAI, Airforce])
 )
 
 flux_pro = ImageModel(
@@ -567,7 +572,13 @@ flux_pro = ImageModel(
 flux_dev = ImageModel(
     name = 'flux-dev',
     base_provider = 'Flux AI',
-    best_provider = IterListProvider([Flux, HuggingChat, HuggingFace])
+    best_provider = IterListProvider([BlackForestLabsFlux1Dev, HuggingChat, HuggingFace])
+)
+
+flux_schnell = ImageModel(
+    name = 'flux-schnell',
+    base_provider = 'Flux AI',
+    best_provider = IterListProvider([BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell])
 )
 
 flux_realism = ImageModel(
@@ -813,6 +824,7 @@ class ModelUtils:
         flux.name: flux,
         flux_pro.name: flux_pro,
         flux_dev.name: flux_dev,
+        flux_schnell.name: flux_schnell,
         flux_realism.name: flux_realism,
         flux_cablyai.name: flux_cablyai,
         flux_anime.name: flux_anime,
-- 
cgit v1.2.3