From 315a2f25955e90ed3f653787762abb5bdee878ff Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Sat, 14 Dec 2024 23:34:13 +0100
Subject: Add streaming and system messages support in Airforce

---
 etc/unittest/models.py              |   7 +-
 g4f/Provider/Airforce.py            |  27 +++---
 g4f/Provider/Copilot.py             |  51 ++++++----
 g4f/Provider/openai/har_file.py     |   2 +
 g4f/gui/client/static/css/style.css |   2 +-
 g4f/gui/server/api.py               |   6 +-
 g4f/models.py                       | 184 ++++++++++++++++--------------
 7 files changed, 140 insertions(+), 139 deletions(-)

diff --git a/etc/unittest/models.py b/etc/unittest/models.py
index 39fdbb29..cea6e36d 100644
--- a/etc/unittest/models.py
+++ b/etc/unittest/models.py
@@ -26,4 +26,9 @@ class TestProviderHasModel(unittest.IsolatedAsyncioTestCase):
         except (MissingRequirementsError, MissingAuthError):
             return
         if self.cache[provider.__name__]:
-            self.assertIn(model, self.cache[provider.__name__], provider.__name__)
\ No newline at end of file
+            self.assertIn(model, self.cache[provider.__name__], provider.__name__)
+
+    async def test_all_providers_working(self):
+        for model, providers in __models__.values():
+            for provider in providers:
+                self.assertTrue(provider.working, f"{provider.__name__} in {model.name}")
\ No newline at end of file
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 7e825dac..42c654dd 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -5,8 +5,10 @@
 import requests
 from aiohttp import ClientSession
 from typing import List
 from requests.packages.urllib3.exceptions import InsecureRequestWarning
+
 from ..typing import AsyncResult, Messages
 from ..image import ImageResponse
+from ..requests.raise_for_status import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .. import debug
@@ -32,7 +34,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
     api_endpoint_imagine2 = "https://api.airforce/imagine2"
 
     working = True
-    supports_stream = False
+    supports_stream = True
     supports_system_message = True
     supports_message_history = True
@@ -87,7 +89,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
             debug.log(f"Error fetching text models: {e}")
         return cls.models
-    
+
     @classmethod
     async def check_api_key(cls, api_key: str) -> bool:
         """
@@ -95,12 +97,11 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
         """
         if not api_key or api_key == "null":
             return True  # No restrictions if no key.
-        
+
         headers = {
             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
             "Accept": "*/*",
         }
-
         try:
             async with ClientSession(headers=headers) as session:
                 async with session.get(f"https://api.airforce/check?key={api_key}") as response:
@@ -195,11 +196,13 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
             "Content-Type": "application/json",
             "Authorization": f"Bearer {api_key}",
         }
-        full_message = "\n".join([msg['content'] for msg in messages])
-        message_chunks = split_message(full_message, max_length=1000)
+        final_messages = []
+        for message in messages:
+            message_chunks = split_message(message["content"], max_length=1000)
+            final_messages.extend([{"role": message["role"], "content": chunk} for chunk in message_chunks])
 
         data = {
-            "messages": [{"role": "user", "content": chunk} for chunk in message_chunks],
+            "messages": final_messages,
             "model": model,
             "max_tokens": max_tokens,
             "temperature": temperature,
@@ -209,10 +212,9 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
 
         async with ClientSession(headers=headers) as session:
             async with session.post(cls.api_endpoint_completions, json=data, proxy=proxy) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 if stream:
-                    buffer = []  # Buffer to collect partial responses
                     async for line in response.content:
                         line = line.decode('utf-8').strip()
                         if line.startswith('data: '):
@@ -222,12 +224,11 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
                                 if 'choices' in chunk and chunk['choices']:
                                     delta = chunk['choices'][0].get('delta', {})
                                     if 'content' in delta:
-                                        buffer.append(delta['content'])
+                                        chunk = cls._filter_response(delta['content'])
+                                        if chunk:
+                                            yield chunk
                             except json.JSONDecodeError:
                                 continue
-                    # Combine the buffered response and filter it
-                    filtered_response = cls._filter_response(''.join(buffer))
-                    yield filtered_response
                 else:
                     # Non-streaming response
                     result = await response.json()
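
The interesting part of the Airforce change is the message handling: instead of flattening the whole conversation into one user message, each message is now split into chunks of at most 1000 characters that keep their original role, which is what lets system messages reach the API intact. A standalone sketch of that idea (split_message here is a hypothetical length-only stand-in for the provider's helper, which may split more carefully):

    def split_message(text: str, max_length: int = 1000) -> list[str]:
        # Hypothetical stand-in: naive fixed-width slicing.
        return [text[i:i + max_length] for i in range(0, len(text), max_length)] or [""]

    def build_final_messages(messages: list[dict]) -> list[dict]:
        final_messages = []
        for message in messages:
            # Every chunk keeps the role of the message it came from.
            for chunk in split_message(message["content"], max_length=1000):
                final_messages.append({"role": message["role"], "content": chunk})
        return final_messages

    # A 2500-character system prompt becomes three system-role chunks
    # instead of being merged into a single user message:
    chunks = build_final_messages([{"role": "system", "content": "x" * 2500}])
    print([c["role"] for c in chunks])  # ['system', 'system', 'system']

The streaming path changes in the same spirit: deltas are filtered and yielded as they arrive instead of being buffered and emitted once at the end, which is why supports_stream flips to True.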
diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
index 6b0db2ae..bd161c1f 100644
--- a/g4f/Provider/Copilot.py
+++ b/g4f/Provider/Copilot.py
@@ -47,7 +47,7 @@ class Copilot(AbstractProvider, ProviderModelMixin):
 
     websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"
     conversation_url = f"{url}/c/api/conversations"
-    
+
     _access_token: str = None
     _cookies: CookieJar = None
 
@@ -94,20 +94,20 @@ class Copilot(AbstractProvider, ProviderModelMixin):
         ) as session:
             if cls._access_token is not None:
                 cls._cookies = session.cookies.jar
-            if cls._access_token is None:
-                try:
-                    url = "https://copilot.microsoft.com/cl/eus-sc/collect"
-                    headers = {
-                        "Accept": "application/x-clarity-gzip",
-                        "referrer": "https://copilot.microsoft.com/onboarding"
-                    }
-                    response = session.post(url, headers=headers, data=get_clarity())
-                    clarity_token = json.loads(response.text.split(" ", maxsplit=1)[-1])[0]["value"]
-                    debug.log(f"Copilot: Clarity Token: ...{clarity_token[-12:]}")
-                except Exception as e:
-                    debug.log(f"Copilot: {e}")
-            else:
-                clarity_token = None
+            # if cls._access_token is None:
+            #     try:
+            #         url = "https://copilot.microsoft.com/cl/eus-sc/collect"
+            #         headers = {
+            #             "Accept": "application/x-clarity-gzip",
+            #             "referrer": "https://copilot.microsoft.com/onboarding"
+            #         }
+            #         response = session.post(url, headers=headers, data=get_clarity())
+            #         clarity_token = json.loads(response.text.split(" ", maxsplit=1)[-1])[0]["value"]
+            #         debug.log(f"Copilot: Clarity Token: ...{clarity_token[-12:]}")
+            #     except Exception as e:
+            #         debug.log(f"Copilot: {e}")
+            # else:
+            #     clarity_token = None
             response = session.get("https://copilot.microsoft.com/c/api/user")
             raise_for_status(response)
             user = response.json().get('firstName')
@@ -121,6 +121,14 @@ class Copilot(AbstractProvider, ProviderModelMixin):
                 if return_conversation:
                     yield Conversation(conversation_id)
                 prompt = format_prompt(messages)
+                if len(prompt) > 10000:
+                    if len(messages) > 6:
+                        prompt = format_prompt(messages[:3]+messages[-3:])
+                    elif len(messages) > 2:
+                        prompt = format_prompt(messages[:2]+messages[-1:])
+                    if len(prompt) > 10000:
+                        prompt = messages[-1]["content"]
+                    debug.log(f"Copilot: Trim messages to: {len(prompt)}")
                 debug.log(f"Copilot: Created conversation: {conversation_id}")
             else:
                 conversation_id = conversation.conversation_id
@@ -138,14 +146,15 @@ class Copilot(AbstractProvider, ProviderModelMixin):
                     )
                     raise_for_status(response)
                     uploaded_images.append({"type":"image", "url": response.json().get("url")})
+                    break
 
             wss = session.ws_connect(cls.websocket_url)
-            if clarity_token is not None:
-                wss.send(json.dumps({
-                    "event": "challengeResponse",
-                    "token": clarity_token,
-                    "method":"clarity"
-                }).encode(), CurlWsFlag.TEXT)
+            # if clarity_token is not None:
+            #     wss.send(json.dumps({
+            #         "event": "challengeResponse",
+            #         "token": clarity_token,
+            #         "method":"clarity"
+            #     }).encode(), CurlWsFlag.TEXT)
             wss.send(json.dumps({
                 "event": "send",
                 "conversationId": conversation_id,
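
Copilot rejects very long prompts, so the new trimming logic caps the formatted prompt at 10000 characters by dropping the middle of the history and, as a last resort, sending only the newest message. The same strategy in isolation (format_prompt is sketched here as a simple role-prefixed join; the real helper lives in g4f and formats differently):

    def format_prompt(messages: list[dict]) -> str:
        # Simplified stand-in for g4f's prompt formatter.
        return "\n".join(f"{m['role']}: {m['content']}" for m in messages)

    def trim_prompt(messages: list[dict], limit: int = 10000) -> str:
        prompt = format_prompt(messages)
        if len(prompt) > limit:
            if len(messages) > 6:
                # Keep the first three and last three messages.
                prompt = format_prompt(messages[:3] + messages[-3:])
            elif len(messages) > 2:
                prompt = format_prompt(messages[:2] + messages[-1:])
            if len(prompt) > limit:
                # Still too long: fall back to the latest message alone.
                prompt = messages[-1]["content"]
        return prompt

Note also the added break after the first image upload: only one attachment is sent per request now.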
debug.log(f"Copilot: {e}") + # else: + # clarity_token = None response = session.get("https://copilot.microsoft.com/c/api/user") raise_for_status(response) user = response.json().get('firstName') @@ -121,6 +121,14 @@ class Copilot(AbstractProvider, ProviderModelMixin): if return_conversation: yield Conversation(conversation_id) prompt = format_prompt(messages) + if len(prompt) > 10000: + if len(messages) > 6: + prompt = format_prompt(messages[:3]+messages[-3:]) + elif len(messages) > 2: + prompt = format_prompt(messages[:2]+messages[-1:]) + if len(prompt) > 10000: + prompt = messages[-1]["content"] + debug.log(f"Copilot: Trim messages to: {len(prompt)}") debug.log(f"Copilot: Created conversation: {conversation_id}") else: conversation_id = conversation.conversation_id @@ -138,14 +146,15 @@ class Copilot(AbstractProvider, ProviderModelMixin): ) raise_for_status(response) uploaded_images.append({"type":"image", "url": response.json().get("url")}) + break wss = session.ws_connect(cls.websocket_url) - if clarity_token is not None: - wss.send(json.dumps({ - "event": "challengeResponse", - "token": clarity_token, - "method":"clarity" - }).encode(), CurlWsFlag.TEXT) + # if clarity_token is not None: + # wss.send(json.dumps({ + # "event": "challengeResponse", + # "token": clarity_token, + # "method":"clarity" + # }).encode(), CurlWsFlag.TEXT) wss.send(json.dumps({ "event": "send", "conversationId": conversation_id, diff --git a/g4f/Provider/openai/har_file.py b/g4f/Provider/openai/har_file.py index ff416267..989a9efc 100644 --- a/g4f/Provider/openai/har_file.py +++ b/g4f/Provider/openai/har_file.py @@ -44,6 +44,8 @@ class arkReq: self.userAgent = userAgent def get_har_files(): + if not os.access(get_cookies_dir(), os.R_OK): + raise NoValidHarFileError("har_and_cookies dir is not readable") harPath = [] for root, _, files in os.walk(get_cookies_dir()): for file in files: diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css index e3c6dc27..10f20607 100644 --- a/g4f/gui/client/static/css/style.css +++ b/g4f/gui/client/static/css/style.css @@ -1028,7 +1028,7 @@ ul { .buttons { align-items: flex-start; flex-wrap: wrap; - gap: 15px; + gap: 12px; } .mobile-sidebar { diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py index 9994f251..877d47f2 100644 --- a/g4f/gui/server/api.py +++ b/g4f/gui/server/api.py @@ -120,11 +120,11 @@ class Api: } def _create_response_stream(self, kwargs: dict, conversation_id: str, provider: str, download_images: bool = True) -> Iterator: - def log_handler(text: str): + def decorated_log(text: str): debug.logs.append(text) if debug.logging: - print(text) - debug.log_handler = log_handler + debug.log_handler(text) + debug.log = decorated_log proxy = os.environ.get("G4F_PROXY") provider = kwargs.get("provider") model, provider_handler = get_model_and_provider( diff --git a/g4f/models.py b/g4f/models.py index 6fc6bff3..cb7d9697 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -4,7 +4,6 @@ from dataclasses import dataclass from .Provider import IterListProvider, ProviderType from .Provider import ( - AIChatFree, Blackbox, Blackbox2, BingCreateImages, @@ -72,10 +71,10 @@ default = Model( ReplicateHome, Blackbox2, Blackbox, + Copilot, Free2GPT, DeepInfraChat, Airforce, - ChatGptEs, Cloudflare, Mhystical, PollinationsAI, @@ -256,7 +255,7 @@ phi_3_5_mini = Model( gemini_pro = Model( name = 'gemini-pro', base_provider = 'Google DeepMind', - best_provider = IterListProvider([Blackbox, AIChatFree, Gemini, GeminiPro, Liaobots]) + best_provider = 
diff --git a/g4f/models.py b/g4f/models.py
index 6fc6bff3..cb7d9697 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -4,7 +4,6 @@ from dataclasses import dataclass
 
 from .Provider import IterListProvider, ProviderType
 from .Provider import (
-    AIChatFree,
     Blackbox,
     Blackbox2,
     BingCreateImages,
@@ -72,10 +71,10 @@ default = Model(
         ReplicateHome,
         Blackbox2,
         Blackbox,
+        Copilot,
         Free2GPT,
         DeepInfraChat,
         Airforce,
-        ChatGptEs,
         Cloudflare,
         Mhystical,
         PollinationsAI,
@@ -256,7 +255,7 @@ phi_3_5_mini = Model(
 gemini_pro = Model(
     name = 'gemini-pro',
     base_provider = 'Google DeepMind',
-    best_provider = IterListProvider([Blackbox, AIChatFree, Gemini, GeminiPro, Liaobots])
+    best_provider = IterListProvider([Blackbox, Gemini, GeminiPro, Liaobots])
 )
 
 gemini_flash = Model(
@@ -408,7 +407,6 @@ grok_beta = Model(
     best_provider = Liaobots
 )
 
-
 ### Perplexity AI ###
 sonar_online = Model(
     name = 'sonar-online',
@@ -429,7 +427,6 @@ nemotron_70b = Model(
     best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
 )
 
-
 ### Teknium ###
 openhermes_2_5 = Model(
     name = 'openhermes-2.5',
@@ -466,13 +463,6 @@ neural_7b = Model(
     best_provider = Airforce
 )
 
-### PollinationsAI ###
-p1 = Model(
-    name = 'p1',
-    base_provider = 'PollinationsAI',
-    best_provider = PollinationsAI
-)
-
 ### Uncensored AI ###
 evil = Model(
     name = 'evil',
@@ -529,7 +519,6 @@ playground_v2_5 = ImageModel(
     best_provider = ReplicateHome
 )
 
-
 ### Flux AI ###
 flux = ImageModel(
     name = 'flux',
@@ -629,184 +618,181 @@ class ModelUtils:
         'gpt-3': gpt_35_turbo,
 
         # gpt-3.5
-        'gpt-3.5-turbo': gpt_35_turbo,
+        gpt_35_turbo.name: gpt_35_turbo,
 
         # gpt-4
-        'gpt-4': gpt_4,
-        'gpt-4-turbo': gpt_4_turbo,
+        gpt_4.name: gpt_4,
+        gpt_4_turbo.name: gpt_4_turbo,
 
         # gpt-4o
-        'gpt-4o': gpt_4o,
-        'gpt-4o-mini': gpt_4o_mini,
+        gpt_4o.name: gpt_4o,
+        gpt_4o_mini.name: gpt_4o_mini,
 
         # o1
-        'o1-preview': o1_preview,
-        'o1-mini': o1_mini,
+        o1_preview.name: o1_preview,
+        o1_mini.name: o1_mini,
 
         ### Meta ###
-        "meta-ai": meta,
+        meta.name: meta,
 
         # llama-2
-        'llama-2-7b': llama_2_7b,
+        llama_2_7b.name: llama_2_7b,
 
         # llama-3
-        'llama-3-8b': llama_3_8b,
+        llama_3_8b.name: llama_3_8b,
 
         # llama-3.1
-        'llama-3.1-8b': llama_3_1_8b,
-        'llama-3.1-70b': llama_3_1_70b,
-        'llama-3.1-405b': llama_3_1_405b,
+        llama_3_1_8b.name: llama_3_1_8b,
+        llama_3_1_70b.name: llama_3_1_70b,
+        llama_3_1_405b.name: llama_3_1_405b,
 
         # llama-3.2
-        'llama-3.2-1b': llama_3_2_1b,
-        'llama-3.2-11b': llama_3_2_11b,
+        llama_3_2_1b.name: llama_3_2_1b,
+        llama_3_2_11b.name: llama_3_2_11b,
 
         # llama-3.3
-        'llama-3.3-70b': llama_3_3_70b,
+        llama_3_3_70b.name: llama_3_3_70b,
 
         ### Mistral ###
-        'mixtral-8x7b': mixtral_8x7b,
-        'mistral-nemo': mistral_nemo,
-        'mistral-large': mistral_large,
+        mixtral_8x7b.name: mixtral_8x7b,
+        mistral_nemo.name: mistral_nemo,
+        mistral_large.name: mistral_large,
 
         ### NousResearch ###
-        'hermes-2-dpo': hermes_2_dpo,
-        'hermes-2-pro': hermes_2_pro,
-        'hermes-3': hermes_3,
+        hermes_2_dpo.name: hermes_2_dpo,
+        hermes_2_pro.name: hermes_2_pro,
+        hermes_3.name: hermes_3,
 
         ### Microsoft ###
-        'phi-2': phi_2,
-        'phi-3.5-mini': phi_3_5_mini,
+        phi_2.name: phi_2,
+        phi_3_5_mini.name: phi_3_5_mini,
 
         ### Google ###
         # gemini
-        'gemini': gemini,
-        'gemini-pro': gemini_pro,
-        'gemini-flash': gemini_flash,
+        gemini.name: gemini,
+        gemini_pro.name: gemini_pro,
+        gemini_flash.name: gemini_flash,
 
         # gemma
-        'gemma-2b': gemma_2b,
+        gemma_2b.name: gemma_2b,
 
         ### Anthropic ###
         # claude 3
-        'claude-3-opus': claude_3_opus,
-        'claude-3-sonnet': claude_3_sonnet,
-        'claude-3-haiku': claude_3_haiku,
+        claude_3_opus.name: claude_3_opus,
+        claude_3_sonnet.name: claude_3_sonnet,
+        claude_3_haiku.name: claude_3_haiku,
 
         # claude 3.5
-        'claude-3.5-sonnet': claude_3_5_sonnet,
+        claude_3_5_sonnet.name: claude_3_5_sonnet,
 
         ### Reka AI ###
-        'reka-core': reka_core,
+        reka_core.name: reka_core,
 
         ### Blackbox AI ###
-        'blackboxai': blackboxai,
-        'blackboxai-pro': blackboxai_pro,
+        blackboxai.name: blackboxai,
+        blackboxai_pro.name: blackboxai_pro,
 
         ### CohereForAI ###
-        'command-r+': command_r_plus,
-        'command-r': command_r,
+        command_r_plus.name: command_r_plus,
+        command_r.name: command_r,
 
         ### GigaChat ###
-        'gigachat': gigachat,
+        gigachat.name: gigachat,
 
         ### Qwen ###
         # qwen 1_5
-        'qwen-1.5-7b': qwen_1_5_7b,
+        qwen_1_5_7b.name: qwen_1_5_7b,
 
         # qwen 2
-        'qwen-2-72b': qwen_2_72b,
+        qwen_2_72b.name: qwen_2_72b,
 
         # qwen 2.5
-        'qwen-2.5-72b': qwen_2_5_72b,
-        'qwen-2.5-coder-32b': qwen_2_5_coder_32b,
-        'qwq-32b': qwq_32b,
+        qwen_2_5_72b.name: qwen_2_5_72b,
+        qwen_2_5_coder_32b.name: qwen_2_5_coder_32b,
+        qwq_32b.name: qwq_32b,
 
         ### Inflection ###
-        'pi': pi,
+        pi.name: pi,
 
         ### WizardLM ###
-        'wizardlm-2-8x22b': wizardlm_2_8x22b,
+        wizardlm_2_8x22b.name: wizardlm_2_8x22b,
 
         ### OpenChat ###
-        'openchat-3.5': openchat_3_5,
+        openchat_3_5.name: openchat_3_5,
 
         ### x.ai ###
-        'grok-beta': grok_beta,
+        grok_beta.name: grok_beta,
 
         ### Perplexity AI ###
-        'sonar-online': sonar_online,
-        'sonar-chat': sonar_chat,
+        sonar_online.name: sonar_online,
+        sonar_chat.name: sonar_chat,
 
         ### DeepSeek ###
-        'deepseek-coder': deepseek_coder,
+        deepseek_coder.name: deepseek_coder,
 
         ### TheBloke ###
-        'german-7b': german_7b,
+        german_7b.name: german_7b,
 
         ### Nvidia ###
-        'nemotron-70b': nemotron_70b,
+        nemotron_70b.name: nemotron_70b,
 
         ### Teknium ###
-        'openhermes-2.5': openhermes_2_5,
+        openhermes_2_5.name: openhermes_2_5,
 
         ### Liquid ###
-        'lfm-40b': lfm_40b,
+        lfm_40b.name: lfm_40b,
 
         ### HuggingFaceH4 ###
-        'zephyr-7b': zephyr_7b,
+        zephyr_7b.name: zephyr_7b,
 
         ### Inferless ###
-        'neural-7b': neural_7b,
-
-        ### PollinationsAI ###
-        'p1': p1,
+        neural_7b.name: neural_7b,
 
         ### Uncensored AI ###
-        'evil': evil,
+        evil.name: evil,
 
         ### Other ###
-        'midijourney': midijourney,
-        'turbo': turbo,
-        'unity': unity,
-        'rtist': rtist,
+        midijourney.name: midijourney,
+        turbo.name: turbo,
+        unity.name: unity,
+        rtist.name: rtist,
 
         #############
         ### Image ###
         #############
 
         ### Stability AI ###
-        'sdxl': sdxl,
-        'sd-3': sd_3,
+        sdxl.name: sdxl,
+        sd_3.name: sd_3,
 
         ### Playground ###
-        'playground-v2.5': playground_v2_5,
+        playground_v2_5.name: playground_v2_5,
 
         ### Flux AI ###
-        'flux': flux,
-        'flux-pro': flux_pro,
-        'flux-dev': flux_dev,
-        'flux-realism': flux_realism,
-        'flux-cablyai': flux_cablyai,
-        'flux-anime': flux_anime,
-        'flux-3d': flux_3d,
-        'flux-disney': flux_disney,
-        'flux-pixel': flux_pixel,
-        'flux-4o': flux_4o,
+        flux.name: flux,
+        flux_pro.name: flux_pro,
+        flux_dev.name: flux_dev,
+        flux_realism.name: flux_realism,
+        flux_cablyai.name: flux_cablyai,
+        flux_anime.name: flux_anime,
+        flux_3d.name: flux_3d,
+        flux_disney.name: flux_disney,
+        flux_pixel.name: flux_pixel,
+        flux_4o.name: flux_4o,
 
         ### OpenAI ###
-        'dall-e-3': dall_e_3,
+        dall_e_3.name: dall_e_3,
 
         ### Midjourney ###
-        'midjourney': midjourney,
+        midjourney.name: midjourney,
 
         ### Other ###
-        'any-dark': any_dark,
+        any_dark.name: any_dark,
     }
 
-# Create a list of all working models
-__models__ = {model.name: (model, providers) for model, providers in [
-    (model, [provider for provider in providers if provider.working])
+# Create a list of all models and his providers
+__models__  = {
+    model.name: (model, providers) for model, providers in [
         (model, model.best_provider.providers
             if isinstance(model.best_provider, IterListProvider)
@@ -814,7 +800,5 @@
             else [model.best_provider]
             if model.best_provider is not None
             else [])
         for model in ModelUtils.convert.values()]
-    ] if providers}
-# Update the ModelUtils.convert with the working models
-ModelUtils.convert = {model.name: model for model, _ in __models__.values()}
-_all_models = list(ModelUtils.convert.keys())
+    }
+_all_models = list(__models__.keys())
-- 
cgit v1.2.3
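
Two things happen in the models.py rewrite: registry keys are now derived from each Model's name attribute instead of repeated string literals, and __models__ no longer filters providers by their working flag — the new unit test at the top of the patch asserts that every listed provider works instead. The keying idea in miniature (simplified Model; the real dataclass also carries best_provider):

    from dataclasses import dataclass

    @dataclass
    class Model:
        name: str
        base_provider: str

    gpt_4o = Model(name="gpt-4o", base_provider="OpenAI")
    gpt_4o_mini = Model(name="gpt-4o-mini", base_provider="OpenAI")

    # Keying by the attribute means a renamed model cannot silently
    # drift apart from its registry key:
    convert = {
        gpt_4o.name: gpt_4o,
        gpt_4o_mini.name: gpt_4o_mini,
    }
    assert convert["gpt-4o"] is gpt_4o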
From 3b7b79f5bac793c2d76ab6c58f8d553859332ded Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Sun, 15 Dec 2024 12:23:04 +0100
Subject: Fix show html in gui

---
 g4f/client/service.py               | 12 +++++++-----
 g4f/gui/client/static/js/chat.v1.js | 26 ++++++++++++--------------
 g4f/gui/server/api.py               |  3 ++-
 g4f/requests/raise_for_status.py    |  6 ++++--
 4 files changed, 25 insertions(+), 22 deletions(-)

diff --git a/g4f/client/service.py b/g4f/client/service.py
index 80dc70df..aa0d4871 100644
--- a/g4f/client/service.py
+++ b/g4f/client/service.py
@@ -26,7 +26,8 @@ def get_model_and_provider(model    : Union[Model, str],
                            stream   : bool,
                            ignored  : list[str] = None,
                            ignore_working: bool = False,
-                           ignore_stream: bool = False) -> tuple[str, ProviderType]:
+                           ignore_stream: bool = False,
+                           logging: bool = True) -> tuple[str, ProviderType]:
     """
     Retrieves the model and provider based on input parameters.
@@ -92,10 +93,11 @@ def get_model_and_provider(model    : Union[Model, str],
     if not ignore_stream and not provider.supports_stream and stream:
         raise StreamNotSupportedError(f'{provider_name} does not support "stream" argument')
 
-    if model:
-        debug.log(f'Using {provider_name} provider and {model} model')
-    else:
-        debug.log(f'Using {provider_name} provider')
+    if logging:
+        if model:
+            debug.log(f'Using {provider_name} provider and {model} model')
+        else:
+            debug.log(f'Using {provider_name} provider')
 
     debug.last_provider = provider
     debug.last_model = model
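
get_model_and_provider grows a logging keyword that defaults to True, so existing callers keep the "Using ... provider" line while the GUI, which now mirrors log output through its decorated debug.log, can opt out. A hedged usage sketch (the model name is illustrative; the call resolves a provider but performs no network request):

    from g4f.client.service import get_model_and_provider

    # Library/CLI path: keep the informational debug line (default).
    model, provider = get_model_and_provider("gpt-4o-mini", None, stream=False)

    # GUI path: logs are already echoed per request by the web UI,
    # so the line would otherwise appear twice.
    model, provider = get_model_and_provider(
        "gpt-4o-mini", None, stream=True,
        ignore_stream=True, logging=False,
    )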
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 54853226..f8bd894d 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -503,7 +503,7 @@ async function add_message_chunk(message, message_id) {
    } else if (message.type == "error") {
        error_storage[message_id] = message.error
        console.error(message.error);
-        content_map.inner.innerHTML += `<p><strong>An error occured:</strong> ${message.error}</p>`;
+        content_map.inner.innerHTML += markdown_render(`**An error occured:** ${message.error}`);
        let p = document.createElement("p");
        p.innerText = message.error;
        log_storage.appendChild(p);
@@ -609,7 +609,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
    const files = input && input.files.length > 0 ? input.files : null;
    const auto_continue = document.getElementById("auto_continue")?.checked;
    const download_images = document.getElementById("download_images")?.checked;
-    let api_key = get_api_key_by_provider(provider);
+    const api_key = get_api_key_by_provider(provider);
    const ignored = Array.from(settings.querySelectorAll("input.provider:not(:checked)")).map((el)=>el.value);
    await api("conversation", {
        id: message_id,
@@ -635,7 +635,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
        console.error(e);
        if (e.name != "AbortError") {
            error_storage[message_id] = true;
-            content_map.inner.innerHTML += `<p><strong>An error occured:</strong> ${e}</p>`;
+            content_map.inner.innerHTML += markdown_render(`**An error occured:** ${e}`);
        }
    }
    delete controller_storage[message_id];
@@ -771,7 +771,6 @@ const set_conversation = async (conversation_id) => {
    await load_conversation(conversation_id);
    load_conversations();
    hide_sidebar();
-    log_storage.classList.add("hidden");
 };
 
 const new_conversation = async () => {
@@ -785,7 +784,6 @@ const new_conversation = async () => {
    }
    load_conversations();
    hide_sidebar();
-    log_storage.classList.add("hidden");
    say_hello();
 };
@@ -1104,10 +1102,10 @@ async function hide_sidebar() {
 
 window.addEventListener('popstate', hide_sidebar, false);
 
-sidebar_button.addEventListener("click", (event) => {
+sidebar_button.addEventListener("click", async () => {
    settings.classList.add("hidden");
    if (sidebar.classList.contains("shown")) {
-        hide_sidebar();
+        await hide_sidebar();
    } else {
        sidebar.classList.add("shown");
        sidebar_button.classList.add("rotated");
@@ -1365,11 +1363,10 @@ async function on_api() {
                option = document.createElement("div");
                option.classList.add("field");
                option.innerHTML = `
-                        <span class="label">
-                            Enable ${provider.label}
-                        </span>
-                        <input type="checkbox" name="${provider.name}" value="" class="provider" checked="">
-                    `;
+                    <span class="label">Enable ${provider.label}</span>
+                    <input type="checkbox" name="${provider.name}" value="" class="provider" checked="">
+                    <label for="${provider.name}" class="toogle" title=""></label>
+                `;
                option.querySelector("input").addEventListener("change", (event) => load_provider_option(event.target, provider.name));
                settings.querySelector(".paper").appendChild(option);
                provider_options[provider.name] = option;
@@ -1405,7 +1402,7 @@ async function on_api() {
    });
    document.querySelector(".slide-systemPrompt")?.addEventListener("click", () => {
        hide_systemPrompt.click();
-        let checked = hide_systemPrompt.checked;
+        const checked = hide_systemPrompt.checked;
        systemPrompt.classList[checked ? "add": "remove"]("hidden");
        slide_systemPrompt_icon.classList[checked ? "remove": "add"]("fa-angles-up");
        slide_systemPrompt_icon.classList[checked ? "add": "remove"]("fa-angles-down");
@@ -1599,8 +1596,9 @@ function get_api_key_by_provider(provider) {
    let api_key = null;
    if (provider) {
        api_key = document.getElementById(`${provider}-api_key`)?.value || null;
-        if (api_key == null)
+        if (api_key == null) {
            api_key = document.querySelector(`.${provider}-api_key`)?.value || null;
+        }
    }
    return api_key;
 }
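
The two innerHTML changes are the core of the "show html" fix: error text coming back from providers could contain markup, and concatenating it into innerHTML executed that markup in the page. Routing it through markdown_render escapes it first. The effect, mirrored in Python (markdown_render's exact behavior lives in chat.v1.js; only the escaping aspect is shown, and render_error is a hypothetical name):

    import html

    def render_error(error: str) -> str:
        # Escape before interpolating, so provider-supplied markup is
        # displayed as text instead of being interpreted by the browser.
        return f"<p><strong>An error occured:</strong> {html.escape(str(error))}</p>"

    print(render_error('<img src=x onerror=alert(1)>'))
    # <p><strong>An error occured:</strong> &lt;img src=x onerror=alert(1)&gt;</p>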
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index 877d47f2..d811bfc3 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -130,7 +130,8 @@ class Api:
         model, provider_handler = get_model_and_provider(
             kwargs.get("model"), provider,
             stream=True,
-            ignore_stream=True
+            ignore_stream=True,
+            logging=False
         )
         first = True
         try:
diff --git a/g4f/requests/raise_for_status.py b/g4f/requests/raise_for_status.py
index 3566ead2..2864132a 100644
--- a/g4f/requests/raise_for_status.py
+++ b/g4f/requests/raise_for_status.py
@@ -29,8 +29,8 @@ async def raise_for_status_async(response: Union[StreamResponse, ClientResponse]
     elif response.status == 403 and is_openai(message):
         raise ResponseStatusError(f"Response {response.status}: OpenAI Bot detected")
     elif not response.ok:
-        if "<html>" in message:
-            message = "HTML content"
+        if message is None and response.headers.get("content-type") == "text/html":
+            message = "Bad gateway" if response.status == 502 else "HTML content"
         raise ResponseStatusError(f"Response {response.status}: {message}")
 
 def raise_for_status(response: Union[Response, StreamResponse, ClientResponse, RequestsResponse], message: str = None):
@@ -42,4 +42,6 @@ def raise_for_status(response: Union[Response, StreamResponse, ClientResponse, R
     elif response.status_code == 403 and is_cloudflare(response.text):
         raise CloudflareError(f"Response {response.status_code}: Cloudflare detected")
     elif not response.ok:
+        if message is None and response.headers.get("content-type") == "text/html":
+            message = "Bad gateway" if response.status_code == 502 else "HTML content"
         raise ResponseStatusError(f"Response {response.status_code}: {response.text if message is None else message}")
\ No newline at end of file
-- 
cgit v1.2.3
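
Both raise_for_status variants now share the same rule for HTML error bodies: if the caller supplied no message and the response is text/html, report a short label instead of dumping a whole error page into the chat, with 502 getting its own wording. The rule in isolation (plain function, no response objects):

    def html_safe_message(status: int, content_type: str, message: str | None) -> str | None:
        if message is None and content_type == "text/html":
            return "Bad gateway" if status == 502 else "HTML content"
        return message

    print(html_safe_message(502, "text/html", None))                     # Bad gateway
    print(html_safe_message(500, "text/html", None))                     # HTML content
    print(html_safe_message(500, "application/json", "upstream error"))  # upstream error

This also replaces the old async-path check, which substring-matched the body and raised a TypeError whenever message was None.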