From 5cbbe2fd3d2ec37d990c33f2d72018e526936c6f Mon Sep 17 00:00:00 2001
From: hlohaus <983577+hlohaus@users.noreply.github.com>
Date: Mon, 24 Feb 2025 08:53:43 +0100
Subject: Fix model and provider in chat completion response

Add login button to HuggingFace demo
Custom conversation ids in chat ui
Remove rate limiter in demo mode
Improve YouTube support in Gemini
---
 g4f/Provider/needs_auth/Gemini.py   | 15 +++++++-
 g4f/api/__init__.py                 | 12 +++++-
 g4f/client/__init__.py              | 59 ++++++++++++++--------
 g4f/gui/client/demo.html            | 49 +++++++++++---------
 g4f/gui/client/static/js/chat.v1.js | 76 ++++++++++++++++++-----------
 g4f/gui/server/backend_api.py       | 37 +++---------------
 g4f/gui/server/website.py           |  2 -
 g4f/providers/retry_provider.py     |  2 +-
 8 files changed, 160 insertions(+), 92 deletions(-)

diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index a615eefc..af953129 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -203,7 +203,14 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
                         skip -= 1
                         continue
                     yield item
-                reasoning = "".join(find_str(response_part[4][0], 3))
+                reasoning = "\n\n".join(find_str(response_part[4][0], 3))
+                reasoning = re.sub(r"<b>|</b>", "**", reasoning)
+                def replace_image(match):
+                    return f"![](https:{match.group(0)})"
+                reasoning = re.sub(r"//yt3.(?:ggpht.com|googleusercontent.com/ytc)/[\w=-]+", replace_image, reasoning)
+                reasoning = re.sub(r"\nyoutube\n", "\n\n\n", reasoning)
+                reasoning = re.sub(r"\nYouTube\n", "\nYouTube ", reasoning)
+                reasoning = reasoning.replace('https://www.gstatic.com/images/branding/productlogos/youtube/v9/192px.svg', '')
                 content = response_part[4][0][1][0]
             if reasoning:
                 yield Reasoning(status="🤔")
@@ -215,8 +222,12 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
                 if match:
                     image_prompt = match.group(1)
                     content = content.replace(match.group(0), '')
-                pattern = r"http://googleusercontent.com/image_generation_content/\d+"
+                pattern = r"http://googleusercontent.com/(?:image_generation|youtube)_content/\d+"
                 content = re.sub(pattern, "", content)
+                content = content.replace("<!-- end list -->", "")
+                content = content.replace("https://www.google.com/search?q=http://", "https://")
+                content = content.replace("https://www.google.com/search?q=https://", "https://")
+                content = content.replace("https://www.google.com/url?sa=E&source=gmail&q=http://", "http://")
                 if last_content and content.startswith(last_content):
                     yield content[len(last_content):]
                 else:
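The reasoning clean-up above is easier to read outside diff context. Below is a minimal runnable sketch of the same transformations; the regexes are taken from the hunk, while the sample input string is invented purely for illustration:

import re

# Invented sample of Gemini "reasoning" text: a bold status line, a channel
# avatar URL, and a Google-search-wrapped link.
reasoning = (
    '<b>Searching YouTube</b>\n'
    '//yt3.ggpht.com/ytc_abc123=\n'
    'https://www.google.com/search?q=https://example.com/video\n'
)

# Bold HTML tags become Markdown emphasis.
reasoning = re.sub(r"<b>|</b>", "**", reasoning)

# Protocol-relative avatar URLs become inline Markdown images.
def replace_image(match):
    return f"![](https:{match.group(0)})"
reasoning = re.sub(r"//yt3.(?:ggpht.com|googleusercontent.com/ytc)/[\w=-]+", replace_image, reasoning)

# Google search redirect prefixes are unwrapped back to the direct URL.
reasoning = reasoning.replace("https://www.google.com/search?q=https://", "https://")
print(reasoning)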
{source_url}") except Exception as e: debug.error(f"Download failed: {source_url}\n{type(e).__name__}: {e}") diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py index 3c7de6a7..f4d1a009 100644 --- a/g4f/client/__init__.py +++ b/g4f/client/__init__.py @@ -12,7 +12,7 @@ from typing import Union, AsyncIterator, Iterator, Awaitable, Optional from ..image.copy_images import copy_images from ..typing import Messages, ImageType from ..providers.types import ProviderType, BaseRetryProvider -from ..providers.response import ResponseType, ImageResponse, FinishReason, BaseConversation, SynthesizeData, ToolCalls, Usage +from ..providers.response import * from ..errors import NoImageResponseError from ..providers.retry_provider import IterListProvider from ..providers.asyncio import to_sync_generator @@ -49,6 +49,7 @@ def iter_response( finish_reason = None tool_calls = None usage = None + provider: ProviderInfo = None completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28)) idx = 0 @@ -65,10 +66,13 @@ def iter_response( elif isinstance(chunk, Usage): usage = chunk continue + elif isinstance(chunk, ProviderInfo): + provider = chunk + continue elif isinstance(chunk, BaseConversation): yield chunk continue - elif isinstance(chunk, SynthesizeData) or not chunk: + elif isinstance(chunk, HiddenResponse): continue elif isinstance(chunk, Exception): continue @@ -76,7 +80,6 @@ def iter_response( if isinstance(chunk, list): chunk = "".join(map(str, chunk)) else: - temp = chunk.__str__() if not isinstance(temp, str): if isinstance(temp, list): @@ -84,6 +87,8 @@ def iter_response( else: temp = repr(chunk) chunk = temp + if not chunk: + continue content += chunk @@ -96,7 +101,11 @@ def iter_response( finish_reason = "stop" if stream: - yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time())) + chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time())) + if provider is not None: + chunk.provider = provider.name + chunk.model = provider.model + yield chunk if finish_reason is not None: break @@ -108,7 +117,7 @@ def iter_response( finish_reason = "stop" if finish_reason is None else finish_reason if stream: - yield ChatCompletionChunk.model_construct( + chat_completion = ChatCompletionChunk.model_construct( None, finish_reason, completion_id, int(time.time()), usage=usage ) @@ -116,19 +125,24 @@ def iter_response( if response_format is not None and "type" in response_format: if response_format["type"] == "json_object": content = filter_json(content) - yield ChatCompletion.model_construct( + chat_completion = ChatCompletion.model_construct( content, finish_reason, completion_id, int(time.time()), usage=UsageModel.model_construct(**usage.get_dict()), **filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {} ) + if provider is not None: + chat_completion.provider = provider.name + chat_completion.model = provider.model + yield chat_completion # Synchronous iter_append_model_and_provider function def iter_append_model_and_provider(response: ChatCompletionResponseType, last_model: str, last_provider: ProviderType) -> ChatCompletionResponseType: if isinstance(last_provider, BaseRetryProvider): - last_provider = last_provider.last_provider + yield from response + return for chunk in response: if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)): - if last_provider is not None: + if chunk.provider is None and last_provider is not None: chunk.model 
diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py
index 3c7de6a7..f4d1a009 100644
--- a/g4f/client/__init__.py
+++ b/g4f/client/__init__.py
@@ -12,7 +12,7 @@ from typing import Union, AsyncIterator, Iterator, Awaitable, Optional
 from ..image.copy_images import copy_images
 from ..typing import Messages, ImageType
 from ..providers.types import ProviderType, BaseRetryProvider
-from ..providers.response import ResponseType, ImageResponse, FinishReason, BaseConversation, SynthesizeData, ToolCalls, Usage
+from ..providers.response import *
 from ..errors import NoImageResponseError
 from ..providers.retry_provider import IterListProvider
 from ..providers.asyncio import to_sync_generator
@@ -49,6 +49,7 @@ def iter_response(
     finish_reason = None
     tool_calls = None
     usage = None
+    provider: ProviderInfo = None
     completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
     idx = 0
@@ -65,10 +66,13 @@ def iter_response(
         elif isinstance(chunk, Usage):
             usage = chunk
             continue
+        elif isinstance(chunk, ProviderInfo):
+            provider = chunk
+            continue
         elif isinstance(chunk, BaseConversation):
             yield chunk
             continue
-        elif isinstance(chunk, SynthesizeData) or not chunk:
+        elif isinstance(chunk, HiddenResponse):
             continue
         elif isinstance(chunk, Exception):
             continue
@@ -76,7 +80,6 @@ def iter_response(
         if isinstance(chunk, list):
             chunk = "".join(map(str, chunk))
         else:
-
             temp = chunk.__str__()
             if not isinstance(temp, str):
                 if isinstance(temp, list):
@@ -84,6 +87,8 @@ def iter_response(
                     temp = "".join(map(str, temp))
                 else:
                     temp = repr(chunk)
             chunk = temp
+        if not chunk:
+            continue
 
         content += chunk
@@ -96,7 +101,11 @@ def iter_response(
             finish_reason = "stop"
 
         if stream:
-            yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+            chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+            if provider is not None:
+                chunk.provider = provider.name
+                chunk.model = provider.model
+            yield chunk
 
         if finish_reason is not None:
             break
@@ -108,7 +117,7 @@ def iter_response(
     finish_reason = "stop" if finish_reason is None else finish_reason
 
     if stream:
-        yield ChatCompletionChunk.model_construct(
+        chat_completion = ChatCompletionChunk.model_construct(
             None, finish_reason, completion_id, int(time.time()), usage=usage
         )
     else:
@@ -116,19 +125,24 @@ def iter_response(
         if response_format is not None and "type" in response_format:
             if response_format["type"] == "json_object":
                 content = filter_json(content)
-        yield ChatCompletion.model_construct(
+        chat_completion = ChatCompletion.model_construct(
             content, finish_reason, completion_id, int(time.time()), usage=UsageModel.model_construct(**usage.get_dict()),
             **filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {}
         )
+    if provider is not None:
+        chat_completion.provider = provider.name
+        chat_completion.model = provider.model
+    yield chat_completion
 
 # Synchronous iter_append_model_and_provider function
 def iter_append_model_and_provider(response: ChatCompletionResponseType, last_model: str, last_provider: ProviderType) -> ChatCompletionResponseType:
     if isinstance(last_provider, BaseRetryProvider):
-        last_provider = last_provider.last_provider
+        yield from response
+        return
     for chunk in response:
         if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
-            if last_provider is not None:
+            if chunk.provider is None and last_provider is not None:
                 chunk.model = getattr(last_provider, "last_model", last_model)
                 chunk.provider = last_provider.__name__
         yield chunk
@@ -146,6 +160,7 @@ async def async_iter_response(
     idx = 0
     tool_calls = None
     usage = None
+    provider: ProviderInfo = None
 
     try:
         async for chunk in response:
@@ -161,12 +176,17 @@ async def async_iter_response(
             elif isinstance(chunk, Usage):
                 usage = chunk
                 continue
-            elif isinstance(chunk, SynthesizeData) or not chunk:
+            elif isinstance(chunk, ProviderInfo):
+                provider = chunk
+                continue
+            elif isinstance(chunk, HiddenResponse):
                 continue
             elif isinstance(chunk, Exception):
                 continue
 
             chunk = str(chunk)
+            if not chunk:
+                continue
             content += chunk
             idx += 1
@@ -179,7 +199,11 @@ async def async_iter_response(
                 finish_reason = "stop"
 
             if stream:
-                yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+                chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+                if provider is not None:
+                    chunk.provider = provider.name
+                    chunk.model = provider.model
+                yield chunk
 
             if finish_reason is not None:
                 break
@@ -190,7 +214,7 @@ async def async_iter_response(
         usage = Usage(completion_tokens=idx, total_tokens=idx)
 
         if stream:
-            yield ChatCompletionChunk.model_construct(
+            chat_completion = ChatCompletionChunk.model_construct(
                 None, finish_reason, completion_id, int(time.time()), usage=usage.get_dict()
             )
         else:
@@ -198,11 +222,15 @@ async def async_iter_response(
             if response_format is not None and "type" in response_format:
                 if response_format["type"] == "json_object":
                     content = filter_json(content)
-            yield ChatCompletion.model_construct(
+            chat_completion = ChatCompletion.model_construct(
                 content, finish_reason, completion_id, int(time.time()), usage=UsageModel.model_construct(**usage.get_dict()),
                 **filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {}
             )
+        if provider is not None:
+            chat_completion.provider = provider.name
+            chat_completion.model = provider.model
+        yield chat_completion
     finally:
         await safe_aclose(response)
@@ -214,11 +242,12 @@ async def async_iter_append_model_and_provider(
         last_provider = None
     try:
         if isinstance(last_provider, BaseRetryProvider):
-            if last_provider is not None:
-                last_provider = last_provider.last_provider
+            async for chunk in response:
+                yield chunk
+            return
         async for chunk in response:
             if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
-                if last_provider is not None:
+                if chunk.provider is None and last_provider is not None:
                     chunk.model = getattr(last_provider, "last_model", last_model)
                     chunk.provider = last_provider.__name__
             yield chunk
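With ProviderInfo chunks now forwarded into the final response objects, a caller can see which backend actually served a request even when a retry provider wraps several candidates. A quick usage check against the public client API (the model name is an example; availability depends on your configuration):

from g4f.client import Client

client = Client()
response = client.chat.completions.create(
    model="gpt-4o-mini",  # example model
    messages=[{"role": "user", "content": "Say hi"}],
)
# Previously these could reflect only the retry wrapper; after this patch they
# name the provider and model that actually produced the answer.
print(response.provider, response.model)
print(response.choices[0].message.content)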
diff --git a/g4f/gui/client/demo.html b/g4f/gui/client/demo.html
index 1d75135f..36c2afed 100644
--- a/g4f/gui/client/demo.html
+++ b/g4f/gui/client/demo.html
@@ -183,25 +183,10 @@
         const isIframe = window.self !== window.top;
         const backendUrl = "{{backend_url}}";
         let url = new URL(window.location.href)
-        let params = new URLSearchParams(url.search);
         if (isIframe && backendUrl) {
-            if (params.get("get_gpu_token")) {
-                window.addEventListener('DOMContentLoaded', async function() {
-                    const link = document.getElementById("new_window");
-                    link.href = `${backendUrl}${url.search}`;
-                    link.click();
-                });
-            } else {
-                window.location.replace(`${backendUrl}${url.search}`);
-            }
+            window.location.replace(`${backendUrl}${url.search}`);
             return;
         }
-        if (params.get("__sign")) {
-            localStorage.setItem("HuggingSpace-api_key", params.get("__sign"));
-            if (!isIframe) {
-                window.location.replace("/");
-            }
-        }
     })();
@@ -240,10 +225,13 @@
+
+
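The added markup in the hunk above (the HuggingFace login button and its script) was lost in extraction, but it connects to the Bearer-header fallback added in g4f/api: a token obtained from the Hugging Face login flow can be sent as a plain Bearer token. A hypothetical request against a local instance; the host, port, token value, and model name are all placeholders:

import requests

token = "hf_..."  # placeholder; the demo obtains this from the Hugging Face login flow
response = requests.post(
    "http://localhost:1337/v1/chat/completions",  # default local g4f API address
    headers={"Authorization": f"Bearer {token}"},
    json={"model": "gpt-4o-mini", "messages": [{"role": "user", "content": "Hello"}]},
    timeout=120,
)
print(response.json()["choices"][0]["message"]["content"])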
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 9537f78c..66845ba7 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -39,6 +39,7 @@ let finish_storage = {};
 let usage_storage = {};
 let reasoning_storage = {};
 let generate_storage = {};
+let title_ids_storage = {};
 let is_demo = false;
 let wakeLock = null;
 let countTokensEnabled = true;
@@ -74,6 +75,8 @@ if (window.markdownit) {
         )
             .replaceAll("', '')
+            .replaceAll('<i class="', '')
     }
 }
@@ -301,7 +304,9 @@ const register_message_buttons = async () => {
     const conversation = await get_conversation(window.conversation_id);
     let buffer = "";
     conversation.items.forEach(message => {
-        buffer += render_reasoning_text(message.reasoning);
+        if (message.reasoning) {
+            buffer += render_reasoning_text(message.reasoning);
+        }
         buffer += `${message.role == 'user' ? 'User' : 'Assistant'}: ${message.content.trim()}\n\n\n`;
     });
     var download = document.getElementById("download");
@@ -435,25 +440,27 @@ const handle_ask = async (do_ask_gpt = true) => {
             imageInput.dataset.objects = images.join(" ");
         }
     }
-    message_box.innerHTML += `
-