From 5cbbe2fd3d2ec37d990c33f2d72018e526936c6f Mon Sep 17 00:00:00 2001
From: hlohaus <983577+hlohaus@users.noreply.github.com>
Date: Mon, 24 Feb 2025 08:53:43 +0100
Subject: Fix model and provider in chat completion response

Add login button to HuggingFace demo
Custom conversation ids in chat ui
Remove rate limiter in demo mode
Improve YouTube support in Gemini
---
 g4f/Provider/needs_auth/Gemini.py   | 15 +++++++-
 g4f/api/__init__.py                 | 12 +++++-
 g4f/client/__init__.py              | 59 ++++++++++++++++++++--------
 g4f/gui/client/demo.html            | 49 +++++++++++++++---------
 g4f/gui/client/static/js/chat.v1.js | 76 ++++++++++++++++++++++++++-----------
 g4f/gui/server/backend_api.py       | 37 +++---------------
 g4f/gui/server/website.py           |  2 -
 g4f/providers/retry_provider.py     |  2 +-
 8 files changed, 160 insertions(+), 92 deletions(-)

diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index a615eefc..af953129 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -203,7 +203,14 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
                             skip -= 1
                             continue
                         yield item
-                reasoning = "".join(find_str(response_part[4][0], 3))
+                reasoning = "\n\n".join(find_str(response_part[4][0], 3))
+                reasoning = re.sub(r"<b>|</b>", "**", reasoning)
+                def replace_image(match):
+                    return f"![](https:{match.group(0)})"
+                reasoning = re.sub(r"//yt3.(?:ggpht.com|googleusercontent.com/ytc)/[\w=-]+", replace_image, reasoning)
+                reasoning = re.sub(r"\nyoutube\n", "\n\n\n", reasoning)
+                reasoning = re.sub(r"\nYouTube\n", "\nYouTube ", reasoning)
+                reasoning = reasoning.replace('https://www.gstatic.com/images/branding/productlogos/youtube/v9/192px.svg', '')
                 content = response_part[4][0][1][0]
                 if reasoning:
                     yield Reasoning(status="🤔")
@@ -215,8 +222,12 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
                 if match:
                     image_prompt = match.group(1)
                     content = content.replace(match.group(0), '')
-                pattern = r"http://googleusercontent.com/image_generation_content/\d+"
+                pattern = r"http://googleusercontent.com/(?:image_generation|youtube)_content/\d+"
                 content = re.sub(pattern, "", content)
+                content = content.replace("<!-- end list -->", "")
+                content = content.replace("https://www.google.com/search?q=http://", "https://")
+                content = content.replace("https://www.google.com/search?q=https://", "https://")
+                content = content.replace("https://www.google.com/url?sa=E&source=gmail&q=http://", "http://")
                 if last_content and content.startswith(last_content):
                     yield content[len(last_content):]
                 else:
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 05f92526..8ff4b424 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -170,7 +170,9 @@ class Api:
             try:
                 user_g4f_api_key = await self.get_g4f_api_key(request)
             except HTTPException:
-                user_g4f_api_key = None
+                user_g4f_api_key = await self.security(request)
+                if hasattr(user_g4f_api_key, "credentials"):
+                    user_g4f_api_key = user_g4f_api_key.credentials
             path = request.url.path
             if path.startswith("/v1") or path.startswith("/api/") or (AppConfig.demo and path == '/backend-api/v2/upload_cookies'):
                 if user_g4f_api_key is None:
@@ -581,11 +583,17 @@ class Api:
                     pass
             if not os.path.isfile(target):
                 source_url = get_source_url(str(request.query_params))
+                ssl = None
+                if source_url is None:
+                    backend_url = os.environ.get("G4F_BACKEND_URL")
+                    if backend_url:
+                        source_url = f"{backend_url}/images/{filename}"
+                        ssl = False
                 if source_url is not None:
                     try:
                         await copy_images(
                             [source_url],
-                            target=target)
+                            target=target, ssl=ssl)
debug.log(f"Image copied from {source_url}") except Exception as e: debug.error(f"Download failed: {source_url}\n{type(e).__name__}: {e}") diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py index 3c7de6a7..f4d1a009 100644 --- a/g4f/client/__init__.py +++ b/g4f/client/__init__.py @@ -12,7 +12,7 @@ from typing import Union, AsyncIterator, Iterator, Awaitable, Optional from ..image.copy_images import copy_images from ..typing import Messages, ImageType from ..providers.types import ProviderType, BaseRetryProvider -from ..providers.response import ResponseType, ImageResponse, FinishReason, BaseConversation, SynthesizeData, ToolCalls, Usage +from ..providers.response import * from ..errors import NoImageResponseError from ..providers.retry_provider import IterListProvider from ..providers.asyncio import to_sync_generator @@ -49,6 +49,7 @@ def iter_response( finish_reason = None tool_calls = None usage = None + provider: ProviderInfo = None completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28)) idx = 0 @@ -65,10 +66,13 @@ def iter_response( elif isinstance(chunk, Usage): usage = chunk continue + elif isinstance(chunk, ProviderInfo): + provider = chunk + continue elif isinstance(chunk, BaseConversation): yield chunk continue - elif isinstance(chunk, SynthesizeData) or not chunk: + elif isinstance(chunk, HiddenResponse): continue elif isinstance(chunk, Exception): continue @@ -76,7 +80,6 @@ def iter_response( if isinstance(chunk, list): chunk = "".join(map(str, chunk)) else: - temp = chunk.__str__() if not isinstance(temp, str): if isinstance(temp, list): @@ -84,6 +87,8 @@ def iter_response( else: temp = repr(chunk) chunk = temp + if not chunk: + continue content += chunk @@ -96,7 +101,11 @@ def iter_response( finish_reason = "stop" if stream: - yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time())) + chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time())) + if provider is not None: + chunk.provider = provider.name + chunk.model = provider.model + yield chunk if finish_reason is not None: break @@ -108,7 +117,7 @@ def iter_response( finish_reason = "stop" if finish_reason is None else finish_reason if stream: - yield ChatCompletionChunk.model_construct( + chat_completion = ChatCompletionChunk.model_construct( None, finish_reason, completion_id, int(time.time()), usage=usage ) @@ -116,19 +125,24 @@ def iter_response( if response_format is not None and "type" in response_format: if response_format["type"] == "json_object": content = filter_json(content) - yield ChatCompletion.model_construct( + chat_completion = ChatCompletion.model_construct( content, finish_reason, completion_id, int(time.time()), usage=UsageModel.model_construct(**usage.get_dict()), **filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {} ) + if provider is not None: + chat_completion.provider = provider.name + chat_completion.model = provider.model + yield chat_completion # Synchronous iter_append_model_and_provider function def iter_append_model_and_provider(response: ChatCompletionResponseType, last_model: str, last_provider: ProviderType) -> ChatCompletionResponseType: if isinstance(last_provider, BaseRetryProvider): - last_provider = last_provider.last_provider + yield from response + return for chunk in response: if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)): - if last_provider is not None: + if chunk.provider is None and 
                chunk.model = getattr(last_provider, "last_model", last_model)
                chunk.provider = last_provider.__name__
        yield chunk
@@ -146,6 +160,7 @@ async def async_iter_response(
    idx = 0
    tool_calls = None
    usage = None
+    provider: ProviderInfo = None

    try:
        async for chunk in response:
@@ -161,12 +176,17 @@ async def async_iter_response(
            elif isinstance(chunk, Usage):
                usage = chunk
                continue
-            elif isinstance(chunk, SynthesizeData) or not chunk:
+            elif isinstance(chunk, ProviderInfo):
+                provider = chunk
+                continue
+            elif isinstance(chunk, HiddenResponse):
                continue
            elif isinstance(chunk, Exception):
                continue

            chunk = str(chunk)
+            if not chunk:
+                continue
            content += chunk
            idx += 1
@@ -179,7 +199,11 @@ async def async_iter_response(
                finish_reason = "stop"

            if stream:
-                yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+                chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+                if provider is not None:
+                    chunk.provider = provider.name
+                    chunk.model = provider.model
+                yield chunk

            if finish_reason is not None:
                break
@@ -190,7 +214,7 @@ async def async_iter_response(
        usage = Usage(completion_tokens=idx, total_tokens=idx)

        if stream:
-            yield ChatCompletionChunk.model_construct(
+            chat_completion = ChatCompletionChunk.model_construct(
                None, finish_reason, completion_id, int(time.time()), usage=usage.get_dict()
            )
@@ -198,11 +222,15 @@ async def async_iter_response(
        else:
            if response_format is not None and "type" in response_format:
                if response_format["type"] == "json_object":
                    content = filter_json(content)
-            yield ChatCompletion.model_construct(
+            chat_completion = ChatCompletion.model_construct(
                content, finish_reason, completion_id, int(time.time()), usage=UsageModel.model_construct(**usage.get_dict()),
                **filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {}
            )
+        if provider is not None:
+            chat_completion.provider = provider.name
+            chat_completion.model = provider.model
+        yield chat_completion
    finally:
        await safe_aclose(response)
@@ -214,11 +242,12 @@ async def async_iter_append_model_and_provider(
        last_provider = None
    try:
        if isinstance(last_provider, BaseRetryProvider):
-            if last_provider is not None:
-                last_provider = last_provider.last_provider
+            async for chunk in response:
+                yield chunk
+            return
        async for chunk in response:
            if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
-                if last_provider is not None:
+                if chunk.provider is None and last_provider is not None:
                    chunk.model = getattr(last_provider, "last_model", last_model)
                    chunk.provider = last_provider.__name__
            yield chunk
diff --git a/g4f/gui/client/demo.html b/g4f/gui/client/demo.html
index 1d75135f..36c2afed 100644
--- a/g4f/gui/client/demo.html
+++ b/g4f/gui/client/demo.html
@@ -183,25 +183,10 @@
        const isIframe = window.self !== window.top;
        const backendUrl = "{{backend_url}}";
        let url = new URL(window.location.href)
-        let params = new URLSearchParams(url.search);
        if (isIframe && backendUrl) {
-            if (params.get("get_gpu_token")) {
-                window.addEventListener('DOMContentLoaded', async function() {
-                    const link = document.getElementById("new_window");
-                    link.href = `${backendUrl}${url.search}`;
-                    link.click();
-                });
-            } else {
-                window.location.replace(`${backendUrl}${url.search}`);
-            }
+            window.location.replace(`${backendUrl}${url.search}`);
            return;
        }
-        if (params.get("__sign")) {
-            localStorage.setItem("HuggingSpace-api_key", params.get("__sign"));
-            if (!isIframe) {
-                window.location.replace("/");
-            }
-        }
    })();
@@ -240,10 +225,13 @@

            Get Access Token

+
+
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 9537f78c..66845ba7 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -39,6 +39,7 @@ let finish_storage = {};
 let usage_storage = {};
 let reasoning_storage = {};
 let generate_storage = {};
+let title_ids_storage = {};
 let is_demo = false;
 let wakeLock = null;
 let countTokensEnabled = true;
@@ -74,6 +75,8 @@ if (window.markdownit) {
        )
        .replaceAll("', '')
+        .replaceAll('<i class="', '')
    }
 }
@@ -301,7 +304,9 @@ const register_message_buttons = async () => {
    const conversation = await get_conversation(window.conversation_id);
    let buffer = "";
    conversation.items.forEach(message => {
-        buffer += render_reasoning_text(message.reasoning);
+        if (message.reasoning) {
+            buffer += render_reasoning_text(message.reasoning);
+        }
        buffer += `${message.role == 'user' ? 'User' : 'Assistant'}: ${message.content.trim()}\n\n\n`;
    });
    var download = document.getElementById("download");
@@ -435,25 +440,27 @@ const handle_ask = async (do_ask_gpt = true) => {
            imageInput.dataset.objects = images.join(" ");
        }
    }
-    message_box.innerHTML += `
-        <div class="message" data-index="${message_index}">
-            <div class="user">
-                ${user_image}
-                <i class="fa-solid fa-xmark"></i>
-                <i class="fa-regular fa-phone-arrow-up-right"></i>
-            </div>
-            <div class="content" id="user_${message_id}">
-                <div class="content_inner">
-                    ${markdown_render(message)}
-                    ${images.map((object)=>'<img src="' + object + '" alt="Image upload">').join("")}
-                </div>
-                <div class="count">
-                    ${countTokensEnabled ? count_words_and_tokens(message, get_selected_model()?.value) : ""}
-                </div>
-            </div>
-        </div>
+    const message_el = document.createElement("div");
+    message_el.classList.add("message");
+    message_el.dataset.index = message_index;
+    message_el.innerHTML = `
+        <div class="user">
+            ${user_image}
+            <i class="fa-solid fa-xmark"></i>
+            <i class="fa-regular fa-phone-arrow-up-right"></i>
+        </div>
+        <div class="content" id="user_${message_id}">
+            <div class="content_inner">
+                ${markdown_render(message)}
+                ${images.map((object)=>`<img src="${object}" alt="Image upload">`).join("")}
+            </div>
+            <div class="count">
+                ${countTokensEnabled ? count_words_and_tokens(message, get_selected_model()?.value) : ""}
+            </div>
+        </div>
    `;
-    highlight(message_box);
+    message_box.appendChild(message_el);
+    highlight(message_el);
    if (do_ask_gpt) {
        const all_pinned = document.querySelectorAll(".buttons button.pinned")
        if (all_pinned.length > 0) {
@@ -1012,7 +1019,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
    }
    try {
        let api_key;
-        if (is_demo && provider == "Feature") {
+        if (is_demo && ["OpenaiChat", "DeepSeekAPI", "PollinationsAI"].includes(provider)) {
            api_key = localStorage.getItem("user");
        } else if (["HuggingSpace", "G4F"].includes(provider)) {
            api_key = localStorage.getItem("HuggingSpace-api_key");
@@ -1096,9 +1103,30 @@ const clear_conversation = async () => {
    }
 };

+var illegalRe = /[\/\?<>\\:\*\|":]/g;
+var controlRe = /[\x00-\x1f\x80-\x9f]/g;
+var reservedRe = /^\.+$/;
+var windowsReservedRe = /^(con|prn|aux|nul|com[0-9]|lpt[0-9])(\..*)?$/i;
+
+function sanitize(input, replacement) {
+    var sanitized = input
+        .replace(illegalRe, replacement)
+        .replace(controlRe, replacement)
+        .replace(reservedRe, replacement)
+        .replace(windowsReservedRe, replacement);
+    return sanitized.replaceAll(/\/|#|\s{2,}/g, replacement).trim();
+}
+
 async function set_conversation_title(conversation_id, title) {
    conversation = await get_conversation(conversation_id)
    conversation.new_title = title;
+    const new_id = sanitize(title, " ");
+    if (new_id && !appStorage.getItem(`conversation:${new_id}`)) {
+        appStorage.removeItem(`conversation:${conversation.id}`);
+        title_ids_storage[conversation_id] = new_id;
+        conversation.id = new_id;
+        add_url_to_history(`/chat/${conversation_id}`);
+    }
    appStorage.setItem(
        `conversation:${conversation.id}`,
        JSON.stringify(conversation)
@@ -1123,6 +1151,7 @@ const show_option = async (conversation_id) => {
        input_el.onclick = (e) => e.stopPropagation()
        input_el.onfocus = () => trash_el.style.display = "none";
        input_el.onchange = () => set_conversation_title(conversation_id, input_el.value);
+        input_el.onblur = () => set_conversation_title(conversation_id, input_el.value);
        left_el.removeChild(title_el);
        left_el.appendChild(input_el);
    }
@@ -1162,6 +1191,9 @@ const delete_conversation = async (conversation_id) => {
 };

 const set_conversation = async (conversation_id) => {
+    if (title_ids_storage[conversation_id]) {
+        conversation_id = title_ids_storage[conversation_id];
+    }
    try {
        add_url_to_history(`/chat/${conversation_id}`);
    } catch (e) {
@@ -1912,11 +1944,11 @@ async function on_load() {
            messageInput.focus();
            //await handle_ask();
        }
-    } else if (/\/chat\/[^?]+/.test(window.location.href)) {
-        load_conversation(window.conversation_id);
-    } else {
+    } else if (/\/chat\/[?$]/.test(window.location.href)) {
        chatPrompt.value = document.getElementById("systemPrompt")?.value || "";
        say_hello();
+    } else {
+        load_conversation(window.conversation_id);
    }
    load_conversations();
 }
@@ -2007,7 +2039,8 @@ async function on_api() {
        }
        providerSelect.innerHTML = `
-
+
+
@@ -2340,7 +2373,6 @@ fileInput.addEventListener('change', async (event) => {
    Object.keys(data).forEach(key => {
        if (key == "options") {
            Object.keys(data[key]).forEach(keyOption => {
-                console.log(keyOption, data[key][keyOption]);
                appStorage.setItem(keyOption, data[key][keyOption]);
                count += 1;
            });
diff --git a/g4f/gui/server/backend_api.py b/g4f/gui/server/backend_api.py
index 28c74251..dbbd2e65 100644
--- a/g4f/gui/server/backend_api.py
+++ b/g4f/gui/server/backend_api.py
@@ -14,12 +14,6 @@ from pathlib import Path
 from urllib.parse import quote_plus
 from hashlib import sha256
 from werkzeug.utils import secure_filename
-try:
-    from flask_limiter import Limiter
-    from flask_limiter.util import get_remote_address
-    has_flask_limiter = True
-except ImportError:
-    has_flask_limiter = False

 from ...image import is_allowed_extension, to_image
 from ...client.service import convert_to_provider
@@ -62,19 +56,8 @@ class Backend_Api(Api):
        """
        self.app: Flask = app

-        if has_flask_limiter and app.demo:
-            limiter = Limiter(
-                get_remote_address,
-                app=app,
-                default_limits=["200 per day", "50 per hour"],
-                storage_uri="memory://",
-                auto_check=False,
-                strategy="moving-window",
-            )
-
-        if has_flask_limiter and app.demo:
+        if app.demo:
            @app.route('/', methods=['GET'])
-            @limiter.exempt
            def home():
                return render_template('demo.html', backend_url=os.environ.get("G4F_BACKEND_URL", ""))
        else:
@@ -116,7 +99,7 @@ class Backend_Api(Api):
            }
            for model, providers in models.demo_models.values()]

-        def handle_conversation(limiter_check: callable = None):
+        def handle_conversation():
            """
            Handles conversation requests and streams responses back.

@@ -135,7 +118,7 @@ class Backend_Api(Api):
            else:
                json_data = request.json

-            if app.demo and json_data.get("provider") not in ["Custom", "Feature", "HuggingFace", "HuggingSpace", "HuggingChat", "G4F", "PollinationsAI"]:
+            if app.demo and json_data.get("provider") not in ["DeepSeekAPI", "OpenaiChat", "HuggingFace", "HuggingSpace", "HuggingChat", "G4F", "PollinationsAI"]:
                model = json_data.get("model")
                if model != "default" and model in models.demo_models:
                    json_data["provider"] = random.choice(models.demo_models[model][1])
@@ -143,8 +126,6 @@ class Backend_Api(Api):
                if not model or model == "default":
                    json_data["model"] = models.demo_models["default"][0].name
                    json_data["provider"] = random.choice(models.demo_models["default"][1])
-                if limiter_check is not None and json_data.get("provider") in ["Feature"]:
-                    limiter_check()
            if "images" in json_data:
                kwargs["images"] = json_data["images"]
            kwargs = self._prepare_conversation_kwargs(json_data, kwargs)
@@ -158,15 +139,9 @@ class Backend_Api(Api):
                mimetype='text/event-stream'
            )

-        if has_flask_limiter and app.demo:
-            @app.route('/backend-api/v2/conversation', methods=['POST'])
-            @limiter.limit("2 per minute")
-            def _handle_conversation():
-                return handle_conversation(limiter.check)
-        else:
-            @app.route('/backend-api/v2/conversation', methods=['POST'])
-            def _handle_conversation():
-                return handle_conversation()
+        @app.route('/backend-api/v2/conversation', methods=['POST'])
+        def _handle_conversation():
+            return handle_conversation()

        @app.route('/backend-api/v2/usage', methods=['POST'])
        def add_usage():
diff --git a/g4f/gui/server/website.py b/g4f/gui/server/website.py
index 407fc706..213d4ab9 100644
--- a/g4f/gui/server/website.py
+++ b/g4f/gui/server/website.py
@@ -33,8 +33,6 @@ class Website:
    def _chat(self, conversation_id):
        if conversation_id == "share":
            return render_template('index.html', chat_id=str(uuid.uuid4()))
-        if '-' not in conversation_id:
-            return redirect_home()
        return render_template('index.html', chat_id=conversation_id)

    def _index(self):
diff --git a/g4f/providers/retry_provider.py b/g4f/providers/retry_provider.py
index 6b8ca3b0..1c3f2c21 100644
--- a/g4f/providers/retry_provider.py
+++ b/g4f/providers/retry_provider.py
@@ -87,7 +87,7 @@ class IterListProvider(BaseRetryProvider):
        for provider in self.get_providers(stream and not ignore_stream, ignored):
            self.last_provider = provider
            debug.log(f"Using {provider.__name__} provider")
-            yield ProviderInfo(**provider.get_dict())
+            yield ProviderInfo(**provider.get_dict(), model=model if model else getattr(provider, "default_model"))
            try:
                response = provider.get_async_create_function()(model, messages, stream=stream, **kwargs)
                if hasattr(response, "__aiter__"):
-- 
cgit v1.2.3


From ee9e0c38268ae01acce4245782bb306d9cf800e5 Mon Sep 17 00:00:00 2001
From: hlohaus <983577+hlohaus@users.noreply.github.com>
Date: Mon, 24 Feb 2025 15:50:50 +0100
Subject: Add Gemini provider to Demo

---
 g4f/gui/client/static/js/chat.v1.js | 3 ++-
 g4f/gui/server/backend_api.py       | 2 +-
 g4f/providers/retry_provider.py     | 4 ++--
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 66845ba7..68641b8e 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -1019,7 +1019,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
    }
    try {
        let api_key;
-        if (is_demo && ["OpenaiChat", "DeepSeekAPI", "PollinationsAI"].includes(provider)) {
+        if (is_demo && ["OpenaiChat", "DeepSeekAPI", "PollinationsAI", "Gemini"].includes(provider)) {
            api_key = localStorage.getItem("user");
        } else if (["HuggingSpace", "G4F"].includes(provider)) {
            api_key = localStorage.getItem("HuggingSpace-api_key");
@@ -2043,6 +2043,7 @@ async function on_api() {
+
        `;
diff --git a/g4f/gui/server/backend_api.py b/g4f/gui/server/backend_api.py
index dbbd2e65..1dc66223 100644
--- a/g4f/gui/server/backend_api.py
+++ b/g4f/gui/server/backend_api.py
@@ -118,7 +118,7 @@ class Backend_Api(Api):
            else:
                json_data = request.json

-            if app.demo and json_data.get("provider") not in ["DeepSeekAPI", "OpenaiChat", "HuggingFace", "HuggingSpace", "HuggingChat", "G4F", "PollinationsAI"]:
+            if app.demo and not json_data.get("provider"):
                model = json_data.get("model")
                if model != "default" and model in models.demo_models:
                    json_data["provider"] = random.choice(models.demo_models[model][1])
diff --git a/g4f/providers/retry_provider.py b/g4f/providers/retry_provider.py
index 1c3f2c21..a85c136c 100644
--- a/g4f/providers/retry_provider.py
+++ b/g4f/providers/retry_provider.py
@@ -59,7 +59,7 @@ class IterListProvider(BaseRetryProvider):
                for chunk in response:
                    if chunk:
                        yield chunk
-                        if isinstance(chunk, str) or isinstance(chunk, ImageResponse):
+                        if isinstance(chunk, (str, ImageResponse)):
                            started = True
                if started:
                    return
@@ -94,7 +94,7 @@ class IterListProvider(BaseRetryProvider):
                async for chunk in response:
                    if chunk:
                        yield chunk
-                        if isinstance(chunk, str) or isinstance(chunk, ImageResponse):
+                        if isinstance(chunk, (str, ImageResponse)):
                            started = True
            elif response:
                response = await response
-- 
cgit v1.2.3
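
Taken together, the two commits change what API consumers observe in the completion objects: IterListProvider now yields a ProviderInfo chunk naming the concrete provider and resolved model, and the client attaches those to each ChatCompletion/ChatCompletionChunk instead of reporting the retry wrapper. A minimal sketch of the visible effect, using g4f's public client API (the model name and prompt below are illustrative, not taken from the patch):

    from g4f.client import Client

    client = Client()
    response = client.chat.completions.create(
        model="gpt-4o-mini",  # illustrative; any model the active providers support
        messages=[{"role": "user", "content": "Hello"}],
    )
    # Before these commits, a retried request could report the retry wrapper's
    # name; now `provider` names the provider that actually answered, and
    # `model` falls back to that provider's default_model when none resolved.
    print(response.provider, response.model)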