From 9994bb67a1cc1a09f4c5eba70dfbd77c96d0ed76 Mon Sep 17 00:00:00 2001
From: hlohaus <983577+hlohaus@users.noreply.github.com>
Date: Wed, 5 Feb 2025 17:07:20 +0100
Subject: Add zerogpu_uuid to demo

---
 g4f/Provider/CablyAI.py                 |  3 +--
 g4f/Provider/hf_space/G4F.py            | 18 +++++++++++++++---
 g4f/Provider/hf_space/Janus_Pro_7B.py   |  5 ++++-
 g4f/Provider/template/OpenaiTemplate.py |  3 ++-
 g4f/gui/client/demo.html                | 12 ++++++++++++
 g4f/gui/client/static/js/chat.v1.js     |  5 +----
 g4f/tools/files.py                      |  4 ++--
 7 files changed, 37 insertions(+), 13 deletions(-)

diff --git a/g4f/Provider/CablyAI.py b/g4f/Provider/CablyAI.py
index 1ccc343d..c9bd0cc2 100644
--- a/g4f/Provider/CablyAI.py
+++ b/g4f/Provider/CablyAI.py
@@ -26,7 +26,6 @@ class CablyAI(OpenaiTemplate):
     ] + reasoning_models
 
     model_aliases = {
-        "searchgpt": "searchgpt (free)",
         "gpt-4o-mini": "searchgpt",
         "llama-3.1-8b": "llama-3.1-8b-instruct",
         "deepseek-r1": "deepseek-r1-uncensored",
@@ -43,6 +42,6 @@ class CablyAI(OpenaiTemplate):
             model = super().get_model(model, **kwargs)
             return model.split(" (free)")[0]
         except ModelNotSupportedError:
-            if f"f{model} (free)" in cls.models:
+            if f"{model} (free)" in cls.models:
                 return model
             raise
\ No newline at end of file
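
The second CablyAI hunk fixes a stray "f" that had leaked into the f-string, so the fallback compared against "f<model> (free)" instead of "<model> (free)". A minimal standalone reproduction; the model list here is illustrative, not the provider's real list:

    models = ["searchgpt (free)", "deepseek-r1-uncensored (free)"]

    model = "searchgpt"
    # Before the fix: the stray "f" inside the f-string yields "fsearchgpt (free)",
    # so the membership test in the ModelNotSupportedError fallback never succeeded.
    print(f"f{model} (free)" in models)  # False
    # After the fix, the expected key is produced and the model is accepted.
    print(f"{model} (free)" in models)   # True
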
diff --git a/g4f/Provider/hf_space/G4F.py b/g4f/Provider/hf_space/G4F.py
index 1cff0097..9f16d789 100644
--- a/g4f/Provider/hf_space/G4F.py
+++ b/g4f/Provider/hf_space/G4F.py
@@ -34,10 +34,21 @@ class G4F(Janus_Pro_7B):
         height: int = 1024,
         seed: int = None,
         cookies: dict = None,
+        zerogpu_token: str = None,
+        zerogpu_uuid: str = None,
         **kwargs
     ) -> AsyncResult:
         if cls.default_model not in model:
-            async for chunk in super().create_async_generator(model, messages, prompt=prompt, seed=seed, cookies=cookies, **kwargs):
+            async for chunk in super().create_async_generator(
+                model, messages,
+                proxy=proxy,
+                prompt=prompt,
+                seed=seed,
+                cookies=cookies,
+                zerogpu_token=zerogpu_token,
+                zerogpu_uuid=zerogpu_uuid,
+                **kwargs
+            ):
                 yield chunk
             return
 
@@ -64,8 +75,9 @@ class G4F(Janus_Pro_7B):
             "trigger_id": 10
         }
         async with ClientSession() as session:
-            yield Reasoning(status="Acquiring GPU Token")
-            zerogpu_uuid, zerogpu_token = await get_zerogpu_token(cls.space, session, JsonConversation(), cookies)
+            if zerogpu_token is None:
+                yield Reasoning(status="Acquiring GPU Token")
+                zerogpu_uuid, zerogpu_token = await get_zerogpu_token(cls.space, session, JsonConversation(), cookies)
             headers = {
                 "x-zerogpu-token": zerogpu_token,
                 "x-zerogpu-uuid": zerogpu_uuid,
diff --git a/g4f/Provider/hf_space/Janus_Pro_7B.py b/g4f/Provider/hf_space/Janus_Pro_7B.py
index 0484316b..69708f13 100644
--- a/g4f/Provider/hf_space/Janus_Pro_7B.py
+++ b/g4f/Provider/hf_space/Janus_Pro_7B.py
@@ -70,6 +70,8 @@ class Janus_Pro_7B(AsyncGeneratorProvider, ProviderModelMixin):
         prompt: str = None,
         proxy: str = None,
         cookies: Cookies = None,
+        zerogpu_token: str = None,
+        zerogpu_uuid: str = None,
         return_conversation: bool = False,
         conversation: JsonConversation = None,
         seed: int = None,
@@ -90,7 +92,8 @@ class Janus_Pro_7B(AsyncGeneratorProvider, ProviderModelMixin):
         session_hash = generate_session_hash() if conversation is None else getattr(conversation, "session_hash")
         async with StreamSession(proxy=proxy, impersonate="chrome") as session:
             session_hash = generate_session_hash() if conversation is None else getattr(conversation, "session_hash")
-            zerogpu_uuid, zerogpu_token = await get_zerogpu_token(cls.space, session, conversation, cookies)
+            if zerogpu_token is None:
+                zerogpu_uuid, zerogpu_token = await get_zerogpu_token(cls.space, session, conversation, cookies)
             if conversation is None or not hasattr(conversation, "session_hash"):
                 conversation = JsonConversation(session_hash=session_hash, zerogpu_token=zerogpu_token, zerogpu_uuid=zerogpu_uuid)
             conversation.zerogpu_token = zerogpu_token
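
The G4F and Janus_Pro_7B hunks above share one pattern: create_async_generator now accepts zerogpu_token and zerogpu_uuid and only calls get_zerogpu_token when no token was supplied, so a caller that already holds credentials (such as the demo page) skips the acquisition round-trip. A minimal sketch of that pattern, with fetch_token as a hypothetical stand-in for get_zerogpu_token:

    import asyncio
    import uuid

    async def fetch_token(space: str) -> tuple[str, str]:
        # Hypothetical stand-in for get_zerogpu_token(space, session, conversation, cookies).
        return str(uuid.uuid4()), "freshly-acquired-token"

    async def create(space: str, zerogpu_token: str = None, zerogpu_uuid: str = None) -> dict:
        if zerogpu_token is None:
            # Acquire credentials only when the caller supplied none.
            zerogpu_uuid, zerogpu_token = await fetch_token(space)
        # Either way, both values end up in the request headers.
        return {"x-zerogpu-token": zerogpu_token, "x-zerogpu-uuid": zerogpu_uuid}

    # Passing existing credentials through avoids a second token acquisition.
    print(asyncio.run(create("roxky/g4f-space", "my-token", "my-uuid")))
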
diff --git a/g4f/Provider/template/OpenaiTemplate.py b/g4f/Provider/template/OpenaiTemplate.py
index 85086ccd..54938286 100644
--- a/g4f/Provider/template/OpenaiTemplate.py
+++ b/g4f/Provider/template/OpenaiTemplate.py
@@ -85,8 +85,9 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
 
         # Proxy for image generation feature
         if model and model in cls.image_models:
+            prompt = format_image_prompt(messages, prompt)
             data = {
-                "prompt": format_image_prompt(messages, prompt),
+                "prompt": prompt,
                 "model": model,
             }
             async with session.post(f"{api_base.rstrip('/')}/images/generations", json=data, ssl=cls.ssl) as response:
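
A small hoist in the hunk above: the formatted prompt is bound to a local name before the request body is built, presumably so the same value can be reused after the call (for example, when attaching the prompt to the generated-image response) rather than recomputed. A sketch with a simplified stand-in for format_image_prompt:

    # Simplified stand-in: fall back to the last user message when no
    # explicit prompt is given (assumed behavior, not the real helper).
    def format_image_prompt(messages: list, prompt: str = None) -> str:
        return prompt if prompt is not None else messages[-1]["content"]

    messages = [{"role": "user", "content": "a red fox, watercolor"}]
    prompt = format_image_prompt(messages, None)
    data = {"prompt": prompt, "model": "flux"}
    # `prompt` remains in scope after the request and can be reused.
    print(data)  # {'prompt': 'a red fox, watercolor', 'model': 'flux'}
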
diff --git a/g4f/gui/client/demo.html b/g4f/gui/client/demo.html
index 61e565ea..16189f48 100644
--- a/g4f/gui/client/demo.html
+++ b/g4f/gui/client/demo.html
@@ -198,6 +198,15 @@
 [hunk body lost in extraction: the HTML markup was stripped; only the "Get Access Token" label survives]
@@ -193,22 +224,21 @@
 [hunk body likewise lost in extraction; only the "Get Access Token" label survives]
@@ -236,7 +232,11 @@
     import * as hub from "@huggingface/hub";
     import { init } from "@huggingface/space-header";
 
-    if (window.self === window.top) {
+    const isIframe = window.self !== window.top;
+    const button = document.querySelector('form a.button');
+    if (isIframe) {
+        button.classList.remove('hidden');
+    } else {
         init("roxky/g4f-space");
     }
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 320a8e98..7f64a993 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -811,19 +811,18 @@ async function add_message_chunk(message, message_id, provider, scroll, finish_m
         log_storage.appendChild(p);
         await api("log", {...message, provider: provider_storage[message_id]});
     } else if (message.type == "preview") {
-        if (content_map.inner.clientHeight > 200)
-            content_map.inner.style.height = content_map.inner.clientHeight + "px";
         if (img = content_map.inner.querySelector("img"))
             if (!img.complete)
                 return;
             else
                 img.src = message.images;
-        else
-            content_map.inner.innerHTML = markdown_render(message.preview);
+        else {
+            content_map.inner.innerHTML = markdown_render(message.preview);
+            await register_message_images();
+        }
     } else if (message.type == "content") {
         message_storage[message_id] += message.content;
         update_message(content_map, message_id, null, scroll);
-        content_map.inner.style.height = "";
     } else if (message.type == "log") {
         let p = document.createElement("p");
         p.innerText = message.log;
--
cgit v1.2.3