From 3b96c27e3c7483f63d758d056ec5f0cd2fde0f8b Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Thu, 29 Feb 2024 14:44:51 +0100
Subject: Ignore empty auth header in api
 Add live token count in gui

---
 g4f/api/__init__.py            |  4 +-
 g4f/gui/client/css/style.css   | 30 +++++++++++---
 g4f/gui/client/html/index.html | 30 ++++++++------
 g4f/gui/client/js/chat.v1.js   | 91 ++++++++++++++++++++++++++++--------------
 4 files changed, 106 insertions(+), 49 deletions(-)

diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index f8d0b4af..d8e68bed 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -85,7 +85,9 @@ class Api:
             if config.api_key is None and request is not None:
                 auth_header = request.headers.get("Authorization")
                 if auth_header is not None:
-                    config.api_key = auth_header.split(None, 1)[-1]
+                    auth_header = auth_header.split(None, 1)[-1]
+                    if auth_header and auth_header != "Bearer":
+                        config.api_key = auth_header
             response = self.client.chat.completions.create(
                 **config.dict(exclude_none=True),
                 ignored=self.list_ignored_providers
diff --git a/g4f/gui/client/css/style.css b/g4f/gui/client/css/style.css
index bed54f88..6ae720f3 100644
--- a/g4f/gui/client/css/style.css
+++ b/g4f/gui/client/css/style.css
@@ -119,7 +119,7 @@ body {
     width: 100%;
     display: flex;
     flex-direction: column;
-    gap: 15px;
+    gap: 5px;
 }
 
 .conversation #messages {
@@ -129,11 +129,12 @@ body {
     flex-direction: column;
     overflow: auto;
     overflow-wrap: break-word;
-    padding-bottom: 20px;
+    padding-bottom: 10px;
 }
 
 .conversation .user-input {
     max-height: 200px;
+    margin-bottom: 10px;
 }
 
 .conversation .user-input input {
@@ -385,12 +386,29 @@ body {
     font-size: 14px;
 }
 
+.toolbar {
+    position: relative;
+}
+
+#input-count {
+    width: fit-content;
+    font-size: 12px;
+    padding: 6px 15px;
+}
+
 .stop_generating, .regenerate {
     position: absolute;
-    bottom: 122px;
-    left: 50%;
-    transform: translateX(-50%);
     z-index: 1000000;
+    top: 0;
+    right: 0;
+}
+
+@media only screen and (min-width: 40em) {
+    .stop_generating, .regenerate {
+        left: 50%;
+        transform: translateX(-50%);
+        right: auto;
+    }
 }
 
 .stop_generating button, .regenerate button{
@@ -399,7 +417,7 @@ body {
     background-color: var(--blur-bg);
     border-radius: var(--border-radius-1);
     border: 1px solid var(--blur-border);
-    padding: 10px 15px;
+    padding: 5px 15px;
     color: var(--colour-3);
     display: flex;
     justify-content: center;
diff --git a/g4f/gui/client/html/index.html b/g4f/gui/client/html/index.html
index 9ef8a820..96829b2c 100644
--- a/g4f/gui/client/html/index.html
+++ b/g4f/gui/client/html/index.html
@@ -112,19 +112,23 @@
-
- -
-
- -
-
+
+
+
+   +
+
+ +
+
+ +
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 8b065be2..ff486c6e 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -5,10 +5,12 @@ const message_input = document.getElementById(`message-input`);
 const box_conversations = document.querySelector(`.top`);
 const stop_generating = document.querySelector(`.stop_generating`);
 const regenerate = document.querySelector(`.regenerate`);
-const send_button = document.querySelector(`#send-button`);
-const imageInput = document.querySelector('#image');
-const cameraInput = document.querySelector('#camera');
-const fileInput = document.querySelector('#file');
+const send_button = document.getElementById("send-button");
+const imageInput = document.getElementById("image");
+const cameraInput = document.getElementById("camera");
+const fileInput = document.getElementById("file");
+const inputCount = document.getElementById("input-count")
+const modelSelect = document.getElementById("model");
 
 let prompt_lock = false;
 
@@ -75,6 +77,7 @@ const handle_ask = async () => {
     if (message.length > 0) {
         message_input.value = '';
         prompt_lock = true;
+        count_input()
         await add_conversation(window.conversation_id, message);
         if ("text" in fileInput.dataset) {
             message += '\n```' + fileInput.dataset.type + '\n';
@@ -89,6 +92,7 @@ const handle_ask = async () => {
     if (input.files.length > 0)
         imageInput.dataset.src = URL.createObjectURL(input.files[0]);
     else delete imageInput.dataset.src
+    model = modelSelect.options[modelSelect.selectedIndex].value
     message_box.innerHTML += `
@@ -97,11 +101,14 @@ const handle_ask = async () => {
+
${markdown_render(message)} ${imageInput.dataset.src ? 'Image upload' : '' } +
+
${count_words_and_tokens(message, model)}
        `;
@@ -120,19 +127,25 @@ const remove_cancel_button = async () => {
     }, 300);
 };
 
-const filter_messages = (messages) => {
+const filter_messages = (messages, filter_last_message = true) => {
     // Removes none user messages at end
-    let last_message;
-    while (last_message = messages.pop()) {
-        if (last_message["role"] == "user") {
-            messages.push(last_message);
-            break;
+    if (filter_last_message) {
+        let last_message;
+        while (last_message = messages.pop()) {
+            if (last_message["role"] == "user") {
+                messages.push(last_message);
+                break;
+            }
         }
     }
 
     // Remove history, if it is selected
     if (document.getElementById('history')?.checked) {
-        messages = [messages[messages.length-1]];
+        if (filter_last_message) {
+            messages = [messages.pop()];
+        } else {
+            messages = [messages.pop(), messages.pop()];
+        }
     }
 
     let new_messages = [];
@@ -165,7 +178,6 @@ const ask_gpt = async () => {
     jailbreak = document.getElementById("jailbreak");
     provider = document.getElementById("provider");
-    model = document.getElementById("model");
     window.text = '';
 
     stop_generating.classList.remove(`stop_generating-hidden`);
@@ -188,11 +200,13 @@
+
    `;
 
     content = document.getElementById(`gpt_${window.token}`);
     content_inner = content.querySelector('.content_inner');
+    content_count = content.querySelector('.count');
 
     message_box.scrollTop = message_box.scrollHeight;
     window.scrollTo(0, 0);
@@ -200,7 +214,7 @@
     let body = JSON.stringify({
         id: window.token,
         conversation_id: window.conversation_id,
-        model: model.options[model.selectedIndex].value,
+        model: modelSelect.options[modelSelect.selectedIndex].value,
         jailbreak: jailbreak.options[jailbreak.selectedIndex].value,
         web_search: document.getElementById(`switch`).checked,
         provider: provider.options[provider.selectedIndex].value,
@@ -270,6 +284,7 @@
                 html = html.substring(0, lastIndex) + '' + lastElement;
             }
             content_inner.innerHTML = html;
+            content_count.innerText = count_words_and_tokens(text, provider?.model);
             highlight(content_inner);
         }
 
@@ -397,13 +412,13 @@ const load_conversation = async (conversation_id) => {
     let messages = await get_messages(conversation_id);
 
     let elements = "";
+    let last_model = null;
     for (i in messages) {
         let item = messages[i];
+        last_model = item?.provider?.model;
         let next_i = parseInt(i) + 1;
         let next_provider = item.provider ? item.provider : (messages.length > next_i ? messages[next_i].provider : null);
 
-        let tokens_count = next_provider?.model ? count_tokens(next_provider.model, item.content) : "";
-        let append_count = tokens_count ? `, ${tokens_count} tokens` : "";
-        let words_count = `(${count_words(item.content)} words${append_count})`
+        let provider_link = item?.provider?.name ? `${item.provider.name}` : "";
         let provider = provider_link ? `
@@ -424,17 +439,18 @@ const load_conversation = async (conversation_id) => {
${provider}
${markdown_render(item.content)}
-
${words_count}
+
${count_words_and_tokens(item.content, next_provider?.model)}
        `;
     }
 
-    const filtered = filter_messages(messages);
+    const filtered = filter_messages(messages, false);
     if (filtered.length > 0) {
-        let count_total = GPTTokenizer_cl100k_base?.encodeChat(filtered, "gpt-3.5-turbo").length
+        last_model = last_model?.startsWith("gpt-4") ? "gpt-4" : "gpt-3.5-turbo"
+        let count_total = GPTTokenizer_cl100k_base?.encodeChat(filtered, last_model).length
         if (count_total > 0) {
-            elements += `
(${count_total} tokens used)
`; + elements += `
(${count_total} tokens total)
`;
         }
     }
 
@@ -467,6 +483,11 @@ function count_tokens(model, text) {
     }
 }
 
+function count_words_and_tokens(text, model) {
+    const tokens_count = model ? `, ${count_tokens(model, text)} tokens` : "";
+    return `(${count_words(text)} words${tokens_count})`
+}
+
 const get_conversation = async (conversation_id) => {
     let conversation = await JSON.parse(
         localStorage.getItem(`conversation:${conversation_id}`)
@@ -703,6 +724,16 @@ colorThemes.forEach((themeOption) => {
     });
 });
 
+const count_input = async () => {
+    if (message_input.value) {
+        model = modelSelect.options[modelSelect.selectedIndex].value;
+        inputCount.innerText = count_words_and_tokens(message_input.value, model);
+    } else {
+        inputCount.innerHTML = " "
+    }
+};
+message_input.addEventListener("keyup", count_input);
+
 window.onload = async () => {
     setTheme();
 
@@ -713,18 +744,21 @@ window.onload = async () => {
         }
     }
 
-    await setTimeout(() => {
-        load_conversations();
-    }, 1);
+    count_input();
 
     if (/\/chat\/.+/.test(window.location.href)) {
-        await load_conversation(window.conversation_id);
+        load_conversation(window.conversation_id);
     } else {
-        await say_hello()
+        say_hello()
     }
-
-    message_input.addEventListener(`keydown`, async (evt) => {
+
+    setTimeout(() => {
+        load_conversations();
+    }, 1);
+
+    message_input.addEventListener("keydown", async (evt) => {
         if (prompt_lock) return;
+
         if (evt.keyCode === 13 && !evt.shiftKey) {
             evt.preventDefault();
             console.log("pressed enter");
@@ -768,12 +802,11 @@ observer.observe(message_input, { attributes: true });
 
 (async () => {
     response = await fetch('/backend-api/v2/models')
     models = await response.json()
-    let select = document.getElementById('model');
     for (model of models) {
         let option = document.createElement('option');
         option.value = option.text = model;
-        select.appendChild(option);
+        modelSelect.appendChild(option);
     }
 
     response = await fetch('/backend-api/v2/providers')
-- 
cgit v1.2.3


From 1ec37aea224938640a4eee7664087d6eb0da6abc Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Sat, 2 Mar 2024 17:32:12 +0100
Subject: Fix history on error, fix count words for chinese

---
 g4f/gui/client/js/chat.v1.js | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index ff486c6e..7e1bb397 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -466,13 +466,14 @@ const load_conversation = async (conversation_id) => {
     }, 500);
 };
 
-function count_words(text) {
-    var matches = text.match(/[\w\d\’\'-]+/gi);
+// https://stackoverflow.com/questions/20396456/how-to-do-word-counts-for-a-mixture-of-english-and-chinese-in-javascript
+function count_words(str) {
+    var matches = str.match(/[\u00ff-\uffff]|\S+/g);
     return matches ? matches.length : 0;
 }
 
 function count_tokens(model, text) {
-    if (model.startsWith("gpt-3") || model.startsWith("gpt-4") || model.startsWith("text-davinci")) {
+    if (model.startsWith("gpt-3") || model.startsWith("gpt-4")) {
         return GPTTokenizer_cl100k_base?.encode(text).length;
     }
     if (model.startsWith("llama2") || model.startsWith("codellama")) {
@@ -524,7 +525,9 @@ const add_conversation = async (conversation_id, content) => {
 const hide_last_message = async (conversation_id) => {
     const conversation = await get_conversation(conversation_id)
     const last_message = conversation.items.pop();
-    last_message["regenerate"] = true;
+    if (last_message["role"] == "assistant") {
+        last_message["regenerate"] = true;
+    }
     conversation.items.push(last_message);
 
     localStorage.setItem(
-- 
cgit v1.2.3


From 20ab17f31ace225b434236b9c61fa6e8ae12c573 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Sun, 3 Mar 2024 11:34:56 +0100
Subject: Improve gui handling, Improve count tokens

---
 g4f/gui/client/js/chat.v1.js | 50 +++++++++++++++++++++++++-------------------
 1 file changed, 28 insertions(+), 22 deletions(-)

diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 7e1bb397..9772fbf7 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -5,6 +5,8 @@ const message_input = document.getElementById(`message-input`);
 const box_conversations = document.querySelector(`.top`);
 const stop_generating = document.querySelector(`.stop_generating`);
 const regenerate = document.querySelector(`.regenerate`);
+const sidebar = document.querySelector(".conversations");
+const sidebar_button = document.querySelector(".mobile-sidebar");
 const send_button = document.getElementById("send-button");
 const imageInput = document.getElementById("image");
 const cameraInput = document.getElementById("camera");
 const fileInput = document.getElementById("file");
@@ -65,6 +67,13 @@ const register_remove_message = async () => {
 
 const delete_conversations = async () => {
     localStorage.clear();
+    for (let i = 0; i < localStorage.length; i++){
+        let key = localStorage.key(i);
+        if (key.startsWith("conversation:")) {
+            localStorage.removeItem(key);
+        }
+    }
+    hide_sidebar();
     await new_conversation();
 };
 
@@ -395,7 +404,8 @@ const set_conversation = async (conversation_id) => {
 
     await clear_conversation();
     await load_conversation(conversation_id);
-    await load_conversations();
+    load_conversations();
+    hide_sidebar();
 };
 
 const new_conversation = async () => {
@@ -403,9 +413,9 @@ const new_conversation = async () => {
     window.conversation_id = uuid();
 
     await clear_conversation();
-    await load_conversations();
-
-    await say_hello()
+    load_conversations();
+    hide_sidebar();
+    say_hello();
 };
 
 const load_conversation = async (conversation_id) => {
@@ -419,7 +429,7 @@ const load_conversation = async (conversation_id) => {
         let next_i = parseInt(i) + 1;
         let next_provider = item.provider ? item.provider : (messages.length > next_i ? messages[next_i].provider : null);
 
-        let provider_link = item?.provider?.name ? `${item.provider.name}` : "";
+        let provider_link = item.provider?.name ? `${item.provider.name}` : "";
         let provider = provider_link ? `
${provider_link} @@ -428,7 +438,7 @@ const load_conversation = async (conversation_id) => { ` : ""; elements += `
-
+
${item.role == "assistant" ? gpt_image : user_image} ${item.role == "assistant" @@ -450,13 +460,13 @@ const load_conversation = async (conversation_id) => { last_model = last_model?.startsWith("gpt-4") ? "gpt-4" : "gpt-3.5-turbo" let count_total = GPTTokenizer_cl100k_base?.encodeChat(filtered, last_model).length if (count_total > 0) { - elements += `
(${count_total} tokens total)
`; + elements += `
(${count_total} tokens used)
`;
         }
     }
 
     message_box.innerHTML = elements;
 
-    await register_remove_message();
+    register_remove_message();
     highlight(message_box);
 
     message_box.scrollTo({ top: message_box.scrollHeight, behavior: "smooth" });
@@ -485,8 +495,9 @@ function count_tokens(model, text) {
 }
 
 function count_words_and_tokens(text, model) {
-    const tokens_count = model ? `, ${count_tokens(model, text)} tokens` : "";
-    return `(${count_words(text)} words${tokens_count})`
+    const tokens_count = model ? count_tokens(model, text) : null;
+    const tokens_append = tokens_count ? `, ${tokens_count} tokens` : "";
+    return `(${count_words(text)} words${tokens_append})`
 }
 
 const get_conversation = async (conversation_id) => {
@@ -629,15 +640,17 @@ const message_id = () => {
     return BigInt(`0b${unix}${random_bytes}`).toString();
 };
 
-document.querySelector(".mobile-sidebar").addEventListener("click", (event) => {
-    const sidebar = document.querySelector(".conversations");
+async function hide_sidebar() {
+    sidebar.classList.remove("shown");
+    sidebar_button.classList.remove("rotated");
+}
 
+sidebar_button.addEventListener("click", (event) => {
     if (sidebar.classList.contains("shown")) {
-        sidebar.classList.remove("shown");
-        event.target.classList.remove("rotated");
+        hide_sidebar();
     } else {
         sidebar.classList.add("shown");
-        event.target.classList.add("rotated");
+        sidebar_button.classList.add("rotated");
     }
 
     window.scrollTo(0, 0);
@@ -740,13 +753,6 @@ message_input.addEventListener("keyup", count_input);
 window.onload = async () => {
     setTheme();
 
-    let conversations = 0;
-    for (let i = 0; i < localStorage.length; i++) {
-        if (localStorage.key(i).startsWith("conversation:")) {
-            conversations += 1;
-        }
-    }
-
     count_input();
 
     if (/\/chat\/.+/.test(window.location.href)) {
-- 
cgit v1.2.3


From b521f53c2b71978d16bbfc166fa674f4e382c83d Mon Sep 17 00:00:00 2001
From: Tekky <98614666+xtekky@users.noreply.github.com>
Date: Tue, 5 Mar 2024 12:49:10 +0000
Subject: ~

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 03df1326..19995645 100644
--- a/README.md
+++ b/README.md
@@ -26,6 +26,7 @@ docker pull hlohaus789/g4f
 - [/docs/guides/help_me](/docs/guides/help_me.md)
 - Join our Telegram Channel: [t.me/g4f_channel](https://telegram.me/g4f_channel)
 - Join our Discord Group: [discord.gg/XfybzPXPH5](https://discord.gg/XfybzPXPH5)
+- Check out [G4F, but 100% local](https://github.com/gpt4free/g4f-local)
 
 ## 🔻 Site Takedown
 Is your site on this repository and you want to take it down? Send an email to takedown@g4f.ai with proof it is yours and it will be removed as fast as possible. To prevent reproduction please secure your API ;)
-- 
cgit v1.2.3