author     Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-01-01 17:48:57 +0100
committer  Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-01-01 17:48:57 +0100
commit     c617b18d12c2f9d82ce7c73aae46d353b83f625a (patch)
tree       898f5090865a8aea64fb87e56f9ebfc979a6b706 /g4f/gui
parent     Patch event loop on win, Check event loop closed (diff)
Diffstat (limited to 'g4f/gui')
-rw-r--r--  g4f/gui/client/css/style.css   7
-rw-r--r--  g4f/gui/client/js/chat.v1.js  63
-rw-r--r--  g4f/gui/server/backend.py     32
3 files changed, 64 insertions(+), 38 deletions(-)
diff --git a/g4f/gui/client/css/style.css b/g4f/gui/client/css/style.css
index b6d73650..e619b409 100644
--- a/g4f/gui/client/css/style.css
+++ b/g4f/gui/client/css/style.css
@@ -295,11 +295,12 @@ body {
gap: 18px;
}
-.message .content p,
-.message .content li,
-.message .content code {
+.message .content,
+.message .content a:link,
+.message .content a:visited{
font-size: 15px;
line-height: 1.3;
+ color: var(--colour-3);
}
.message .content pre {
white-space: pre-wrap;
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 644ff77a..638ce0ac 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -73,7 +73,7 @@ const ask_gpt = async () => {
provider = document.getElementById("provider");
model = document.getElementById("model");
prompt_lock = true;
- window.text = ``;
+ window.text = '';
stop_generating.classList.remove(`stop_generating-hidden`);
@@ -88,10 +88,13 @@ const ask_gpt = async () => {
${gpt_image} <i class="fa-regular fa-phone-arrow-down-left"></i>
</div>
<div class="content" id="gpt_${window.token}">
- <div id="cursor"></div>
+ <div class="provider"></div>
+ <div class="content_inner"><div id="cursor"></div></div>
</div>
</div>
`;
+ content = document.getElementById(`gpt_${window.token}`);
+ content_inner = content.querySelector('.content_inner');
message_box.scrollTop = message_box.scrollHeight;
window.scrollTo(0, 0);
@@ -123,28 +126,38 @@ const ask_gpt = async () => {
await new Promise((r) => setTimeout(r, 1000));
window.scrollTo(0, 0);
- const reader = response.body.getReader();
+ const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+
+ error = provider = null;
while (true) {
const { value, done } = await reader.read();
if (done) break;
-
- chunk = new TextDecoder().decode(value);
-
- text += chunk;
-
- document.getElementById(`gpt_${window.token}`).innerHTML = markdown_render(text);
- document.querySelectorAll(`code`).forEach((el) => {
- hljs.highlightElement(el);
- });
+ for (const line of value.split("\n")) {
+ if (!line) continue;
+ const message = JSON.parse(line);
+ if (message["type"] == "content") {
+ text += message["content"];
+ } else if (message["type"] == "provider") {
+ provider = message["provider"];
+ content.querySelector('.provider').innerHTML =
+ '<a href="' + provider.url + '" target="_blank">' + provider.name + "</a>"
+ } else if (message["type"] == "error") {
+ error = message["error"];
+ }
+ }
+ if (error) {
+ console.error(error);
+ content_inner.innerHTML = "An error occured, please try again, if the problem persists, please use a other model or provider";
+ } else {
+ content_inner.innerHTML = markdown_render(text);
+ document.querySelectorAll('code').forEach((el) => {
+ hljs.highlightElement(el);
+ });
+ }
window.scrollTo(0, 0);
message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
}
-
- if (text.includes(`G4F_ERROR`)) {
- console.log("response", text);
- document.getElementById(`gpt_${window.token}`).innerHTML = "An error occured, please try again, if the problem persists, please use a other model or provider";
- }
} catch (e) {
console.log(e);
@@ -153,13 +166,13 @@ const ask_gpt = async () => {
if (e.name != `AbortError`) {
text = `oops ! something went wrong, please try again / reload. [stacktrace in console]`;
- document.getElementById(`gpt_${window.token}`).innerHTML = text;
+ content_inner.innerHTML = text;
} else {
- document.getElementById(`gpt_${window.token}`).innerHTML += ` [aborted]`;
+ content_inner.innerHTML += ` [aborted]`;
text += ` [aborted]`
}
}
- add_message(window.conversation_id, "assistant", text);
+ add_message(window.conversation_id, "assistant", text, provider);
message_box.scrollTop = message_box.scrollHeight;
await remove_cancel_button();
prompt_lock = false;
@@ -259,10 +272,11 @@ const load_conversation = async (conversation_id) => {
}
</div>
<div class="content">
- ${item.role == "assistant"
- ? markdown_render(item.content)
- : item.content
+ ${item.provider
+ ? '<div class="provider"><a href="' + item.provider.url + '" target="_blank">' + item.provider.name + '</a></div>'
+ : ''
}
+ <div class="content_inner">${markdown_render(item.content)}</div>
</div>
</div>
`;
@@ -323,12 +337,13 @@ const remove_last_message = async (conversation_id) => {
);
};
-const add_message = async (conversation_id, role, content) => {
+const add_message = async (conversation_id, role, content, provider) => {
const conversation = await get_conversation(conversation_id);
conversation.items.push({
role: role,
content: content,
+ provider: provider
});
localStorage.setItem(
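
The rewritten reader loop above consumes a stream of newline-delimited JSON messages, each tagged with a "type" of "content", "provider", or "error". Below is a minimal Python sketch of the same client-side handling, assuming a requests-style streaming response; the endpoint URL and request payload are illustrative placeholders, not part of this commit.

```python
import json
import requests  # third-party HTTP client, used here only for illustration

def stream_conversation(url: str, payload: dict) -> str:
    """Consume the newline-delimited JSON stream and return the full response text."""
    text = ""
    provider = None
    with requests.post(url, json=payload, stream=True) as response:
        for line in response.iter_lines():
            if not line:
                continue
            message = json.loads(line)
            if message["type"] == "content":
                text += message["content"]
            elif message["type"] == "provider":
                # Matches the {"name": ..., "url": ...} object rendered as a link in the GUI.
                provider = message["provider"]
            elif message["type"] == "error":
                raise RuntimeError(message["error"])
    return text
```
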
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 105edb43..1aa506b2 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -4,7 +4,7 @@ from g4f.Provider import __providers__
import json
from flask import request, Flask
from .internet import get_search_message
-from g4f import debug
+from g4f import debug, version
debug.logging = True
@@ -53,8 +53,8 @@ class Backend_Api:
def version(self):
return {
- "version": debug.get_version(),
- "lastet_version": debug.get_latest_version(),
+ "version": version.utils.current_version,
+ "lastet_version": version.utils.latest_version,
}
def _gen_title(self):
@@ -65,7 +65,7 @@ class Backend_Api:
def _conversation(self):
#jailbreak = request.json['jailbreak']
messages = request.json['meta']['content']['parts']
- if request.json['internet_access']:
+ if request.json.get('internet_access'):
messages[-1]["content"] = get_search_message(messages[-1]["content"])
model = request.json.get('model')
model = model if model else g4f.models.default
@@ -74,20 +74,30 @@ class Backend_Api:
def try_response():
try:
- yield from g4f.ChatCompletion.create(
+ first = True
+ for chunk in g4f.ChatCompletion.create(
model=model,
provider=provider,
messages=messages,
stream=True,
ignore_stream_and_auth=True
- )
+ ):
+ if first:
+ first = False
+ yield json.dumps({
+ 'type' : 'provider',
+ 'provider': g4f.get_last_provider(True)
+ }) + "\n"
+ yield json.dumps({
+ 'type' : 'content',
+ 'content': chunk,
+ }) + "\n"
+
except Exception as e:
- print(e)
yield json.dumps({
- 'code' : 'G4F_ERROR',
- '_action': '_ask',
- 'success': False,
- 'error' : f'{e.__class__.__name__}: {e}'
+ 'type' : 'error',
+ 'error': f'{e.__class__.__name__}: {e}'
})
+ raise e
return self.app.response_class(try_response(), mimetype='text/event-stream')
\ No newline at end of file
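
The core of the change in try_response() is the framing: a "provider" message is emitted before the first content chunk, every chunk becomes a "content" message, and exceptions are forwarded as an "error" message before being re-raised. The following is a minimal sketch of that framing in isolation, decoupled from Flask and g4f; frame_response() and the get_provider_info callable are hypothetical helpers introduced only for illustration.

```python
import json
from typing import Callable, Iterable, Iterator

def frame_response(chunks: Iterable[str], get_provider_info: Callable[[], dict]) -> Iterator[str]:
    """Yield newline-delimited JSON messages in the format the GUI client parses."""
    try:
        first = True
        for chunk in chunks:
            if first:
                # The provider message is sent once, right before the first content chunk.
                first = False
                yield json.dumps({"type": "provider", "provider": get_provider_info()}) + "\n"
            yield json.dumps({"type": "content", "content": chunk}) + "\n"
    except Exception as e:
        # Errors are forwarded to the client as a final message, then re-raised.
        yield json.dumps({"type": "error", "error": f"{e.__class__.__name__}: {e}"}) + "\n"
        raise
```
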