Diffstat (limited to 'g4f/gui')

 g4f/gui/client/css/style.css   |  13
 g4f/gui/client/html/index.html |  24
 g4f/gui/client/js/chat.v1.js   |  80
 g4f/gui/server/backend.py      | 211

 4 files changed, 238 insertions, 90 deletions
diff --git a/g4f/gui/client/css/style.css b/g4f/gui/client/css/style.css
index 2d4c9857..e77410ab 100644
--- a/g4f/gui/client/css/style.css
+++ b/g4f/gui/client/css/style.css
@@ -404,7 +404,7 @@ body {
     display: none;
 }
 
-#image {
+#image, #file {
     display: none;
 }
 
@@ -412,13 +412,22 @@
 label[for="image"]:has(> input:valid){
     color: var(--accent);
 }
 
-label[for="image"] {
+label[for="file"]:has(> input:valid){
+    color: var(--accent);
+}
+
+label[for="image"], label[for="file"] {
     cursor: pointer;
     position: absolute;
     top: 10px;
     left: 10px;
 }
 
+label[for="file"] {
+    top: 32px;
+    left: 10px;
+}
+
 .buttons input[type="checkbox"] {
     height: 0;
     width: 0;
diff --git a/g4f/gui/client/html/index.html b/g4f/gui/client/html/index.html
index 3f2bb0c0..95489ba4 100644
--- a/g4f/gui/client/html/index.html
+++ b/g4f/gui/client/html/index.html
@@ -118,6 +118,10 @@
                 <input type="file" id="image" name="image" accept="image/png, image/gif, image/jpeg" required/>
                 <i class="fa-regular fa-image"></i>
             </label>
+            <label for="file">
+                <input type="file" id="file" name="file" accept="text/plain, text/html, text/xml, application/json, text/javascript, .sh, .py, .php, .css, .yaml, .sql, .svg, .log, .csv, .twig, .md" required/>
+                <i class="fa-solid fa-paperclip"></i>
+            </label>
             <div id="send-button">
                 <i class="fa-solid fa-paper-plane-top"></i>
             </div>
@@ -125,7 +129,14 @@
         </div>
         <div class="buttons">
             <div class="field">
-                <select name="model" id="model"></select>
+                <select name="model" id="model">
+                    <option value="">Model: Default</option>
+                    <option value="gpt-4">gpt-4</option>
+                    <option value="gpt-3.5-turbo">gpt-3.5-turbo</option>
+                    <option value="llama2-70b">llama2-70b</option>
+                    <option value="gemini-pro">gemini-pro</option>
+                    <option value="">----</option>
+                </select>
             </div>
             <div class="field">
                 <select name="jailbreak" id="jailbreak" style="display: none;">
@@ -138,7 +149,16 @@
                     <option value="gpt-evil-1.0">evil 1.0</option>
                 </select>
                 <div class="field">
-                    <select name="provider" id="provider"></select>
+                    <select name="provider" id="provider">
+                        <option value="">Provider: Auto</option>
+                        <option value="Bing">Bing</option>
+                        <option value="OpenaiChat">OpenaiChat</option>
+                        <option value="HuggingChat">HuggingChat</option>
+                        <option value="Bard">Bard</option>
+                        <option value="Liaobots">Liaobots</option>
+                        <option value="Phind">Phind</option>
+                        <option value="">----</option>
+                    </select>
                 </div>
             </div>
             <div class="field">
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index ccc9461b..8b9bc181 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -7,7 +7,9 @@ const spinner = box_conversations.querySelector(".spinner");
 const stop_generating = document.querySelector(`.stop_generating`);
 const regenerate = document.querySelector(`.regenerate`);
 const send_button = document.querySelector(`#send-button`);
-const imageInput = document.querySelector('#image') ;
+const imageInput = document.querySelector('#image');
+const fileInput = document.querySelector('#file');
+
 let prompt_lock = false;
 
 hljs.addPlugin(new CopyButtonPlugin());
@@ -42,6 +44,11 @@ const handle_ask = async () => {
     if (message.length > 0) {
         message_input.value = '';
         await add_conversation(window.conversation_id, message);
+        if ("text" in fileInput.dataset) {
+            message += '\n```' + fileInput.dataset.type + '\n';
+            message += fileInput.dataset.text;
+            message += '\n```'
+        }
         await add_message(window.conversation_id, "user", message);
         window.token = message_id();
         message_box.innerHTML += `
@@ -55,6 +62,9 @@ const handle_ask = async () => {
                 </div>
             </div>
         `;
+        document.querySelectorAll('code:not(.hljs)').forEach((el) => {
+            hljs.highlightElement(el);
+        });
         await ask_gpt();
     }
 };
@@ -171,17 +181,30 @@ const ask_gpt = async () => {
             content_inner.innerHTML += "<p>An error occurred. Please try again; if the problem persists, use another model or provider.</p>";
         } else {
             html = markdown_render(text);
-            html = html.substring(0, html.lastIndexOf('</p>')) + '<span id="cursor"></span></p>';
+            let lastElement, lastIndex = null;
+            for (const element of ['</p>', '</code></pre>', '</li>\n</ol>']) {
+                const index = html.lastIndexOf(element);
+                if (index > lastIndex) {
+                    lastElement = element;
+                    lastIndex = index;
+                }
+            }
+            if (lastIndex) {
+                html = html.substring(0, lastIndex) + '<span id="cursor"></span>' + lastElement;
+            }
             content_inner.innerHTML = html;
-            document.querySelectorAll('code').forEach((el) => {
+            document.querySelectorAll('code:not(.hljs)').forEach((el) => {
                 hljs.highlightElement(el);
             });
         }
         window.scrollTo(0, 0);
-        message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
+        if (message_box.scrollTop >= message_box.scrollHeight - message_box.clientHeight - 100) {
+            message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
+        }
     }
     if (!error && imageInput) imageInput.value = "";
+    if (!error && fileInput) fileInput.value = "";
 } catch (e) {
     console.error(e);
@@ -305,7 +328,7 @@ const load_conversation = async (conversation_id) => {
         `;
     }
 
-    document.querySelectorAll(`code`).forEach((el) => {
+    document.querySelectorAll('code:not(.hljs)').forEach((el) => {
         hljs.highlightElement(el);
     });
@@ -400,7 +423,7 @@ const load_conversations = async (limit, offset, loader) => {
         `;
     }
 
-    document.querySelectorAll(`code`).forEach((el) => {
+    document.querySelectorAll('code:not(.hljs)').forEach((el) => {
         hljs.highlightElement(el);
     });
 };
@@ -602,14 +625,7 @@ observer.observe(message_input, { attributes: true });
 (async () => {
     response = await fetch('/backend-api/v2/models')
     models = await response.json()
-    let select = document.getElementById('model');
-    select.textContent = '';
-
-    let auto = document.createElement('option');
-    auto.value = '';
-    auto.text = 'Model: Default';
-    select.appendChild(auto);
 
     for (model of models) {
         let option = document.createElement('option');
@@ -619,14 +635,7 @@ observer.observe(message_input, { attributes: true });
     response = await fetch('/backend-api/v2/providers')
     providers = await response.json()
-    select = document.getElementById('provider');
-    select.textContent = '';
-
-    auto = document.createElement('option');
-    auto.value = '';
-    auto.text = 'Provider: Auto';
-    select.appendChild(auto);
 
     for (provider of providers) {
         let option = document.createElement('option');
@@ -643,11 +652,34 @@ observer.observe(message_input, { attributes: true });
     document.title = 'g4f - gui - ' + versions["version"];
     text = "version ~ "
-    if (versions["version"] != versions["lastet_version"]) {
-        release_url = 'https://github.com/xtekky/gpt4free/releases/tag/' + versions["lastet_version"];
-        text += '<a href="' + release_url +'" target="_blank" title="New version: ' + versions["lastet_version"] +'">' + versions["version"] + ' 🆕</a>';
+    if (versions["version"] != versions["latest_version"]) {
+        release_url = 'https://github.com/xtekky/gpt4free/releases/tag/' + versions["latest_version"];
+        text += '<a href="' + release_url +'" target="_blank" title="New version: ' + versions["latest_version"] +'">' + versions["version"] + ' 🆕</a>';
     } else {
         text += versions["version"];
     }
document.getElementById("version_text").innerHTML = text -})()
\ No newline at end of file +})() + +fileInput.addEventListener('change', async (event) => { + if (fileInput.files.length) { + type = fileInput.files[0].type; + if (type && type.indexOf('/')) { + type = type.split('/').pop().replace('x-', '') + type = type.replace('plain', 'plaintext') + .replace('shellscript', 'sh') + .replace('svg+xml', 'svg') + .replace('vnd.trolltech.linguist', 'ts') + } else { + type = fileInput.files[0].name.split('.').pop() + } + fileInput.dataset.type = type + const reader = new FileReader(); + reader.addEventListener('load', (event) => { + fileInput.dataset.text = event.target.result; + }); + reader.readAsText(fileInput.files[0]); + } else { + delete fileInput.dataset.text; + } +});
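For reference (not part of the commit): the change handler above normalizes the browser-reported MIME subtype into a highlight.js language name, which handle_ask then uses to tag the fenced code block it appends to the outgoing message. A standalone sketch of that mapping, with an illustrative helper name and sample inputs:

function toHighlightType(mimeType, filename) {
    // A non-empty MIME type whose '/' sits past position 0 is split on it;
    // 'x-' prefixes and a few subtype names are rewritten for highlight.js.
    if (mimeType && mimeType.indexOf('/')) {
        return mimeType.split('/').pop()
            .replace('x-', '')
            .replace('plain', 'plaintext')
            .replace('shellscript', 'sh')
            .replace('svg+xml', 'svg')
            .replace('vnd.trolltech.linguist', 'ts');
    }
    // No usable MIME type: fall back to the file extension.
    return filename.split('.').pop();
}

toHighlightType('text/plain', 'notes.txt');       // "plaintext"
toHighlightType('application/json', 'data.json'); // "json"
toHighlightType('text/x-python', 'main.py');      // "python"
toHighlightType('', 'deploy.sh');                 // "sh"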
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 9d12bea5..4a5cafa8 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -1,6 +1,7 @@
 import logging
 import json
 from flask import request, Flask
+from typing import Generator
 from g4f import debug, version, models
 from g4f import _all_models, get_last_provider, ChatCompletion
 from g4f.image import is_allowed_extension, to_image
@@ -11,60 +12,123 @@ from .internet import get_search_message
 debug.logging = True
 
 class Backend_Api:
+    """
+    Handles various endpoints in a Flask application for backend operations.
+
+    This class provides methods to interact with models and providers and to
+    handle conversations, error reporting, and version management.
+
+    Attributes:
+        app (Flask): A Flask application instance.
+        routes (dict): A dictionary mapping API endpoints to their respective handlers.
+    """
     def __init__(self, app: Flask) -> None:
+        """
+        Initialize the backend API with the given Flask application.
+
+        Args:
+            app (Flask): Flask application instance to attach routes to.
+        """
         self.app: Flask = app
         self.routes = {
             '/backend-api/v2/models': {
-                'function': self.models,
-                'methods' : ['GET']
+                'function': self.get_models,
+                'methods': ['GET']
             },
             '/backend-api/v2/providers': {
-                'function': self.providers,
-                'methods' : ['GET']
+                'function': self.get_providers,
+                'methods': ['GET']
             },
             '/backend-api/v2/version': {
-                'function': self.version,
-                'methods' : ['GET']
+                'function': self.get_version,
+                'methods': ['GET']
             },
             '/backend-api/v2/conversation': {
-                'function': self._conversation,
+                'function': self.handle_conversation,
                 'methods': ['POST']
             },
             '/backend-api/v2/gen.set.summarize:title': {
-                'function': self._gen_title,
+                'function': self.generate_title,
                 'methods': ['POST']
             },
             '/backend-api/v2/error': {
-                'function': self.error,
+                'function': self.handle_error,
                 'methods': ['POST']
             }
         }
 
-    def error(self):
+    def handle_error(self):
+        """
+        Print an error report posted by the frontend.
+
+        Returns:
+            tuple: 'ok' and a 200 status code.
+        """
         print(request.json)
         return 'ok', 200
 
-    def models(self):
+    def get_models(self):
+        """
+        Return a list of all available models.
+
+        Returns:
+            List[str]: A list of model names.
+        """
         return _all_models
 
-    def providers(self):
-        return [
-            provider.__name__ for provider in __providers__ if provider.working
-        ]
+    def get_providers(self):
+        """
+        Return a list of all working providers.
+        """
+        return [provider.__name__ for provider in __providers__ if provider.working]
 
-    def version(self):
+    def get_version(self):
+        """
+        Return the current and latest version of the application.
+
+        Returns:
+            dict: A dictionary containing the current and latest version.
+        """
         return {
             "version": version.utils.current_version,
-            "lastet_version": version.get_latest_version(),
+            "latest_version": version.get_latest_version(),
         }
 
-    def _gen_title(self):
-        return {
-            'title': ''
-        }
+    def generate_title(self):
+        """
+        Generate and return a title based on the request data.
+
+        Returns:
+            dict: A dictionary with the generated title.
+        """
+        return {'title': ''}
 
-    def _conversation(self):
+    def handle_conversation(self):
+        """
+        Handle conversation requests and stream responses back.
+
+        Returns:
+            Response: A Flask response object for streaming.
+ """ + kwargs = self._prepare_conversation_kwargs() + + return self.app.response_class( + self._create_response_stream(kwargs), + mimetype='text/event-stream' + ) + + def _prepare_conversation_kwargs(self): + """ + Prepares arguments for chat completion based on the request data. + + Reads the request and prepares the necessary arguments for handling + a chat completion request. + + Returns: + dict: Arguments prepared for chat completion. + """ kwargs = {} if 'image' in request.files: file = request.files['image'] @@ -87,47 +151,70 @@ class Backend_Api: messages[-1]["content"] = get_search_message(messages[-1]["content"]) model = json_data.get('model') model = model if model else models.default - provider = json_data.get('provider', '').replace('g4f.Provider.', '') - provider = provider if provider and provider != "Auto" else None patch = patch_provider if json_data.get('patch_provider') else None - def try_response(): - try: - first = True - for chunk in ChatCompletion.create( - model=model, - provider=provider, - messages=messages, - stream=True, - ignore_stream_and_auth=True, - patch_provider=patch, - **kwargs - ): - if first: - first = False - yield json.dumps({ - 'type' : 'provider', - 'provider': get_last_provider(True) - }) + "\n" - if isinstance(chunk, Exception): - logging.exception(chunk) - yield json.dumps({ - 'type' : 'message', - 'message': get_error_message(chunk), - }) + "\n" - else: - yield json.dumps({ - 'type' : 'content', - 'content': str(chunk), - }) + "\n" - except Exception as e: - logging.exception(e) - yield json.dumps({ - 'type' : 'error', - 'error': get_error_message(e) - }) - - return self.app.response_class(try_response(), mimetype='text/event-stream') + return { + "model": model, + "provider": provider, + "messages": messages, + "stream": True, + "ignore_stream_and_auth": True, + "patch_provider": patch, + **kwargs + } + + def _create_response_stream(self, kwargs) -> Generator[str, None, None]: + """ + Creates and returns a streaming response for the conversation. + + Args: + kwargs (dict): Arguments for creating the chat completion. + + Yields: + str: JSON formatted response chunks for the stream. + + Raises: + Exception: If an error occurs during the streaming process. + """ + try: + first = True + for chunk in ChatCompletion.create(**kwargs): + if first: + first = False + yield self._format_json('provider', get_last_provider(True)) + if isinstance(chunk, Exception): + logging.exception(chunk) + yield self._format_json('message', get_error_message(chunk)) + else: + yield self._format_json('content', str(chunk)) + except Exception as e: + logging.exception(e) + yield self._format_json('error', get_error_message(e)) + + def _format_json(self, response_type: str, content) -> str: + """ + Formats and returns a JSON response. + + Args: + response_type (str): The type of the response. + content: The content to be included in the response. + + Returns: + str: A JSON formatted string. + """ + return json.dumps({ + 'type': response_type, + response_type: content + }) + "\n" def get_error_message(exception: Exception) -> str: + """ + Generates a formatted error message from an exception. + + Args: + exception (Exception): The exception to format. + + Returns: + str: A formatted error message string. + """ return f"{get_last_provider().__name__}: {type(exception).__name__}: {exception}"
\ No newline at end of file |
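To make the refactored streaming contract concrete: handle_conversation now returns a body of one JSON object per line, as produced by _format_json. Below is a minimal consumer sketch (not part of the commit; the function name and request fields are illustrative, mirroring what chat.v1.js posts as the 'json' form field):

async function streamConversation(messages, model = '', provider = '') {
    const body = new FormData();
    body.append('json', JSON.stringify({ model, provider, messages }));

    const response = await fetch('/backend-api/v2/conversation', { method: 'POST', body });
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';

    while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop();                   // keep any incomplete trailing line
        for (const line of lines) {
            if (!line) continue;
            const data = JSON.parse(line);      // shape: {"type": t, t: content}
            if (data.type === 'provider') {
                console.log('provider:', data.provider);
            } else if (data.type === 'content') {
                console.log(data.content);      // streamed completion chunk
            } else {
                console.error(data[data.type]); // 'message' or 'error'
            }
        }
    }
}

streamConversation([{ role: 'user', content: 'Hello' }]);

The first chunk names the provider actually used (via get_last_provider(True)), so a client can label the answer before any content arrives.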