author    | H Lohaus <hlohaus@users.noreply.github.com> | 2024-03-16 18:22:26 +0100
committer | GitHub <noreply@github.com>                 | 2024-03-16 18:22:26 +0100
commit    | fb2061da48525edab9cd993205bb5e30c386aa1a (patch)
tree      | 1e740bd6955dfd27b9a4d773df07234ed9e5c75e /g4f/gui/server
parent    | Merge pull request #1694 from ComRSMaster/main (diff)
parent    | Add conversation support for Bing (diff)
Diffstat (limited to 'g4f/gui/server')
-rw-r--r-- | g4f/gui/server/api.py     | 185
-rw-r--r-- | g4f/gui/server/app.py     |   8
-rw-r--r-- | g4f/gui/server/backend.py | 163
-rw-r--r-- | g4f/gui/server/website.py |  20
4 files changed, 217 insertions, 159 deletions
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
new file mode 100644
index 00000000..966319e4
--- /dev/null
+++ b/g4f/gui/server/api.py
@@ -0,0 +1,185 @@
+import logging
+import json
+from typing import Iterator
+
+try:
+    import webview
+except ImportError:
+    ...
+
+from g4f import version, models
+from g4f import get_last_provider, ChatCompletion
+from g4f.errors import VersionNotFoundError
+from g4f.Provider import ProviderType, __providers__, __map__
+from g4f.providers.base_provider import ProviderModelMixin
+from g4f.Provider.bing.create_images import patch_provider
+from g4f.Provider.Bing import Conversation
+
+conversations: dict[str, Conversation] = {}
+
+class Api():
+
+    def get_models(self) -> list[str]:
+        """
+        Return a list of all models.
+
+        Fetches and returns a list of all available models in the system.
+
+        Returns:
+            List[str]: A list of model names.
+        """
+        return models._all_models
+
+    def get_provider_models(self, provider: str) -> list[dict]:
+        if provider in __map__:
+            provider: ProviderType = __map__[provider]
+            if issubclass(provider, ProviderModelMixin):
+                return [{"model": model, "default": model == provider.default_model} for model in provider.get_models()]
+            elif provider.supports_gpt_35_turbo or provider.supports_gpt_4:
+                return [
+                    *([{"model": "gpt-4", "default": not provider.supports_gpt_4}] if provider.supports_gpt_4 else []),
+                    *([{"model": "gpt-3.5-turbo", "default": not provider.supports_gpt_4}] if provider.supports_gpt_35_turbo else [])
+                ]
+            else:
+                return [];
+
+    def get_providers(self) -> list[str]:
+        """
+        Return a list of all working providers.
+        """
+        return [provider.__name__ for provider in __providers__ if provider.working]
+
+    def get_version(self):
+        """
+        Returns the current and latest version of the application.
+
+        Returns:
+            dict: A dictionary containing the current and latest version.
+        """
+        try:
+            current_version = version.utils.current_version
+        except VersionNotFoundError:
+            current_version = None
+        return {
+            "version": current_version,
+            "latest_version": version.utils.latest_version,
+        }
+
+    def generate_title(self):
+        """
+        Generates and returns a title based on the request data.
+
+        Returns:
+            dict: A dictionary with the generated title.
+        """
+        return {'title': ''}
+
+    def get_conversation(self, options: dict, **kwargs) -> Iterator:
+        window = webview.active_window()
+        for message in self._create_response_stream(
+            self._prepare_conversation_kwargs(options, kwargs),
+            options.get("conversation_id")
+        ):
+            window.evaluate_js(f"this.add_message_chunk({json.dumps(message)})")
+
+    def _prepare_conversation_kwargs(self, json_data: dict, kwargs: dict):
+        """
+        Prepares arguments for chat completion based on the request data.
+
+        Reads the request and prepares the necessary arguments for handling
+        a chat completion request.
+
+        Returns:
+            dict: Arguments prepared for chat completion.
+        """
+        provider = json_data.get('provider', None)
+        if "image" in kwargs and provider is None:
+            provider = "Bing"
+        if provider == 'OpenaiChat':
+            kwargs['auto_continue'] = True
+
+        messages = json_data['messages']
+        if json_data.get('web_search'):
+            if provider == "Bing":
+                kwargs['web_search'] = True
+            else:
+                from .internet import get_search_message
+                messages[-1]["content"] = get_search_message(messages[-1]["content"])
+
+        conversation_id = json_data.get("conversation_id")
+        if conversation_id and conversation_id in conversations:
+            kwargs["conversation"] = conversations[conversation_id]
+
+        model = json_data.get('model')
+        model = model if model else models.default
+        patch = patch_provider if json_data.get('patch_provider') else None
+
+        return {
+            "model": model,
+            "provider": provider,
+            "messages": messages,
+            "stream": True,
+            "ignore_stream": True,
+            "patch_provider": patch,
+            "return_conversation": True,
+            **kwargs
+        }
+
+    def _create_response_stream(self, kwargs, conversation_id: str) -> Iterator:
+        """
+        Creates and returns a streaming response for the conversation.
+
+        Args:
+            kwargs (dict): Arguments for creating the chat completion.
+
+        Yields:
+            str: JSON formatted response chunks for the stream.
+
+        Raises:
+            Exception: If an error occurs during the streaming process.
+        """
+        try:
+            first = True
+            for chunk in ChatCompletion.create(**kwargs):
+                if first:
+                    first = False
+                    yield self._format_json("provider", get_last_provider(True))
+                if isinstance(chunk, Conversation):
+                    conversations[conversation_id] = chunk
+                    yield self._format_json("conversation", conversation_id)
+                elif isinstance(chunk, Exception):
+                    logging.exception(chunk)
+                    yield self._format_json("message", get_error_message(chunk))
+                else:
+                    yield self._format_json("content", chunk)
+        except Exception as e:
+            logging.exception(e)
+            yield self._format_json('error', get_error_message(e))
+
+    def _format_json(self, response_type: str, content):
+        """
+        Formats and returns a JSON response.
+
+        Args:
+            response_type (str): The type of the response.
+            content: The content to be included in the response.
+
+        Returns:
+            str: A JSON formatted string.
+        """
+        return {
+            'type': response_type,
+            response_type: content
+        }
+
+def get_error_message(exception: Exception) -> str:
+    """
+    Generates a formatted error message from an exception.
+
+    Args:
+        exception (Exception): The exception to format.
+
+    Returns:
+        str: A formatted error message string.
+    """
+    return f"{get_last_provider().__name__}: {type(exception).__name__}: {exception}"
\ No newline at end of file
diff --git a/g4f/gui/server/app.py b/g4f/gui/server/app.py
index 2b55698c..869d3880 100644
--- a/g4f/gui/server/app.py
+++ b/g4f/gui/server/app.py
@@ -1,3 +1,9 @@
+import sys, os
 from flask import Flask
 
-app = Flask(__name__, template_folder='./../client/html')
\ No newline at end of file
+if getattr(sys, 'frozen', False):
+    template_folder = os.path.join(sys._MEIPASS, "client")
+else:
+    template_folder = "../client"
+
+app = Flask(__name__, template_folder=template_folder, static_folder=f"{template_folder}/static")
\ No newline at end of file
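The getattr(sys, 'frozen', False) check added here is the standard PyInstaller idiom: a frozen executable unpacks bundled data files under the temporary directory exposed as sys._MEIPASS. A standalone sketch of the same pattern (the "client" folder name mirrors the diff; the rest is generic):

    import os
    import sys

    def resource_path(relative: str) -> str:
        # In a PyInstaller bundle, data files live under sys._MEIPASS;
        # in a normal checkout, resolve relative to this source file.
        if getattr(sys, 'frozen', False):
            base = sys._MEIPASS
        else:
            base = os.path.dirname(os.path.abspath(__file__))
        return os.path.join(base, relative)

    print(resource_path("client"))  # e.g. the GUI's template folder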
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 454ed1c6..fb8404d4 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -1,15 +1,9 @@
-import logging
 import json
 from flask import request, Flask
-from typing import Generator
 
-from g4f import version, models
-from g4f import get_last_provider, ChatCompletion
 from g4f.image import is_allowed_extension, to_image
-from g4f.errors import VersionNotFoundError
-from g4f.Provider import __providers__
-from g4f.Provider.bing.create_images import patch_provider
+from .api import Api
 
-class Backend_Api:
+class Backend_Api(Api):
     """
     Handles various endpoints in a Flask application for backend operations.
@@ -33,6 +27,10 @@ class Backend_Api:
                 'function': self.get_models,
                 'methods': ['GET']
             },
+            '/backend-api/v2/models/<provider>': {
+                'function': self.get_provider_models,
+                'methods': ['GET']
+            },
             '/backend-api/v2/providers': {
                 'function': self.get_providers,
                 'methods': ['GET']
@@ -54,7 +52,7 @@ class Backend_Api:
                 'methods': ['POST']
             }
         }
-        
+
     def handle_error(self):
         """
         Initialize the backend API with the given Flask application.
@@ -64,49 +62,7 @@ class Backend_Api:
         """
         print(request.json)
         return 'ok', 200
-    
-    def get_models(self):
-        """
-        Return a list of all models.
-
-        Fetches and returns a list of all available models in the system.
-
-        Returns:
-            List[str]: A list of model names.
-        """
-        return models._all_models
-
-    def get_providers(self):
-        """
-        Return a list of all working providers.
-        """
-        return [provider.__name__ for provider in __providers__ if provider.working]
-
-    def get_version(self):
-        """
-        Returns the current and latest version of the application.
-
-        Returns:
-            dict: A dictionary containing the current and latest version.
-        """
-        try:
-            current_version = version.utils.current_version
-        except VersionNotFoundError:
-            current_version = None
-        return {
-            "version": current_version,
-            "latest_version": version.utils.latest_version,
-        }
-
-    def generate_title(self):
-        """
-        Generates and returns a title based on the request data.
-
-        Returns:
-            dict: A dictionary with the generated title.
-        """
-        return {'title': ''}
-
     def handle_conversation(self):
         """
         Handles conversation requests and streams responses back.
@@ -114,26 +70,10 @@ class Backend_Api:
         Returns:
             Response: A Flask response object for streaming.
         """
-        kwargs = self._prepare_conversation_kwargs()
-
-        return self.app.response_class(
-            self._create_response_stream(kwargs),
-            mimetype='text/event-stream'
-        )
-
-    def _prepare_conversation_kwargs(self):
-        """
-        Prepares arguments for chat completion based on the request data.
-
-        Reads the request and prepares the necessary arguments for handling
-        a chat completion request.
-
-        Returns:
-            dict: Arguments prepared for chat completion.
-        """
+        kwargs = {}
-        if "image" in request.files:
-            file = request.files['image']
+        if "file" in request.files:
+            file = request.files['file']
             if file.filename != '' and is_allowed_extension(file.filename):
                 kwargs['image'] = to_image(file.stream, file.filename.endswith('.svg'))
                 kwargs['image_name'] = file.filename
@@ -141,66 +81,20 @@ class Backend_Api:
             json_data = json.loads(request.form['json'])
         else:
             json_data = request.json
-        
-        provider = json_data.get('provider', '').replace('g4f.Provider.', '')
-        provider = provider if provider and provider != "Auto" else None
-
-        if "image" in kwargs and not provider:
-            provider = "Bing"
-        if provider == 'OpenaiChat':
-            kwargs['auto_continue'] = True
-
-        messages = json_data['messages']
-        if json_data.get('web_search'):
-            if provider == "Bing":
-                kwargs['web_search'] = True
-            else:
-                # ResourceWarning: unclosed event loop
-                from .internet import get_search_message
-                messages[-1]["content"] = get_search_message(messages[-1]["content"])
-
-        model = json_data.get('model')
-        model = model if model else models.default
-        patch = patch_provider if json_data.get('patch_provider') else None
-
-        return {
-            "model": model,
-            "provider": provider,
-            "messages": messages,
-            "stream": True,
-            "ignore_stream": True,
-            "patch_provider": patch,
-            **kwargs
-        }
-
-    def _create_response_stream(self, kwargs) -> Generator[str, None, None]:
-        """
-        Creates and returns a streaming response for the conversation.
+        kwargs = self._prepare_conversation_kwargs(json_data, kwargs)
 
-        Args:
-            kwargs (dict): Arguments for creating the chat completion.
+        return self.app.response_class(
+            self._create_response_stream(kwargs, json_data.get("conversation_id")),
+            mimetype='text/event-stream'
+        )
 
-        Yields:
-            str: JSON formatted response chunks for the stream.
+    def get_provider_models(self, provider: str):
+        models = super().get_provider_models(provider)
+        if models is None:
+            return 404, "Provider not found"
+        return models
 
-        Raises:
-            Exception: If an error occurs during the streaming process.
-        """
-        try:
-            first = True
-            for chunk in ChatCompletion.create(**kwargs):
-                if first:
-                    first = False
-                    yield self._format_json('provider', get_last_provider(True))
-                if isinstance(chunk, Exception):
-                    logging.exception(chunk)
-                    yield self._format_json('message', get_error_message(chunk))
-                else:
-                    yield self._format_json('content', str(chunk))
-        except Exception as e:
-            logging.exception(e)
-            yield self._format_json('error', get_error_message(e))
-
     def _format_json(self, response_type: str, content) -> str:
         """
         Formats and returns a JSON response.
@@ -212,19 +106,4 @@ class Backend_Api:
         Returns:
             str: A JSON formatted string.
         """
-        return json.dumps({
-            'type': response_type,
-            response_type: content
-        }) + "\n"
-
-def get_error_message(exception: Exception) -> str:
-    """
-    Generates a formatted error message from an exception.
-
-    Args:
-        exception (Exception): The exception to format.
-
-    Returns:
-        str: A formatted error message string.
-    """
-    return f"{get_last_provider().__name__}: {type(exception).__name__}: {exception}"
\ No newline at end of file
+        return json.dumps(super()._format_json(response_type, content)) + "\n"
\ No newline at end of file
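Taken together with the routes above, a client can POST to /backend-api/v2/conversation and read back the newline-delimited JSON objects that the overridden _format_json emits. A sketch assuming the GUI server listens on localhost:8080 (host and port are not fixed by this diff):

    import json
    import requests

    resp = requests.post(
        "http://localhost:8080/backend-api/v2/conversation",  # assumed address
        json={"messages": [{"role": "user", "content": "Hi"}]},
        stream=True,
    )
    for line in resp.iter_lines():
        if not line:
            continue
        msg = json.loads(line)
        # message types seen in the diff: provider, conversation, message, content, error
        if msg["type"] == "content":
            print(msg["content"], end="")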
diff --git a/g4f/gui/server/website.py b/g4f/gui/server/website.py
index 2705664d..4e611177 100644
--- a/g4f/gui/server/website.py
+++ b/g4f/gui/server/website.py
@@ -1,6 +1,5 @@
-from flask import render_template, send_file, redirect
-from time import time
-from os import urandom
+import uuid
+from flask import render_template, redirect
 
 class Website:
     def __init__(self, app) -> None:
@@ -18,23 +17,12 @@ class Website:
             'function': self._chat,
             'methods': ['GET', 'POST']
         },
-        '/assets/<folder>/<file>': {
-            'function': self._assets,
-            'methods': ['GET', 'POST']
-        }
     }
 
     def _chat(self, conversation_id):
         if '-' not in conversation_id:
             return redirect('/chat')
-        
-        return render_template('index.html', chat_id = conversation_id)
+        return render_template('index.html', chat_id=conversation_id)
 
     def _index(self):
-        return render_template('index.html', chat_id = f'{urandom(4).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{hex(int(time() * 1000))[2:]}')
-
-    def _assets(self, folder: str, file: str):
-        try:
-            return send_file(f"./../client/{folder}/{file}", as_attachment=False)
-        except:
-            return "File not found", 404
\ No newline at end of file
+        return render_template('index.html', chat_id=str(uuid.uuid4()))
\ No newline at end of file
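The routes dicts that Website and Backend_Api build are bound to Flask elsewhere in the GUI package; the loop below is an illustrative reconstruction of that registration pattern, not code from this commit:

    from flask import Flask

    app = Flask(__name__)

    def _index():
        return "ok"

    # Same shape as Website.routes / Backend_Api.routes above
    routes = {
        '/': {'function': _index, 'methods': ['GET', 'POST']},
    }

    for route, options in routes.items():
        app.add_url_rule(route, view_func=options['function'], methods=options['methods'])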