From 0a3565f215a15dff9169ee8d619e0caa3a65f464 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Fri, 25 Oct 2024 19:39:12 +0300 Subject: refactor(g4f/api/__init__.py): refactor API structure and improve async handling --- g4f/api/__init__.py | 147 +++++++++++++++++++++++++++++----------------------- 1 file changed, 83 insertions(+), 64 deletions(-) (limited to 'g4f') diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py index 83df469a..754a48f1 100644 --- a/g4f/api/__init__.py +++ b/g4f/api/__init__.py @@ -14,17 +14,18 @@ from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY, HTTP_401_UNAUTHORIZE from fastapi.encoders import jsonable_encoder from fastapi.middleware.cors import CORSMiddleware from pydantic import BaseModel -from typing import Union, Optional +from typing import Union, Optional, Iterator import g4f import g4f.debug -from g4f.client import Client +from g4f.client import Client, ChatCompletion, ChatCompletionChunk, ImagesResponse from g4f.typing import Messages from g4f.cookies import read_cookie_files -def create_app(): +def create_app(g4f_api_key: str = None): app = FastAPI() - api = Api(app) + + # Add CORS middleware app.add_middleware( CORSMiddleware, allow_origin_regex=".*", @@ -32,18 +33,19 @@ def create_app(): allow_methods=["*"], allow_headers=["*"], ) + + api = Api(app, g4f_api_key=g4f_api_key) api.register_routes() api.register_authorization() api.register_validation_exception_handler() + + # Read cookie files if not ignored if not AppConfig.ignore_cookie_files: read_cookie_files() - return app -def create_app_debug(): - g4f.debug.logging = True - return create_app() + return app -class ChatCompletionsForm(BaseModel): +class ChatCompletionsConfig(BaseModel): messages: Messages model: str provider: Optional[str] = None @@ -55,15 +57,12 @@ class ChatCompletionsForm(BaseModel): web_search: Optional[bool] = None proxy: Optional[str] = None -class ImagesGenerateForm(BaseModel): - model: Optional[str] = None - provider: Optional[str] = None +class ImageGenerationConfig(BaseModel): prompt: str - response_format: Optional[str] = None - api_key: Optional[str] = None - proxy: Optional[str] = None + model: Optional[str] = None + response_format: str = "url" -class AppConfig(): +class AppConfig: ignored_providers: Optional[list[str]] = None g4f_api_key: Optional[str] = None ignore_cookie_files: bool = False @@ -74,16 +73,23 @@ class AppConfig(): for key, value in data.items(): setattr(cls, key, value) +list_ignored_providers: list[str] = None + +def set_list_ignored_providers(ignored: list[str]): + global list_ignored_providers + list_ignored_providers = ignored + class Api: - def __init__(self, app: FastAPI) -> None: + def __init__(self, app: FastAPI, g4f_api_key=None) -> None: self.app = app self.client = Client() + self.g4f_api_key = g4f_api_key self.get_g4f_api_key = APIKeyHeader(name="g4f-api-key") def register_authorization(self): @self.app.middleware("http") async def authorization(request: Request, call_next): - if AppConfig.g4f_api_key and request.url.path in ["/v1/chat/completions", "/v1/completions"]: + if self.g4f_api_key and request.url.path in ["/v1/chat/completions", "/v1/completions", "/v1/images/generate"]: try: user_g4f_api_key = await self.get_g4f_api_key(request) except HTTPException as e: @@ -92,22 +98,26 @@ class Api: status_code=HTTP_401_UNAUTHORIZED, content=jsonable_encoder({"detail": "G4F API key required"}), ) - if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key): + if not secrets.compare_digest(self.g4f_api_key, user_g4f_api_key): 
return JSONResponse( status_code=HTTP_403_FORBIDDEN, content=jsonable_encoder({"detail": "Invalid G4F API key"}), ) - return await call_next(request) + + response = await call_next(request) + return response def register_validation_exception_handler(self): @self.app.exception_handler(RequestValidationError) async def validation_exception_handler(request: Request, exc: RequestValidationError): details = exc.errors() - modified_details = [{ - "loc": error["loc"], - "message": error["msg"], - "type": error["type"], - } for error in details] + modified_details = [] + for error in details: + modified_details.append({ + "loc": error["loc"], + "message": error["msg"], + "type": error["type"], + }) return JSONResponse( status_code=HTTP_422_UNPROCESSABLE_ENTITY, content=jsonable_encoder({"detail": modified_details}), @@ -121,25 +131,23 @@ class Api: @self.app.get("/v1") async def read_root_v1(): return HTMLResponse('g4f API: Go to ' - 'chat/completions ' - 'or models.') + 'chat/completions, ' + 'models, or ' + 'images/generate.') @self.app.get("/v1/models") async def models(): - model_list = { - model: g4f.models.ModelUtils.convert[model] + model_list = dict( + (model, g4f.models.ModelUtils.convert[model]) for model in g4f.Model.__all__() - } + ) model_list = [{ 'id': model_id, 'object': 'model', 'created': 0, 'owned_by': model.base_provider } for model_id, model in model_list.items()] - return JSONResponse({ - "object": "list", - "data": model_list, - }) + return JSONResponse(model_list) @self.app.get("/v1/models/{model_name}") async def model_info(model_name: str): @@ -155,7 +163,7 @@ class Api: return JSONResponse({"error": "The model does not exist."}) @self.app.post("/v1/chat/completions") - async def chat_completions(config: ChatCompletionsForm, request: Request = None, provider: str = None): + async def chat_completions(config: ChatCompletionsConfig, request: Request = None, provider: str = None): try: config.provider = provider if config.provider is None else config.provider if config.api_key is None and request is not None: @@ -164,17 +172,27 @@ class Api: auth_header = auth_header.split(None, 1)[-1] if auth_header and auth_header != "Bearer": config.api_key = auth_header - # Use the asynchronous create method and await it - response = await self.client.chat.completions.async_create( + + # Create the completion response + response = self.client.chat.completions.create( **{ **AppConfig.defaults, **config.dict(exclude_none=True), }, ignored=AppConfig.ignored_providers ) - if not config.stream: + + # Check if the response is synchronous or asynchronous + if isinstance(response, ChatCompletion): + # Synchronous response return JSONResponse(response.to_json()) + if not config.stream: + # If the response is an iterator but not streaming, collect the result + response_list = list(response) if isinstance(response, Iterator) else [response] + return JSONResponse(response_list[0].to_json()) + + # Streaming response async def streaming(): try: async for chunk in response: @@ -185,41 +203,38 @@ class Api: logging.exception(e) yield f'data: {format_exception(e, config)}\n\n' yield "data: [DONE]\n\n" + return StreamingResponse(streaming(), media_type="text/event-stream") except Exception as e: logging.exception(e) return Response(content=format_exception(e, config), status_code=500, media_type="application/json") - @self.app.post("/v1/completions") - async def completions(): - return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json") - - 
@self.app.post("/v1/images/generations") - async def images_generate(config: ImagesGenerateForm, request: Request = None, provider: str = None): + @self.app.post("/v1/images/generate") + async def generate_image(config: ImageGenerationConfig): try: - config.provider = provider if config.provider is None else config.provider - if config.api_key is None and request is not None: - auth_header = request.headers.get("Authorization") - if auth_header is not None: - auth_header = auth_header.split(None, 1)[-1] - if auth_header and auth_header != "Bearer": - config.api_key = auth_header - # Use the asynchronous generate method and await it - response = await self.client.images.async_generate( - **config.dict(exclude_none=True), + response: ImagesResponse = await self.client.images.async_generate( + prompt=config.prompt, + model=config.model, + response_format=config.response_format ) - return JSONResponse(response.to_json()) + # Convert Image objects to dictionaries + response_data = [image.to_dict() for image in response.data] + return JSONResponse({"data": response_data}) except Exception as e: logging.exception(e) return Response(content=format_exception(e, config), status_code=500, media_type="application/json") -def format_exception(e: Exception, config: ChatCompletionsForm) -> str: + @self.app.post("/v1/completions") + async def completions(): + return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json") + +def format_exception(e: Exception, config: Union[ChatCompletionsConfig, ImageGenerationConfig]) -> str: last_provider = g4f.get_last_provider(True) return json.dumps({ "error": {"message": f"{e.__class__.__name__}: {e}"}, - "model": last_provider.get("model") if last_provider else config.model, - "provider": last_provider.get("name") if last_provider else config.provider + "model": last_provider.get("model") if last_provider else getattr(config, 'model', None), + "provider": last_provider.get("name") if last_provider else getattr(config, 'provider', None) }) def run_api( @@ -228,18 +243,22 @@ def run_api( bind: str = None, debug: bool = False, workers: int = None, - use_colors: bool = None + use_colors: bool = None, + g4f_api_key: str = None ) -> None: print(f'Starting server... 
[g4f v-{g4f.version.utils.current_version}]' + (" (debug)" if debug else "")) if use_colors is None: use_colors = debug if bind is not None: host, port = bind.split(":") + if debug: + g4f.debug.logging = True uvicorn.run( - f"g4f.api:create_app{'_debug' if debug else ''}", - host=host, port=int(port), - workers=workers, - use_colors=use_colors, - factory=True, + "g4f.api:create_app", + host=host, + port=int(port), + workers=workers, + use_colors=use_colors, + factory=True, reload=debug ) -- cgit v1.2.3 From f55f867a01b279992470d992fae55cd2e559a9ea Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Fri, 25 Oct 2024 19:43:55 +0300 Subject: feat(g4f/client/client.py): add system prompt support --- g4f/client/client.py | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) (limited to 'g4f') diff --git a/g4f/client/client.py b/g4f/client/client.py index 41238df5..2772f9bb 100644 --- a/g4f/client/client.py +++ b/g4f/client/client.py @@ -149,6 +149,7 @@ class Completions: self, messages: Messages, model: str, + system: str = None, # Added system parameter provider: ProviderType = None, stream: bool = False, proxy: str = None, @@ -161,6 +162,12 @@ class Completions: ignore_stream: bool = False, **kwargs ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]: + # If a system prompt is provided, prepend it to the messages + if system: + system_message = {"role": "system", "content": system} + messages = [system_message] + messages + + # Existing implementation continues... model, provider = get_model_and_provider( model, self.provider if provider is None else provider, @@ -221,6 +228,7 @@ class Completions: self, messages: Messages, model: str, + system: str = None, # Added system parameter provider: ProviderType = None, stream: bool = False, proxy: str = None, @@ -233,6 +241,12 @@ class Completions: ignore_stream: bool = False, **kwargs ) -> Union[ChatCompletion, AsyncIterator[ChatCompletionChunk]]: + # If a system prompt is provided, prepend it to the messages + if system: + system_message = {"role": "system", "content": system} + messages = [system_message] + messages + + # Existing implementation continues... 
model, provider = get_model_and_provider( model, self.provider if provider is None else provider, @@ -271,16 +285,18 @@ class Completions: **kwargs ) - # Removed 'await' here since 'async_iter_response' returns an async generator - response = async_iter_response(response, stream, response_format, max_tokens, stop) - response = async_iter_append_model_and_provider(response) - + # Handle streaming or non-streaming responses if stream: + response = async_iter_response(response, stream, response_format, max_tokens, stop) + response = async_iter_append_model_and_provider(response) return response else: + response = async_iter_response(response, stream, response_format, max_tokens, stop) + response = async_iter_append_model_and_provider(response) async for result in response: return result + class Chat: completions: Completions @@ -401,6 +417,12 @@ class Image: def __repr__(self): return f"Image(url={self.url}, b64_json={'' if self.b64_json else None})" + def to_dict(self): + return { + "url": self.url, + "b64_json": self.b64_json + } + class ImagesResponse: def __init__(self, data: list[Image]): self.data = data -- cgit v1.2.3 From 307c2e64bd599772a29f2d367696482374b5b068 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Fri, 25 Oct 2024 20:03:21 +0300 Subject: feat(g4f/gui/server/api.py): enhance image handling and directory management --- g4f/gui/server/api.py | 63 +++++++++------------------------------------------ 1 file changed, 11 insertions(+), 52 deletions(-) (limited to 'g4f') diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py index 57f3eaa1..51cf3d32 100644 --- a/g4f/gui/server/api.py +++ b/g4f/gui/server/api.py @@ -2,12 +2,11 @@ from __future__ import annotations import logging import os -import os.path import uuid import asyncio import time from aiohttp import ClientSession -from typing import Iterator, Optional +from typing import Iterator, Optional, AsyncIterator, Union from flask import send_from_directory from g4f import version, models @@ -20,21 +19,20 @@ from g4f.Provider import ProviderType, __providers__, __map__ from g4f.providers.base_provider import ProviderModelMixin, FinishReason from g4f.providers.conversation import BaseConversation -conversations: dict[dict[str, BaseConversation]] = {} +# Define the directory for generated images images_dir = "./generated_images" +# Function to ensure the images directory exists +def ensure_images_dir(): + if not os.path.exists(images_dir): + os.makedirs(images_dir) + +conversations: dict[dict[str, BaseConversation]] = {} + class Api: @staticmethod def get_models() -> list[str]: - """ - Return a list of all models. - - Fetches and returns a list of all available models in the system. - - Returns: - List[str]: A list of model names. - """ return models._all_models @staticmethod @@ -82,9 +80,6 @@ class Api: @staticmethod def get_providers() -> list[str]: - """ - Return a list of all working providers. - """ return { provider.__name__: ( provider.label if hasattr(provider, "label") else provider.__name__ @@ -99,12 +94,6 @@ class Api: @staticmethod def get_version(): - """ - Returns the current and latest version of the application. - - Returns: - dict: A dictionary containing the current and latest version. 
- """ try: current_version = version.utils.current_version except VersionNotFoundError: @@ -115,18 +104,10 @@ class Api: } def serve_images(self, name): + ensure_images_dir() return send_from_directory(os.path.abspath(images_dir), name) def _prepare_conversation_kwargs(self, json_data: dict, kwargs: dict): - """ - Prepares arguments for chat completion based on the request data. - - Reads the request and prepares the necessary arguments for handling - a chat completion request. - - Returns: - dict: Arguments prepared for chat completion. - """ model = json_data.get('model') or models.default provider = json_data.get('provider') messages = json_data['messages'] @@ -159,13 +140,11 @@ class Api: result = ChatCompletion.create(**kwargs) first = True if isinstance(result, ImageResponse): - # Якщо результат є ImageResponse, обробляємо його як одиночний елемент if first: first = False yield self._format_json("provider", get_last_provider(True)) yield self._format_json("content", str(result)) else: - # Якщо результат є ітерабельним, обробляємо його як раніше for chunk in result: if first: first = False @@ -181,7 +160,6 @@ class Api: elif isinstance(chunk, ImagePreview): yield self._format_json("preview", chunk.to_string()) elif isinstance(chunk, ImageResponse): - # Обробка ImageResponse images = asyncio.run(self._copy_images(chunk.get_list(), chunk.options.get("cookies"))) yield self._format_json("content", str(ImageResponse(images, chunk.alt))) elif not isinstance(chunk, FinishReason): @@ -190,8 +168,8 @@ class Api: logging.exception(e) yield self._format_json('error', get_error_message(e)) - # Додайте цей метод до класу Api async def _copy_images(self, images: list[str], cookies: Optional[Cookies] = None): + ensure_images_dir() async with ClientSession( connector=get_connector(None, os.environ.get("G4F_PROXY")), cookies=cookies @@ -212,16 +190,6 @@ class Api: return await asyncio.gather(*[copy_image(image) for image in images]) def _format_json(self, response_type: str, content): - """ - Formats and returns a JSON response. - - Args: - response_type (str): The type of the response. - content: The content to be included in the response. - - Returns: - str: A JSON formatted string. - """ return { 'type': response_type, response_type: content @@ -229,15 +197,6 @@ class Api: def get_error_message(exception: Exception) -> str: - """ - Generates a formatted error message from an exception. - - Args: - exception (Exception): The exception to format. - - Returns: - str: A formatted error message string. 
- """ message = f"{type(exception).__name__}: {exception}" provider = get_last_provider() if provider is None: -- cgit v1.2.3 From 7ecc5962e413ca5cb4c13a99a07f3e14bcf73b15 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Fri, 25 Oct 2024 20:05:37 +0300 Subject: feat(g4f/gui/server/backend.py): add route to list generated images --- g4f/gui/server/backend.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) (limited to 'g4f') diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py index dc1b1080..e24d4da2 100644 --- a/g4f/gui/server/backend.py +++ b/g4f/gui/server/backend.py @@ -1,5 +1,6 @@ import json -from flask import request, Flask +import os +from flask import request, Flask, jsonify, send_from_directory from g4f.image import is_allowed_extension, to_image from .api import Api @@ -54,6 +55,10 @@ class Backend_Api(Api): '/images/': { 'function': self.serve_images, 'methods': ['GET'] + }, + '/images': { + 'function': self.get_images, + 'methods': ['GET'] } } @@ -110,4 +115,19 @@ class Backend_Api(Api): Returns: str: A JSON formatted string. """ - return json.dumps(super()._format_json(response_type, content)) + "\n" \ No newline at end of file + return json.dumps(super()._format_json(response_type, content)) + "\n" + + @staticmethod + def get_images(): + images_dir = "./generated_images" + try: + images = [f for f in os.listdir(images_dir) if os.path.isfile(os.path.join(images_dir, f))] + images = [f"/images/{image}" for image in images if image.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.webp'))] + return jsonify(images) + except Exception as e: + return str(e), 500 + + @staticmethod + def serve_images(name): + images_dir = "./generated_images" + return send_from_directory(os.path.abspath(images_dir), name) -- cgit v1.2.3 From 078edc6f61f70fe2436253411b195f40201be833 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Fri, 25 Oct 2024 20:07:24 +0300 Subject: feat(g4f/gui/server/website.py): add redirect for /images/ endpoint --- g4f/gui/server/website.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'g4f') diff --git a/g4f/gui/server/website.py b/g4f/gui/server/website.py index 5e633674..3cabcdf3 100644 --- a/g4f/gui/server/website.py +++ b/g4f/gui/server/website.py @@ -27,6 +27,10 @@ class Website: 'function': redirect_home, 'methods': ['GET', 'POST'] }, + '/images/': { + 'function': redirect_home, + 'methods': ['GET', 'POST'] + }, } def _chat(self, conversation_id): @@ -35,4 +39,4 @@ class Website: return render_template('index.html', chat_id=conversation_id) def _index(self): - return render_template('index.html', chat_id=str(uuid.uuid4())) \ No newline at end of file + return render_template('index.html', chat_id=str(uuid.uuid4())) -- cgit v1.2.3 From a49fce2f7a9ea820cc72e2b1fd6635e3f20f534c Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Fri, 25 Oct 2024 20:10:46 +0300 Subject: feat(g4f/gui/client/index.html): add image album and enhance model/provider options --- g4f/gui/client/index.html | 488 ++++++++++++++++++++++++---------------------- 1 file changed, 251 insertions(+), 237 deletions(-) (limited to 'g4f') diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html index 1a660062..f8c11ea2 100644 --- a/g4f/gui/client/index.html +++ b/g4f/gui/client/index.html @@ -1,258 +1,272 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - g4f - gui - - - -
[The body of this index.html hunk did not survive extraction: the HTML markup was stripped, leaving only bare +/- diff markers and loose text nodes such as the "g4f - gui" page title, the Discord (discord.gg/XfybzPXPH5) and GitHub (@xtekky/gpt4free) sidebar links, and the model/provider option labels. Per the subject line and diffstat, the change adds the image-album markup, including the modal viewer and navigation controls that the following two commits style and script, and reworks the model and provider selectors: 251 insertions(+), 237 deletions(-).]
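For reference, the album markup summarized above is populated by two Flask routes from earlier in this series: GET /images, the JSON listing added to backend.py, and GET /images/<name>, which serves files from ./generated_images. A minimal sketch of consuming them from a script; the base URL is an assumption for a locally running GUI, not a value pinned down in these patches:

    import requests

    BASE = "http://localhost:8080"  # assumed local GUI address; adjust to your deployment

    # GET /images returns a JSON array of paths such as "/images/<uuid>.jpg"
    listing = requests.get(f"{BASE}/images")
    listing.raise_for_status()

    for path in listing.json():
        # each path is served by the /images/<name> route via send_from_directory
        image = requests.get(f"{BASE}{path}")
        image.raise_for_status()
        with open(path.rsplit("/", 1)[-1], "wb") as f:  # save under the bare file name
            f.write(image.content)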
+ + -- cgit v1.2.3 From 1b492f42b9973d516c176fbba1a55ff473ef1968 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Fri, 25 Oct 2024 20:12:39 +0300 Subject: feat(g4f/gui/client/static/css/style.css): add image modal and navigation controls --- g4f/gui/client/static/css/style.css | 87 ++++++++++++++++++++++++++++++++++++- 1 file changed, 86 insertions(+), 1 deletion(-) (limited to 'g4f') diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css index 441e2042..72f3ec4f 100644 --- a/g4f/gui/client/static/css/style.css +++ b/g4f/gui/client/static/css/style.css @@ -474,7 +474,6 @@ body { .stop_generating, .toolbar .regenerate { position: absolute; - z-index: 1000000; top: 0; right: 0; } @@ -1118,6 +1117,92 @@ a:-webkit-any-link { display: none; } +.album-image { + width: 100px; + height: auto; + margin: 5px; + display: inline-block; +} + +.modal { + display: none; + position: fixed; + z-index: 1; + left: 0; + top: 0; + width: 100%; + height: 100%; + overflow: hidden; + background-color: rgba(0,0,0,0.9); +} + +.modal-content { + margin: auto; + display: block; + max-width: 80%; + max-height: 80%; + transition: transform 0.2s; +} + +.close { + position: absolute; + top: 15px; + right: 35px; + color: #f1f1f1; + font-size: 40px; + font-weight: bold; + transition: 0.3s; +} + +.close:hover, +.close:focus { + color: #bbb; + text-decoration: none; + cursor: pointer; +} + + +.image-counter { + color: #fff; + font-size: 18px; + margin: auto 10px; + user-select: none; +} + +.nav-button { + background-color: #555; + color: #fff; + border: none; + padding: 10px; + font-size: 20px; + cursor: pointer; +} + +.nav-button:hover { + background-color: #777; +} + +.nav-button { + position: relative; +} + +.nav-button.left { + left: 0; +} + +.nav-button.right { + right: 0; +} + +.navigation-controls { + position: absolute; + bottom: 20px; + left: 50%; + transform: translateX(-50%); + display: flex; + gap: 10px; +} + .blink { animation: blinker 1s step-start infinite; } -- cgit v1.2.3 From 0f7d3ac0be16c672acaabf8da2dc459cbe81986a Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Fri, 25 Oct 2024 20:15:10 +0300 Subject: feat(messages): add image album functionality --- g4f/gui/client/static/js/chat.v1.js | 128 +++++++++++++++++++++++++++++++++--- 1 file changed, 120 insertions(+), 8 deletions(-) (limited to 'g4f') diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js index 9790b261..10b5c1f0 100644 --- a/g4f/gui/client/static/js/chat.v1.js +++ b/g4f/gui/client/static/js/chat.v1.js @@ -306,6 +306,14 @@ const prepare_messages = (messages, message_index = -1) => { messages = messages.filter((_, index) => message_index >= index); } + let new_messages = []; + if (systemPrompt?.value) { + new_messages.push({ + "role": "system", + "content": systemPrompt.value + }); + } + // Remove history, if it's selected if (document.getElementById('history')?.checked) { if (message_index == null) { @@ -315,13 +323,6 @@ const prepare_messages = (messages, message_index = -1) => { } } - let new_messages = []; - if (systemPrompt?.value) { - new_messages.push({ - "role": "system", - "content": systemPrompt.value - }); - } messages.forEach((new_message) => { // Include only not regenerated messages if (new_message && !new_message.regenerate) { @@ -334,6 +335,7 @@ const prepare_messages = (messages, message_index = -1) => { return new_messages; } + async function add_message_chunk(message) { if (message.type == "conversation") { console.info("Conversation used:", message.conversation) @@ 
-902,17 +904,127 @@ function open_settings() { } } +async function loadImages() { + try { + const response = await fetch('/images'); + const images = await response.json(); + console.log(images); + displayImages(images); + } catch (error) { + console.error('Error fetching images:', error); + } +} + +function displayImages(images) { + const album = document.querySelector('.images'); + album.innerHTML = ''; + images.forEach(image => { + const imgElement = document.createElement('img'); + imgElement.src = image; + imgElement.alt = 'Generated Image'; + imgElement.classList.add('album-image'); + album.appendChild(imgElement); + }); +} + +document.addEventListener('DOMContentLoaded', () => { + loadImages(); +}); + function open_album() { + const album = document.querySelector('.images'); if (album.classList.contains("hidden")) { sidebar.classList.remove("shown"); settings.classList.add("hidden"); album.classList.remove("hidden"); history.pushState({}, null, "/images/"); + loadImages(); } else { album.classList.add("hidden"); } } +let currentScale = 1; +let currentImageIndex = 0; +let imagesList = []; + +function displayImages(images) { + imagesList = images; + const album = document.querySelector('.images'); + album.innerHTML = ''; + images.forEach((image, index) => { + const imgElement = document.createElement('img'); + imgElement.src = image; + imgElement.alt = 'Generated Image'; + imgElement.classList.add('album-image'); + imgElement.style.cursor = 'pointer'; + imgElement.addEventListener('click', () => openImageModal(index)); + album.appendChild(imgElement); + }); +} + +function openImageModal(index) { + currentImageIndex = index; + const modal = document.getElementById('imageModal'); + const modalImg = document.getElementById('img01'); + const imageCounter = document.getElementById('imageCounter'); + modal.style.display = 'block'; + modalImg.src = imagesList[index]; + currentScale = 1; + modalImg.style.transform = `scale(${currentScale})`; + imageCounter.textContent = `${index + 1} / ${imagesList.length}`; +} + +const modal = document.getElementById('imageModal'); +const span = document.getElementsByClassName('close')[0]; +const prevImageButton = document.getElementById('prevImage'); +const nextImageButton = document.getElementById('nextImage'); + +span.onclick = function() { + modal.style.display = 'none'; +} + +window.onclick = function(event) { + if (event.target == modal) { + modal.style.display = 'none'; + } +} + +document.getElementById('img01').addEventListener('wheel', function(event) { + event.preventDefault(); + if (event.deltaY < 0) { + currentScale += 0.1; + } else if (currentScale > 0.1) { + currentScale -= 0.1; + } + document.getElementById('img01').style.transform = `scale(${currentScale})`; +}); + +prevImageButton.onclick = function() { + if (currentImageIndex > 0) { + currentImageIndex--; + openImageModal(currentImageIndex); + } +} + +nextImageButton.onclick = function() { + if (currentImageIndex < imagesList.length - 1) { + currentImageIndex++; + openImageModal(currentImageIndex); + } +} + +document.addEventListener('keydown', function(event) { + if (modal.style.display === 'block') { + if (event.key === 'ArrowLeft') { + prevImageButton.click(); + } else if (event.key === 'ArrowRight') { + nextImageButton.click(); + } + } +}); + + const register_settings_storage = async () => { optionElements.forEach((element) => { if (element.type == "textarea") { @@ -1424,4 +1536,4 @@ if (SpeechRecognition) { recognition.start(); } }); -} \ No newline at end of file +} -- cgit v1.2.3 
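The files this album lists accumulate in ./generated_images; on the REST side, the first commit of this series added a matching /v1/images/generate endpoint. A rough sketch of calling it with the fields defined by ImageGenerationConfig follows; the base URL and model name are assumptions for illustration, and the g4f-api-key header is only needed when the server was started with an API key:

    import requests

    API = "http://localhost:1337"  # assumed local run_api address

    payload = {
        "prompt": "a lighthouse at dusk",  # required field
        "model": "flux",                   # optional; model name assumed for illustration
        "response_format": "url",          # "url" is the config default
    }
    resp = requests.post(
        f"{API}/v1/images/generate",
        json=payload,
        headers={"g4f-api-key": "secret"},  # checked by the middleware only if the server set g4f_api_key
    )
    resp.raise_for_status()
    # the route wraps Image.to_dict() entries ({"url": ..., "b64_json": ...}) in {"data": [...]}
    for image in resp.json()["data"]:
        print(image["url"])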
From cc0ed0481e6f04f7eeba9fec836693a9fad45660 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Sat, 26 Oct 2024 18:54:47 +0300 Subject: feat(g4f/models.py): add new provider GizAI and update best providers --- g4f/models.py | 42 +++++++++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 15 deletions(-) (limited to 'g4f') diff --git a/g4f/models.py b/g4f/models.py index 1cea6447..1bea9eec 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -23,7 +23,6 @@ from .Provider import ( DDG, DeepInfra, DeepInfraChat, - DeepInfraImage, Editee, Free2GPT, FreeChatgpt, @@ -31,6 +30,7 @@ from .Provider import ( FreeNetfly, Gemini, GeminiPro, + GizAI, GigaChat, GPROChat, HuggingChat, @@ -87,6 +87,8 @@ class Model: """Returns a list of all model names.""" return _all_models + +### Default ### default = Model( name = "", base_provider = "", @@ -113,6 +115,8 @@ default = Model( ]) ) + + ############ ### Text ### ############ @@ -136,13 +140,13 @@ gpt_35_turbo = Model( gpt_4o = Model( name = 'gpt-4o', base_provider = 'OpenAI', - best_provider = IterListProvider([NexraChatGPT4o, Blackbox, ChatGptEs, AmigoChat, DarkAI, Editee, Liaobots, Airforce, OpenaiChat]) + best_provider = IterListProvider([NexraChatGPT4o, Blackbox, ChatGptEs, AmigoChat, DarkAI, Editee, GizAI, Airforce, Liaobots, OpenaiChat]) ) gpt_4o_mini = Model( name = 'gpt-4o-mini', base_provider = 'OpenAI', - best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, MagickPen, AmigoChat, RubiksAI, Liaobots, Airforce, ChatgptFree, Koala, OpenaiChat, ChatGpt]) + best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, MagickPen, AmigoChat, RubiksAI, Liaobots, Airforce, GizAI, ChatgptFree, Koala, OpenaiChat, ChatGpt]) ) gpt_4_turbo = Model( @@ -167,7 +171,7 @@ o1 = Model( o1_mini = Model( name = 'o1-mini', base_provider = 'OpenAI', - best_provider = AmigoChat + best_provider = IterListProvider([AmigoChat, GizAI]) ) @@ -216,13 +220,13 @@ llama_3_70b = Model( llama_3_1_8b = Model( name = "llama-3.1-8b", base_provider = "Meta Llama", - best_provider = IterListProvider([Blackbox, DeepInfraChat, ChatHub, Cloudflare, Airforce, PerplexityLabs]) + best_provider = IterListProvider([Blackbox, DeepInfraChat, ChatHub, Cloudflare, Airforce, GizAI, PerplexityLabs]) ) llama_3_1_70b = Model( name = "llama-3.1-70b", base_provider = "Meta Llama", - best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, Airforce, AiMathGPT, RubiksAI, HuggingFace, PerplexityLabs]) + best_provider = IterListProvider([DDG, HuggingChat, Blackbox, FreeGpt, TeachAnything, Free2GPT, DeepInfraChat, DarkAI, Airforce, AiMathGPT, RubiksAI, GizAI, HuggingFace, PerplexityLabs]) ) llama_3_1_405b = Model( @@ -299,7 +303,7 @@ mistral_nemo = Model( mistral_large = Model( name = "mistral-large", base_provider = "Mistral", - best_provider = Editee + best_provider = IterListProvider([Editee, GizAI]) ) @@ -347,13 +351,13 @@ phi_3_5_mini = Model( gemini_pro = Model( name = 'gemini-pro', base_provider = 'Google DeepMind', - best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, GPROChat, NexraGeminiPro, AmigoChat, Editee, Liaobots, Airforce]) + best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, GPROChat, NexraGeminiPro, AmigoChat, Editee, GizAI, Airforce, Liaobots]) ) gemini_flash = Model( name = 'gemini-flash', base_provider = 'Google DeepMind', - best_provider = IterListProvider([Blackbox, Liaobots, Airforce]) + best_provider = IterListProvider([Blackbox, GizAI, Airforce, 
Liaobots])
 )
 
 gemini = Model(
@@ -424,14 +428,14 @@ claude_3_sonnet = Model(
 claude_3_haiku = Model(
     name = 'claude-3-haiku',
     base_provider = 'Anthropic',
-    best_provider = IterListProvider([DDG, Airforce, Liaobots])
+    best_provider = IterListProvider([DDG, Airforce, GizAI, Liaobots])
 )
 
 # claude 3.5
 claude_3_5_sonnet = Model(
     name = 'claude-3.5-sonnet',
     base_provider = 'Anthropic',
-    best_provider = IterListProvider([Blackbox, Editee, AmigoChat, Airforce, Liaobots])
+    best_provider = IterListProvider([Blackbox, Editee, AmigoChat, Airforce, GizAI, Liaobots])
 )
 
 
@@ -753,14 +757,14 @@ sdxl_lora = Model(
 sdxl = Model(
     name = 'sdxl',
     base_provider = 'Stability AI',
-    best_provider = IterListProvider([ReplicateHome, DeepInfraImage])
+    best_provider = IterListProvider([ReplicateHome])
 
 )
 
 sd_1_5 = Model(
     name = 'sd-1.5',
     base_provider = 'Stability AI',
-    best_provider = NexraSD15
+    best_provider = IterListProvider([NexraSD15, GizAI])
 
 )
 
@@ -771,6 +775,13 @@ sd_3 = Model(
 
 )
 
+sd_3_5 = Model(
+    name = 'sd-3.5',
+    base_provider = 'Stability AI',
+    best_provider = GizAI
+
+)
+
 ### Playground ###
 playground_v2_5 = Model(
     name = 'playground-v2.5',
@@ -791,7 +802,7 @@ flux = Model(
 flux_pro = Model(
     name = 'flux-pro',
     base_provider = 'Flux AI',
-    best_provider = IterListProvider([AmigoChat, NexraFluxPro])
+    best_provider = IterListProvider([NexraFluxPro, AmigoChat])
 
 )
 
@@ -840,7 +851,7 @@ flux_4o = Model(
 flux_schnell = Model(
     name = 'flux-schnell',
     base_provider = 'Flux AI',
-    best_provider = ReplicateHome
+    best_provider = IterListProvider([ReplicateHome, GizAI])
 
 )
 
@@ -1123,6 +1134,7 @@ class ModelUtils:
         'sdxl-turbo': sdxl_turbo,
         'sd-1.5': sd_1_5,
         'sd-3': sd_3,
+        'sd-3.5': sd_3_5,
 
 
 ### Playground ###
-- cgit v1.2.3 

From 498ab6d5b8f52d1f2f480e7973635136af7f83f0 Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Sat, 26 Oct 2024 18:59:18 +0300
Subject: Disable the AI365VIP provider due to a Cloudflare problem

---
 g4f/Provider/AI365VIP.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'g4f')

diff --git a/g4f/Provider/AI365VIP.py b/g4f/Provider/AI365VIP.py
index c7ebf6b5..511ad568 100644
--- a/g4f/Provider/AI365VIP.py
+++ b/g4f/Provider/AI365VIP.py
@@ -10,7 +10,7 @@ from .helper import format_prompt
 class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chat.ai365vip.com"
     api_endpoint = "/api/chat"
-    working = True
+    working = False
     default_model = 'gpt-3.5-turbo'
     models = [
         'gpt-3.5-turbo',
-- cgit v1.2.3 

From 51e9d02d84e40160e9738d19244ac1995bb59b52 Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Sat, 26 Oct 2024 18:59:40 +0300
Subject: Update (g4f/Provider/AiMathGPT.py)

---
 g4f/Provider/AiMathGPT.py | 4 ----
 1 file changed, 4 deletions(-)
(limited to 'g4f')

diff --git a/g4f/Provider/AiMathGPT.py b/g4f/Provider/AiMathGPT.py
index 4399320a..90931691 100644
--- a/g4f/Provider/AiMathGPT.py
+++ b/g4f/Provider/AiMathGPT.py
@@ -59,10 +59,6 @@ class AiMathGPT(AsyncGeneratorProvider, ProviderModelMixin):
         async with ClientSession(headers=headers) as session:
             data = {
                 "messages": [
-                    {
-                        "role": "system",
-                        "content": ""
-                    },
                     {
                         "role": "user",
                         "content": format_prompt(messages)
-- cgit v1.2.3 

From 6ba098ec2bce56cac55b635302ff87bf97573fe2 Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Sat, 26 Oct 2024 19:00:31 +0300
Subject: New provider added (g4f/Provider/GizAI.py)

---
 g4f/Provider/GizAI.py | 151 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 151 insertions(+)
 create mode 100644 g4f/Provider/GizAI.py
(limited to 'g4f')

diff --git a/g4f/Provider/GizAI.py 
b/g4f/Provider/GizAI.py new file mode 100644 index 00000000..127edc9e --- /dev/null +++ b/g4f/Provider/GizAI.py @@ -0,0 +1,151 @@ +from __future__ import annotations + +import json +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from ..image import ImageResponse +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + +class GizAI(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://app.giz.ai/assistant/" + api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer" + working = True + + supports_system_message = True + supports_message_history = True + + # Chat models + default_model = 'chat-gemini-flash' + chat_models = [ + default_model, + 'chat-gemini-pro', + 'chat-gpt4m', + 'chat-gpt4', + 'claude-sonnet', + 'claude-haiku', + 'llama-3-70b', + 'llama-3-8b', + 'mistral-large', + 'chat-o1-mini' + ] + + # Image models + image_models = [ + 'flux1', + 'sdxl', + 'sd', + 'sd35', + ] + + models = [*chat_models, *image_models] + + model_aliases = { + # Chat model aliases + "gemini-flash": "chat-gemini-flash", + "gemini-pro": "chat-gemini-pro", + "gpt-4o-mini": "chat-gpt4m", + "gpt-4o": "chat-gpt4", + "claude-3.5-sonnet": "claude-sonnet", + "claude-3-haiku": "claude-haiku", + "llama-3.1-70b": "llama-3-70b", + "llama-3.1-8b": "llama-3-8b", + "o1-mini": "chat-o1-mini", + # Image model aliases + "sd-1.5": "sd", + "sd-3.5": "sd35", + "flux-schnell": "flux1", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + def is_image_model(cls, model: str) -> bool: + return model in cls.image_models + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + 'Accept': 'application/json, text/plain, */*', + 'Accept-Language': 'en-US,en;q=0.9', + 'Cache-Control': 'no-cache', + 'Connection': 'keep-alive', + 'Content-Type': 'application/json', + 'Origin': 'https://app.giz.ai', + 'Pragma': 'no-cache', + 'Sec-Fetch-Dest': 'empty', + 'Sec-Fetch-Mode': 'cors', + 'Sec-Fetch-Site': 'same-origin', + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36', + 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"' + } + + async with ClientSession() as session: + if cls.is_image_model(model): + # Image generation + prompt = messages[-1]["content"] + data = { + "model": model, + "input": { + "width": "1024", + "height": "1024", + "steps": 4, + "output_format": "webp", + "batch_size": 1, + "mode": "plan", + "prompt": prompt + } + } + async with session.post( + cls.api_endpoint, + headers=headers, + data=json.dumps(data), + proxy=proxy + ) as response: + response.raise_for_status() + response_data = await response.json() + if response_data.get('status') == 'completed' and response_data.get('output'): + for url in response_data['output']: + yield ImageResponse(images=url, alt="Generated Image") + else: + # Chat completion + data = { + "model": model, + "input": { + "messages": [ + { + "type": "human", + "content": format_prompt(messages) + } + ], + "mode": "plan" + }, + "noStream": True + } + async with session.post( + cls.api_endpoint, + headers=headers, + data=json.dumps(data), + proxy=proxy + ) 
as response: + response.raise_for_status() + result = await response.json() + yield result.get('output', '') -- cgit v1.2.3 From 664289cb0ba9e33ec53892866a958d38b549b137 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Sat, 26 Oct 2024 19:00:55 +0300 Subject: Update (g4f/Provider/__init__.py) --- g4f/Provider/__init__.py | 1 + 1 file changed, 1 insertion(+) (limited to 'g4f') diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index 8f36606b..1caf8aaf 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -47,6 +47,7 @@ from .FreeChatgpt import FreeChatgpt from .FreeGpt import FreeGpt from .FreeNetfly import FreeNetfly from .GeminiPro import GeminiPro +from .GizAI import GizAI from .GPROChat import GPROChat from .HuggingChat import HuggingChat from .HuggingFace import HuggingFace -- cgit v1.2.3 From fa2d608822540c9b73350bfa036e8822ade4e23f Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Sat, 26 Oct 2024 20:17:14 +0300 Subject: Update (g4f/Provider/Blackbox.py) --- g4f/Provider/Blackbox.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'g4f') diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py index 5cd43eed..6d8a467d 100644 --- a/g4f/Provider/Blackbox.py +++ b/g4f/Provider/Blackbox.py @@ -51,7 +51,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): 'ReactAgent', 'XcodeAgent', 'AngularJSAgent', - 'RepoMap', ] agentMode = { @@ -78,7 +77,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): 'ReactAgent': {'mode': True, 'id': "React Agent"}, 'XcodeAgent': {'mode': True, 'id': "Xcode Agent"}, 'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"}, - 'RepoMap': {'mode': True, 'id': "repomap"}, } userSelectedModel = { -- cgit v1.2.3 From 8768a057534b91e463f428fb91f301325110415c Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Sun, 27 Oct 2024 20:14:45 +0200 Subject: Update (docs/providers-and-models.md g4f/models.py g4f/Provider/nexra/) --- g4f/Provider/nexra/NexraChatGPT.py | 270 +++++++++++++++++++++++++++++----- g4f/Provider/nexra/NexraChatGPT4o.py | 86 ----------- g4f/Provider/nexra/NexraChatGptV2.py | 92 ------------ g4f/Provider/nexra/NexraChatGptWeb.py | 64 -------- g4f/Provider/nexra/__init__.py | 3 - g4f/models.py | 7 +- 6 files changed, 235 insertions(+), 287 deletions(-) delete mode 100644 g4f/Provider/nexra/NexraChatGPT4o.py delete mode 100644 g4f/Provider/nexra/NexraChatGptV2.py delete mode 100644 g4f/Provider/nexra/NexraChatGptWeb.py (limited to 'g4f') diff --git a/g4f/Provider/nexra/NexraChatGPT.py b/g4f/Provider/nexra/NexraChatGPT.py index fc5051ee..074a0363 100644 --- a/g4f/Provider/nexra/NexraChatGPT.py +++ b/g4f/Provider/nexra/NexraChatGPT.py @@ -1,45 +1,52 @@ from __future__ import annotations +import asyncio import json import requests +from typing import Any, Dict -from ...typing import CreateResult, Messages -from ..base_provider import ProviderModelMixin, AbstractProvider +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin from ..helper import format_prompt -class NexraChatGPT(AbstractProvider, ProviderModelMixin): + +class NexraChatGPT(AsyncGeneratorProvider, ProviderModelMixin): label = "Nexra ChatGPT" url = "https://nexra.aryahcr.cc/documentation/chatgpt/en" - api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt" + api_endpoint_nexra_chatgpt = "https://nexra.aryahcr.cc/api/chat/gpt" + api_endpoint_nexra_chatgpt4o = "https://nexra.aryahcr.cc/api/chat/complements" + api_endpoint_nexra_chatgpt_v2 = "https://nexra.aryahcr.cc/api/chat/complements" + 
api_endpoint_nexra_gptweb = "https://nexra.aryahcr.cc/api/chat/gptweb" working = True + supports_system_message = True + supports_message_history = True + supports_stream = True default_model = 'gpt-3.5-turbo' - models = ['gpt-4', 'gpt-4-0613', 'gpt-4-0314', 'gpt-4-32k-0314', default_model, 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002', 'gpt-3', 'text-curie-001', 'text-babbage-001', 'text-ada-001', 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002'] + nexra_chatgpt = [ + 'gpt-4', 'gpt-4-0613', 'gpt-4-0314', 'gpt-4-32k-0314', + default_model, 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301', + 'text-davinci-003', 'text-davinci-002', 'code-davinci-002', 'gpt-3', 'text-curie-001', 'text-babbage-001', 'text-ada-001', 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002' + ] + nexra_chatgpt4o = ['gpt-4o'] + nexra_chatgptv2 = ['chatgpt'] + nexra_gptweb = ['gptweb'] + models = nexra_chatgpt + nexra_chatgpt4o + nexra_chatgptv2 + nexra_gptweb model_aliases = { "gpt-4": "gpt-4-0613", - "gpt-4": "gpt-4-32k", - "gpt-4": "gpt-4-0314", - "gpt-4": "gpt-4-32k-0314", - + "gpt-4-32k": "gpt-4-32k-0314", "gpt-3.5-turbo": "gpt-3.5-turbo-16k", - "gpt-3.5-turbo": "gpt-3.5-turbo-0613", - "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613", - "gpt-3.5-turbo": "gpt-3.5-turbo-0301", - + "gpt-3.5-turbo-0613": "gpt-3.5-turbo-16k-0613", "gpt-3": "text-davinci-003", - "gpt-3": "text-davinci-002", - "gpt-3": "code-davinci-002", - "gpt-3": "text-curie-001", - "gpt-3": "text-babbage-001", - "gpt-3": "text-ada-001", - "gpt-3": "text-ada-001", - "gpt-3": "davinci", - "gpt-3": "curie", - "gpt-3": "babbage", - "gpt-3": "ada", - "gpt-3": "babbage-002", - "gpt-3": "davinci-002", + "text-davinci-002": "code-davinci-002", + "text-curie-001": "text-babbage-001", + "text-ada-001": "davinci", + "curie": "babbage", + "ada": "babbage-002", + "davinci-002": "davinci-002", + "chatgpt": "chatgpt", + "gptweb": "gptweb" } @classmethod @@ -50,40 +57,229 @@ class NexraChatGPT(AbstractProvider, ProviderModelMixin): return cls.model_aliases[model] else: return cls.default_model - + @classmethod - def create_completion( + async def create_async_generator( cls, model: str, messages: Messages, + stream: bool = False, proxy: str = None, markdown: bool = False, **kwargs - ) -> CreateResult: - model = cls.get_model(model) + ) -> AsyncResult: + if model in cls.nexra_chatgpt: + async for chunk in cls._create_async_generator_nexra_chatgpt(model, messages, proxy, **kwargs): + yield chunk + elif model in cls.nexra_chatgpt4o: + async for chunk in cls._create_async_generator_nexra_chatgpt4o(model, messages, stream, proxy, markdown, **kwargs): + yield chunk + elif model in cls.nexra_chatgptv2: + async for chunk in cls._create_async_generator_nexra_chatgpt_v2(model, messages, stream, proxy, markdown, **kwargs): + yield chunk + elif model in cls.nexra_gptweb: + async for chunk in cls._create_async_generator_nexra_gptweb(model, messages, proxy, **kwargs): + yield chunk + @classmethod + async def _create_async_generator_nexra_chatgpt( + cls, + model: str, + messages: Messages, + proxy: str = None, + markdown: bool = False, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + headers = { - 'Content-Type': 'application/json' + "Content-Type": "application/json" } + prompt = format_prompt(messages) data = { - "messages": [], - "prompt": format_prompt(messages), + "messages": messages, + 
"prompt": prompt, "model": model, "markdown": markdown } + + loop = asyncio.get_event_loop() + try: + response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt, data, headers, proxy) + filtered_response = cls._filter_response(response) + + for chunk in filtered_response: + yield chunk + except Exception as e: + print(f"Error during API request (nexra_chatgpt): {e}") + + @classmethod + async def _create_async_generator_nexra_chatgpt4o( + cls, + model: str, + messages: Messages, + stream: bool = False, + proxy: str = None, + markdown: bool = False, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) - response = requests.post(cls.api_endpoint, headers=headers, json=data) + headers = { + "Content-Type": "application/json" + } + + prompt = format_prompt(messages) + data = { + "messages": [ + { + "role": "user", + "content": prompt + } + ], + "stream": stream, + "markdown": markdown, + "model": model + } - return cls.process_response(response) + loop = asyncio.get_event_loop() + try: + response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt4o, data, headers, proxy, stream) + + if stream: + async for chunk in cls._process_streaming_response(response): + yield chunk + else: + for chunk in cls._process_non_streaming_response(response): + yield chunk + except Exception as e: + print(f"Error during API request (nexra_chatgpt4o): {e}") @classmethod - def process_response(cls, response): + async def _create_async_generator_nexra_chatgpt_v2( + cls, + model: str, + messages: Messages, + stream: bool = False, + proxy: str = None, + markdown: bool = False, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "Content-Type": "application/json" + } + + prompt = format_prompt(messages) + data = { + "messages": [ + { + "role": "user", + "content": prompt + } + ], + "stream": stream, + "markdown": markdown, + "model": model + } + + loop = asyncio.get_event_loop() + try: + response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt_v2, data, headers, proxy, stream) + + if stream: + async for chunk in cls._process_streaming_response(response): + yield chunk + else: + for chunk in cls._process_non_streaming_response(response): + yield chunk + except Exception as e: + print(f"Error during API request (nexra_chatgpt_v2): {e}") + + @classmethod + async def _create_async_generator_nexra_gptweb( + cls, + model: str, + messages: Messages, + proxy: str = None, + markdown: bool = False, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "Content-Type": "application/json" + } + + prompt = format_prompt(messages) + data = { + "prompt": prompt, + "markdown": markdown, + } + + loop = asyncio.get_event_loop() + try: + response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_gptweb, data, headers, proxy) + + for chunk in response.iter_content(1024): + if chunk: + decoded_chunk = chunk.decode().lstrip('_') + try: + response_json = json.loads(decoded_chunk) + if response_json.get("status"): + yield response_json.get("gpt", "") + except json.JSONDecodeError: + continue + except Exception as e: + print(f"Error during API request (nexra_gptweb): {e}") + + @staticmethod + def _sync_post_request(url: str, data: Dict[str, Any], headers: Dict[str, str], proxy: str = None, stream: bool = False) -> requests.Response: + proxies = { + "http": proxy, + "https": proxy, + } if proxy else None + + try: + response = 
requests.post(url, json=data, headers=headers, proxies=proxies, stream=stream) + response.raise_for_status() + return response + except requests.RequestException as e: + print(f"Request failed: {e}") + raise + + @staticmethod + def _process_non_streaming_response(response: requests.Response) -> str: if response.status_code == 200: try: - data = response.json() - return data.get('gpt', '') + content = response.text.lstrip('') + data = json.loads(content) + return data.get('message', '') except json.JSONDecodeError: return "Error: Unable to decode JSON response" else: return f"Error: {response.status_code}" + + @staticmethod + async def _process_streaming_response(response: requests.Response): + full_message = "" + for line in response.iter_lines(decode_unicode=True): + if line: + try: + line = line.lstrip('') + data = json.loads(line) + if data.get('finish'): + break + message = data.get('message', '') + if message: + yield message[len(full_message):] + full_message = message + except json.JSONDecodeError: + pass + + @staticmethod + def _filter_response(response: requests.Response) -> str: + response_json = response.json() + return response_json.get("gpt", "") diff --git a/g4f/Provider/nexra/NexraChatGPT4o.py b/g4f/Provider/nexra/NexraChatGPT4o.py deleted file mode 100644 index 126d32b8..00000000 --- a/g4f/Provider/nexra/NexraChatGPT4o.py +++ /dev/null @@ -1,86 +0,0 @@ -from __future__ import annotations - -import json -import requests - -from ...typing import CreateResult, Messages -from ..base_provider import ProviderModelMixin, AbstractProvider -from ..helper import format_prompt - -class NexraChatGPT4o(AbstractProvider, ProviderModelMixin): - label = "Nexra ChatGPT4o" - url = "https://nexra.aryahcr.cc/documentation/chatgpt/en" - api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements" - working = True - supports_stream = True - - default_model = "gpt-4o" - models = [default_model] - - @classmethod - def get_model(cls, model: str) -> str: - return cls.default_model - - @classmethod - def create_completion( - cls, - model: str, - messages: Messages, - stream: bool, - proxy: str = None, - markdown: bool = False, - **kwargs - ) -> CreateResult: - model = cls.get_model(model) - - headers = { - 'Content-Type': 'application/json' - } - - data = { - "messages": [ - { - "role": "user", - "content": format_prompt(messages) - } - ], - "stream": stream, - "markdown": markdown, - "model": model - } - - response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=stream) - - if stream: - return cls.process_streaming_response(response) - else: - return cls.process_non_streaming_response(response) - - @classmethod - def process_non_streaming_response(cls, response): - if response.status_code == 200: - try: - content = response.text.lstrip('') - data = json.loads(content) - return data.get('message', '') - except json.JSONDecodeError: - return "Error: Unable to decode JSON response" - else: - return f"Error: {response.status_code}" - - @classmethod - def process_streaming_response(cls, response): - full_message = "" - for line in response.iter_lines(decode_unicode=True): - if line: - try: - line = line.lstrip('') - data = json.loads(line) - if data.get('finish'): - break - message = data.get('message', '') - if message and message != full_message: - yield message[len(full_message):] - full_message = message - except json.JSONDecodeError: - pass diff --git a/g4f/Provider/nexra/NexraChatGptV2.py b/g4f/Provider/nexra/NexraChatGptV2.py deleted file mode 100644 index 1ff42705..00000000 
--- a/g4f/Provider/nexra/NexraChatGptV2.py +++ /dev/null @@ -1,92 +0,0 @@ -from __future__ import annotations - -import json -import requests - -from ...typing import CreateResult, Messages -from ..base_provider import ProviderModelMixin, AbstractProvider -from ..helper import format_prompt - -class NexraChatGptV2(AbstractProvider, ProviderModelMixin): - label = "Nexra ChatGPT v2" - url = "https://nexra.aryahcr.cc/documentation/chatgpt/en" - api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements" - working = True - supports_stream = True - - default_model = 'chatgpt' - models = [default_model] - model_aliases = {"gpt-4": "chatgpt"} - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - def create_completion( - cls, - model: str, - messages: Messages, - stream: bool, - proxy: str = None, - markdown: bool = False, - **kwargs - ) -> CreateResult: - model = cls.get_model(model) - - headers = { - 'Content-Type': 'application/json' - } - - data = { - "messages": [ - { - "role": "user", - "content": format_prompt(messages) - } - ], - "stream": stream, - "markdown": markdown, - "model": model - } - - response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=stream) - - if stream: - return cls.process_streaming_response(response) - else: - return cls.process_non_streaming_response(response) - - @classmethod - def process_non_streaming_response(cls, response): - if response.status_code == 200: - try: - content = response.text.lstrip('') - data = json.loads(content) - return data.get('message', '') - except json.JSONDecodeError: - return "Error: Unable to decode JSON response" - else: - return f"Error: {response.status_code}" - - @classmethod - def process_streaming_response(cls, response): - full_message = "" - for line in response.iter_lines(decode_unicode=True): - if line: - try: - line = line.lstrip('') - data = json.loads(line) - if data.get('finish'): - break - message = data.get('message', '') - if message: - yield message[len(full_message):] - full_message = message - except json.JSONDecodeError: - pass diff --git a/g4f/Provider/nexra/NexraChatGptWeb.py b/g4f/Provider/nexra/NexraChatGptWeb.py deleted file mode 100644 index f82694d4..00000000 --- a/g4f/Provider/nexra/NexraChatGptWeb.py +++ /dev/null @@ -1,64 +0,0 @@ -from __future__ import annotations - -import json -import requests - -from ...typing import CreateResult, Messages -from ..base_provider import ProviderModelMixin, AbstractProvider -from ..helper import format_prompt - -class NexraChatGptWeb(AbstractProvider, ProviderModelMixin): - label = "Nexra ChatGPT Web" - url = "https://nexra.aryahcr.cc/documentation/chatgpt/en" - working = True - - default_model = "gptweb" - models = [default_model] - model_aliases = {"gpt-4": "gptweb"} - api_endpoints = {"gptweb": "https://nexra.aryahcr.cc/api/chat/gptweb"} - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - def create_completion( - cls, - model: str, - messages: Messages, - proxy: str = None, - markdown: bool = False, - **kwargs - ) -> CreateResult: - model = cls.get_model(model) - api_endpoint = cls.api_endpoints.get(model, cls.api_endpoints[cls.default_model]) - - headers = { - 'Content-Type': 'application/json' - } - - data = { 
- "prompt": format_prompt(messages), - "markdown": markdown - } - - response = requests.post(api_endpoint, headers=headers, json=data) - - return cls.process_response(response) - - @classmethod - def process_response(cls, response): - if response.status_code == 200: - try: - content = response.text.lstrip('_') - json_response = json.loads(content) - return json_response.get('gpt', '') - except json.JSONDecodeError: - return "Error: Unable to decode JSON response" - else: - return f"Error: {response.status_code}" diff --git a/g4f/Provider/nexra/__init__.py b/g4f/Provider/nexra/__init__.py index 6121fdc0..bebc1fb6 100644 --- a/g4f/Provider/nexra/__init__.py +++ b/g4f/Provider/nexra/__init__.py @@ -1,9 +1,6 @@ from .NexraBing import NexraBing from .NexraBlackbox import NexraBlackbox from .NexraChatGPT import NexraChatGPT -from .NexraChatGPT4o import NexraChatGPT4o -from .NexraChatGptV2 import NexraChatGptV2 -from .NexraChatGptWeb import NexraChatGptWeb from .NexraDallE import NexraDallE from .NexraDallE2 import NexraDallE2 from .NexraEmi import NexraEmi diff --git a/g4f/models.py b/g4f/models.py index 1bea9eec..32a12d10 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -42,9 +42,6 @@ from .Provider import ( NexraBing, NexraBlackbox, NexraChatGPT, - NexraChatGPT4o, - NexraChatGptV2, - NexraChatGptWeb, NexraDallE, NexraDallE2, NexraEmi, @@ -140,7 +137,7 @@ gpt_35_turbo = Model( gpt_4o = Model( name = 'gpt-4o', base_provider = 'OpenAI', - best_provider = IterListProvider([NexraChatGPT4o, Blackbox, ChatGptEs, AmigoChat, DarkAI, Editee, GizAI, Airforce, Liaobots, OpenaiChat]) + best_provider = IterListProvider([NexraChatGPT, Blackbox, ChatGptEs, AmigoChat, DarkAI, Editee, GizAI, Airforce, Liaobots, OpenaiChat]) ) gpt_4o_mini = Model( @@ -158,7 +155,7 @@ gpt_4_turbo = Model( gpt_4 = Model( name = 'gpt-4', base_provider = 'OpenAI', - best_provider = IterListProvider([Chatgpt4Online, Ai4Chat, NexraBing, NexraChatGPT, NexraChatGptV2, NexraChatGptWeb, Airforce, Bing, OpenaiChat, gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider]) + best_provider = IterListProvider([Chatgpt4Online, Ai4Chat, NexraBing, NexraChatGPT, Airforce, Bing, OpenaiChat, gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider]) ) # o1 -- cgit v1.2.3 From d7d1db835ed6670d51823e32c855151813e12fce Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Tue, 29 Oct 2024 09:48:04 +0200 Subject: Update (g4f/gui/*) --- g4f/gui/client/index.html | 437 ++++++++++++++++++------------------ g4f/gui/client/static/css/style.css | 87 +------ g4f/gui/client/static/js/chat.v1.js | 110 --------- g4f/gui/server/backend.py | 24 +- 4 files changed, 218 insertions(+), 440 deletions(-) (limited to 'g4f') diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html index f8c11ea2..7e8ef09c 100644 --- a/g4f/gui/client/index.html +++ b/g4f/gui/client/index.html @@ -1,222 +1,227 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - g4f - gui - - -
-
-
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + g4f - gui + + + +
+
+
- +
- - -
- - discord ~ discord.gg/XfybzPXPH5 - -
-
- - github ~ @xtekky/gpt4free - -
-
- - -
+ + +
+ + discord ~ discord.gg/XfybzPXPH5 + +
+
+ + github ~ @xtekky/gpt4free + +
+
+ + +
-
- - + + -
+
+
-
- - -
-
- -
-
- -
+
+ + +
+
+ +
+
+ +
-
- - - - - -
- -
-
+
+ + + + + +
+ +
+
@@ -243,30 +248,18 @@ - - -
+ +
-
-
- -
- -
- +
+ +
+ +
+ - diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css index 72f3ec4f..441e2042 100644 --- a/g4f/gui/client/static/css/style.css +++ b/g4f/gui/client/static/css/style.css @@ -474,6 +474,7 @@ body { .stop_generating, .toolbar .regenerate { position: absolute; + z-index: 1000000; top: 0; right: 0; } @@ -1117,92 +1118,6 @@ a:-webkit-any-link { display: none; } -.album-image { - width: 100px; - height: auto; - margin: 5px; - display: inline-block; -} - -.modal { - display: none; - position: fixed; - z-index: 1; - left: 0; - top: 0; - width: 100%; - height: 100%; - overflow: hidden; - background-color: rgba(0,0,0,0.9); -} - -.modal-content { - margin: auto; - display: block; - max-width: 80%; - max-height: 80%; - transition: transform 0.2s; -} - -.close { - position: absolute; - top: 15px; - right: 35px; - color: #f1f1f1; - font-size: 40px; - font-weight: bold; - transition: 0.3s; -} - -.close:hover, -.close:focus { - color: #bbb; - text-decoration: none; - cursor: pointer; -} - - -.image-counter { - color: #fff; - font-size: 18px; - margin: auto 10px; - user-select: none; -} - -.nav-button { - background-color: #555; - color: #fff; - border: none; - padding: 10px; - font-size: 20px; - cursor: pointer; -} - -.nav-button:hover { - background-color: #777; -} - -.nav-button { - position: relative; -} - -.nav-button.left { - left: 0; -} - -.nav-button.right { - right: 0; -} - -.navigation-controls { - position: absolute; - bottom: 20px; - left: 50%; - transform: translateX(-50%); - display: flex; - gap: 10px; -} - .blink { animation: blinker 1s step-start infinite; } diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js index 9bf07046..42ddb129 100644 --- a/g4f/gui/client/static/js/chat.v1.js +++ b/g4f/gui/client/static/js/chat.v1.js @@ -936,127 +936,17 @@ function open_settings() { } } -async function loadImages() { - try { - const response = await fetch('/images'); - const images = await response.json(); - console.log(images); - displayImages(images); - } catch (error) { - console.error('Error fetching images:', error); - } -} - -function displayImages(images) { - const album = document.querySelector('.images'); - album.innerHTML = ''; - images.forEach(image => { - const imgElement = document.createElement('img'); - imgElement.src = image; - imgElement.alt = 'Generated Image'; - imgElement.classList.add('album-image'); - album.appendChild(imgElement); - }); -} - -document.addEventListener('DOMContentLoaded', () => { - loadImages(); -}); - function open_album() { - const album = document.querySelector('.images'); if (album.classList.contains("hidden")) { sidebar.classList.remove("shown"); settings.classList.add("hidden"); album.classList.remove("hidden"); history.pushState({}, null, "/images/"); - loadImages(); } else { album.classList.add("hidden"); } } -let currentScale = 1; -let currentImageIndex = 0; -let imagesList = []; - -function displayImages(images) { - imagesList = images; - const album = document.querySelector('.images'); - album.innerHTML = ''; - images.forEach((image, index) => { - const imgElement = document.createElement('img'); - imgElement.src = image; - imgElement.alt = 'Generated Image'; - imgElement.classList.add('album-image'); - imgElement.style.cursor = 'pointer'; - imgElement.addEventListener('click', () => openImageModal(index)); - album.appendChild(imgElement); - }); -} - -function openImageModal(index) { - currentImageIndex = index; - const modal = document.getElementById('imageModal'); - const 
modalImg = document.getElementById('img01'); - const imageCounter = document.getElementById('imageCounter'); - modal.style.display = 'block'; - modalImg.src = imagesList[index]; - currentScale = 1; - modalImg.style.transform = `scale(${currentScale})`; - imageCounter.textContent = `${index + 1} / ${imagesList.length}`; -} - -const modal = document.getElementById('imageModal'); -const span = document.getElementsByClassName('close')[0]; -const prevImageButton = document.getElementById('prevImage'); -const nextImageButton = document.getElementById('nextImage'); - -span.onclick = function() { - modal.style.display = 'none'; -} - -window.onclick = function(event) { - if (event.target == modal) { - modal.style.display = 'none'; - } -} - -document.getElementById('img01').addEventListener('wheel', function(event) { - event.preventDefault(); - if (event.deltaY < 0) { - currentScale += 0.1; - } else if (currentScale > 0.1) { - currentScale -= 0.1; - } - document.getElementById('img01').style.transform = `scale(${currentScale})`; -}); - -prevImageButton.onclick = function() { - if (currentImageIndex > 0) { - currentImageIndex--; - openImageModal(currentImageIndex); - } -} - -nextImageButton.onclick = function() { - if (currentImageIndex < imagesList.length - 1) { - currentImageIndex++; - openImageModal(currentImageIndex); - } -} - -document.addEventListener('keydown', function(event) { - if (modal.style.display === 'block') { - if (event.key === 'ArrowLeft') { - prevImageButton.click(); - } else if (event.key === 'ArrowRight') { - nextImageButton.click(); - } - } -}); - - const register_settings_storage = async () => { optionElements.forEach((element) => { if (element.type == "textarea") { diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py index e24d4da2..dc1b1080 100644 --- a/g4f/gui/server/backend.py +++ b/g4f/gui/server/backend.py @@ -1,6 +1,5 @@ import json -import os -from flask import request, Flask, jsonify, send_from_directory +from flask import request, Flask from g4f.image import is_allowed_extension, to_image from .api import Api @@ -55,10 +54,6 @@ class Backend_Api(Api): '/images/': { 'function': self.serve_images, 'methods': ['GET'] - }, - '/images': { - 'function': self.get_images, - 'methods': ['GET'] } } @@ -115,19 +110,4 @@ class Backend_Api(Api): Returns: str: A JSON formatted string. 
""" - return json.dumps(super()._format_json(response_type, content)) + "\n" - - @staticmethod - def get_images(): - images_dir = "./generated_images" - try: - images = [f for f in os.listdir(images_dir) if os.path.isfile(os.path.join(images_dir, f))] - images = [f"/images/{image}" for image in images if image.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.webp'))] - return jsonify(images) - except Exception as e: - return str(e), 500 - - @staticmethod - def serve_images(name): - images_dir = "./generated_images" - return send_from_directory(os.path.abspath(images_dir), name) + return json.dumps(super()._format_json(response_type, content)) + "\n" \ No newline at end of file -- cgit v1.2.3 From 482e7d8946d8c087fe759c310c47131d401a0f23 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Tue, 29 Oct 2024 10:07:27 +0200 Subject: Update (g4f/gui/server/api.py g4f/Provider/Blackbox.py) --- g4f/Provider/Blackbox.py | 4 ++-- g4f/gui/server/api.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'g4f') diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py index 6d8a467d..168cfe1e 100644 --- a/g4f/Provider/Blackbox.py +++ b/g4f/Provider/Blackbox.py @@ -172,7 +172,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): proxy: Optional[str] = None, image: ImageType = None, image_name: str = None, - websearch: bool = False, + web_search: bool = False, **kwargs ) -> AsyncGenerator[Union[str, ImageResponse], None]: """ @@ -274,7 +274,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): "clickedForceWebSearch": False, "visitFromDelta": False, "mobileClient": False, - "webSearchMode": websearch, + "webSearchMode": web_search, "userSelectedModel": cls.userSelectedModel.get(model, model) } diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py index 51cf3d32..92700611 100644 --- a/g4f/gui/server/api.py +++ b/g4f/gui/server/api.py @@ -115,7 +115,7 @@ class Api: if api_key is not None: kwargs["api_key"] = api_key if json_data.get('web_search'): - if provider in ("Bing", "HuggingChat"): + if provider in ("Bing", "HuggingChat", "Blackbox"): kwargs['web_search'] = True else: from .internet import get_search_message -- cgit v1.2.3 From c11bac4849b2e51f8642f410d0bd62c855bf7e8f Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Tue, 29 Oct 2024 10:09:58 +0200 Subject: Update (g4f/Provider/Blackbox.py) --- g4f/Provider/Blackbox.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'g4f') diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py index 168cfe1e..4052893a 100644 --- a/g4f/Provider/Blackbox.py +++ b/g4f/Provider/Blackbox.py @@ -184,7 +184,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): proxy (Optional[str]): Proxy URL, if needed. image (ImageType): Image data to be processed, if any. image_name (str): Name of the image file, if an image is provided. - websearch (bool): Enables or disables web search mode. + web_search (bool): Enables or disables web search mode. **kwargs: Additional keyword arguments. 
Yields: @@ -311,7 +311,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): else: yield cleaned_response else: - if websearch: + if web_search: match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL) if match: source_part = match.group(1).strip() -- cgit v1.2.3 From 0aad039ac8554c4a011501e5e232a1237e69eacb Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Tue, 29 Oct 2024 10:11:56 +0200 Subject: Update (g4f/gui/server/api.py) --- g4f/gui/server/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'g4f') diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py index 92700611..7aac650a 100644 --- a/g4f/gui/server/api.py +++ b/g4f/gui/server/api.py @@ -115,7 +115,7 @@ class Api: if api_key is not None: kwargs["api_key"] = api_key if json_data.get('web_search'): - if provider in ("Bing", "HuggingChat", "Blackbox"): + if provider: kwargs['web_search'] = True else: from .internet import get_search_message -- cgit v1.2.3 From e79c8b01f58d21502c962f38c804bf81196f89fb Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Tue, 29 Oct 2024 22:03:05 +0200 Subject: Update (docs/async_client.md docs/client.md docs/interference-api.md g4f/client/client.py) --- g4f/client/client.py | 31 +++++-------------------------- 1 file changed, 5 insertions(+), 26 deletions(-) (limited to 'g4f') diff --git a/g4f/client/client.py b/g4f/client/client.py index 2772f9bb..44d99d60 100644 --- a/g4f/client/client.py +++ b/g4f/client/client.py @@ -149,7 +149,6 @@ class Completions: self, messages: Messages, model: str, - system: str = None, # Added system parameter provider: ProviderType = None, stream: bool = False, proxy: str = None, @@ -162,12 +161,6 @@ class Completions: ignore_stream: bool = False, **kwargs ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]: - # If a system prompt is provided, prepend it to the messages - if system: - system_message = {"role": "system", "content": system} - messages = [system_message] + messages - - # Existing implementation continues... model, provider = get_model_and_provider( model, self.provider if provider is None else provider, @@ -228,7 +221,6 @@ class Completions: self, messages: Messages, model: str, - system: str = None, # Added system parameter provider: ProviderType = None, stream: bool = False, proxy: str = None, @@ -241,12 +233,6 @@ class Completions: ignore_stream: bool = False, **kwargs ) -> Union[ChatCompletion, AsyncIterator[ChatCompletionChunk]]: - # If a system prompt is provided, prepend it to the messages - if system: - system_message = {"role": "system", "content": system} - messages = [system_message] + messages - - # Existing implementation continues... 
model, provider = get_model_and_provider( model, self.provider if provider is None else provider, @@ -285,18 +271,16 @@ class Completions: **kwargs ) - # Handle streaming or non-streaming responses + # Removed 'await' here since 'async_iter_response' returns an async generator + response = async_iter_response(response, stream, response_format, max_tokens, stop) + response = async_iter_append_model_and_provider(response) + if stream: - response = async_iter_response(response, stream, response_format, max_tokens, stop) - response = async_iter_append_model_and_provider(response) return response else: - response = async_iter_response(response, stream, response_format, max_tokens, stop) - response = async_iter_append_model_and_provider(response) async for result in response: return result - class Chat: completions: Completions @@ -417,12 +401,6 @@ class Image: def __repr__(self): return f"Image(url={self.url}, b64_json={'' if self.b64_json else None})" - def to_dict(self): - return { - "url": self.url, - "b64_json": self.b64_json - } - class ImagesResponse: def __init__(self, data: list[Image]): self.data = data @@ -530,3 +508,4 @@ class Images: async def create_variation(self, image: Union[str, bytes], model: str = None, response_format: str = "url", **kwargs): # Existing implementation, adjust if you want to support b64_json here as well pass + -- cgit v1.2.3
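For illustration, a minimal sketch of how the changes in the last four commits surface to a caller of the client API. This is not part of the patch series: the async variant of Completions is assumed to be exposed as `async_create` (the hunk headers above do not show the method's def line, so treat that name as an assumption), and the model and prompt values are placeholders.

    import asyncio

    from g4f.client import Client
    from g4f.Provider import Blackbox

    client = Client()

    # Blackbox now accepts `web_search` (renamed from `websearch` in commit
    # 482e7d89, matching the request key the GUI forwards since 0aad039a);
    # the client passes unknown keyword arguments through to the provider.
    answer = client.chat.completions.create(
        model="gpt-4o",
        provider=Blackbox,
        web_search=True,
        messages=[{"role": "user", "content": "Summarize this week's Python news."}],
    )
    print(answer.choices[0].message.content)

    async def stream_demo():
        # After the streaming fix in e79c8b01, the async path returns the
        # generator from async_iter_response without awaiting it first, so
        # stream=True hands back chunks as they arrive.
        stream = await client.chat.completions.async_create(
            model="gpt-4o",
            messages=[{"role": "user", "content": "Hello"}],
            stream=True,
        )
        async for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="")

    asyncio.run(stream_demo())

Note that the `system` keyword removed from client.py in e79c8b01 is no longer prepended for you; a caller wanting a system prompt now includes it as the first entry of `messages` directly.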