author     Heiner Lohaus <hlohaus@users.noreply.github.com>   2024-01-13 18:10:43 +0100
committer  Heiner Lohaus <hlohaus@users.noreply.github.com>   2024-01-13 18:10:43 +0100
commit     ceed364cb1ade47a29ccf698074be01b520fc82c
tree       fd1ddf447d186ee5e4c8977e5b937e4161392b4c
parent     Improve FreeChatgpt Provider
Diffstat
 -rw-r--r--  g4f/Provider/Bing.py                   | 24
 -rw-r--r--  g4f/Provider/bing/upload_image.py      |  6
 -rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py  |  5
 -rw-r--r--  g4f/gui/client/css/style.css           |  1
 -rw-r--r--  g4f/gui/client/js/chat.v1.js           |  4
 -rw-r--r--  g4f/image.py                           |  5
 6 files changed, 23 insertions, 22 deletions
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index da9b0172..50e29d23 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -9,9 +9,10 @@ from urllib import parse
 from aiohttp import ClientSession, ClientTimeout
 
 from ..typing import AsyncResult, Messages, ImageType
+from ..image import ImageResponse
 from .base_provider import AsyncGeneratorProvider
 from .bing.upload_image import upload_image
-from .bing.create_images import create_images, format_images_markdown
+from .bing.create_images import create_images
 from .bing.conversation import Conversation, create_conversation, delete_conversation
 
 class Tones():
@@ -172,7 +173,7 @@ def create_message(
     prompt: str,
     tone: str,
     context: str = None,
-    image_info: dict = None,
+    image_response: ImageResponse = None,
     web_search: bool = False,
     gpt4_turbo: bool = False
 ) -> str:
@@ -228,9 +229,9 @@ def create_message(
         'target': 'chat',
         'type': 4
     }
-    if image_info and "imageUrl" in image_info and "originalImageUrl" in image_info:
-        struct['arguments'][0]['message']['originalImageUrl'] = image_info['originalImageUrl']
-        struct['arguments'][0]['message']['imageUrl'] = image_info['imageUrl']
+    if image_response.get('imageUrl') and image_response.get('originalImageUrl'):
+        struct['arguments'][0]['message']['originalImageUrl'] = image_response.get('originalImageUrl')
+        struct['arguments'][0]['message']['imageUrl'] = image_response.get('imageUrl')
         struct['arguments'][0]['experienceType'] = None
         struct['arguments'][0]['attachedFileInfo'] = {"fileName": None, "fileType": None}
     if context:
@@ -262,9 +263,9 @@ async def stream_generate(
         headers=headers
     ) as session:
         conversation = await create_conversation(session, proxy)
-        image_info = None
-        if image:
-            image_info = await upload_image(session, image, tone, proxy)
+        image_response = await upload_image(session, image, tone, proxy) if image else None
+        if image_response:
+            yield image_response
         try:
             async with session.ws_connect(
                 'wss://sydney.bing.com/sydney/ChatHub',
@@ -274,7 +275,7 @@ async def stream_generate(
             ) as wss:
                 await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
                 await wss.receive(timeout=timeout)
-                await wss.send_str(create_message(conversation, prompt, tone, context, image_info, web_search, gpt4_turbo))
+                await wss.send_str(create_message(conversation, prompt, tone, context, image_response, web_search, gpt4_turbo))
 
                 response_txt = ''
                 returned_text = ''
@@ -290,6 +291,7 @@ async def stream_generate(
                     response = json.loads(obj)
                     if response.get('type') == 1 and response['arguments'][0].get('messages'):
                         message = response['arguments'][0]['messages'][0]
+                        image_response = None
                         if (message['contentOrigin'] != 'Apology'):
                             if 'adaptiveCards' in message:
                                 card = message['adaptiveCards'][0]['body'][0]
@@ -301,7 +303,7 @@ async def stream_generate(
                             elif message.get('contentType') == "IMAGE":
                                 prompt = message.get('text')
                                 try:
-                                    response_txt += format_images_markdown(await create_images(session, prompt, proxy), prompt)
+                                    image_response = ImageResponse(await create_images(session, prompt, proxy), prompt)
                                 except:
                                     response_txt += f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}"
                                 final = True
@@ -310,6 +312,8 @@ async def stream_generate(
                             if new != "\n":
                                 yield new
                             returned_text = response_txt
+                        if image_response:
+                            yield image_response
                     elif response.get('type') == 2:
                         result = response['item']['result']
                         if result.get('error'):
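The net effect of the Bing.py changes is that image results (the uploaded input image and any images generated via create_images) are now yielded as ImageResponse objects instead of being formatted into the text stream as markdown. Below is a minimal consumer sketch of how a caller might handle that mixed stream; it is an illustration only, and the exact create_async_generator call signature is assumed rather than taken from this diff.

```python
# Hypothetical consumer sketch (not part of this commit): separate plain text
# chunks from the ImageResponse objects the Bing provider now yields.
# Assumes g4f is installed and Bing.create_async_generator(model, messages)
# matches this version of the library.
import asyncio

from g4f.Provider import Bing
from g4f.image import ImageResponse

async def main():
    messages = [{"role": "user", "content": "Create an image of a lighthouse"}]
    text = ""
    async for chunk in Bing.create_async_generator("gpt-4", messages):
        if isinstance(chunk, ImageResponse):
            # Image results arrive as objects; render or store them as needed.
            print("image response:", chunk)
        else:
            text += chunk
    print(text)

asyncio.run(main())
```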
diff --git a/g4f/Provider/bing/upload_image.py b/g4f/Provider/bing/upload_image.py
index d92451fa..1af902ef 100644
--- a/g4f/Provider/bing/upload_image.py
+++ b/g4f/Provider/bing/upload_image.py
@@ -6,7 +6,7 @@ import json
 import math
 from ...typing import ImageType
 from aiohttp import ClientSession
-from ...image import to_image, process_image, to_base64
+from ...image import to_image, process_image, to_base64, ImageResponse
 
 image_config = {
     "maxImagePixels": 360000,
@@ -19,7 +19,7 @@ async def upload_image(
     image: ImageType,
     tone: str,
     proxy: str = None
-) -> dict:
+) -> ImageResponse:
     image = to_image(image)
     width, height = image.size
     max_image_pixels = image_config['maxImagePixels']
@@ -55,7 +55,7 @@ async def upload_image(
         else "https://www.bing.com/images/blob?bcid=" + result['bcid']
     )
-    return result
+    return ImageResponse(result["imageUrl"], "", result)
 
 def build_image_upload_api_payload(image_bin: str, tone: str):
     payload = {
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 4b11aeaf..a790f0de 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -13,7 +13,6 @@ from ...webdriver import get_browser, get_driver_cookies
 from ...typing import AsyncResult, Messages
 from ...requests import StreamSession
 from ...image import to_image, to_bytes, ImageType, ImageResponse
-from ... import debug
 
 models = {
     "gpt-3.5": "text-davinci-002-render-sha",
@@ -242,9 +241,7 @@ class OpenaiChat(AsyncGeneratorProvider):
                 json=data,
                 headers={"Accept": "text/event-stream", **headers}
             ) as response:
-                try:
-                    response.raise_for_status()
-                except:
+                if not response.ok:
                     raise RuntimeError(f"Response {response.status_code}: {await response.text()}")
                 try:
                     last_message: int = 0
diff --git a/g4f/gui/client/css/style.css b/g4f/gui/client/css/style.css
index 59464272..2d4c9857 100644
--- a/g4f/gui/client/css/style.css
+++ b/g4f/gui/client/css/style.css
@@ -566,6 +566,7 @@ select {
     animation: blink 0.8s infinite;
     width: 7px;
     height: 15px;
+    display: inline-block;
 }
 
 @keyframes blink {
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index e763f52d..ccc9461b 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -104,7 +104,7 @@ const ask_gpt = async () => {
                 </div>
                 <div class="content" id="gpt_${window.token}">
                     <div class="provider"></div>
-                    <div class="content_inner"><div id="cursor"></div></div>
+                    <div class="content_inner"><span id="cursor"></span></div>
                 </div>
             </div>
         `;
@@ -168,7 +168,7 @@ const ask_gpt = async () => {
         }
         if (error) {
             console.error(error);
-            content_inner.innerHTML = "An error occured, please try again, if the problem persists, please use a other model or provider";
+            content_inner.innerHTML += "<p>An error occured, please try again, if the problem persists, please use a other model or provider.</p>";
         } else {
             html = markdown_render(text);
             html = html.substring(0, html.lastIndexOf('</p>')) + '<span id="cursor"></span></p>';
diff --git a/g4f/image.py b/g4f/image.py
index 4a97247e..01664f4e 100644
--- a/g4f/image.py
+++ b/g4f/image.py
@@ -64,7 +64,6 @@ def get_orientation(image: Image.Image) -> int:
 
 def process_image(img: Image.Image, new_width: int, new_height: int) -> Image.Image:
     orientation = get_orientation(img)
-    new_img = Image.new("RGB", (new_width, new_height), color="#FFFFFF")
     if orientation:
         if orientation > 4:
             img = img.transpose(Image.FLIP_LEFT_RIGHT)
@@ -74,8 +73,8 @@ def process_image(img: Image.Image, new_width: int, new_height: int) -> Image.Im
             img = img.transpose(Image.ROTATE_270)
         if orientation in [7, 8]:
             img = img.transpose(Image.ROTATE_90)
-    new_img.paste(img, (0, 0))
-    return new_img
+    img.thumbnail((new_width, new_height))
+    return img
 
 def to_base64(image: Image.Image, compression_rate: float) -> str:
     output_buffer = BytesIO()
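On the g4f/image.py side, process_image now relies on Image.thumbnail, which resizes in place, keeps the aspect ratio, and never upscales, instead of pasting the unresized image onto a fixed-size white canvas. A small Pillow-only sketch of that behavior follows; the sample dimensions are illustrative.

```python
# Pillow-only sketch of the new resizing behavior in process_image:
# thumbnail() bounds the image to (new_width, new_height) in place while
# preserving the aspect ratio, rather than pasting onto a white canvas.
from PIL import Image

img = Image.new("RGB", (1200, 600), "#FF0000")  # illustrative 2:1 source image
img.thumbnail((600, 600))                       # in-place, aspect ratio preserved
print(img.size)                                 # -> (600, 300)
```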