author     Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-03-19 18:48:32 +0100
committer  Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-03-19 18:48:32 +0100
commit     486e43dabd1198e6f3c94a4040fc01f5b2fe824c (patch)
tree       d328bf845d35e875c3c10964ac3b44d047ac352c
parent     Fix abort in webview (diff)
-rw-r--r--  .gitignore                                 3
-rw-r--r--  g4f/Provider/Bing.py                      26
-rw-r--r--  g4f/Provider/bing/conversation.py          3
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py      2
-rw-r--r--  g4f/gui/client/index.html                  6
-rw-r--r--  g4f/gui/client/static/css/style.css       19
-rw-r--r--  g4f/gui/client/static/js/chat.v1.js       73
-rw-r--r--  g4f/gui/server/android_gallery.py         67
-rw-r--r--  g4f/gui/server/api.py                     89
-rw-r--r--  g4f/gui/webview.py                         4
-rw-r--r--  requirements-min.txt                       3
-rw-r--r--  setup.py                                   1
12 files changed, 243 insertions, 53 deletions
diff --git a/.gitignore b/.gitignore
index b6c333e2..71d27a86 100644
--- a/.gitignore
+++ b/.gitignore
@@ -52,4 +52,5 @@ x.py
info.txt
local.py
*.gguf
-image.py \ No newline at end of file
+image.py
+.buildozer \ No newline at end of file
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index 7ff4d74b..0a0d2634 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -11,7 +11,7 @@ from aiohttp import ClientSession, ClientTimeout, BaseConnector, WSMsgType
from ..typing import AsyncResult, Messages, ImageType, Cookies
from ..image import ImageRequest
-from ..errors import ResponseStatusError
+from ..errors import ResponseStatusError, RateLimitError
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector, get_random_hex
from .bing.upload_image import upload_image
@@ -26,7 +26,7 @@ class Tones:
creative = "Creative"
balanced = "Balanced"
precise = "Precise"
- copilot = "Balanced"
+ copilot = "Copilot"
class Bing(AsyncGeneratorProvider, ProviderModelMixin):
"""
@@ -36,8 +36,8 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
working = True
supports_message_history = True
supports_gpt_4 = True
- default_model = "balanced"
- models = [key for key in Tones.__dict__ if not key.startswith("__")]
+ default_model = "Balanced"
+ models = [getattr(Tones, key) for key in Tones.__dict__ if not key.startswith("__")]
@classmethod
def create_async_generator(
@@ -72,7 +72,7 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
context = create_context(messages[:-1]) if len(messages) > 1 else None
if tone is None:
tone = tone if model.startswith("gpt-4") else model
- tone = cls.get_model("" if tone is None else tone.lower())
+ tone = cls.get_model("" if tone is None else tone)
gpt4_turbo = True if model.startswith("gpt-4-turbo") else False
return stream_generate(
@@ -258,7 +258,6 @@ class Defaults:
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': home,
- 'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
}
@@ -311,7 +310,7 @@ def create_message(
"allowedMessageTypes": Defaults.allowedMessageTypes,
"sliceIds": Defaults.sliceIds[tone],
"verbosity": "verbose",
- "scenario": "CopilotMicrosoftCom" if tone == "copilot" else "SERP",
+ "scenario": "CopilotMicrosoftCom" if tone == Tones.copilot else "SERP",
"plugins": [{"id": "c310c353-b9f0-4d76-ab0d-1dd5e979cf68", "category": 1}] if web_search else [],
"traceId": get_random_hex(40),
"conversationHistoryOptionsSets": ["autosave","savemem","uprofupd","uprofgen"],
@@ -329,7 +328,7 @@ def create_message(
"requestId": request_id,
"messageId": request_id
},
- "tone": getattr(Tones, tone),
+ "tone": "Balanced" if tone == Tones.copilot else tone,
"spokenTextMode": "None",
"conversationId": conversation.conversationId,
"participant": {"id": conversation.clientId}
@@ -412,10 +411,15 @@ async def stream_generate(
await asyncio.sleep(sleep_retry)
continue
- image_request = await upload_image(session, image, getattr(Tones, tone), headers) if image else None
+ image_request = await upload_image(
+ session,
+ image,
+ "Balanced" if Tones.copilot == "Copilot" else tone,
+ headers
+ ) if image else None
async with session.ws_connect(
'wss://s.copilot.microsoft.com/sydney/ChatHub'
- if tone == "copilot" else
+ if tone == "Copilot" else
'wss://sydney.bing.com/sydney/ChatHub',
autoping=False,
params={'sec_access_token': conversation.conversationSignature},
@@ -481,7 +485,7 @@ async def stream_generate(
max_retries -= 1
if max_retries < 1:
if result["value"] == "CaptchaChallenge":
- raise RuntimeError(f"{result['value']}: Use other cookies or/and ip address")
+ raise RateLimitError(f"{result['value']}: Use other cookies or/and ip address")
else:
raise RuntimeError(f"{result['value']}: {result['message']}")
if debug.logging:
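
Taken together, the Bing.py changes mean the provider's model list now carries the display names from Tones, and captcha challenges surface as RateLimitError instead of a bare RuntimeError. A minimal sketch of how a caller might exercise this, assuming the public g4f.ChatCompletion interface (the prompt text is illustrative):

import g4f
from g4f.errors import RateLimitError

try:
    response = g4f.ChatCompletion.create(
        model="Copilot",  # resolved via Bing.get_model(); "Creative", "Balanced", "Precise" also work
        provider=g4f.Provider.Bing,
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response)
except RateLimitError as error:
    # CaptchaChallenge now raises RateLimitError: rotate cookies/IP or slow down
    print(f"Rate limited: {error}")
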
diff --git a/g4f/Provider/bing/conversation.py b/g4f/Provider/bing/conversation.py
index 886efa68..de5716b7 100644
--- a/g4f/Provider/bing/conversation.py
+++ b/g4f/Provider/bing/conversation.py
@@ -2,6 +2,7 @@ from __future__ import annotations
from aiohttp import ClientSession
from ...requests import raise_for_status
+from ...errors import RateLimitError
class Conversation:
"""
@@ -36,6 +37,8 @@ async def create_conversation(session: ClientSession, headers: dict, tone: str)
else:
url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1"
async with session.get(url, headers=headers) as response:
+ if response.status == 404:
+ raise RateLimitError("Response 404: Do less requests and reuse conversations")
await raise_for_status(response, "Failed to create conversation")
data = await response.json()
conversationId = data.get('conversationId')
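
Because create_conversation can now raise RateLimitError on a 404, callers are expected to slow down and reuse existing conversations. A minimal sketch, assuming the caller already has valid Bing request headers (the 30-second back-off is illustrative):

import asyncio
from aiohttp import ClientSession
from g4f.errors import RateLimitError
from g4f.Provider.bing.conversation import create_conversation

async def new_conversation(headers: dict, tone: str = "Balanced"):
    async with ClientSession() as session:
        try:
            return await create_conversation(session, headers, tone)
        except RateLimitError:
            # a 404 now means "too many conversations": wait, then retry once
            await asyncio.sleep(30)
            return await create_conversation(session, headers, tone)
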
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 6601f500..8a5a03d4 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -450,7 +450,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
) as response:
cls._update_request_args(session)
await raise_for_status(response)
- async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, fields, websocket_request_id):
+ async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, fields):
if response_fields:
response_fields = False
yield fields
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index 9ce6b66a..6b9b1ab9 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -133,15 +133,15 @@
<div class="box input-box">
<textarea id="message-input" placeholder="Ask a question" cols="30" rows="10"
style="white-space: pre-wrap;resize: none;"></textarea>
- <label for="image" title="Works with Bing, Gemini, OpenaiChat and You">
+ <label class="file-label" for="image" title="Works with Bing, Gemini, OpenaiChat and You">
<input type="file" id="image" name="image" accept="image/*" required/>
<i class="fa-regular fa-image"></i>
</label>
- <label for="camera">
+ <label class="file-label" for="camera">
<input type="file" id="camera" name="camera" accept="image/*" capture="camera" required/>
<i class="fa-solid fa-camera"></i>
</label>
- <label for="file">
+ <label class="file-label" for="file">
<input type="file" id="file" name="file" accept="text/plain, text/html, text/xml, application/json, text/javascript, .sh, .py, .php, .css, .yaml, .sql, .log, .csv, .twig, .md" required/>
<i class="fa-solid fa-paperclip"></i>
</label>
diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css
index 936df0d2..28064159 100644
--- a/g4f/gui/client/static/css/style.css
+++ b/g4f/gui/client/static/css/style.css
@@ -482,25 +482,18 @@ body {
display: none;
}
-label[for="image"]:has(> input:valid){
- color: var(--accent);
-}
-
-label[for="camera"]:has(> input:valid){
- color: var(--accent);
-}
-
-label[for="file"]:has(> input:valid){
- color: var(--accent);
-}
-
-label[for="image"], label[for="file"], label[for="camera"] {
+.file-label {
cursor: pointer;
position: absolute;
top: 10px;
left: 10px;
}
+.file-label:has(> input:valid),
+.file-label.selected {
+ color: var(--accent);
+}
+
label[for="image"] {
top: 32px;
}
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 0da72988..bcef4a78 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -211,7 +211,7 @@ async function add_message_chunk(message) {
${message.provider.model ? ' with ' + message.provider.model : ''}
`
} else if (message.type == "message") {
- console.error(messag.message)
+ console.error(message.message)
} else if (message.type == "error") {
window.error = message.error
console.error(message.error);
@@ -240,6 +240,27 @@ async function add_message_chunk(message) {
}
}
+fileInput?.addEventListener("click", (e) => {
+ if (window?.pywebview) {
+ e.preventDefault();
+ pywebview.api.choose_file();
+ }
+})
+
+cameraInput?.addEventListener("click", (e) => {
+ if (window?.pywebview) {
+ e.preventDefault();
+ pywebview.api.take_picture();
+ }
+})
+
+imageInput?.addEventListener("click", (e) => {
+ if (window?.pywebview) {
+ e.preventDefault();
+ pywebview.api.choose_image();
+ }
+})
+
const ask_gpt = async () => {
regenerate.classList.add(`regenerate-hidden`);
messages = await get_messages(window.conversation_id);
@@ -307,8 +328,7 @@ const ask_gpt = async () => {
console.error(e);
if (e.name != "AbortError") {
error = true;
- text = "oops ! something went wrong, please try again / reload. [stacktrace in console]";
- content_inner.innerHTML = text;
+ content_inner.innerHTML += `<p><strong>An error occured:</strong> ${e}</p>`;
}
}
if (!error && text) {
@@ -592,7 +612,7 @@ document.getElementById("cancelButton").addEventListener("click", async () => {
console.log(`aborted ${window.conversation_id}`);
});
-document.getElementById(`regenerateButton`).addEventListener(`click`, async () => {
+document.getElementById("regenerateButton").addEventListener("click", async () => {
prompt_lock = true;
await hide_last_message(window.conversation_id);
window.token = message_id();
@@ -622,14 +642,20 @@ const message_id = () => {
async function hide_sidebar() {
sidebar.classList.remove("shown");
sidebar_button.classList.remove("rotated");
+ if (window.location.pathname == "/menu/") {
+ history.back();
+ }
}
+window.addEventListener('popstate', hide_sidebar, false);
+
sidebar_button.addEventListener("click", (event) => {
if (sidebar.classList.contains("shown")) {
hide_sidebar();
} else {
sidebar.classList.add("shown");
sidebar_button.classList.add("rotated");
+ history.pushState({}, null, "/menu/");
}
window.scrollTo(0, 0);
});
@@ -817,19 +843,6 @@ async function on_api() {
register_settings_storage();
- versions = await api("version");
- document.title = 'g4f - ' + versions["version"];
- let text = "version ~ "
- if (versions["version"] != versions["latest_version"]) {
- let release_url = 'https://github.com/xtekky/gpt4free/releases/tag/' + versions["latest_version"];
- let title = `New version: ${versions["latest_version"]}`;
- text += `<a href="${release_url}" target="_blank" title="${title}">${versions["version"]}</a> `;
- text += `<i class="fa-solid fa-rotate"></i>`
- } else {
- text += versions["version"];
- }
- document.getElementById("version_text").innerHTML = text
-
models = await api("models");
models.forEach((model) => {
let option = document.createElement("option");
@@ -845,8 +858,24 @@ async function on_api() {
})
await load_provider_models(appStorage.getItem("provider"));
- load_settings_storage()
+ await load_settings_storage()
+}
+
+async function load_version() {
+ const versions = await api("version");
+ document.title = 'g4f - ' + versions["version"];
+ let text = "version ~ "
+ if (versions["version"] != versions["latest_version"]) {
+ let release_url = 'https://github.com/xtekky/gpt4free/releases/tag/' + versions["latest_version"];
+ let title = `New version: ${versions["latest_version"]}`;
+ text += `<a href="${release_url}" target="_blank" title="${title}">${versions["version"]}</a> `;
+ text += `<i class="fa-solid fa-rotate"></i>`
+ } else {
+ text += versions["version"];
+ }
+ document.getElementById("version_text").innerHTML = text
}
+setTimeout(load_version, 5000);
for (const el of [imageInput, cameraInput]) {
el.addEventListener('click', async () => {
@@ -913,13 +942,13 @@ function get_selected_model() {
async function api(ressource, args=null, file=null) {
if (window?.pywebview) {
- if (args) {
+ if (args !== null) {
if (ressource == "models") {
ressource = "provider_models";
}
- return pywebview.api["get_" + ressource](args);
+ return pywebview.api[`get_${ressource}`](args);
}
- return pywebview.api["get_" + ressource]();
+ return pywebview.api[`get_${ressource}`]();
}
if (ressource == "models" && args) {
ressource = `${ressource}/${args}`;
@@ -930,7 +959,7 @@ async function api(ressource, args=null, file=null) {
const headers = {
accept: 'text/event-stream'
}
- if (file) {
+ if (file !== null) {
const formData = new FormData();
formData.append('file', file);
formData.append('json', body);
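
The api() helper above relies on every get_* method of the Python Api object being exposed to JavaScript through pywebview's js_api mechanism. A minimal sketch of that bridge, with a stubbed Api class standing in for g4f.gui.server.api.Api (the return values are placeholders):

import webview

class Api:
    # methods on the js_api object become awaitable functions on pywebview.api
    def get_version(self):
        return {"version": "0.0.0", "latest_version": "0.0.0"}

    def get_models(self):
        return ["gpt-3.5-turbo", "gpt-4"]

window = webview.create_window("g4f", "client/index.html", js_api=Api())
webview.start()
# from JavaScript: await pywebview.api.get_version()  ->  {"version": "0.0.0", ...}
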
diff --git a/g4f/gui/server/android_gallery.py b/g4f/gui/server/android_gallery.py
new file mode 100644
index 00000000..9101bc02
--- /dev/null
+++ b/g4f/gui/server/android_gallery.py
@@ -0,0 +1,67 @@
+from kivy.logger import Logger
+from kivy.clock import Clock
+
+from jnius import autoclass
+from jnius import cast
+from android import activity
+
+PythonActivity = autoclass('org.kivy.android.PythonActivity')
+Intent = autoclass('android.content.Intent')
+Uri = autoclass('android.net.Uri')
+
+MEDIA_DATA = "_data"
+RESULT_LOAD_IMAGE = 1
+
+Activity = autoclass('android.app.Activity')
+
+def user_select_image(on_selection):
+ """Open Gallery Activity and call callback with absolute image filepath of image user selected.
+ None if user canceled.
+ """
+
+ currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
+
+ # Forum discussion: https://groups.google.com/forum/#!msg/kivy-users/bjsG2j9bptI/-Oe_aGo0newJ
+ def on_activity_result(request_code, result_code, intent):
+ if request_code != RESULT_LOAD_IMAGE:
+ Logger.warning('user_select_image: ignoring activity result that was not RESULT_LOAD_IMAGE')
+ return
+
+ if result_code == Activity.RESULT_CANCELED:
+ Clock.schedule_once(lambda dt: on_selection(None), 0)
+ return
+
+ if result_code != Activity.RESULT_OK:
+ # This may just go into the void...
+ raise NotImplementedError('Unknown result_code "{}"'.format(result_code))
+
+ selectedImage = intent.getData(); # Uri
+ filePathColumn = [MEDIA_DATA]; # String[]
+ # Cursor
+ cursor = currentActivity.getContentResolver().query(selectedImage,
+ filePathColumn, None, None, None);
+ cursor.moveToFirst();
+
+ # int
+ columnIndex = cursor.getColumnIndex(filePathColumn[0]);
+ # String
+ picturePath = cursor.getString(columnIndex);
+ cursor.close();
+ Logger.info('android_ui: user_select_image() selected %s', picturePath)
+
+ # This is possibly in a different thread?
+ Clock.schedule_once(lambda dt: on_selection(picturePath), 0)
+
+ # See: http://pyjnius.readthedocs.org/en/latest/android.html
+ activity.bind(on_activity_result=on_activity_result)
+
+ intent = Intent()
+
+ # http://programmerguru.com/android-tutorial/how-to-pick-image-from-gallery/
+ # http://stackoverflow.com/questions/18416122/open-gallery-app-in-android
+ intent.setAction(Intent.ACTION_PICK)
+ # TODO internal vs external?
+ intent.setData(Uri.parse('content://media/internal/images/media'))
+ # TODO setType(Image)?
+
+ currentActivity.startActivityForResult(intent, RESULT_LOAD_IMAGE) \ No newline at end of file
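
A minimal usage sketch for the new helper, assuming it runs inside a python-for-android activity where jnius and the android module are importable (the print callbacks are placeholders):

from g4f.gui.server.android_gallery import user_select_image

def on_selection(path):
    if path is None:
        print("selection cancelled")
    else:
        print("selected image:", path)

user_select_image(on_selection)  # opens the gallery; the callback fires on the Kivy clock
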
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index 4dfc43d4..43bb4250 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -1,11 +1,40 @@
+from __future__ import annotations
+
import logging
import json
+import os.path
from typing import Iterator
+from uuid import uuid4
+from functools import partial
try:
import webview
+ import platformdirs
except ImportError:
...
+try:
+ from plyer import camera
+ from plyer import filechooser
+ has_plyer = True
+except ImportError:
+ has_plyer = False
+try:
+ from android.runnable import run_on_ui_thread
+ from android.storage import app_storage_path
+ from android.permissions import request_permissions, Permission
+ from android.permissions import _RequestPermissionsManager
+ _RequestPermissionsManager.register_callback()
+ from .android_gallery import user_select_image
+ has_android = True
+except ImportError:
+ run_on_ui_thread = lambda a : a
+ app_storage_path = platformdirs.user_pictures_dir
+ user_select_image = partial(
+ filechooser.open_file,
+ path=platformdirs.user_pictures_dir(),
+ filters=[["Image", "*.jpg", "*.jpeg", "*.png", "*.webp", "*.svg"]],
+ )
+ has_android = False
from g4f import version, models
from g4f import get_last_provider, ChatCompletion
@@ -75,13 +104,71 @@ class Api():
return {'title': ''}
def get_conversation(self, options: dict, **kwargs) -> Iterator:
- window = webview.active_window()
+ window = webview.windows[0]
+ if hasattr(self, "image") and self.image is not None:
+ kwargs["image"] = open(self.image, "rb")
for message in self._create_response_stream(
self._prepare_conversation_kwargs(options, kwargs),
options.get("conversation_id")
):
if not window.evaluate_js(f"if (!this.abort) this.add_message_chunk({json.dumps(message)}); !this.abort && !this.error;"):
break
+ self.image = None
+ self.set_selected(None)
+
+ @run_on_ui_thread
+ def choose_file(self):
+ self.request_permissions()
+ filechooser.open_file(
+ path=platformdirs.user_pictures_dir(),
+ on_selection=print
+ )
+
+ @run_on_ui_thread
+ def choose_image(self):
+ self.request_permissions()
+ user_select_image(
+ on_selection=self.on_image_selection
+ )
+
+ @run_on_ui_thread
+ def take_picture(self):
+ self.request_permissions()
+ filename = os.path.join(app_storage_path(), f"chat-{uuid4()}.png")
+ camera.take_picture(filename=filename, on_complete=self.on_camera)
+
+ def on_image_selection(self, filename):
+ if filename is not None and os.path.exists(filename):
+ self.image = filename
+ else:
+ self.image = None
+ self.set_selected(None if self.image is None else "image")
+
+ def on_camera(self, filename):
+ if filename is not None and os.path.exists(filename):
+ self.image = filename
+ else:
+ self.image = None
+ self.set_selected(None if self.image is None else "camera")
+
+ def set_selected(self, input_id: str = None):
+ window = webview.windows[0]
+ if window is not None:
+ window.evaluate_js(
+ f"document.querySelector(`.file-label.selected`)?.classList.remove(`selected`);"
+ )
+ if input_id is not None and input_id in ("image", "camera"):
+ window.evaluate_js(
+ f'document.querySelector(`label[for="{input_id}"]`)?.classList.add(`selected`);'
+ )
+
+ def request_permissions(self):
+ if has_android:
+ request_permissions([
+ Permission.CAMERA,
+ Permission.READ_EXTERNAL_STORAGE,
+ Permission.WRITE_EXTERNAL_STORAGE
+ ])
def _prepare_conversation_kwargs(self, json_data: dict, kwargs: dict):
"""
diff --git a/g4f/gui/webview.py b/g4f/gui/webview.py
index ba764947..36ad0e60 100644
--- a/g4f/gui/webview.py
+++ b/g4f/gui/webview.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import sys
import os.path
import webview
@@ -20,6 +22,8 @@ def run_webview(
dirname = sys._MEIPASS
else:
dirname = os.path.dirname(__file__)
+ webview.settings['OPEN_EXTERNAL_LINKS_IN_BROWSER'] = False
+ webview.settings['ALLOW_DOWNLOADS'] = True
webview.create_window(
f"g4f - {g4f.version.utils.current_version}",
os.path.join(dirname, "client/index.html"),
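
Both settings are applied before the window is created, so they affect every g4f webview session: downloads are allowed and external links no longer open outside the window. A minimal launch sketch, assuming run_webview keeps defaults for all of its parameters:

from g4f.gui.webview import run_webview

run_webview()
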
diff --git a/requirements-min.txt b/requirements-min.txt
index a0402ccc..512ab5bb 100644
--- a/requirements-min.txt
+++ b/requirements-min.txt
@@ -1,2 +1,3 @@
requests
-aiohttp \ No newline at end of file
+aiohttp
+brotli \ No newline at end of file
diff --git a/setup.py b/setup.py
index e653620a..7ab9532a 100644
--- a/setup.py
+++ b/setup.py
@@ -11,6 +11,7 @@ with codecs.open(os.path.join(here, 'README.md'), encoding='utf-8') as fh:
INSTALL_REQUIRE = [
"requests",
"aiohttp",
+ "brotli"
]
EXTRA_REQUIRE = {
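
The new brotli requirement pairs with the removal of the hard-coded accept-encoding header in Bing.py: aiohttp only negotiates and transparently decodes Content-Encoding: br responses when the brotli package is available. A minimal check, with bing.com used only as an illustrative host:

import asyncio
from aiohttp import ClientSession

async def main():
    async with ClientSession() as session:
        # with brotli installed, aiohttp advertises "br" and decodes it transparently
        async with session.get("https://www.bing.com/") as response:
            print(response.headers.get("Content-Encoding"))
            print(len(await response.text()))

asyncio.run(main())
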