Diffstat
-rw-r--r--  .gitignore                            |  3
-rw-r--r--  README.md                             |  7
-rw-r--r--  g4f/Provider/needs_auth/Gemini.py     |  9
-rw-r--r--  g4f/client/async_client.py            |  3
-rw-r--r--  g4f/gui/client/index.html             |  4
-rw-r--r--  g4f/gui/client/static/css/style.css   | 42
-rw-r--r--  g4f/gui/client/static/js/chat.v1.js   | 24
-rw-r--r--  g4f/gui/server/api.py                 | 46
-rw-r--r--  g4f/gui/server/backend.py             |  8
-rw-r--r--  generated_images/.gitkeep             |  0
10 files changed, 109 insertions(+), 37 deletions(-)
diff --git a/.gitignore b/.gitignore
index 3803a710..8d678a0f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -64,4 +64,5 @@ dist.py
x.txt
bench.py
to-reverse.txt
-g4f/Provider/OpenaiChat2.py
\ No newline at end of file
+g4f/Provider/OpenaiChat2.py
+generated_images/
\ No newline at end of file
diff --git a/README.md b/README.md
index 65baf7ad..ddcaf318 100644
--- a/README.md
+++ b/README.md
@@ -92,7 +92,12 @@ As per the survey, here is a list of improvements to come
```sh
docker pull hlohaus789/g4f
-docker run -p 8080:8080 -p 1337:1337 -p 7900:7900 --shm-size="2g" -v ${PWD}/har_and_cookies:/app/har_and_cookies hlohaus789/g4f:latest
+docker run \
+ -p 8080:8080 -p 1337:1337 -p 7900:7900 \
+ --shm-size="2g" \
+ -v ${PWD}/har_and_cookies:/app/har_and_cookies \
+ -v ${PWD}/generated_images:/app/generated_images \
+ hlohaus789/g4f:latest
```
3. **Access the Client:**
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index f9b1c4a5..71cc8d81 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -4,7 +4,6 @@ import os
import json
import random
import re
-import base64
from aiohttp import ClientSession, BaseConnector
@@ -193,14 +192,10 @@ class Gemini(AsyncGeneratorProvider):
yield content
if image_prompt:
images = [image[0][3][3] for image in response_part[4][0][12][7][0]]
- resolved_images = []
if response_format == "b64_json":
- for image in images:
- async with client.get(image) as response:
- data = base64.b64encode(await response.content.read()).decode()
- resolved_images.append(data)
- yield ImageDataResponse(resolved_images, image_prompt)
+ yield ImageResponse(images, image_prompt, {"cookies": cls._cookies})
else:
+ resolved_images = []
preview = []
for image in images:
async with client.get(image, allow_redirects=False) as fetch:
diff --git a/g4f/client/async_client.py b/g4f/client/async_client.py
index 9849c565..dbfa6b70 100644
--- a/g4f/client/async_client.py
+++ b/g4f/client/async_client.py
@@ -171,7 +171,8 @@ async def iter_image_response(
if isinstance(chunk, ImageProviderResponse):
if response_format == "b64_json":
async with ClientSession(
- connector=get_connector(connector, proxy)
+ connector=get_connector(connector, proxy),
+ cookies=chunk.options.get("cookies")
) as session:
async def fetch_image(image):
async with session.get(image) as response:
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index 3c428f38..a2f883d9 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -32,10 +32,10 @@
<script type="module" src="https://cdn.jsdelivr.net/npm/mistral-tokenizer-js" async>
import mistralTokenizer from "mistral-tokenizer-js"
</script>
- <script type="module" src="https://belladoreai.github.io/llama-tokenizer-js/llama-tokenizer.js" async>
+ <script type="module" src="https://cdn.jsdelivr.net/gh/belladoreai/llama-tokenizer-js@master/llama-tokenizer.js" async>
import llamaTokenizer from "llama-tokenizer-js"
</script>
- <script src="https://unpkg.com/gpt-tokenizer/dist/cl100k_base.js" async></script>
+ <script src="https://cdn.jsdelivr.net/npm/gpt-tokenizer/dist/cl100k_base.js" async></script>
<script src="/static/js/text_to_speech/index.js" async></script>
<!--
<script src="/static/js/whisper-web/index.js" async></script>
diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css
index 200a79d4..e8ed0a6f 100644
--- a/g4f/gui/client/static/css/style.css
+++ b/g4f/gui/client/static/css/style.css
@@ -265,6 +265,14 @@ body {
padding-bottom: 0;
}
+.message.print {
+ height: 100%;
+ position: absolute;
+ background-color: #fff;
+ z-index: 100;
+ top: 0;
+}
+
.message.regenerate {
opacity: 0.75;
}
@@ -339,14 +347,14 @@ body {
flex-wrap: wrap;
}
-.message .content,
-.message .content a:link,
-.message .content a:visited{
+.message .content_inner,
+.message .content_inner a:link,
+.message .content_inner a:visited{
font-size: 15px;
line-height: 1.3;
color: var(--colour-3);
}
-.message .content pre{
+.message .content_inner pre{
white-space: pre-wrap;
}
@@ -389,19 +397,19 @@ body {
.message .count .fa-clipboard,
.message .count .fa-volume-high,
-.message .count .fa-rotate {
+.message .count .fa-rotate,
+.message .count .fa-print {
z-index: 1000;
cursor: pointer;
}
-.message .count .fa-clipboard {
+.message .count .fa-clipboard,
+.message .count .fa-whatsapp {
color: var(--colour-3);
}
-.message .count .fa-clipboard.clicked {
- color: var(--accent);
-}
-
+.message .count .fa-clipboard.clicked,
+.message .count .fa-print.clicked,
.message .count .fa-volume-high.active {
color: var(--accent);
}
@@ -1121,4 +1129,18 @@ a:-webkit-any-link {
50% {
opacity: 0;
}
+}
+
+@media print {
+ #systemPrompt:placeholder-shown,
+ .conversations,
+ .conversation .user-input,
+ .conversation .buttons,
+ .conversation .toolbar,
+ .conversation .slide-systemPrompt,
+ .message .count i,
+ .message .assistant,
+ .message .user {
+ display: none;
+ }
}
\ No newline at end of file
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 47bfdd3b..9790b261 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -192,6 +192,26 @@ const register_message_buttons = async () => {
})
}
});
+ document.querySelectorAll(".message .fa-whatsapp").forEach(async (el) => {
+ if (!el.parentElement.href) {
+ const text = el.parentElement.parentElement.parentElement.innerText;
+ el.parentElement.href = `https://wa.me/?text=${encodeURIComponent(text)}`;
+ }
+ });
+ document.querySelectorAll(".message .fa-print").forEach(async (el) => {
+ if (!("click" in el.dataset)) {
+ el.dataset.click = "true";
+ el.addEventListener("click", async () => {
+ const message_el = el.parentElement.parentElement.parentElement;
+ el.classList.add("clicked");
+ message_box.scrollTop = 0;
+ message_el.classList.add("print");
+ setTimeout(() => el.classList.remove("clicked"), 1000);
+ setTimeout(() => message_el.classList.remove("print"), 1000);
+ window.print()
+ })
+ }
+ });
}
const delete_conversations = async () => {
@@ -253,6 +273,8 @@ const handle_ask = async () => {
${count_words_and_tokens(message, get_selected_model())}
<i class="fa-solid fa-volume-high"></i>
<i class="fa-regular fa-clipboard"></i>
+ <a><i class="fa-brands fa-whatsapp"></i></a>
+ <i class="fa-solid fa-print"></i>
</div>
</div>
</div>
@@ -625,6 +647,8 @@ const load_conversation = async (conversation_id, scroll=true) => {
${count_words_and_tokens(item.content, next_provider?.model)}
<i class="fa-solid fa-volume-high"></i>
<i class="fa-regular fa-clipboard"></i>
+ <a><i class="fa-brands fa-whatsapp"></i></a>
+ <i class="fa-solid fa-print"></i>
</div>
</div>
</div>
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index 020b2090..3da0fe17 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -1,18 +1,27 @@
from __future__ import annotations
import logging
-import json
-from typing import Iterator
+import os
+import os.path
+import uuid
+import asyncio
+import time
+from aiohttp import ClientSession
+from typing import Iterator, Optional
+from flask import send_from_directory
from g4f import version, models
from g4f import get_last_provider, ChatCompletion
from g4f.errors import VersionNotFoundError
-from g4f.image import ImagePreview
+from g4f.typing import Cookies
+from g4f.image import ImagePreview, ImageResponse, is_accepted_format
+from g4f.requests.aiohttp import get_connector
from g4f.Provider import ProviderType, __providers__, __map__
from g4f.providers.base_provider import ProviderModelMixin, FinishReason
from g4f.providers.conversation import BaseConversation
conversations: dict[dict[str, BaseConversation]] = {}
+images_dir = "./generated_images"
class Api():
@@ -110,14 +119,8 @@ class Api():
"latest_version": version.utils.latest_version,
}
- def generate_title(self):
- """
- Generates and returns a title based on the request data.
-
- Returns:
- dict: A dictionary with the generated title.
- """
- return {'title': ''}
+ def serve_images(self, name):
+ return send_from_directory(os.path.abspath(images_dir), name)
def _prepare_conversation_kwargs(self, json_data: dict, kwargs: dict):
"""
@@ -185,6 +188,27 @@ class Api():
yield self._format_json("message", get_error_message(chunk))
elif isinstance(chunk, ImagePreview):
yield self._format_json("preview", chunk.to_string())
+ elif isinstance(chunk, ImageResponse):
+ async def copy_images(images: list[str], cookies: Optional[Cookies] = None):
+ async with ClientSession(
+ connector=get_connector(None, os.environ.get("G4F_PROXY")),
+ cookies=cookies
+ ) as session:
+ async def copy_image(image):
+ async with session.get(image) as response:
+ target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
+ with open(target, "wb") as f:
+ async for chunk in response.content.iter_any():
+ f.write(chunk)
+ with open(target, "rb") as f:
+ extension = is_accepted_format(f.read(12)).split("/")[-1]
+ extension = "jpg" if extension == "jpeg" else extension
+ new_target = f"{target}.{extension}"
+ os.rename(target, new_target)
+ return f"/images/{os.path.basename(new_target)}"
+ return await asyncio.gather(*[copy_image(image) for image in images])
+ images = asyncio.run(copy_images(chunk.get_list(), chunk.options.get("cookies")))
+ yield self._format_json("content", str(ImageResponse(images, chunk.alt)))
elif not isinstance(chunk, FinishReason):
yield self._format_json("content", str(chunk))
except Exception as e:
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index d9e31c0e..dc1b1080 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -47,13 +47,13 @@ class Backend_Api(Api):
'function': self.handle_conversation,
'methods': ['POST']
},
- '/backend-api/v2/gen.set.summarize:title': {
- 'function': self.generate_title,
- 'methods': ['POST']
- },
'/backend-api/v2/error': {
'function': self.handle_error,
'methods': ['POST']
+ },
+ '/images/<path:name>': {
+ 'function': self.serve_images,
+ 'methods': ['GET']
}
}
diff --git a/generated_images/.gitkeep b/generated_images/.gitkeep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/generated_images/.gitkeep