summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--g4f/Provider/GeminiPro.py16
-rw-r--r--g4f/Provider/Liaobots.py2
-rw-r--r--g4f/gui/client/js/chat.v1.js37
3 files changed, 30 insertions, 25 deletions
diff --git a/g4f/Provider/GeminiPro.py b/g4f/Provider/GeminiPro.py
index 792cd5d1..87ded3ac 100644
--- a/g4f/Provider/GeminiPro.py
+++ b/g4f/Provider/GeminiPro.py
@@ -32,17 +32,20 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
model = "gemini-pro-vision" if not model and image else model
model = cls.get_model(model)
- if not api_key and not api_base:
- raise MissingAuthError('Missing "api_key" or "api_base"')
+ if not api_key:
+ raise MissingAuthError('Missing "api_key"')
if not api_base:
api_base = f"https://generativelanguage.googleapis.com/v1beta"
method = "streamGenerateContent" if stream else "generateContent"
url = f"{api_base.rstrip('/')}/models/{model}:{method}"
- if api_key:
+ headers = None
+ if api_base:
+            headers = {"Authorization": f"Bearer {api_key}"}
+ else:
url += f"?key={api_key}"
- async with ClientSession() as session:
+ async with ClientSession(headers=headers) as session:
contents = [
{
"role": "model" if message["role"] == "assistant" else message["role"],
@@ -79,12 +82,11 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
lines = [b"{\n"]
elif chunk == b",\r\n" or chunk == b"]":
try:
- data = b"".join(lines)
- data = json.loads(data)
+ data = json.loads(b"".join(lines))
yield data["candidates"][0]["content"]["parts"][0]["text"]
except:
data = data.decode() if isinstance(data, bytes) else data
- raise RuntimeError(f"Read chunk failed. data: {data}")
+ raise RuntimeError(f"Read chunk failed: {data}")
lines = []
else:
lines.append(chunk)
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index e93642ba..54bf7f2e 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -78,7 +78,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
supports_gpt_35_turbo = True
supports_gpt_4 = True
default_model = "gpt-3.5-turbo"
- models = [m for m in models]
+ models = list(models)
model_aliases = {
"claude-v2": "claude-2"
}
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index e5b2d653..c727dbf9 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -3,7 +3,6 @@ const markdown = window.markdownit();
const message_box = document.getElementById(`messages`);
const message_input = document.getElementById(`message-input`);
const box_conversations = document.querySelector(`.top`);
-const spinner = box_conversations.querySelector(".spinner");
const stop_generating = document.querySelector(`.stop_generating`);
const regenerate = document.querySelector(`.regenerate`);
const send_button = document.querySelector(`#send-button`);
@@ -71,6 +70,7 @@ const handle_ask = async () => {
message_input.style.height = `82px`;
message_input.focus();
window.scrollTo(0, 0);
+
message = message_input.value
if (message.length > 0) {
message_input.value = '';
@@ -292,13 +292,16 @@ const ask_gpt = async () => {
if (!error) {
await add_message(window.conversation_id, "assistant", text, provider);
await load_conversation(window.conversation_id);
+ } else {
+ let cursorDiv = document.getElementById(`cursor`);
+ if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);
}
message_box.scrollTop = message_box.scrollHeight;
await remove_cancel_button();
await register_remove_message();
prompt_lock = false;
window.scrollTo(0, 0);
- await load_conversations(20, 0);
+ await load_conversations();
regenerate.classList.remove(`regenerate-hidden`);
};
@@ -357,7 +360,7 @@ const delete_conversation = async (conversation_id) => {
await new_conversation();
}
- await load_conversations(20, 0, true);
+ await load_conversations();
};
const set_conversation = async (conversation_id) => {
@@ -366,7 +369,7 @@ const set_conversation = async (conversation_id) => {
await clear_conversation();
await load_conversation(conversation_id);
- await load_conversations(20, 0, true);
+ await load_conversations();
};
const new_conversation = async () => {
@@ -374,7 +377,7 @@ const new_conversation = async () => {
window.conversation_id = uuid();
await clear_conversation();
- await load_conversations(20, 0, true);
+ await load_conversations();
await say_hello()
};
@@ -439,14 +442,14 @@ function count_words(text) {
}
function count_tokens(model, text) {
- if (model.startsWith("gpt-3") || model.startsWith("gpt-4")) {
- return GPTTokenizer_cl100k_base?.encode(text).length
+ if (model.startsWith("gpt-3") || model.startsWith("gpt-4") || model.startsWith("text-davinci")) {
+ return GPTTokenizer_cl100k_base?.encode(text).length;
}
if (model.startsWith("llama2") || model.startsWith("codellama")) {
- return llamaTokenizer?.encode(text).length
+ return llamaTokenizer?.encode(text).length;
}
if (model.startsWith("mistral") || model.startsWith("mixtral")) {
- return mistralTokenizer?.encode(text).length
+ return mistralTokenizer?.encode(text).length;
}
}
@@ -530,7 +533,7 @@ const add_message = async (conversation_id, role, content, provider) => {
return conversation.items.length - 1;
};
-const load_conversations = async (limit, offset, loader) => {
+const load_conversations = async () => {
let conversations = [];
for (let i = 0; i < localStorage.length; i++) {
if (localStorage.key(i).startsWith("conversation:")) {
@@ -554,7 +557,6 @@ const load_conversations = async (limit, offset, loader) => {
</div>
`;
}
-
};
document.getElementById(`cancelButton`).addEventListener(`click`, async () => {
@@ -697,10 +699,8 @@ window.onload = async () => {
}
}
- if (conversations == 0) localStorage.clear();
-
await setTimeout(() => {
- load_conversations(20, 0);
+ load_conversations();
}, 1);
if (/\/chat\/.+/.test(window.location.href)) {
@@ -780,15 +780,17 @@ observer.observe(message_input, { attributes: true });
versions = await response.json()
document.title = 'g4f - gui - ' + versions["version"];
- text = "version ~ "
+ let text = "version ~ "
if (versions["version"] != versions["latest_version"]) {
- release_url = 'https://github.com/xtekky/gpt4free/releases/tag/' + versions["latest_version"];
- text += '<a href="' + release_url +'" target="_blank" title="New version: ' + versions["latest_version"] +'">' + versions["version"] + ' 🆕</a>';
+ let release_url = 'https://github.com/xtekky/gpt4free/releases/tag/' + versions["latest_version"];
+ let title = `New version: ${versions["latest_version"]}`;
+ text += `<a href="${release_url}" target="_blank" title="${title}">${versions["version"]} 🆕</a>`;
} else {
text += versions["version"];
}
document.getElementById("version_text").innerHTML = text
})()
+
for (const el of [imageInput, cameraInput]) {
el.addEventListener('click', async () => {
el.value = '';
@@ -798,6 +800,7 @@ for (const el of [imageInput, cameraInput]) {
}
});
}
+
fileInput.addEventListener('click', async (event) => {
fileInput.value = '';
delete fileInput.dataset.text;