Diffstat
-rw-r--r--  g4f/Provider/FlowGpt.py               | 75
-rw-r--r--  g4f/Provider/__init__.py              |  1
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py |  2
-rw-r--r--  g4f/api/__init__.py                   |  5
-rw-r--r--  g4f/gui/client/css/style.css          | 26
-rw-r--r--  g4f/gui/client/html/index.html        | 13
-rw-r--r--  g4f/gui/client/js/chat.v1.js          | 75
7 files changed, 169 insertions(+), 28 deletions(-)
diff --git a/g4f/Provider/FlowGpt.py b/g4f/Provider/FlowGpt.py
new file mode 100644
index 00000000..39192bf9
--- /dev/null
+++ b/g4f/Provider/FlowGpt.py
@@ -0,0 +1,75 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://flowgpt.com/chat"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    supports_message_history = True
+    default_model = "gpt-3.5-turbo"
+    models = [
+        "gpt-4",
+        "gpt-3.5-turbo",
+        "gpt-3.5-long",
+        "google-gemini",
+        "claude-v2",
+        "llama2-13b"
+    ]
+    model_aliases = {
+        "gemini": "google-gemini",
+        "gemini-pro": "google-gemini"
+    }
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
+            "Accept": "*/*",
+            "Accept-Language": "en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": "https://flowgpt.com/",
+            "Content-Type": "application/json",
+            "Authorization": "Bearer null",
+            "Origin": "https://flowgpt.com",
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-site",
+            "TE": "trailers"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "model": model,
+                "nsfw": False,
+                "question": messages[-1]["content"],
+                "history": [{"role": "assistant", "content": "Hello, how can I help you today?"}, *messages[:-1]],
+                "system": kwargs.get("system_message", "You are helpful assistant. Follow the user's instructions carefully."),
+                "temperature": kwargs.get("temperature", 0.7),
+                "promptId": f"model-{model}",
+                "documentIds": [],
+                "chatFileDocumentIds": [],
+                "generateImage": False,
+                "generateAudio": False
+            }
+            async with session.post("https://backend-k8s.flowgpt.com/v2/chat-anonymous", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content:
+                    if chunk.strip():
+                        message = json.loads(chunk)
+                        if "event" not in message:
+                            continue
+                        if message["event"] == "text":
+                            yield message["data"]
\ No newline at end of file
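For orientation, a minimal sketch of how the new provider can be exercised through g4f's public API. The create_async entry point and message shape follow the library's conventions at this point in its history and are not part of the commit itself:

import asyncio

import g4f
from g4f.Provider import FlowGpt

async def main():
    # Only the last message becomes "question"; earlier messages are
    # forwarded by the provider as "history".
    answer = await g4f.ChatCompletion.create_async(
        model="gpt-3.5-turbo",  # or e.g. "gemini", aliased to "google-gemini"
        provider=FlowGpt,
        messages=[{"role": "user", "content": "Hello, who are you?"}],
    )
    print(answer)

asyncio.run(main())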
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 270b6356..6cdc8806 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -31,6 +31,7 @@ from .ChatgptX import ChatgptX
 from .Chatxyz import Chatxyz
 from .DeepInfra import DeepInfra
 from .FakeGpt import FakeGpt
+from .FlowGpt import FlowGpt
 from .FreeChatgpt import FreeChatgpt
 from .FreeGpt import FreeGpt
 from .GeekGpt import GeekGpt
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 001f5a3c..556c3d9b 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -336,7 +336,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         if cls._args is None and cookies is None:
             cookies = get_cookies("chat.openai.com", False)
         api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
-        if api_key is None:
+        if api_key is None and cookies is not None:
             api_key = cookies["access_token"] if "access_token" in cookies else api_key
         if cls._args is None:
             cls._args = {
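The OpenaiChat change is a crash fix: get_cookies can come back empty-handed, and the old code then evaluated the membership test "access_token" in cookies with cookies set to None, which raises a TypeError. A toy illustration of the guarded lookup (names reused from the diff, values hypothetical):

# Membership tests on None raise TypeError, hence the added guard.
cookies = None  # what an empty cookie lookup effectively yields
api_key = None
if api_key is None and cookies is not None:
    api_key = cookies.get("access_token", api_key)  # only reached when cookies exist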
src="https://cdn.jsdelivr.net/npm/mistral-tokenizer-js" async> + import mistralTokenizer from 'mistral-tokenizer-js' + </script> + <script type="module" src="https://belladoreai.github.io/llama-tokenizer-js/llama-tokenizer.js" async> + import llamaTokenizer from 'llama-tokenizer-js' + </script> + <script src="https://unpkg.com/gpt-tokenizer/dist/cl100k_base.js" async></script> <script> - const user_image = `<img src="/assets/img/user.png" alt="your avatar">`; - const gpt_image = `<img src="/assets/img/gpt.png" alt="your avatar">`; + const user_image = '<img src="/assets/img/user.png" alt="your avatar">'; + const gpt_image = '<img src="/assets/img/gpt.png" alt="your avatar">'; </script> <style> .hljs { diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js index 57af298b..9585ca98 100644 --- a/g4f/gui/client/js/chat.v1.js +++ b/g4f/gui/client/js/chat.v1.js @@ -11,7 +11,7 @@ const imageInput = document.querySelector('#image'); const cameraInput = document.querySelector('#camera'); const fileInput = document.querySelector('#file'); -let prompt_lock = false; +let prompt_lock = false; hljs.addPlugin(new CopyButtonPlugin()); @@ -120,16 +120,8 @@ const remove_cancel_button = async () => { }, 300); }; -const ask_gpt = async () => { - regenerate.classList.add(`regenerate-hidden`); - messages = await get_messages(window.conversation_id); - - // Remove history, if it is selected - if (document.getElementById('history')?.checked) { - messages = [messages[messages.length-1]] - } - - new_messages = []; +const filter_messages = (messages) => { + let new_messages = []; for (i in messages) { new_message = messages[i]; // Remove generated images from history @@ -143,6 +135,19 @@ const ask_gpt = async () => { new_messages.push(new_message) } } + return new_messages; +} + +const ask_gpt = async () => { + regenerate.classList.add(`regenerate-hidden`); + messages = await get_messages(window.conversation_id); + total_messages = messages.length; + + // Remove history, if it is selected + if (document.getElementById('history')?.checked) { + messages = [messages[messages.length-1]]; + } + messages = filter_messages(messages); window.scrollTo(0, 0); window.controller = new AbortController(); @@ -159,8 +164,11 @@ const ask_gpt = async () => { await new Promise((r) => setTimeout(r, 500)); window.scrollTo(0, 0); + el = message_box.querySelector('.count_total'); + el ? 
diff --git a/g4f/gui/client/css/style.css b/g4f/gui/client/css/style.css
index aab7e555..bd42280d 100644
--- a/g4f/gui/client/css/style.css
+++ b/g4f/gui/client/css/style.css
@@ -129,7 +129,7 @@ body {
     flex-direction: column;
     overflow: auto;
     overflow-wrap: break-word;
-    padding-bottom: 50px;
+    padding-bottom: 20px;
 }
 
 .conversation .user-input {
@@ -291,7 +291,7 @@ body {
 .message .content {
     display: flex;
     flex-direction: column;
-    gap: 18px;
+    gap: 10px;
 }
 
 .message .content,
@@ -343,6 +343,26 @@ body {
     display: block;
 }
 
+.message .content .provider a,
+.message .content .provider {
+    font-size: 12px;
+    text-decoration: none;
+}
+
+.message .content .provider a {
+    font-weight: bold;
+}
+
+.message .content .count {
+    font-size: 12px;
+}
+
+.count_total {
+    font-size: 12px;
+    padding-left: 100px;
+    padding-top: 10px;
+}
+
 .new_convo {
     padding: 8px 12px;
     display: flex;
@@ -367,7 +387,7 @@ body {
 .stop_generating, .regenerate {
     position: absolute;
-    bottom: 158px;
+    bottom: 122px;
     left: 50%;
     transform: translateX(-50%);
     z-index: 1000000;
 }
diff --git a/g4f/gui/client/html/index.html b/g4f/gui/client/html/index.html
index eaae7355..102a762e 100644
--- a/g4f/gui/client/html/index.html
+++ b/g4f/gui/client/html/index.html
@@ -29,10 +29,17 @@
             }
         };
     </script>
-    <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
+    <script id="MathJax-script" src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js" async></script>
+    <script type="module" src="https://cdn.jsdelivr.net/npm/mistral-tokenizer-js" async>
+        import mistralTokenizer from 'mistral-tokenizer-js'
+    </script>
+    <script type="module" src="https://belladoreai.github.io/llama-tokenizer-js/llama-tokenizer.js" async>
+        import llamaTokenizer from 'llama-tokenizer-js'
+    </script>
+    <script src="https://unpkg.com/gpt-tokenizer/dist/cl100k_base.js" async></script>
     <script>
-        const user_image = `<img src="/assets/img/user.png" alt="your avatar">`;
-        const gpt_image = `<img src="/assets/img/gpt.png" alt="your avatar">`;
+        const user_image = '<img src="/assets/img/user.png" alt="your avatar">';
+        const gpt_image = '<img src="/assets/img/gpt.png" alt="your avatar">';
     </script>
     <style>
         .hljs {
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 57af298b..9585ca98 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -11,7 +11,7 @@
 const imageInput = document.querySelector('#image');
 const cameraInput = document.querySelector('#camera');
 const fileInput = document.querySelector('#file');
 
-let prompt_lock = false;
+let prompt_lock = false;
 
 hljs.addPlugin(new CopyButtonPlugin());
@@ -120,16 +120,8 @@ const remove_cancel_button = async () => {
     }, 300);
 };
 
-const ask_gpt = async () => {
-    regenerate.classList.add(`regenerate-hidden`);
-    messages = await get_messages(window.conversation_id);
-
-    // Remove history, if it is selected
-    if (document.getElementById('history')?.checked) {
-        messages = [messages[messages.length-1]]
-    }
-
-    new_messages = [];
+const filter_messages = (messages) => {
+    let new_messages = [];
     for (i in messages) {
         new_message = messages[i];
         // Remove generated images from history
@@ -143,6 +135,19 @@ const ask_gpt = async () => {
             new_messages.push(new_message)
         }
     }
+    return new_messages;
+}
+
+const ask_gpt = async () => {
+    regenerate.classList.add(`regenerate-hidden`);
+    messages = await get_messages(window.conversation_id);
+    total_messages = messages.length;
+
+    // Remove history, if it is selected
+    if (document.getElementById('history')?.checked) {
+        messages = [messages[messages.length-1]];
+    }
+    messages = filter_messages(messages);
 
     window.scrollTo(0, 0);
     window.controller = new AbortController();
@@ -159,8 +164,11 @@
 
     await new Promise((r) => setTimeout(r, 500));
     window.scrollTo(0, 0);
 
+    el = message_box.querySelector('.count_total');
+    el ? el.parentElement.removeChild(el) : null;
+
     message_box.innerHTML += `
-        <div class="message" data-index="${new_messages.length}">
+        <div class="message" data-index="${total_messages}">
             <div class="assistant">
                 ${gpt_image} <i class="fa-solid fa-xmark"></i>
@@ -186,7 +194,7 @@
         web_search: document.getElementById(`switch`).checked,
         provider: provider.options[provider.selectedIndex].value,
         patch_provider: document.getElementById('patch').checked,
-        messages: new_messages
+        messages: messages
     });
     const headers = {
         accept: 'text/event-stream'
@@ -240,7 +248,7 @@
             } else {
                 html = markdown_render(text);
                 let lastElement, lastIndex = null;
-                for (element of ['</p>', '</code></pre>', '</li>\n</ol>']) {
+                for (element of ['</p>', '</code></pre>', '</li>\n</ol>', '</li>\n</ul>']) {
                     const index = html.lastIndexOf(element)
                     if (index > lastIndex) {
                         lastElement = element;
@@ -278,8 +286,9 @@
     let cursorDiv = document.getElementById(`cursor`);
     if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);
     if (text) {
-        add_message(window.conversation_id, "assistant", text, provider);
+        await add_message(window.conversation_id, "assistant", text, provider);
     }
+    await load_conversation(window.conversation_id);
     message_box.scrollTop = message_box.scrollHeight;
     await remove_cancel_button();
     await register_remove_message();
@@ -372,10 +381,16 @@ const load_conversation = async (conversation_id) => {
     let elements = "";
     for (i in messages) {
         let item = messages[i];
-        let provider = item.provider ? `
+        let next_i = parseInt(i) + 1;
+        let next_provider = item.provider ? item.provider : (messages.length > next_i ? messages[next_i].provider : null);
+        let tokens_count = next_provider?.model ? count_tokens(next_provider.model, item.content) : "";
+        let append_count = tokens_count ? `, ${tokens_count} tokens` : "";
+        let words_count = `(${count_words(item.content)} words${append_count})`
+        let provider_link = item?.provider?.name ? `<a href="${item?.provider?.url}" target="_blank">${item.provider.name}</a>` : "";
+        let provider = provider_link ? `
             <div class="provider">
-                <a href="${item.provider.url}" target="_blank">${item.provider.name}</a>
-                ${item.provider.model ? ' with ' + item.provider.model : ''}
+                ${provider_link}
+                ${item.provider.model ? ' with ' + item.provider.model : ''}
             </div>
         ` : "";
         elements += `
@@ -391,10 +406,17 @@ const load_conversation = async (conversation_id) => {
                 <div class="content">
                     ${provider}
                     <div class="content_inner">${markdown_render(item.content)}</div>
+                    <div class="count">${words_count}</div>
                 </div>
             </div>
         `;
     }
+
+    let count_total = GPTTokenizer_cl100k_base?.encodeChat(filter_messages(messages), "gpt-3.5-turbo").length
+    if (count_total > 0) {
+        elements += `<div class="count_total">(${count_total} tokens used)</div>`;
+    }
+
     message_box.innerHTML = elements;
 
     await register_remove_message();
@@ -407,6 +429,23 @@ const load_conversation = async (conversation_id) => {
     }, 500);
 };
 
+function count_words(text) {
+    var matches = text.match(/[\w\d\’\'-]+/gi);
+    return matches ? matches.length : 0;
+}
+
+function count_tokens(model, text) {
+    if (model.startsWith("gpt-3") || model.startsWith("gpt-4")) {
+        return GPTTokenizer_cl100k_base?.encode(text).length
+    }
+    if (model.startsWith("llama2") || model.startsWith("codellama")) {
+        return llamaTokenizer?.encode(text).length
+    }
+    if (model.startsWith("mistral") || model.startsWith("mixtral")) {
+        return mistralTokenizer?.encode(text).length
+    }
+}
+
 const get_conversation = async (conversation_id) => {
     let conversation = await JSON.parse(
         localStorage.getItem(`conversation:${conversation_id}`)
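The new count_tokens helper dispatches on the model-name prefix to a matching tokenizer: cl100k_base for gpt-3/gpt-4, and the llama/mistral tokenizers loaded in index.html for the open models. For reference, a rough server-side analogue of the same dispatch; tiktoken is an assumed extra dependency for this sketch, not part of the commit:

from typing import Optional

import tiktoken  # assumed dependency; the web UI uses the JS tokenizers instead

def count_tokens(model: str, text: str) -> Optional[int]:
    # Same prefix dispatch as the JS helper; GPT models use cl100k_base,
    # mirroring GPTTokenizer_cl100k_base in the browser.
    if model.startswith(("gpt-3", "gpt-4")):
        return len(tiktoken.get_encoding("cl100k_base").encode(text))
    # llama2/codellama and mistral/mixtral would need their own SentencePiece
    # tokenizers (llama-tokenizer-js / mistral-tokenizer-js in the UI).
    return None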