From 2064bb7355163ff531280d38059ae2e48b19a9ff Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Sun, 12 Jan 2025 03:07:42 +0100
Subject: Add CohereForAI provider, Updates for You.com provider

Add default system prompt in UI
Expose novnc port in docker
---
 g4f/Provider/You.py                              |  24 ++--
 g4f/Provider/hf_space/CohereForAI.py             |  94 ++++++++++++++++
 g4f/Provider/hf_space/__init__.py                |  13 +--
 g4f/Provider/needs_auth/HuggingFace.py           |  19 ++--
 g4f/Provider/needs_auth/__init__.py              |   1 -
 g4f/Provider/{needs_auth => not_working}/Theb.py |   0
 g4f/Provider/not_working/__init__.py             |   1 +
 g4f/gui/client/index.html                        |   9 +-
 g4f/gui/client/static/css/style.css              |  23 ++--
 g4f/gui/client/static/js/chat.v1.js              | 101 ++++++++++-------
 10 files changed, 210 insertions(+), 75 deletions(-)
 create mode 100644 g4f/Provider/hf_space/CohereForAI.py
 rename g4f/Provider/{needs_auth => not_working}/Theb.py (100%)

(limited to 'g4f')

diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 2d4f7ca5..b91fb0d8 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -10,7 +10,7 @@ from .helper import format_prompt
 from ..image import ImageResponse, ImagePreview, EXTENSIONS_MAP, to_bytes, is_accepted_format
 from ..requests import StreamSession, FormData, raise_for_status, get_nodriver
 from ..cookies import get_cookies
-from ..errors import MissingRequirementsError
+from ..errors import MissingRequirementsError, ResponseError
 from .. import debug
 
 class You(AsyncGeneratorProvider, ProviderModelMixin):
@@ -23,18 +23,19 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
     models = [
         default_model,
         "gpt-4o",
+        "gpt-4o-mini",
         "gpt-4-turbo",
-        "gpt-4",
+        "grok-2",
         "claude-3.5-sonnet",
+        "claude-3.5-haiku",
         "claude-3-opus",
         "claude-3-sonnet",
         "claude-3-haiku",
-        "claude-2",
+        "llama-3.3-70b",
         "llama-3.1-70b",
         "llama-3",
         "gemini-1-5-flash",
         "gemini-1-5-pro",
-        "gemini-1-0-pro",
         "databricks-dbrx-instruct",
         "command-r",
         "command-r-plus",
@@ -105,19 +106,14 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
             "conversationTurnId": str(uuid.uuid4()),
             "chatId": str(uuid.uuid4()),
         }
-        params = {
-            "userFiles": upload,
-            "selectedChatMode": chat_mode,
-        }
         if chat_mode == "custom":
             if debug.logging:
                 print(f"You model: {model}")
-            params["selectedAiModel"] = model.replace("-", "_")
+            data["selectedAiModel"] = model.replace("-", "_")
 
-        async with (session.post if chat_mode == "default" else session.get)(
+        async with session.get(
             f"{cls.url}/api/streamingSearch",
-            data=data if chat_mode == "default" else None,
-            params=params if chat_mode == "default" else data,
+            params=data,
             headers=headers,
             cookies=cookies
         ) as response:
@@ -126,9 +122,13 @@
                 if line.startswith(b'event: '):
                     event = line[7:].decode()
                 elif line.startswith(b'data: '):
+                    if event == "error":
+                        raise ResponseError(line[6:])
                     if event in ["youChatUpdate", "youChatToken"]:
                         data = json.loads(line[6:])
                     if event == "youChatToken" and event in data and data[event]:
+                        if data[event].startswith("#### You\'ve hit your free quota for the Model Agent. For more usage of the Model Agent, learn more at:"):
+                            continue
                         yield data[event]
                     elif event == "youChatUpdate" and "t" in data and data["t"]:
                         if chat_mode == "create":
diff --git a/g4f/Provider/hf_space/CohereForAI.py b/g4f/Provider/hf_space/CohereForAI.py
new file mode 100644
index 00000000..4adeef60
--- /dev/null
+++ b/g4f/Provider/hf_space/CohereForAI.py
@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+import json
+import uuid
+from aiohttp import ClientSession, FormData
+
+from ...typing import AsyncResult, Messages
+from ...requests import raise_for_status
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+from ...providers.response import JsonConversation, TitleGeneration
+
+class CohereForAI(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://cohereforai-c4ai-command.hf.space"
+    conversation_url = f"{url}/conversation"
+
+    working = True
+
+    default_model = "command-r-plus-08-2024"
+    models = [
+        default_model,
+        "command-r-08-2024",
+        "command-r-plus",
+        "command-r",
+        "command-r7b-12-2024",
+    ]
+
+    @classmethod
+    async def create_async_generator(
+        cls, model: str, messages: Messages,
+        api_key: str = None,
+        proxy: str = None,
+        conversation: JsonConversation = None,
+        return_conversation: bool = False,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        headers = {
+            "Origin": cls.url,
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:133.0) Gecko/20100101 Firefox/133.0",
+            "Accept": "*/*",
+            "Accept-Language": "en-US,en;q=0.5",
+            "Referer": "https://cohereforai-c4ai-command.hf.space/",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Priority": "u=4",
+        }
+        if api_key is not None:
+            headers["Authorization"] = f"Bearer {api_key}"
+        async with ClientSession(
+            headers=headers,
+            cookies=None if conversation is None else conversation.cookies
+        ) as session:
+            system_prompt = "\n".join([message["content"] for message in messages if message["role"] == "system"])
+            messages = [message for message in messages if message["role"] != "system"]
+            inputs = format_prompt(messages) if conversation is None else messages[-1]["content"]
+            if conversation is None or conversation.model != model or conversation.preprompt != system_prompt:
+                data = {"model": model, "preprompt": system_prompt}
+                async with session.post(cls.conversation_url, json=data, proxy=proxy) as response:
+                    await raise_for_status(response)
+                    conversation = JsonConversation(
+                        **await response.json(),
+                        **data,
+                        cookies={n: c.value for n, c in response.cookies.items()}
+                    )
+                    if return_conversation:
+                        yield conversation
+            async with session.get(f"{cls.conversation_url}/{conversation.conversationId}/__data.json?x-sveltekit-invalidated=11", proxy=proxy) as response:
+                await raise_for_status(response)
+                node = json.loads((await response.text()).splitlines()[0])["nodes"][1]
+                if node["type"] == "error":
+                    raise RuntimeError(node["error"])
+                data = node["data"]
+                message_id = data[data[data[data[0]["messages"]][-1]]["id"]]
+            data = FormData()
+            data.add_field(
+                "data",
+                json.dumps({"inputs": inputs, "id": message_id, "is_retry": False, "is_continue": False, "web_search": False, "tools": []}),
+                content_type="application/json"
+            )
+            async with session.post(f"{cls.conversation_url}/{conversation.conversationId}", data=data, proxy=proxy) as response:
+                await raise_for_status(response)
+                async for chunk in response.content:
+                    try:
+                        data = json.loads(chunk)
+                    except json.JSONDecodeError as e:
+                        raise RuntimeError(f"Failed to read response: {chunk.decode(errors='replace')}") from e
+                    if data["type"] == "stream":
+                        yield data["token"].replace("\u0000", "")
+                    elif data["type"] == "title":
+                        yield TitleGeneration(data["title"])
+                    elif data["type"] == "finalAnswer":
+                        break
\ No newline at end of file
diff --git a/g4f/Provider/hf_space/__init__.py b/g4f/Provider/hf_space/__init__.py
index 5ab7ad22..daa5b935 100644
--- a/g4f/Provider/hf_space/__init__.py
+++ b/g4f/Provider/hf_space/__init__.py
@@ -8,6 +8,7 @@ from .BlackForestLabsFlux1Dev import BlackForestLabsFlux1Dev
 from .BlackForestLabsFlux1Schnell import BlackForestLabsFlux1Schnell
 from .VoodoohopFlux1Schnell import VoodoohopFlux1Schnell
 from .StableDiffusion35Large import StableDiffusion35Large
+from .CohereForAI import CohereForAI
 from .Qwen_QVQ_72B import Qwen_QVQ_72B
 
 class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
@@ -16,7 +17,7 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     default_model = BlackForestLabsFlux1Dev.default_model
     default_vision_model = Qwen_QVQ_72B.default_model
-    providers = [BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell, StableDiffusion35Large, Qwen_QVQ_72B]
+    providers = [BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell, StableDiffusion35Large, CohereForAI, Qwen_QVQ_72B]
 
     @classmethod
     def get_parameters(cls, **kwargs) -> dict:
@@ -28,11 +29,13 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
     @classmethod
     def get_models(cls, **kwargs) -> list[str]:
         if not cls.models:
+            models = []
             for provider in cls.providers:
-                cls.models.extend(provider.get_models(**kwargs))
-                cls.models.extend(provider.model_aliases.keys())
-            cls.models = list(set(cls.models))
-            cls.models.sort()
+                models.extend(provider.get_models(**kwargs))
+                models.extend(provider.model_aliases.keys())
+            models = list(set(models))
+            models.sort()
+            cls.models = models
         return cls.models
 
     @classmethod
diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py
index c15dc767..9d4e3538 100644
--- a/g4f/Provider/needs_auth/HuggingFace.py
+++ b/g4f/Provider/needs_auth/HuggingFace.py
@@ -28,15 +28,16 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
     def get_models(cls) -> list[str]:
         if not cls.models:
             url = "https://huggingface.co/api/models?inference=warm&pipeline_tag=text-generation"
-            cls.models = [model["id"] for model in requests.get(url).json()]
-            cls.models.append("meta-llama/Llama-3.2-11B-Vision-Instruct")
-            cls.models.append("nvidia/Llama-3.1-Nemotron-70B-Instruct-HF")
-            cls.models.sort()
-            if not cls.image_models:
-                url = "https://huggingface.co/api/models?pipeline_tag=text-to-image"
-                cls.image_models = [model["id"] for model in requests.get(url).json() if model["trendingScore"] >= 20]
-                cls.image_models.sort()
-            cls.models.extend(cls.image_models)
+            models = [model["id"] for model in requests.get(url).json()]
+            models.append("meta-llama/Llama-3.2-11B-Vision-Instruct")
+            models.append("nvidia/Llama-3.1-Nemotron-70B-Instruct-HF")
+            models.sort()
+            if not cls.image_models:
+                url = "https://huggingface.co/api/models?pipeline_tag=text-to-image"
+                cls.image_models = [model["id"] for model in requests.get(url).json() if model["trendingScore"] >= 20]
+                cls.image_models.sort()
+            models.extend(cls.image_models)
+            cls.models = models
         return cls.models
 
     @classmethod
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/not_working/Theb.py
similarity index 100%
rename from g4f/Provider/needs_auth/Theb.py
rename to g4f/Provider/not_working/Theb.py
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 4b800ff4..119a118d 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -25,7 +25,6 @@ from .Poe import Poe
 from .Raycast import Raycast
 from .Reka import Reka
 from .Replicate import Replicate
-from .Theb import Theb
 from .ThebApi import ThebApi
 from .WhiteRabbitNeo import WhiteRabbitNeo
 from .xAI import xAI
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
index a58870c2..9ca89cf9 100644
--- a/g4f/Provider/not_working/__init__.py
+++ b/g4f/Provider/not_working/__init__.py
@@ -14,4 +14,5 @@ from .Koala import Koala
 from .MagickPen import MagickPen
 from .MyShell import MyShell
 from .RobocodersAPI import RobocodersAPI
+from .Theb import Theb
 from .Upstage import Upstage
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index b6b17fcd..bbf41314 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -142,6 +142,10 @@
 [hunk body lost in extraction]
@@ -149,6 +153,9 @@
 [hunk body lost in extraction]

[Note: the bodies of both index.html hunks (which, per the commit message, add the default system-prompt control to the GUI) and the entire diffs for g4f/gui/client/static/css/style.css and g4f/gui/client/static/js/chat.v1.js were lost when this page was captured; only the diffstat entries and hunk headers survive.]
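Usage note (not part of the patch): the sketch below drives the new provider's `create_async_generator` entry point, as defined in the CohereForAI.py diff above. The model name and prompts are illustrative, it needs network access to the Hugging Face Space, and it assumes `TitleGeneration` exposes the string it was constructed with as a `.title` attribute.

```python
import asyncio

from g4f.Provider.hf_space.CohereForAI import CohereForAI
from g4f.providers.response import TitleGeneration

async def main() -> None:
    messages = [
        # All system messages are merged into the Space's "preprompt".
        {"role": "system", "content": "Answer in one short paragraph."},
        # The last user message becomes the "inputs" field of the request.
        {"role": "user", "content": "What are the Command R models good at?"},
    ]
    # create_async_generator is an async generator: it yields str tokens
    # while streaming, plus a TitleGeneration once the Space names the chat.
    async for chunk in CohereForAI.create_async_generator(CohereForAI.default_model, messages):
        if isinstance(chunk, TitleGeneration):
            print(f"\n[conversation title: {chunk.title}]")
        elif isinstance(chunk, str):
            print(chunk, end="", flush=True)

asyncio.run(main())
```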
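A second sketch (also not part of the patch) shows the multi-turn flow that the `conversation` and `return_conversation` parameters enable: when a `JsonConversation` is passed back in with the same model and preprompt, the provider reuses the stored cookies and `conversationId` and sends only the last user message upstream.

```python
import asyncio

from g4f.Provider.hf_space.CohereForAI import CohereForAI
from g4f.providers.response import JsonConversation

async def ask(messages: list, conversation: JsonConversation = None):
    """Run one turn and return (answer, conversation) for reuse."""
    parts = []
    async for chunk in CohereForAI.create_async_generator(
        "command-r-08-2024", messages,
        conversation=conversation, return_conversation=True,
    ):
        if isinstance(chunk, JsonConversation):
            conversation = chunk  # yielded once, when a new conversation is created
        elif isinstance(chunk, str):
            parts.append(chunk)
    return "".join(parts), conversation

async def main() -> None:
    history = [{"role": "user", "content": "Pick a color and name it."}]
    answer, conv = await ask(history)
    history += [
        {"role": "assistant", "content": answer},
        {"role": "user", "content": "Why did you pick that one?"},
    ]
    # Follow-up turn: with a conversation object, only the last message is sent.
    answer, conv = await ask(history, conv)
    print(answer)

asyncio.run(main())
```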