From 3576dee75a1623aa2385b6afe8b922ad5affca26 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Wed, 6 Dec 2023 09:35:36 +0100
Subject: Add selenium to dockerfile
 Load model and provider list in gui
 Remove needs_auth in HuggingChat
 Add default model and login url in gui

---
 g4f/Provider/Bing.py                   |  7 ++-
 g4f/Provider/PerplexityAi.py           |  9 ++--
 g4f/Provider/helper.py                 |  9 +++-
 g4f/Provider/needs_auth/Bard.py        | 16 +++---
 g4f/Provider/needs_auth/HuggingChat.py |  6 +--
 g4f/Provider/needs_auth/OpenaiChat.py  | 37 ++++++-------
 g4f/__init__.py                        | 15 +++---
 g4f/gui/client/js/chat.v1.js           | 95 +++++++++++++---------------------
 g4f/gui/server/backend.py              | 44 +++++++++-------
 g4f/gui/server/provider.py             | 14 -----
 g4f/models.py                          |  3 +-
 g4f/webdriver.py                       | 10 +++-
 12 files changed, 122 insertions(+), 143 deletions(-)
 delete mode 100644 g4f/gui/server/provider.py

(limited to 'g4f')

diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index b790a6d2..9e3e7405 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -156,8 +156,11 @@ async def delete_conversation(session: ClientSession, conversation: Conversation
         "optionsSets": ["autosave"]
     }
     async with session.post(url, json=json, proxy=proxy) as response:
-        response = await response.json()
-        return response["result"]["value"] == "Success"
+        try:
+            response = await response.json()
+            return response["result"]["value"] == "Success"
+        except:
+            return False

 class Defaults:
     delimiter = "\x1e"
diff --git a/g4f/Provider/PerplexityAi.py b/g4f/Provider/PerplexityAi.py
index 941ca6d4..ad629aa8 100644
--- a/g4f/Provider/PerplexityAi.py
+++ b/g4f/Provider/PerplexityAi.py
@@ -1,6 +1,10 @@
 from __future__ import annotations

 import time
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.keys import Keys

 from ..typing import CreateResult, Messages
 from .base_provider import BaseProvider
@@ -27,11 +31,6 @@ class PerplexityAi(BaseProvider):
         **kwargs
     ) -> CreateResult:
         with WebDriverSession(webdriver, "", virtual_display=virtual_display, proxy=proxy) as driver:
-            from selenium.webdriver.common.by import By
-            from selenium.webdriver.support.ui import WebDriverWait
-            from selenium.webdriver.support import expected_conditions as EC
-            from selenium.webdriver.common.keys import Keys
-
             prompt = format_prompt(messages)

             driver.get(f"{cls.url}/")
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index 2171f0b7..61d9cb62 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -6,6 +6,7 @@ import webbrowser
 import random
 import string
 import secrets
+import os
 from os import path
 from asyncio import AbstractEventLoop
 from platformdirs import user_config_dir
@@ -18,7 +19,7 @@ from browser_cookie3 import (
     edge,
     vivaldi,
     firefox,
-    BrowserCookieError
+    _LinuxPasswordManager
 )

 from ..typing import Dict, Messages
@@ -81,6 +82,10 @@ def init_cookies():
         except webbrowser.Error:
             continue

+# Check for broken dbus address in docker image
+if os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
+    _LinuxPasswordManager.get_password = lambda a, b: b"secret"
+
 # Load cookies for a domain from all supported browsers.
 # Cache the results in the "_cookies" variable.
 def get_cookies(domain_name=''):
@@ -100,7 +105,7 @@ def get_cookies(domain_name=''):
             for cookie in cookie_jar:
                 if cookie.name not in cookies:
                     cookies[cookie.name] = cookie.value
-        except BrowserCookieError as e:
+        except:
             pass
     _cookies[domain_name] = cookies
     return _cookies[domain_name]
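
Note on the helper.py hunks: on Linux, browser_cookie3 asks the desktop keyring over the session dbus for the password it needs to decrypt Chromium cookies. The docker image apparently sets DBUS_SESSION_BUS_ADDRESS to /dev/null, so the patch stubs _LinuxPasswordManager.get_password with a static value instead of letting that lookup fail. A minimal sketch of how the stub is activated; the import path is the one touched by this patch, everything else is illustrative:

    # Illustrative only: the stub is applied at import time, so the environment
    # variable has to be set before g4f.Provider.helper is imported.
    import os

    os.environ["DBUS_SESSION_BUS_ADDRESS"] = "/dev/null"  # what the docker image is expected to set

    from g4f.Provider.helper import get_cookies

    # With the stub in place the keyring lookup returns b"secret" and get_cookies()
    # simply collects whatever it can read, swallowing failures via the bare except.
    print(get_cookies(".huggingface.co"))
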
diff --git a/g4f/Provider/needs_auth/Bard.py b/g4f/Provider/needs_auth/Bard.py
index 877af37e..48e535dd 100644
--- a/g4f/Provider/needs_auth/Bard.py
+++ b/g4f/Provider/needs_auth/Bard.py
@@ -1,6 +1,11 @@
 from __future__ import annotations

 import time
+import os
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.keys import Keys

 from ...typing import CreateResult, Messages
 from ..base_provider import BaseProvider
@@ -27,10 +32,6 @@ class Bard(BaseProvider):
         prompt = format_prompt(messages)
         session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
         with session as driver:
-            from selenium.webdriver.common.by import By
-            from selenium.webdriver.support.ui import WebDriverWait
-            from selenium.webdriver.support import expected_conditions as EC
-
             try:
                 driver.get(f"{cls.url}/chat")
                 wait = WebDriverWait(driver, 10 if headless else 240)
@@ -40,6 +41,9 @@
             if not webdriver:
                 driver = session.reopen()
                 driver.get(f"{cls.url}/chat")
+                login_url = os.environ.get("G4F_LOGIN_URL")
+                if login_url:
+                    yield f"Please login: [Google Bard]({login_url})\n\n"
                 wait = WebDriverWait(driver, 240)
                 wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
             else:
@@ -61,8 +65,8 @@ XMLHttpRequest.prototype.open = function(method, url) {
         driver.execute_script(script)

         # Submit prompt
-        driver.find_element(By.CSS_SELECTOR, "div.ql-editor.ql-blank.textarea").send_keys(prompt)
-        driver.find_element(By.CSS_SELECTOR, "button.send-button").click()
+        driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea").send_keys(prompt)
+        driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea").send_keys(Keys.ENTER)

         # Yield response
         while True:
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 59e2da73..530069c0 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -11,7 +11,6 @@ from ..helper import format_prompt, get_cookies

 class HuggingChat(AsyncGeneratorProvider):
     url = "https://huggingface.co/chat"
-    needs_auth = True
     working = True
     model = "meta-llama/Llama-2-70b-chat-hf"

@@ -22,12 +21,11 @@
         messages: Messages,
         stream: bool = True,
         proxy: str = None,
+        web_search: bool = False,
         cookies: dict = None,
         **kwargs
     ) -> AsyncResult:
         model = model if model else cls.model
-        if proxy and "://" not in proxy:
-            proxy = f"http://{proxy}"
         if not cookies:
             cookies = get_cookies(".huggingface.co")

@@ -46,7 +44,7 @@
             "inputs": format_prompt(messages),
             "is_retry": False,
             "response_id": str(uuid.uuid4()),
-            "web_search": False
+            "web_search": web_search
         }
         async with session.post(f"{cls.url}/conversation/{conversation_id}", json=send, proxy=proxy) as response:
             async for line in response.content:
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index af62382a..818c163f 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -1,12 +1,15 @@
 from __future__ import annotations

-import uuid, json, asyncio
+import uuid, json, asyncio, os
 from py_arkose_generator.arkose import get_values_for_request
 from asyncstdlib.itertools import tee
 from async_property import async_cached_property
-
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+
 from ..base_provider import AsyncGeneratorProvider
-from ..helper import get_event_loop
+from ..helper import get_event_loop, format_prompt
 from ...webdriver import get_browser
 from ...typing import AsyncResult, Messages
 from ...requests import StreamSession
@@ -84,7 +87,12 @@ class OpenaiChat(AsyncGeneratorProvider):
         if not parent_id:
             parent_id = str(uuid.uuid4())
         if not access_token:
-            access_token = await cls.get_access_token(proxy)
+            access_token = cls._access_token
+            if not access_token:
+                login_url = os.environ.get("G4F_LOGIN_URL")
+                if login_url:
+                    yield f"Please login: [ChatGPT]({login_url})\n\n"
+                access_token = cls._access_token = await cls.browse_access_token(proxy)
         headers = {
             "Accept": "text/event-stream",
             "Authorization": f"Bearer {access_token}",
@@ -106,10 +114,11 @@
             "history_and_training_disabled": history_disabled and not auto_continue,
         }
         if action != "continue":
+            prompt = format_prompt(messages) if not conversation_id else messages[-1]["content"]
             data["messages"] = [{
                 "id": str(uuid.uuid4()),
                 "author": {"role": "user"},
-                "content": {"content_type": "text", "parts": [messages[-1]["content"]]},
+                "content": {"content_type": "text", "parts": [prompt]},
             }]
         async with session.post(f"{cls.url}/backend-api/conversation", json=data) as response:
             try:
@@ -155,14 +164,7 @@ class OpenaiChat(AsyncGeneratorProvider):
     @classmethod
     async def browse_access_token(cls, proxy: str = None) -> str:
         def browse() -> str:
-            try:
-                from selenium.webdriver.common.by import By
-                from selenium.webdriver.support.ui import WebDriverWait
-                from selenium.webdriver.support import expected_conditions as EC
-
-                driver = get_browser(proxy=proxy)
-            except ImportError:
-                return
+            driver = get_browser(proxy=proxy)
             try:
                 driver.get(f"{cls.url}/")
                 WebDriverWait(driver, 1200).until(
@@ -177,15 +179,6 @@ class OpenaiChat(AsyncGeneratorProvider):
             None,
             browse
         )
-
-    @classmethod
-    async def get_access_token(cls, proxy: str = None) -> str:
-        if not cls._access_token:
-            cls._access_token = await cls.browse_access_token(proxy)
-        if not cls._access_token:
-            raise RuntimeError("Read access token failed")
-        return cls._access_token
-

 async def get_arkose_token(proxy: str = None, timeout: int = None) -> str:
     config = {
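
Note on the needs_auth changes above: Bard and OpenaiChat now read G4F_LOGIN_URL and yield a clickable markdown login hint before blocking on the webdriver wait, and OpenaiChat caches the scraped token in cls._access_token so the browser only has to be opened once per process. A rough usage sketch; the URL is a placeholder and the re-export of OpenaiChat from g4f.Provider is assumed:

    import os
    import g4f
    from g4f.Provider import OpenaiChat

    # Point the hint at whatever page exposes the container's browser (placeholder URL).
    os.environ["G4F_LOGIN_URL"] = "http://localhost:7900/"

    for chunk in g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        provider=OpenaiChat,
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
        ignore_stream_and_auth=True,  # new flag from this patch set, see g4f/__init__.py below
    ):
        # The first chunk may be the "Please login: [ChatGPT](...)" hint from the hunk above.
        print(chunk, end="")
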
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 4c47fe7d..1ea6d3a3 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -25,7 +25,8 @@ def get_model_and_provider(model    : Union[Model, str],
                            provider : Union[type[BaseProvider], None],
                            stream   : bool,
                            ignored  : List[str] = None,
-                           ignore_working: bool = False) -> tuple[Model, type[BaseProvider]]:
+                           ignore_working: bool = False,
+                           ignore_stream: bool = False) -> tuple[Model, type[BaseProvider]]:

     if isinstance(model, str):
         if model in ModelUtils.convert:
@@ -45,7 +46,7 @@
     if not provider.working and not ignore_working:
         raise RuntimeError(f'{provider.__name__} is not working')

-    if not provider.supports_stream and stream:
+    if not ignore_stream and not provider.supports_stream and stream:
         raise ValueError(f'{provider.__name__} does not support "stream" argument')

     if debug.logging:
@@ -61,15 +62,17 @@ class ChatCompletion:
               stream   : bool = False,
               auth     : Union[str, None] = None,
               ignored  : List[str] = None,
-              ignore_working: bool = False, **kwargs) -> Union[CreateResult, str]:
+              ignore_working: bool = False,
+              ignore_stream_and_auth: bool = False,
+              **kwargs) -> Union[CreateResult, str]:

-        model, provider = get_model_and_provider(model, provider, stream, ignored, ignore_working)
+        model, provider = get_model_and_provider(model, provider, stream, ignored, ignore_working, ignore_stream_and_auth)

-        if provider.needs_auth and not auth:
+        if not ignore_stream_and_auth and provider.needs_auth and not auth:
             raise ValueError(
                 f'{provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')

-        if provider.needs_auth:
+        if auth:
             kwargs['auth'] = auth

         result = provider.create_completion(model.name, messages, stream, **kwargs)
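
Note on the g4f/__init__.py hunks: ChatCompletion.create() grows an ignore_stream_and_auth flag that both relaxes the supports_stream check (through the new ignore_stream parameter of get_model_and_provider) and skips the needs_auth validation; an auth value is now only forwarded when one is actually given. Combined with needs_auth being dropped from HuggingChat above, a cookie-based call looks roughly like this (model/provider choice is illustrative):

    import g4f
    from g4f.Provider import HuggingChat

    # HuggingChat no longer sets needs_auth, so it only needs huggingface.co cookies
    # in a local browser; get_cookies() picks them up automatically.
    answer = g4f.ChatCompletion.create(
        model=g4f.models.llama2_70b,
        provider=HuggingChat,
        messages=[{"role": "user", "content": "Hello"}],
        web_search=False,             # keyword added to HuggingChat in this patch
        ignore_stream_and_auth=True,  # skip the supports_stream / needs_auth checks
    )
    print(answer)
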
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 5b7a0bf0..2b1fdcb0 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -161,7 +161,7 @@ const ask_gpt = async (txtMsgs) => {

                 text += chunk;

-                document.getElementById(`gpt_${window.token}`).innerHTML = markdown.render(text);
+                document.getElementById(`gpt_${window.token}`).innerHTML = markdown.render(text).replace(
+    response = await fetch('/backend-api/v2/models')
+    models = await response.json()
+    let select = document.getElementById('model');
+    select.textContent = '';
+
+    let auto = document.createElement('option');
+    auto.value = '';
+    auto.text = 'Default Model';
+    select.appendChild(auto);

     for (model of models) {
-        let model_info = document.createElement('option');
-        model_info.value = model
-        model_info.text = model
+        let option = document.createElement('option');
+        option.value = option.text = model;
+        select.appendChild(option);
+    }
+})();

-        MODELS_SELECT.appendChild(model_info);
+(async () => {
+    response = await fetch('/backend-api/v2/providers')
+    providers = await response.json()
+
+    let select = document.getElementById('provider');
+    select.textContent = '';
+
+    let auto = document.createElement('option');
+    auto.value = '';
+    auto.text = 'Provider: Auto';
+    select.appendChild(auto);
+
+    for (provider of providers) {
+        let option = document.createElement('option');
+        option.value = option.text = provider;
+        select.appendChild(option);
     }
-}
\ No newline at end of file
+})();
\ No newline at end of file
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 3d7bfedc..03363201 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -3,9 +3,8 @@ import g4f
 from flask import request
 from .internet import search
 from .config import special_instructions
-from .provider import get_provider

-g4f.logging = True
+g4f.debug.logging = True

 class Backend_Api:
     def __init__(self, app) -> None:
@@ -15,6 +14,10 @@ class Backend_Api:
                 'function': self.models,
                 'methods' : ['GET']
             },
+            '/backend-api/v2/providers': {
+                'function': self.providers,
+                'methods' : ['GET']
+            },
             '/backend-api/v2/conversation': {
                 'function': self._conversation,
                 'methods': ['POST']
@@ -37,6 +40,9 @@ class Backend_Api:
     def models(self):
         return g4f._all_models

+    def providers(self):
+        return [provider.__name__ for provider in g4f.Provider.__providers__ if provider.working]
+
     def _gen_title(self):
         return {
             'title': ''
@@ -47,26 +53,26 @@ class Backend_Api:
             #jailbreak = request.json['jailbreak']
            #internet_access = request.json['meta']['content']['internet_access']
            #conversation = request.json['meta']['content']['conversation']
-            prompt = request.json['meta']['content']['parts']
-            model = request.json['model']
-            provider = request.json.get('provider').split('g4f.Provider.')[1]
-
-            messages = prompt
-            print(messages)
+            messages = request.json['meta']['content']['parts']
+            model = request.json.get('model')
+            model = model if model else g4f.models.default
+            provider = request.json.get('provider', 'Auto').replace('g4f.Provider.', '')
+            provider = provider if provider != "Auto" else None
+            if provider != None:
+                provider = g4f.Provider.ProviderUtils.convert.get(provider)

-            def stream():
-                yield from g4f.ChatCompletion.create(
-                    model=model,
-                    provider=get_provider(provider),
-                    messages=messages,
-                    stream=True,
-                ) if provider else g4f.ChatCompletion.create(
-                    model=model, messages=messages, stream=True
-                )
+            response = g4f.ChatCompletion.create(
+                model=model,
+                provider=provider,
+                messages=messages,
+                stream=True,
+                ignore_stream_and_auth=True
+            )

-            return self.app.response_class(stream(), mimetype='text/event-stream')
+            return self.app.response_class(response, mimetype='text/event-stream')

-        except Exception as e:
+        except Exception as e:
+            print(e)
             return {
                 'code'   : 'G4F_ERROR',
                 '_action': '_ask',
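
Note on the backend.py hunks: the GUI server now exposes /backend-api/v2/models and /backend-api/v2/providers, and /backend-api/v2/conversation resolves the provider name sent by the client through g4f.Provider.ProviderUtils.convert, with 'Auto' and an empty model falling back to the defaults. A rough client-side sketch; host, port and the exact payload shape are assumptions inferred from the handler above:

    import requests

    base = "http://localhost:8080"  # wherever the g4f gui is served (assumption)

    print(requests.get(f"{base}/backend-api/v2/models").json())
    print(requests.get(f"{base}/backend-api/v2/providers").json())

    payload = {
        "model": "",         # empty -> g4f.models.default on the server
        "provider": "Auto",  # "Auto" -> provider=None, i.e. automatic selection
        "meta": {"content": {"parts": [{"role": "user", "content": "Hi"}]}},
    }
    with requests.post(f"{base}/backend-api/v2/conversation", json=payload, stream=True) as resp:
        for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
            print(chunk, end="")
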
diff --git a/g4f/gui/server/provider.py b/g4f/gui/server/provider.py
deleted file mode 100644
index 8c7ac755..00000000
--- a/g4f/gui/server/provider.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from __future__ import annotations
-
-import g4f
-from g4f import BaseProvider
-
-
-def get_provider(provider: str) -> BaseProvider | None:
-    if not isinstance(provider, str):
-        return None
-    print(provider)
-    if provider == 'g4f.Provider.Auto':
-        return None
-
-    return g4f.Provider.ProviderUtils.convert.get(provider)
diff --git a/g4f/models.py b/g4f/models.py
index 2f86891d..a6cd724b 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -7,6 +7,7 @@ from .Provider import (
     ChatgptDemoAi,
     ChatAnywhere,
     ChatgptNext,
+    HuggingChat,
     GptForLove,
     ChatgptAi,
     DeepInfra,
@@ -100,7 +101,7 @@ llama2_13b = Model(
 llama2_70b = Model(
     name          = "meta-llama/Llama-2-70b-chat-hf",
     base_provider = "huggingface",
-    best_provider = RetryProvider([Llama2, DeepInfra]))
+    best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat]))

 # Bard
 palm = Model(
diff --git a/g4f/webdriver.py b/g4f/webdriver.py
index 288eed0e..f0fa1fba 100644
--- a/g4f/webdriver.py
+++ b/g4f/webdriver.py
@@ -4,6 +4,8 @@ import time
 from platformdirs import user_config_dir
 from selenium.webdriver.remote.webdriver import WebDriver
 from undetected_chromedriver import Chrome, ChromeOptions
+import os.path
+from . import debug

 try:
     from pyvirtualdisplay import Display
@@ -19,12 +21,16 @@ def get_browser(
 ) -> WebDriver:
     if user_data_dir == None:
         user_data_dir = user_config_dir("g4f")
+    if debug.logging:
+        print("Open browser with config dir:", user_data_dir)
     if not options:
         options = ChromeOptions()
-        options.add_argument("window-size=1920,1080");
     if proxy:
         options.add_argument(f'--proxy-server={proxy}')
-    return Chrome(options=options, user_data_dir=user_data_dir, headless=headless)
+    driver = '/usr/bin/chromedriver'
+    if not os.path.isfile(driver):
+        driver = None
+    return Chrome(options=options, user_data_dir=user_data_dir, driver_executable_path=driver, headless=headless)

 class WebDriverSession():
     def __init__(
-- 
cgit v1.2.3
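
Note on the webdriver.py hunk: get_browser() now logs the profile directory when debug logging is enabled and only passes /usr/bin/chromedriver to undetected_chromedriver if that file exists (the docker image ships it there; elsewhere undetected_chromedriver keeps downloading its own driver). A short sketch of driving it directly; everything here is illustrative rather than part of the patch:

    from g4f import debug
    from g4f.webdriver import get_browser, WebDriverSession

    debug.logging = True

    # One-off browser: the config dir is printed, and driver_executable_path is
    # only set when /usr/bin/chromedriver is present.
    browser = get_browser(headless=True)
    browser.get("https://example.com")
    print(browser.title)
    browser.quit()

    # Or reuse the session helper the providers in this patch rely on.
    with WebDriverSession(None, "", headless=True) as driver:
        driver.get("https://example.com")
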

From c667f1cec521534781de51e316d5edeed89bdb89 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Wed, 6 Dec 2023 11:54:50 +0100
Subject: Improve docker image

---
 g4f/gui/client/js/chat.v1.js | 2 +-
 g4f/gui/server/backend.py    | 5 ++++-
 g4f/models.py                | 2 ++
 3 files changed, 7 insertions(+), 2 deletions(-)

(limited to 'g4f')

diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 2b1fdcb0..2844b73e 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -601,7 +601,7 @@ observer.observe(message_input, { attributes: true });

     let auto = document.createElement('option');
     auto.value = '';
-    auto.text = 'Default Model';
+    auto.text = 'Model: Default';
     select.appendChild(auto);

     for (model of models) {
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 03363201..e1abb764 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -41,7 +41,10 @@ class Backend_Api:
         return g4f._all_models

     def providers(self):
-        return [provider.__name__ for provider in g4f.Provider.__providers__ if provider.working]
+        return [
+            provider.__name__ for provider in g4f.Provider.__providers__
+            if provider.working and provider is not g4f.Provider.RetryProvider
+        ]

     def _gen_title(self):
         return {
diff --git a/g4f/models.py b/g4f/models.py
index a6cd724b..cf24ff52 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -275,6 +275,8 @@ class ModelUtils:
         'gpt-3.5-turbo-16k'      : gpt_35_turbo_16k,
         'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,

+        'gpt-3.5-long': gpt_35_long,
+
         # gpt-4
         'gpt-4'          : gpt_4,
         'gpt-4-0613'     : gpt_4_0613,
-- 
cgit v1.2.3
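
Note on the second patch: the provider list returned to the GUI now filters out RetryProvider itself, and the 'gpt-3.5-long' alias becomes reachable through ModelUtils.convert like any other model name. An illustrative check:

    import g4f

    # The alias added to ModelUtils.convert resolves to the gpt_35_long model and is
    # accepted anywhere a model name string is accepted.
    print(g4f.ChatCompletion.create(
        model="gpt-3.5-long",
        messages=[{"role": "user", "content": "Give me a one line summary."}],
    ))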