-rw-r--r--   Dockerfile                               33
-rw-r--r--   README.md                                 2
-rw-r--r--   docker-compose.yml                       19
-rw-r--r--   docker/Dockerfile                        43
-rwxr-xr-x   docker/supervisor.conf                   76
-rw-r--r--   g4f.png                                  bin 0 -> 155456 bytes
-rw-r--r--   g4f/Provider/Bing.py                      7
-rw-r--r--   g4f/Provider/PerplexityAi.py              9
-rw-r--r--   g4f/Provider/helper.py                    9
-rw-r--r--   g4f/Provider/needs_auth/Bard.py          16
-rw-r--r--   g4f/Provider/needs_auth/HuggingChat.py    6
-rw-r--r--   g4f/Provider/needs_auth/OpenaiChat.py    37
-rw-r--r--   g4f/__init__.py                          15
-rw-r--r--   g4f/gui/client/js/chat.v1.js             95
-rw-r--r--   g4f/gui/server/backend.py                47
-rw-r--r--   g4f/gui/server/provider.py               14
-rw-r--r--   g4f/models.py                             5
-rw-r--r--   g4f/webdriver.py                         10
-rw-r--r--   ptest.py                                 57
19 files changed, 255 insertions, 245 deletions
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index 503a6dcc..00000000
--- a/Dockerfile
+++ /dev/null
@@ -1,33 +0,0 @@
-# Use the official lightweight Python image.
-# https://hub.docker.com/_/python
-FROM python:3.9-slim
-
-# Ensure Python outputs everything immediately (useful for real-time logging in Docker).
-ENV PYTHONUNBUFFERED 1
-
-# Set the working directory in the container.
-WORKDIR /app
-
-# Update the system packages and install system-level dependencies required for compilation.
-# gcc: Compiler required for some Python packages.
-# build-essential: Contains necessary tools and libraries for building software.
-RUN apt-get update && apt-get install -y --no-install-recommends \
-    gcc \
-    build-essential \
-    && rm -rf /var/lib/apt/lists/*
-
-# Copy the project's requirements file into the container.
-COPY requirements.txt /app/
-
-# Upgrade pip for the latest features and install the project's Python dependencies.
-RUN pip install --upgrade pip && pip install -r requirements.txt
-
-# Copy the entire project into the container.
-# This may include all code, assets, and configuration files required to run the application.
-COPY . /app/
-
-# Expose port 80 and 1337
-EXPOSE 80 1337
-
-# Define the default command to run the app using Python's module mode.
-ENTRYPOINT ["python", "-m", "g4f.cli"]
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-![248433934-7886223b-c1d1-4260-82aa-da5741f303bb](https://github.com/xtekky/gpt4free/assets/98614666/ea012c87-76e0-496a-8ac4-e2de090cc6c9)
+![g4f](g4f.png)
 
 <a href='https://ko-fi.com/xtekky' target='_blank'><img height='35' style='border:0px;height:46px;' src='https://az743702.vo.msecnd.net/cdn/kofi3.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' />
 <div id="top"></div>
diff --git a/docker-compose.yml b/docker-compose.yml
index 43aa6c02..8dfe1f35 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,18 +1,15 @@
 version: '3'
 
 services:
-  gpt4free-api: &gpt4free
-    image: gpt4free:latest
+  gpt4free:
+    image: ghcr.io/xtekky/gpt4free:latest
+    shm_size: 2gb
     build:
       context: .
-      dockerfile: Dockerfile
-      cache_from:
-        - gpt4free:latest
-    ports:
-      - '1337:1337'
-    command: api
-  gpt4free-gui:
-    <<: *gpt4free
+      dockerfile: docker/Dockerfile
+    volumes:
+      - .:/app
     ports:
       - '8080:80'
-    command: gui
+      - '1337:1337'
+      - '7900:7900'
\ No newline at end of file
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 00000000..0c52940d
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,43 @@
+FROM selenium/node-chrome
+
+ENV SE_SCREEN_WIDTH 1920
+ENV G4F_LOGIN_URL http://localhost:7900/?autoconnect=1&resize=scale&password=secret
+ENV PYTHONUNBUFFERED 1
+
+USER root
+
+# Python packages
+RUN apt-get -qqy update \
+  && apt-get -qqy install \
+    python3 \
+    python-is-python3 \
+    pip
+
+# Cleanup
+RUN rm -rf /var/lib/apt/lists/* /var/cache/apt/* \
+  && apt-get -qyy autoremove \
+  && apt-get -qyy clean
+
+# Update entrypoint
+COPY docker/supervisor.conf /etc/supervisor/conf.d/selenium.conf
+
+# Change background image
+COPY g4f.png /usr/share/images/fluxbox/ubuntu-light.png
+
+# Switch user
+USER 1200
+
+# Set the working directory in the container.
+WORKDIR /app
+
+# Copy the project's requirements file into the container.
+COPY requirements.txt /app/
+
+# Upgrade pip for the latest features and install the project's Python dependencies.
+RUN pip install --upgrade pip && pip install -r requirements.txt
+
+# Copy the entire package into the container.
+COPY g4f /app/g4f
+
+# Expose ports
+EXPOSE 80 1337
\ No newline at end of file
diff --git a/docker/supervisor.conf b/docker/supervisor.conf
new file mode 100755
index 00000000..6d82f6c9
--- /dev/null
+++ b/docker/supervisor.conf
@@ -0,0 +1,76 @@
+[program:xvfb]
+priority=0
+command=/opt/bin/start-xvfb.sh
+autostart=true
+autorestart=true
+
+;Logs
+redirect_stderr=false
+stdout_logfile=/var/log/supervisor/xvfb-stdout.log
+stderr_logfile=/var/log/supervisor/xvfb-stderr.log
+stdout_logfile_maxbytes=50MB
+stderr_logfile_maxbytes=50MB
+stdout_logfile_backups=5
+stderr_logfile_backups=5
+stdout_capture_maxbytes=50MB
+stderr_capture_maxbytes=50MB
+
+[program:vnc]
+priority=5
+command=/opt/bin/start-vnc.sh
+autostart=true
+autorestart=true
+
+;Logs
+redirect_stderr=false
+stdout_logfile=/var/log/supervisor/vnc-stdout.log
+stderr_logfile=/var/log/supervisor/vnc-stderr.log
+stdout_logfile_maxbytes=50MB
+stderr_logfile_maxbytes=50MB
+stdout_logfile_backups=5
+stderr_logfile_backups=5
+stdout_capture_maxbytes=50MB
+stderr_capture_maxbytes=50MB
+
+[program:novnc]
+priority=10
+command=/opt/bin/start-novnc.sh
+autostart=true
+autorestart=true
+
+;Logs
+redirect_stderr=false
+stdout_logfile=/var/log/supervisor/novnc-stdout.log
+stderr_logfile=/var/log/supervisor/novnc-stderr.log
+stdout_logfile_maxbytes=50MB
+stderr_logfile_maxbytes=50MB
+stdout_logfile_backups=5
+stderr_logfile_backups=5
+stdout_capture_maxbytes=50MB
+stderr_capture_maxbytes=50MB
+
+[program:g4f-cli]
+priority=15
+command=python -m g4f.cli api
+directory=/app
+stopasgroup=true
+autostart=true
+autorestart=true
+
+;Logs (all Hub activity redirected to stdout so it can be seen through "docker logs"
+redirect_stderr=true
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+
+[program:g4f-gui]
+priority=15
+command=python -m g4f.cli gui
+directory=/app
+stopasgroup=true
+autostart=true
+autorestart=true
+
+;Logs (all Hub activity redirected to stdout so it can be seen through "docker logs"
+redirect_stderr=true
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
\ No newline at end of file
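Outside the image, the two supervised programs above map onto the package's own CLI. A minimal local sketch, assuming g4f and its requirements are installed on the host:

    import subprocess

    # The same two entry points the supervisor sections above keep alive:
    api = subprocess.Popen(["python", "-m", "g4f.cli", "api"])   # interference API (port 1337 in the compose file)
    gui = subprocess.Popen(["python", "-m", "g4f.cli", "gui"])   # web GUI (published as host port 8080 in docker-compose.yml)

    # Both processes keep serving until terminated, e.g. api.terminate(); gui.terminate()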
diff --git a/g4f.png b/g4f.png
new file mode 100644
index 00000000..41bf9e6b
--- /dev/null
+++ b/g4f.png
Binary files differ
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index b790a6d2..9e3e7405 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -156,8 +156,11 @@ async def delete_conversation(session: ClientSession, conversation: Conversation
         "optionsSets": ["autosave"]
     }
     async with session.post(url, json=json, proxy=proxy) as response:
-        response = await response.json()
-        return response["result"]["value"] == "Success"
+        try:
+            response = await response.json()
+            return response["result"]["value"] == "Success"
+        except:
+            return False
 
 class Defaults:
     delimiter = "\x1e"
diff --git a/g4f/Provider/PerplexityAi.py b/g4f/Provider/PerplexityAi.py
index 941ca6d4..ad629aa8 100644
--- a/g4f/Provider/PerplexityAi.py
+++ b/g4f/Provider/PerplexityAi.py
@@ -1,6 +1,10 @@
 from __future__ import annotations
 
 import time
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.keys import Keys
 
 from ..typing import CreateResult, Messages
 from .base_provider import BaseProvider
@@ -27,11 +31,6 @@ class PerplexityAi(BaseProvider):
         **kwargs
     ) -> CreateResult:
         with WebDriverSession(webdriver, "", virtual_display=virtual_display, proxy=proxy) as driver:
-            from selenium.webdriver.common.by import By
-            from selenium.webdriver.support.ui import WebDriverWait
-            from selenium.webdriver.support import expected_conditions as EC
-            from selenium.webdriver.common.keys import Keys
-
             prompt = format_prompt(messages)
 
             driver.get(f"{cls.url}/")
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index 2171f0b7..61d9cb62 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -6,6 +6,7 @@ import webbrowser
 import random
 import string
 import secrets
+import os
 from os import path
 from asyncio import AbstractEventLoop
 from platformdirs import user_config_dir
@@ -18,7 +19,7 @@ from browser_cookie3 import (
     edge,
     vivaldi,
     firefox,
-    BrowserCookieError
+    _LinuxPasswordManager
 )
 
 from ..typing import Dict, Messages
@@ -81,6 +82,10 @@ def init_cookies():
         except webbrowser.Error:
             continue
 
+# Check for broken dbus address in docker image
+if os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
+    _LinuxPasswordManager.get_password = lambda a, b: b"secret"
+
# Load cookies for a domain from all supported browsers.
# Cache the results in the "_cookies" variable.
 def get_cookies(domain_name=''):
@@ -100,7 +105,7 @@ def get_cookies(domain_name=''):
             for cookie in cookie_jar:
                 if cookie.name not in cookies:
                     cookies[cookie.name] = cookie.value
-        except BrowserCookieError as e:
+        except:
             pass
     _cookies[domain_name] = cookies
     return _cookies[domain_name]
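The _LinuxPasswordManager patch keeps browser_cookie3 from blocking on the dummy DBus address inside the Selenium image, while get_cookies remains the single entry point providers use to pick up browser sessions. A minimal sketch of direct use, assuming a browser profile on the host is already logged in to the target site:

    from g4f.Provider.helper import get_cookies

    # Aggregates cookies for the domain from the supported browsers
    # (Edge, Vivaldi, Firefox and the others) and caches them per domain.
    cookies = get_cookies(".huggingface.co")

    # Providers that accept a cookies= keyword (e.g. HuggingChat below) can be
    # fed this dict directly instead of repeating the lookup.
    print(sorted(cookies))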
diff --git a/g4f/Provider/needs_auth/Bard.py b/g4f/Provider/needs_auth/Bard.py
index 877af37e..48e535dd 100644
--- a/g4f/Provider/needs_auth/Bard.py
+++ b/g4f/Provider/needs_auth/Bard.py
@@ -1,6 +1,11 @@
 from __future__ import annotations
 
 import time
+import os
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.keys import Keys
 
 from ...typing import CreateResult, Messages
 from ..base_provider import BaseProvider
@@ -27,10 +32,6 @@ class Bard(BaseProvider):
         prompt = format_prompt(messages)
         session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
         with session as driver:
-            from selenium.webdriver.common.by import By
-            from selenium.webdriver.support.ui import WebDriverWait
-            from selenium.webdriver.support import expected_conditions as EC
-
             try:
                 driver.get(f"{cls.url}/chat")
                 wait = WebDriverWait(driver, 10 if headless else 240)
@@ -40,6 +41,9 @@ class Bard(BaseProvider):
                 if not webdriver:
                     driver = session.reopen()
                     driver.get(f"{cls.url}/chat")
+                    login_url = os.environ.get("G4F_LOGIN_URL")
+                    if login_url:
+                        yield f"Please login: [Google Bard]({login_url})\n\n"
                     wait = WebDriverWait(driver, 240)
                     wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
                 else:
@@ -61,8 +65,8 @@ XMLHttpRequest.prototype.open = function(method, url) {
             driver.execute_script(script)
 
             # Submit prompt
-            driver.find_element(By.CSS_SELECTOR, "div.ql-editor.ql-blank.textarea").send_keys(prompt)
-            driver.find_element(By.CSS_SELECTOR, "button.send-button").click()
+            driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea").send_keys(prompt)
+            driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea").send_keys(Keys.ENTER)
 
             # Yield response
             while True:
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 59e2da73..530069c0 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -11,7 +11,6 @@ from ..helper import format_prompt, get_cookies
 
 class HuggingChat(AsyncGeneratorProvider):
     url = "https://huggingface.co/chat"
-    needs_auth = True
     working = True
     model = "meta-llama/Llama-2-70b-chat-hf"
 
@@ -22,12 +21,11 @@ class HuggingChat(AsyncGeneratorProvider):
         messages: Messages,
         stream: bool = True,
         proxy: str = None,
+        web_search: bool = False,
         cookies: dict = None,
         **kwargs
     ) -> AsyncResult:
         model = model if model else cls.model
-        if proxy and "://" not in proxy:
-            proxy = f"http://{proxy}"
         if not cookies:
             cookies = get_cookies(".huggingface.co")
 
@@ -46,7 +44,7 @@ class HuggingChat(AsyncGeneratorProvider):
                 "inputs": format_prompt(messages),
                 "is_retry": False,
                 "response_id": str(uuid.uuid4()),
-                "web_search": False
+                "web_search": web_search
             }
             async with session.post(f"{cls.url}/conversation/{conversation_id}", json=send, proxy=proxy) as response:
                 async for line in response.content:
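Because extra keyword arguments flow from ChatCompletion.create through to the provider (see the **kwargs passthrough in g4f/__init__.py below), the new web_search flag becomes a per-call option. A rough sketch, assuming huggingface.co cookies are available via get_cookies or passed in explicitly:

    import g4f
    from g4f.Provider import HuggingChat

    # web_search is forwarded to HuggingChat via **kwargs and ends up in the
    # request payload; cookies default to get_cookies(".huggingface.co").
    answer = g4f.ChatCompletion.create(
        model=g4f.models.llama2_70b,
        provider=HuggingChat,
        messages=[{"role": "user", "content": "Summarize the latest release notes"}],
        web_search=True,
    )
    print(answer)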
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index af62382a..818c163f 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -1,12 +1,15 @@
 from __future__ import annotations
 
-import uuid, json, asyncio
+import uuid, json, asyncio, os
 from py_arkose_generator.arkose import get_values_for_request
 from asyncstdlib.itertools import tee
 from async_property import async_cached_property
-
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+
 from ..base_provider import AsyncGeneratorProvider
-from ..helper import get_event_loop
+from ..helper import get_event_loop, format_prompt
 from ...webdriver import get_browser
 from ...typing import AsyncResult, Messages
 from ...requests import StreamSession
@@ -84,7 +87,12 @@ class OpenaiChat(AsyncGeneratorProvider):
         if not parent_id:
             parent_id = str(uuid.uuid4())
         if not access_token:
-            access_token = await cls.get_access_token(proxy)
+            access_token = cls._access_token
+        if not access_token:
+            login_url = os.environ.get("G4F_LOGIN_URL")
+            if login_url:
+                yield f"Please login: [ChatGPT]({login_url})\n\n"
+            access_token = cls._access_token = await cls.browse_access_token(proxy)
         headers = {
             "Accept": "text/event-stream",
             "Authorization": f"Bearer {access_token}",
@@ -106,10 +114,11 @@ class OpenaiChat(AsyncGeneratorProvider):
             "history_and_training_disabled": history_disabled and not auto_continue,
         }
         if action != "continue":
+            prompt = format_prompt(messages) if not conversation_id else messages[-1]["content"]
             data["messages"] = [{
                 "id": str(uuid.uuid4()),
                 "author": {"role": "user"},
-                "content": {"content_type": "text", "parts": [messages[-1]["content"]]},
+                "content": {"content_type": "text", "parts": [prompt]},
             }]
         async with session.post(f"{cls.url}/backend-api/conversation", json=data) as response:
             try:
@@ -155,14 +164,7 @@ class OpenaiChat(AsyncGeneratorProvider):
     @classmethod
     async def browse_access_token(cls, proxy: str = None) -> str:
         def browse() -> str:
-            try:
-                from selenium.webdriver.common.by import By
-                from selenium.webdriver.support.ui import WebDriverWait
-                from selenium.webdriver.support import expected_conditions as EC
-
-                driver = get_browser(proxy=proxy)
-            except ImportError:
-                return
+            driver = get_browser(proxy=proxy)
             try:
                 driver.get(f"{cls.url}/")
                 WebDriverWait(driver, 1200).until(
@@ -177,15 +179,6 @@ class OpenaiChat(AsyncGeneratorProvider):
             None,
             browse
         )
-
-    @classmethod
-    async def get_access_token(cls, proxy: str = None) -> str:
-        if not cls._access_token:
-            cls._access_token = await cls.browse_access_token(proxy)
-        if not cls._access_token:
-            raise RuntimeError("Read access token failed")
-        return cls._access_token
-
 
 async def get_arkose_token(proxy: str = None, timeout: int = None) -> str:
     config = {
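With the access token now cached on the class and a login hint emitted when it is missing, OpenaiChat can be driven end to end from the container. A sketch of the intended flow (G4F_LOGIN_URL is the noVNC address set in docker/Dockerfile; ignore_stream_and_auth comes from the g4f/__init__.py change below):

    import g4f
    from g4f.Provider import OpenaiChat

    # When no token is cached yet, the first chunk is a markdown login link
    # ("Please login: [ChatGPT](...)"); after logging in through the browser
    # session, the token is stored in cls._access_token and reused.
    for chunk in g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        provider=OpenaiChat,
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
        ignore_stream_and_auth=True,
    ):
        print(chunk, end="", flush=True)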
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 7448bf62..9363455a 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -25,7 +25,8 @@ def get_model_and_provider(model    : Union[Model, str],
                            provider : Union[type[BaseProvider], None],
                            stream   : bool,
                            ignored  : List[str] = None,
-                           ignore_working: bool = False) -> tuple[Model, type[BaseProvider]]:
+                           ignore_working: bool = False,
+                           ignore_stream: bool = False) -> tuple[Model, type[BaseProvider]]:
 
     if isinstance(model, str):
         if model in ModelUtils.convert:
@@ -45,7 +46,7 @@ def get_model_and_provider(model    : Union[Model, str],
     if not provider.working and not ignore_working:
         raise RuntimeError(f'{provider.__name__} is not working')
 
-    if not provider.supports_stream and stream:
+    if not ignore_stream and not provider.supports_stream and stream:
         raise ValueError(f'{provider.__name__} does not support "stream" argument')
 
     if debug.logging:
@@ -61,15 +62,17 @@ class ChatCompletion:
               stream  : bool = False,
               auth    : Union[str, None] = None,
               ignored : List[str] = None,
-              ignore_working: bool = False, **kwargs) -> Union[CreateResult, str]:
+              ignore_working: bool = False,
+              ignore_stream_and_auth: bool = False,
+              **kwargs) -> Union[CreateResult, str]:
 
-        model, provider = get_model_and_provider(model, provider, stream, ignored, ignore_working)
+        model, provider = get_model_and_provider(model, provider, stream, ignored, ignore_working, ignore_stream_and_auth)
 
-        if provider.needs_auth and not auth:
+        if not ignore_stream_and_auth and provider.needs_auth and not auth:
             raise ValueError(
                 f'{provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')
 
-        if provider.needs_auth:
+        if auth:
             kwargs['auth'] = auth
 
         result = provider.create_completion(model.name, messages, stream, **kwargs)
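The new ignore_stream_and_auth switch relaxes both guards at once, which is exactly what the GUI backend below relies on. A minimal sketch using the default model:

    import g4f

    # With ignore_stream_and_auth=True, providers that do not advertise streaming
    # no longer raise on stream=True, and needs_auth providers are attempted
    # without an explicit auth= value (cookies or cached tokens may still apply).
    for chunk in g4f.ChatCompletion.create(
        model=g4f.models.default,
        messages=[{"role": "user", "content": "Say hi"}],
        stream=True,
        ignore_stream_and_auth=True,
    ):
        print(chunk, end="")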
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 5b7a0bf0..2844b73e 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -161,7 +161,7 @@ const ask_gpt = async (txtMsgs) => {
 
                 text += chunk;
 
-                document.getElementById(`gpt_${window.token}`).innerHTML = markdown.render(text);
+                document.getElementById(`gpt_${window.token}`).innerHTML = markdown.render(text).replace("<a href=", '<a target="_blank" href=');
                 document.querySelectorAll(`code`).forEach((el) => {
                     hljs.highlightElement(el);
                 });
@@ -308,7 +308,7 @@ const load_conversation = async (conversation_id) => {
                 </div>
                 <div class="content">
                     ${item.role == "assistant"
-                        ? markdown.render(item.content)
+                        ? markdown.render(item.content).replace("<a href=", '<a target="_blank" href=')
                         : item.content
                     }
                 </div>
@@ -529,7 +529,7 @@ window.onload = async () => {
     load_settings_localstorage();
     setTheme();
 
-    conversations = 0;
+    let conversations = 0;
     for (let i = 0; i < localStorage.length; i++) {
         if (localStorage.key(i).startsWith("conversation:")) {
             conversations += 1;
@@ -548,7 +548,6 @@ window.onload = async () => {
         }
     }
 
-    // await load_models();
     await say_hello()
 
     message_input.addEventListener(`keydown`, async (evt) => {
@@ -593,64 +592,40 @@ const observer = new MutationObserver((mutationsList) => {
 
 observer.observe(message_input, { attributes: true });
 
-
-const load_models = async () => {
-    // models = localStorage.getItem('_models')
-
-    // if (models === null) {
-    //     response = await fetch('/backend-api/v2/models')
-    //     models = await response.json()
-    //     localStorage.setItem('_models', JSON.stringify(models))
-
-    // } else {
-    //     models = JSON.parse(models)
-    // }
-
-    models = [
-        "gpt-3.5-turbo",
-        "gpt-3.5-turbo-0613",
-        "gpt-3.5-turbo-16k",
-        "gpt-3.5-turbo-16k-0613",
-        "gpt-4",
-        "gpt-4-0613",
-        "gpt-4-32k",
-        "gpt-4-32k-0613",
-        "palm2",
-        "palm",
-        "google",
-        "google-bard",
-        "google-palm",
-        "bard",
-        "falcon-40b",
-        "falcon-7b",
-        "llama-13b",
-        "command-nightly",
-        "gpt-neox-20b",
-        "santacoder",
-        "bloom",
-        "flan-t5-xxl",
-        "code-davinci-002",
-        "text-ada-001",
-        "text-babbage-001",
-        "text-curie-001",
-        "text-davinci-002",
-        "text-davinci-003",
-        "llama70b-v2-chat",
-        "llama13b-v2-chat",
-        "llama7b-v2-chat",
-        "oasst-sft-1-pythia-12b",
-        "oasst-sft-4-pythia-12b-epoch-3.5",
-        "command-light-nightly"
-    ]
-
-    let MODELS_SELECT = document.getElementById('model');
+(async () => {
+    response = await fetch('/backend-api/v2/models')
+    models = await response.json()
 
+    let select = document.getElementById('model');
+    select.textContent = '';
+
+    let auto = document.createElement('option');
+    auto.value = '';
+    auto.text = 'Model: Default';
+    select.appendChild(auto);
 
     for (model of models) {
-        let model_info = document.createElement('option');
-        model_info.value = model
-        model_info.text = model
+        let option = document.createElement('option');
+        option.value = option.text = model;
+        select.appendChild(option);
+    }
+})();
 
-        MODELS_SELECT.appendChild(model_info);
+(async () => {
+    response = await fetch('/backend-api/v2/providers')
+    providers = await response.json()
+
+    let select = document.getElementById('provider');
+    select.textContent = '';
+
+    let auto = document.createElement('option');
+    auto.value = '';
+    auto.text = 'Provider: Auto';
+    select.appendChild(auto);
+
+    for (provider of providers) {
+        let option = document.createElement('option');
+        option.value = option.text = provider;
+        select.appendChild(option);
     }
-}
\ No newline at end of file
+})();
\ No newline at end of file
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 3d7bfedc..e1abb764 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -3,9 +3,8 @@ import g4f
 from flask import request
 from .internet import search
 from .config import special_instructions
-from .provider import get_provider
 
-g4f.logging = True
+g4f.debug.logging = True
 
 class Backend_Api:
     def __init__(self, app) -> None:
@@ -15,6 +14,10 @@ class Backend_Api:
                 'function': self.models,
                 'methods' : ['GET']
             },
+            '/backend-api/v2/providers': {
+                'function': self.providers,
+                'methods' : ['GET']
+            },
             '/backend-api/v2/conversation': {
                 'function': self._conversation,
                 'methods': ['POST']
@@ -37,6 +40,12 @@ class Backend_Api:
     def models(self):
         return g4f._all_models
 
+    def providers(self):
+        return [
+            provider.__name__ for provider in g4f.Provider.__providers__
+            if provider.working and provider is not g4f.Provider.RetryProvider
+        ]
+
     def _gen_title(self):
         return {
             'title': ''
@@ -47,26 +56,26 @@ class Backend_Api:
             #jailbreak = request.json['jailbreak']
            #internet_access = request.json['meta']['content']['internet_access']
            #conversation = request.json['meta']['content']['conversation']
-            prompt = request.json['meta']['content']['parts']
-            model = request.json['model']
-            provider = request.json.get('provider').split('g4f.Provider.')[1]
-
-            messages = prompt
-            print(messages)
+            messages = request.json['meta']['content']['parts']
+            model = request.json.get('model')
+            model = model if model else g4f.models.default
+            provider = request.json.get('provider', 'Auto').replace('g4f.Provider.', '')
+            provider = provider if provider != "Auto" else None
+            if provider != None:
+                provider = g4f.Provider.ProviderUtils.convert.get(provider)
 
-            def stream():
-                yield from g4f.ChatCompletion.create(
-                    model=model,
-                    provider=get_provider(provider),
-                    messages=messages,
-                    stream=True,
-                ) if provider else g4f.ChatCompletion.create(
-                    model=model, messages=messages, stream=True
-                )
+            response = g4f.ChatCompletion.create(
+                model=model,
+                provider=provider,
+                messages=messages,
+                stream=True,
+                ignore_stream_and_auth=True
+            )
 
-            return self.app.response_class(stream(), mimetype='text/event-stream')
+            return self.app.response_class(response, mimetype='text/event-stream')
 
-        except Exception as e:
+        except Exception as e:
+            print(e)
             return {
                 'code'   : 'G4F_ERROR',
                 '_action': '_ask',
diff --git a/g4f/gui/server/provider.py b/g4f/gui/server/provider.py
deleted file mode 100644
index 8c7ac755..00000000
--- a/g4f/gui/server/provider.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from __future__ import annotations
-
-import g4f
-from g4f import BaseProvider
-
-
-def get_provider(provider: str) -> BaseProvider | None:
-    if not isinstance(provider, str):
-        return None
-    print(provider)
-    if provider == 'g4f.Provider.Auto':
-        return None
-
-    return g4f.Provider.ProviderUtils.convert.get(provider)
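The two GET routes registered in backend.py above are what the reworked dropdowns in chat.v1.js consume, and they can be queried directly as well. A sketch, assuming the GUI container is reachable on localhost:8080 as mapped in docker-compose.yml:

    import requests

    base = "http://localhost:8080"  # host port for the GUI in docker-compose.yml

    models = requests.get(f"{base}/backend-api/v2/models").json()
    providers = requests.get(f"{base}/backend-api/v2/providers").json()

    # providers lists only working provider class names (RetryProvider excluded),
    # matching what the "Provider: Auto" dropdown is filled with.
    print(len(models), "models /", len(providers), "providers")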
diff --git a/g4f/models.py b/g4f/models.py
index 2f86891d..cf24ff52 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -7,6 +7,7 @@ from .Provider import (
     ChatgptDemoAi,
     ChatAnywhere,
     ChatgptNext,
+    HuggingChat,
     GptForLove,
     ChatgptAi,
     DeepInfra,
@@ -100,7 +101,7 @@ llama2_13b = Model(
 llama2_70b = Model(
     name          = "meta-llama/Llama-2-70b-chat-hf",
     base_provider = "huggingface",
-    best_provider = RetryProvider([Llama2, DeepInfra]))
+    best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat]))
 
 # Bard
 palm = Model(
@@ -274,6 +275,8 @@ class ModelUtils:
         'gpt-3.5-turbo-16k'      : gpt_35_turbo_16k,
         'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
 
+        'gpt-3.5-long': gpt_35_long,
+
         # gpt-4
         'gpt-4'          : gpt_4,
         'gpt-4-0613'     : gpt_4_0613,
diff --git a/g4f/webdriver.py b/g4f/webdriver.py
index 288eed0e..f0fa1fba 100644
--- a/g4f/webdriver.py
+++ b/g4f/webdriver.py
@@ -4,6 +4,8 @@ import time
 from platformdirs import user_config_dir
 from selenium.webdriver.remote.webdriver import WebDriver
 from undetected_chromedriver import Chrome, ChromeOptions
+import os.path
+from . import debug
 
 try:
     from pyvirtualdisplay import Display
@@ -19,12 +21,16 @@ def get_browser(
 ) -> WebDriver:
     if user_data_dir == None:
         user_data_dir = user_config_dir("g4f")
+    if debug.logging:
+        print("Open browser with config dir:", user_data_dir)
     if not options:
         options = ChromeOptions()
-    options.add_argument("window-size=1920,1080");
     if proxy:
         options.add_argument(f'--proxy-server={proxy}')
-    return Chrome(options=options, user_data_dir=user_data_dir, headless=headless)
+    driver = '/usr/bin/chromedriver'
+    if not os.path.isfile(driver):
+        driver = None
+    return Chrome(options=options, user_data_dir=user_data_dir, driver_executable_path=driver, headless=headless)
 
 class WebDriverSession():
     def __init__(
diff --git a/ptest.py b/ptest.py
deleted file mode 100644
index 38dd2aa9..00000000
--- a/ptest.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import requests, json
-
-
-headers = {
-    'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-    'Cache-Control': 'no-cache',
-    'Connection': 'keep-alive',
-    'Content-Type': 'application/json',
-    'Origin': 'https://deepinfra.com',
-    'Pragma': 'no-cache',
-    'Referer': 'https://deepinfra.com/',
-    'Sec-Fetch-Dest': 'empty',
-    'Sec-Fetch-Mode': 'cors',
-    'Sec-Fetch-Site': 'same-site',
-    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-    'X-Deepinfra-Source': 'web-embed',
-    'accept': 'text/event-stream',
-    'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-    'sec-ch-ua-mobile': '?0',
-    'sec-ch-ua-platform': '"macOS"',
-}
-
-json_data = json.dumps({
-    'model': 'meta-llama/Llama-2-70b-chat-hf',
-    'messages': [
-        {
-            'role': 'user',
-            'content': 'what is the meaning of life ?',
-        },
-    ],
-    'stream': True}, separators=(',', ':'))
-
-response = requests.post('https://api.deepinfra.com/v1/openai/chat/completions',
-    headers=headers, data=json_data, stream=True)
-
-response.raise_for_status()
-first = True
-
-for line in response.iter_content(chunk_size=1024):
-    if line.startswith(b"data: [DONE]"):
-        break
-
-    elif line.startswith(b"data: "):
-        chunk = json.loads(line[6:])["choices"][0]["delta"].get("content")
-
-        if chunk:
-            if first:
-                chunk = chunk.lstrip()
-                if chunk:
-                    first = False
-
-            print(chunk)
-
-# Note: json_data will not be serialized by requests
-# exactly as it was in the original request.
-#data = '{"model":"meta-llama/Llama-2-70b-chat-hf","messages":[{"role":"user","content":"what is the meaning of life ?"},{"role":"assistant","content":" The meaning of life is a question that has puzzled philosophers, religious leaders, scientists, and many others for centuries. There are many different perspectives on this question, and there is no one definitive answer. However, here are some possible approaches to understanding the meaning of life:\\n\\n1. Religious or spiritual perspective: Many people believe that the meaning of life is to fulfill a divine or spiritual purpose.
According to this view, our lives have a higher purpose, which is to serve a deity or follow a set of moral principles. The meaning of life is then found in fulfilling this purpose, whether it be through prayer, service, or good works.\\n2. Personal fulfillment: Some people believe that the meaning of life is to find personal fulfillment and happiness. According to this view, we should pursue our passions and interests, cultivate strong relationships, and strive to live a life that is rich in experience and personal growth.\\n3. Social or cultural perspective: Others argue that the meaning of life is tied to our social and cultural contexts. From this perspective, the meaning of life is to contribute to the greater good of society, to uphold certain values or traditions, or to leave a lasting legacy.\\n4. Existentialism: Existentialists believe that the meaning of life is not predetermined or inherent, but rather something that each individual must create for themselves. According to this view, life has no inherent meaning, but we can give it meaning through our choices, actions, and experiences.\\n5. Scientific perspective: Some people believe that the meaning of life can be understood through scientific inquiry. From this perspective, the meaning of life might be seen as a product of evolution, a byproduct of the natural processes that gave rise to consciousness and intelligence.\\n6. Humanistic perspective: Humanists believe that the meaning of life is to promote human flourishing and well-being. According to this view, we should strive to create a world where individuals can live fulfilling lives, pursue their goals and dreams, and reach their full potential.\\n7. Nihilistic perspective: A minority perspective is that life has no inherent meaning and that any attempt to impose meaning on it is ultimately futile. This view is often associated with nihilism, which argues that traditional beliefs and values are baseless and that morality is an illusion.\\n8. Mystical perspective: Finally, some people believe that the meaning of life cannot be expressed in words or concepts, but can only be experienced directly through mystical practices such as meditation or psychedelic experiences.\\n\\nUltimately, the meaning of life may be something that each person must determine for themselves based on their own unique experiences, values, and beliefs. It could also be said that life has multiple meanings, and that these meanings can change throughout our lives as we grow, learn, and evolve as individuals."},{"role":"user","content":"welll well well"},{"role":"assistant","content":" I\'m glad you\'re feeling enthusiastic! \\"Well\\" can be used in various ways to express excitement or surprise, such as:\\n\\n* \\"Well, well, well!\\" - This phrase is often used to express surprise or intrigue, especially when something unexpected happens or when you\'re presented with new information.\\n* \\"Look at you, well done!\\" - Here, \\"well\\" is used to convey approval or admiration for someone\'s achievement or effort.\\n* \\"You think you\'re so smart, don\'t you?\\" - In this sentence, \\"well\\" is used to express sarcasm or skepticism towards someone who thinks they know everything.\\n\\nHowever, I\'d like to point out that using \\"well\\" too frequently can come across as insincere or filler words, especially in formal writing or professional settings. It\'s important to use language that accurately conveys your intended message without relying on unnecessary filler words. 
Is there anything else you\'d like me to help you with?"},{"role":"user","content":"nice"}],"stream":true}'
-#response = requests.post('https://api.deepinfra.com/v1/openai/chat/completions', headers=headers, data=data)
\ No newline at end of file
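The deleted ptest.py hit the DeepInfra endpoint by hand; the same request now goes through the library itself, where llama2_70b falls back across Llama2, DeepInfra and the newly added HuggingChat backend (see the g4f/models.py change above). A hedged equivalent of the removed script:

    import g4f

    # The RetryProvider behind llama2_70b retries Llama2, DeepInfra and
    # HuggingChat until one of them returns a response.
    for chunk in g4f.ChatCompletion.create(
        model=g4f.models.llama2_70b,
        messages=[{"role": "user", "content": "what is the meaning of life ?"}],
        stream=True,
        ignore_stream_and_auth=True,
    ):
        print(chunk, end="", flush=True)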