From e5b7f72b719814ffa2748e8e8ed1c6713a24e1a6 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Thu, 22 Feb 2024 00:16:58 +0100
Subject: Move some modules, create providers dir
Set min version for duckduckgo
Make duckduckgo search async
Remove get_latest_version

---
 README.md                          |   4 +-
 etc/tool/readme_table.py           |   2 +-
 etc/unittest/__main__.py           |   2 +
 etc/unittest/backend.py            |  14 +-
 etc/unittest/client.py             |   2 +-
 etc/unittest/include.py            |  18 ++-
 etc/unittest/mocks.py              |   2 +-
 g4f/Provider/You.py                |   4 +-
 g4f/Provider/__init__.py           |  12 +-
 g4f/Provider/base_provider.py      | 283 +------------------------------------
 g4f/Provider/bing/create_images.py |   4 +-
 g4f/Provider/create_images.py      | 154 --------------------
 g4f/Provider/helper.py             |  64 +--------
 g4f/Provider/retry_provider.py     | 118 ----------------
 g4f/__init__.py                    |   4 +-
 g4f/base_provider.py               | 117 ---------------
 g4f/client.py                      |  17 ++-
 g4f/debug.py                       |   2 +-
 g4f/defaults.py                    |  13 --
 g4f/gui/server/backend.py          |   2 +-
 g4f/gui/server/internet.py         |  18 +--
 g4f/image.py                       |  23 +--
 g4f/models.py                      |   2 +
 g4f/providers/base_provider.py     | 280 ++++++++++++++++++++++++++++++++++++
 g4f/providers/create_images.py     | 155 ++++++++++++++++++++
 g4f/providers/helper.py            |  61 ++++++++
 g4f/providers/retry_provider.py    | 119 ++++++++++++++++
 g4f/providers/types.py             | 117 +++++++++++++++
 g4f/requests.py                    |  56 --------
 g4f/requests/__init__.py           |  56 ++++++++
 g4f/requests/aiohttp.py            |  30 ++++
 g4f/requests/curl_cffi.py          |  77 ++++++++++
 g4f/requests/defaults.py           |  13 ++
 g4f/requests_aiohttp.py            |  30 ----
 g4f/requests_curl_cffi.py          |  77 ----------
 g4f/version.py                     |  31 ++--
 requirements.txt                   |   4 +-
 setup.py                           |   4 +-
 38 files changed, 1002 insertions(+), 989 deletions(-)
 delete mode 100644 g4f/Provider/create_images.py
 delete mode 100644 g4f/Provider/retry_provider.py
 delete mode 100644 g4f/base_provider.py
 delete mode 100644 g4f/defaults.py
 create mode 100644 g4f/providers/base_provider.py
 create mode 100644 g4f/providers/create_images.py
 create mode 100644 g4f/providers/helper.py
 create mode 100644 g4f/providers/retry_provider.py
 create mode 100644 g4f/providers/types.py
 delete mode 100644 g4f/requests.py
 create mode 100644 g4f/requests/__init__.py
 create mode 100644 g4f/requests/aiohttp.py
 create mode 100644 g4f/requests/curl_cffi.py
 create mode 100644 g4f/requests/defaults.py
 delete mode 100644 g4f/requests_aiohttp.py
 delete mode 100644 g4f/requests_curl_cffi.py

diff --git a/README.md b/README.md
index 632100b3..7cdfcec5 100644
--- a/README.md
+++ b/README.md
@@ -167,7 +167,7 @@ To start the web interface, type the following code in Python:
 from g4f.gui import run_gui
 run_gui()
 ```
-or type in command line:
+or execute the following command:
 ```bash
 python -m g4f.cli gui -port 8080 -debug
 ```
@@ -182,7 +182,7 @@ See: [/docs/interference](/docs/interference.md)
 
 ##### Cookies / Access Token
 
-For generating images with Bing and for the OpenAi Chat you need cookies or a token from your browser session. From Bing you need the "_U" cookie and from OpenAI you need the "access_token". You can pass the cookies / the access token in the create function or you use the `set_cookies` setter before you run G4F:
+For generating images with Bing and for the OpenAI Chat you need cookies or a token from your browser session. From Bing you need the "_U" cookie and from OpenAI you need the "access_token". You can pass the cookies or the access token in the create function, or use the `set_cookies` setter before you run G4F:
 
 ```python
 from g4f.cookies import set_cookies
diff --git a/etc/tool/readme_table.py b/etc/tool/readme_table.py
index d5a409ec..293d1eb3 100644
--- a/etc/tool/readme_table.py
+++ b/etc/tool/readme_table.py
@@ -3,7 +3,7 @@ from urllib.parse import urlparse
 import asyncio
 
 from g4f import models, ChatCompletion
-from g4f.base_provider import BaseProvider, BaseRetryProvider, ProviderType
+from g4f.providers.types import BaseRetryProvider, ProviderType
 from etc.testing._providers import get_providers
 
 from g4f import debug
diff --git a/etc/unittest/__main__.py b/etc/unittest/__main__.py
index a133343e..06b2dff5 100644
--- a/etc/unittest/__main__.py
+++ b/etc/unittest/__main__.py
@@ -3,5 +3,7 @@ from .asyncio import *
 from .backend import *
 from .main import *
 from .model import *
+from .client import *
+from .include import *
 
 unittest.main()
\ No newline at end of file
diff --git a/etc/unittest/backend.py b/etc/unittest/backend.py
index e4a7b3ab..846c3554 100644
--- a/etc/unittest/backend.py
+++ b/etc/unittest/backend.py
@@ -1,9 +1,11 @@
 import unittest
+# import asyncio
 from unittest.mock import MagicMock
 from .mocks import ProviderMock
 import g4f
 try:
     from g4f.gui.server.backend import Backend_Api, get_error_message
+    # from g4f.gui.server.internet import search
     has_requirements = True
 except:
     has_requirements = False
@@ -16,10 +18,10 @@ class TestBackendApi(unittest.TestCase):
         self.app = MagicMock()
         self.api = Backend_Api(self.app)
 
-    def test_version(self):
-        response = self.api.get_version()
-        self.assertIn("version", response)
-        self.assertIn("latest_version", response)
+    # def test_version(self):
+    #     response = self.api.get_version()
+    #     self.assertIn("version", response)
+    #     self.assertIn("latest_version", response)
 
     def test_get_models(self):
         response = self.api.get_models()
@@ -31,6 +33,10 @@ class TestBackendApi(unittest.TestCase):
         self.assertIsInstance(response, list)
         self.assertTrue(len(response) > 0)
 
+    # def test_search(self):
+    #     result = asyncio.run(search("Hello"))
+    #     self.assertEqual(5, len(result))
+
 class TestUtilityFunctions(unittest.TestCase):
 
     def setUp(self):
diff --git a/etc/unittest/client.py b/etc/unittest/client.py
index c63edbd2..2bc00c2e 100644
--- a/etc/unittest/client.py
+++ b/etc/unittest/client.py
@@ -43,7 +43,7 @@ class TestPassModel(unittest.TestCase):
         for chunk in response:
             self.assertEqual(chunk.choices[0].delta.content, "You ")
 
-    def no_test_stop(self):
+    def test_stop(self):
         client = Client(provider=YieldProviderMock)
         messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]]
         response = client.chat.completions.create(messages, "Hello", stop=["and"])
diff --git a/etc/unittest/include.py b/etc/unittest/include.py
index e67fd5a7..9695bc5b 100644
--- a/etc/unittest/include.py
+++ b/etc/unittest/include.py
@@ -1,11 +1,15 @@
-import sys
-import pathlib
+import unittest
 
-sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
+class TestImport(unittest.TestCase):
 
-import g4f
+    def test_get_cookies(self):
+        from g4f import get_cookies as get_cookies_alias
+        from g4f.cookies import get_cookies
+        self.assertEqual(get_cookies_alias, get_cookies)
 
-g4f.debug.logging = False
-g4f.debug.version_check = False
+    def test_requests(self):
+        from g4f.requests import StreamSession
+        self.assertIsInstance(StreamSession, type)
 
-DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}]
\ No newline at end of file
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/etc/unittest/mocks.py b/etc/unittest/mocks.py
index 8a67aaf7..102730fa 100644
--- a/etc/unittest/mocks.py
+++ b/etc/unittest/mocks.py
@@ -1,4 +1,4 @@
-from g4f.Provider.base_provider import AbstractProvider, AsyncProvider, AsyncGeneratorProvider
+from g4f.providers.base_provider import AbstractProvider, AsyncProvider, AsyncGeneratorProvider
 
 class ProviderMock(AbstractProvider):
     working = True
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index ece1d340..34130c47 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -7,9 +7,9 @@ from aiohttp import ClientSession, FormData
 
 from ..typing import AsyncGenerator, Messages, ImageType, Cookies
 from .base_provider import AsyncGeneratorProvider
-from .helper import get_connector, format_prompt
+from ..providers.helper import get_connector, format_prompt
 from ..image import to_bytes
-from ..defaults import DEFAULT_HEADERS
+from ..requests.defaults import DEFAULT_HEADERS
 
 class You(AsyncGeneratorProvider):
     url = "https://you.com"
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 68b62fd9..bad77e9b 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -1,9 +1,10 @@
 from __future__ import annotations
-from ..base_provider import BaseProvider, ProviderType
-from .retry_provider import RetryProvider
-from .base_provider import AsyncProvider, AsyncGeneratorProvider
-from .create_images import CreateImagesProvider
+from ..providers.types import BaseProvider, ProviderType
+from ..providers.retry_provider import RetryProvider
+from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
+from ..providers.create_images import CreateImagesProvider
+
 from .deprecated import *
 from .selenium import *
 from .needs_auth import *
@@ -15,6 +16,7 @@ from .AItianhu import AItianhu
 from .Aura import Aura
 from .Bestim import Bestim
 from .Bing import Bing
+from .BingCreateImages import BingCreateImages
 from .ChatAnywhere import ChatAnywhere
 from .ChatBase import ChatBase
 from .ChatForAi import ChatForAi
@@ -53,8 +55,6 @@ from .Vercel import Vercel
 from .Ylokh import Ylokh
 from .You import You
 
-from .BingCreateImages import BingCreateImages
-
 import sys
 
 __modules__: list = [
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index 8659f506..8e761dba 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -1,281 +1,2 @@
-from __future__ import annotations
-
-import sys
-import asyncio
-from asyncio import AbstractEventLoop
-from concurrent.futures import ThreadPoolExecutor
-from abc import abstractmethod
-from inspect import signature, Parameter
-from .helper import get_cookies, format_prompt
-from ..typing import CreateResult, AsyncResult, Messages, Union
-from ..base_provider import BaseProvider
-from ..errors import NestAsyncioError, ModelNotSupportedError
-from .. import debug
-
-if sys.version_info < (3, 10):
-    NoneType = type(None)
-else:
-    from types import NoneType
-
-# Set Windows event loop policy for better compatibility with asyncio and curl_cffi
-if sys.platform == 'win32':
-    if isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy):
-        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
-
-def get_running_loop() -> Union[AbstractEventLoop, None]:
-    try:
-        loop = asyncio.get_running_loop()
-        if not hasattr(loop.__class__, "_nest_patched"):
-            raise NestAsyncioError(
-                'Use "create_async" instead of "create" function in a running event loop. Or use "nest_asyncio" package.'
-            )
-        return loop
-    except RuntimeError:
-        pass
-
-class AbstractProvider(BaseProvider):
-    """
-    Abstract class for providing asynchronous functionality to derived classes.
-    """
-
-    @classmethod
-    async def create_async(
-        cls,
-        model: str,
-        messages: Messages,
-        *,
-        loop: AbstractEventLoop = None,
-        executor: ThreadPoolExecutor = None,
-        **kwargs
-    ) -> str:
-        """
-        Asynchronously creates a result based on the given model and messages.
-
-        Args:
-            cls (type): The class on which this method is called.
-            model (str): The model to use for creation.
-            messages (Messages): The messages to process.
-            loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
-            executor (ThreadPoolExecutor, optional): The executor for running async tasks. Defaults to None.
-            **kwargs: Additional keyword arguments.
-
-        Returns:
-            str: The created result as a string.
-        """
-        loop = loop or asyncio.get_running_loop()
-
-        def create_func() -> str:
-            return "".join(cls.create_completion(model, messages, False, **kwargs))
-
-        return await asyncio.wait_for(
-            loop.run_in_executor(executor, create_func),
-            timeout=kwargs.get("timeout")
-        )
-
-    @classmethod
-    @property
-    def params(cls) -> str:
-        """
-        Returns the parameters supported by the provider.
-
-        Args:
-            cls (type): The class on which this property is called.
-
-        Returns:
-            str: A string listing the supported parameters.
-        """
-        sig = signature(
-            cls.create_async_generator if issubclass(cls, AsyncGeneratorProvider) else
-            cls.create_async if issubclass(cls, AsyncProvider) else
-            cls.create_completion
-        )
-
-        def get_type_name(annotation: type) -> str:
-            return annotation.__name__ if hasattr(annotation, "__name__") else str(annotation)
-
-        args = ""
-        for name, param in sig.parameters.items():
-            if name in ("self", "kwargs") or (name == "stream" and not cls.supports_stream):
-                continue
-            args += f"\n    {name}"
-            args += f": {get_type_name(param.annotation)}" if param.annotation is not Parameter.empty else ""
-            args += f' = "{param.default}"' if param.default == "" else f" = {param.default}" if param.default is not Parameter.empty else ""
-
-        return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"
-
-
-class AsyncProvider(AbstractProvider):
-    """
-    Provides asynchronous functionality for creating completions.
-    """
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool = False,
-        **kwargs
-    ) -> CreateResult:
-        """
-        Creates a completion result synchronously.
-
-        Args:
-            cls (type): The class on which this method is called.
-            model (str): The model to use for creation.
-            messages (Messages): The messages to process.
-            stream (bool): Indicates whether to stream the results. Defaults to False.
-            loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
-            **kwargs: Additional keyword arguments.
-
-        Returns:
-            CreateResult: The result of the completion creation.
-        """
-        get_running_loop()
-        yield asyncio.run(cls.create_async(model, messages, **kwargs))
-
-    @staticmethod
-    @abstractmethod
-    async def create_async(
-        model: str,
-        messages: Messages,
-        **kwargs
-    ) -> str:
-        """
-        Abstract method for creating asynchronous results.
-
-        Args:
-            model (str): The model to use for creation.
-            messages (Messages): The messages to process.
-            **kwargs: Additional keyword arguments.
-
-        Raises:
-            NotImplementedError: If this method is not overridden in derived classes.
-
-        Returns:
-            str: The created result as a string.
-        """
-        raise NotImplementedError()
-
-
-class AsyncGeneratorProvider(AsyncProvider):
-    """
-    Provides asynchronous generator functionality for streaming results.
-    """
-    supports_stream = True
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool = True,
-        **kwargs
-    ) -> CreateResult:
-        """
-        Creates a streaming completion result synchronously.
-
-        Args:
-            cls (type): The class on which this method is called.
-            model (str): The model to use for creation.
-            messages (Messages): The messages to process.
-            stream (bool): Indicates whether to stream the results. Defaults to True.
-            loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
-            **kwargs: Additional keyword arguments.
-
-        Returns:
-            CreateResult: The result of the streaming completion creation.
-        """
-        loop = get_running_loop()
-        new_loop = False
-        if not loop:
-            loop = asyncio.new_event_loop()
-            asyncio.set_event_loop(loop)
-            new_loop = True
-
-        generator = cls.create_async_generator(model, messages, stream=stream, **kwargs)
-        gen = generator.__aiter__()
-
-        # Fix for RuntimeError: async generator ignored GeneratorExit
-        async def await_callback(callback):
-            return await callback()
-
-        try:
-            while True:
-                yield loop.run_until_complete(await_callback(gen.__anext__))
-        except StopAsyncIteration:
-            ...
-        # Fix for: ResourceWarning: unclosed event loop
-        finally:
-            if new_loop:
-                loop.close()
-                asyncio.set_event_loop(None)
-
-    @classmethod
-    async def create_async(
-        cls,
-        model: str,
-        messages: Messages,
-        **kwargs
-    ) -> str:
-        """
-        Asynchronously creates a result from a generator.
-
-        Args:
-            cls (type): The class on which this method is called.
-            model (str): The model to use for creation.
-            messages (Messages): The messages to process.
-            **kwargs: Additional keyword arguments.
-
-        Returns:
-            str: The created result as a string.
-        """
-        return "".join([
-            chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)
-            if not isinstance(chunk, Exception)
-        ])
-
-    @staticmethod
-    @abstractmethod
-    async def create_async_generator(
-        model: str,
-        messages: Messages,
-        stream: bool = True,
-        **kwargs
-    ) -> AsyncResult:
-        """
-        Abstract method for creating an asynchronous generator.
-
-        Args:
-            model (str): The model to use for creation.
-            messages (Messages): The messages to process.
-            stream (bool): Indicates whether to stream the results. Defaults to True.
-            **kwargs: Additional keyword arguments.
-
-        Raises:
-            NotImplementedError: If this method is not overridden in derived classes.
-
-        Returns:
-            AsyncResult: An asynchronous generator yielding results.
- """ - raise NotImplementedError() - -class ProviderModelMixin: - default_model: str - models: list[str] = [] - model_aliases: dict[str, str] = {} - - @classmethod - def get_models(cls) -> list[str]: - return cls.models - - @classmethod - def get_model(cls, model: str) -> str: - if not model: - model = cls.default_model - elif model in cls.model_aliases: - model = cls.model_aliases[model] - elif model not in cls.get_models(): - raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}") - debug.last_model = model - return model \ No newline at end of file +from ..providers.base_provider import * +from .helper import get_cookies, format_prompt \ No newline at end of file diff --git a/g4f/Provider/bing/create_images.py b/g4f/Provider/bing/create_images.py index 7b82dc56..f6a8a372 100644 --- a/g4f/Provider/bing/create_images.py +++ b/g4f/Provider/bing/create_images.py @@ -17,9 +17,9 @@ try: except ImportError: has_requirements = False -from ..create_images import CreateImagesProvider +from ...providers.create_images import CreateImagesProvider from ..helper import get_connector -from ...base_provider import ProviderType +from ...providers.types import ProviderType from ...errors import MissingRequirementsError from ...webdriver import WebDriver, get_driver_cookies, get_browser diff --git a/g4f/Provider/create_images.py b/g4f/Provider/create_images.py deleted file mode 100644 index 2ca92432..00000000 --- a/g4f/Provider/create_images.py +++ /dev/null @@ -1,154 +0,0 @@ -from __future__ import annotations - -import re -import asyncio -from .. import debug -from ..typing import CreateResult, Messages -from ..base_provider import BaseProvider, ProviderType - -system_message = """ -You can generate images, pictures, photos or img with the DALL-E 3 image generator. -To generate an image with a prompt, do this: - - - -Never use own image links. Don't wrap it in backticks. -It is important to use a only a img tag with a prompt. - - -""" - -class CreateImagesProvider(BaseProvider): - """ - Provider class for creating images based on text prompts. - - This provider handles image creation requests embedded within message content, - using provided image creation functions. - - Attributes: - provider (ProviderType): The underlying provider to handle non-image related tasks. - create_images (callable): A function to create images synchronously. - create_images_async (callable): A function to create images asynchronously. - system_message (str): A message that explains the image creation capability. - include_placeholder (bool): Flag to determine whether to include the image placeholder in the output. - __name__ (str): Name of the provider. - url (str): URL of the provider. - working (bool): Indicates if the provider is operational. - supports_stream (bool): Indicates if the provider supports streaming. - """ - - def __init__( - self, - provider: ProviderType, - create_images: callable, - create_async: callable, - system_message: str = system_message, - include_placeholder: bool = True - ) -> None: - """ - Initializes the CreateImagesProvider. - - Args: - provider (ProviderType): The underlying provider. - create_images (callable): Function to create images synchronously. - create_async (callable): Function to create images asynchronously. - system_message (str, optional): System message to be prefixed to messages. Defaults to a predefined message. - include_placeholder (bool, optional): Whether to include image placeholders in the output. Defaults to True. 
- """ - self.provider = provider - self.create_images = create_images - self.create_images_async = create_async - self.system_message = system_message - self.include_placeholder = include_placeholder - self.__name__ = provider.__name__ - self.url = provider.url - self.working = provider.working - self.supports_stream = provider.supports_stream - - def create_completion( - self, - model: str, - messages: Messages, - stream: bool = False, - **kwargs - ) -> CreateResult: - """ - Creates a completion result, processing any image creation prompts found within the messages. - - Args: - model (str): The model to use for creation. - messages (Messages): The messages to process, which may contain image prompts. - stream (bool, optional): Indicates whether to stream the results. Defaults to False. - **kwargs: Additional keywordarguments for the provider. - - Yields: - CreateResult: Yields chunks of the processed messages, including image data if applicable. - - Note: - This method processes messages to detect image creation prompts. When such a prompt is found, - it calls the synchronous image creation function and includes the resulting image in the output. - """ - messages.insert(0, {"role": "system", "content": self.system_message}) - buffer = "" - for chunk in self.provider.create_completion(model, messages, stream, **kwargs): - if isinstance(chunk, str) and buffer or "<" in chunk: - buffer += chunk - if ">" in buffer: - match = re.search(r'', buffer) - if match: - placeholder, prompt = match.group(0), match.group(1) - start, append = buffer.split(placeholder, 1) - if start: - yield start - if self.include_placeholder: - yield placeholder - if debug.logging: - print(f"Create images with prompt: {prompt}") - yield from self.create_images(prompt) - if append: - yield append - else: - yield buffer - buffer = "" - else: - yield chunk - - async def create_async( - self, - model: str, - messages: Messages, - **kwargs - ) -> str: - """ - Asynchronously creates a response, processing any image creation prompts found within the messages. - - Args: - model (str): The model to use for creation. - messages (Messages): The messages to process, which may contain image prompts. - **kwargs: Additional keyword arguments for the provider. - - Returns: - str: The processed response string, including asynchronously generated image data if applicable. - - Note: - This method processes messages to detect image creation prompts. When such a prompt is found, - it calls the asynchronous image creation function and includes the resulting image in the output. 
- """ - messages.insert(0, {"role": "system", "content": self.system_message}) - response = await self.provider.create_async(model, messages, **kwargs) - matches = re.findall(r'()', response) - results = [] - placeholders = [] - for placeholder, prompt in matches: - if placeholder not in placeholders: - if debug.logging: - print(f"Create images with prompt: {prompt}") - results.append(self.create_images_async(prompt)) - placeholders.append(placeholder) - results = await asyncio.gather(*results) - for idx, result in enumerate(results): - placeholder = placeholder[idx] - if self.include_placeholder: - result = placeholder + result - response = response.replace(placeholder, result) - return response \ No newline at end of file diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py index 35480255..da5b99f6 100644 --- a/g4f/Provider/helper.py +++ b/g4f/Provider/helper.py @@ -1,62 +1,2 @@ -from __future__ import annotations - -import random -import secrets -import string -from aiohttp import BaseConnector - -from ..typing import Messages, Optional -from ..errors import MissingRequirementsError -from ..cookies import get_cookies - -def format_prompt(messages: Messages, add_special_tokens=False) -> str: - """ - Format a series of messages into a single string, optionally adding special tokens. - - Args: - messages (Messages): A list of message dictionaries, each containing 'role' and 'content'. - add_special_tokens (bool): Whether to add special formatting tokens. - - Returns: - str: A formatted string containing all messages. - """ - if not add_special_tokens and len(messages) <= 1: - return messages[0]["content"] - formatted = "\n".join([ - f'{message["role"].capitalize()}: {message["content"]}' - for message in messages - ]) - return f"{formatted}\nAssistant:" - -def get_random_string(length: int = 10) -> str: - """ - Generate a random string of specified length, containing lowercase letters and digits. - - Args: - length (int, optional): Length of the random string to generate. Defaults to 10. - - Returns: - str: A random string of the specified length. - """ - return ''.join( - random.choice(string.ascii_lowercase + string.digits) - for _ in range(length) - ) - -def get_random_hex() -> str: - """ - Generate a random hexadecimal string of a fixed length. - - Returns: - str: A random hexadecimal string of 32 characters (16 bytes). - """ - return secrets.token_hex(16).zfill(32) - -def get_connector(connector: BaseConnector = None, proxy: str = None) -> Optional[BaseConnector]: - if proxy and not connector: - try: - from aiohttp_socks import ProxyConnector - connector = ProxyConnector.from_url(proxy) - except ImportError: - raise MissingRequirementsError('Install "aiohttp_socks" package for proxy support') - return connector \ No newline at end of file +from ..providers.helper import * +from ..cookies import get_cookies \ No newline at end of file diff --git a/g4f/Provider/retry_provider.py b/g4f/Provider/retry_provider.py deleted file mode 100644 index 9cc026fc..00000000 --- a/g4f/Provider/retry_provider.py +++ /dev/null @@ -1,118 +0,0 @@ -from __future__ import annotations - -import asyncio -import random -from ..typing import CreateResult, Messages -from ..base_provider import BaseRetryProvider -from .. import debug -from ..errors import RetryProviderError, RetryNoProviderError - -class RetryProvider(BaseRetryProvider): - """ - A provider class to handle retries for creating completions with different providers. - - Attributes: - providers (list): A list of provider instances. 
-        shuffle (bool): A flag indicating whether to shuffle providers before use.
-        exceptions (dict): A dictionary to store exceptions encountered during retries.
-        last_provider (BaseProvider): The last provider that was used.
-    """
-
-    def create_completion(
-        self,
-        model: str,
-        messages: Messages,
-        stream: bool = False,
-        **kwargs
-    ) -> CreateResult:
-        """
-        Create a completion using available providers, with an option to stream the response.
-
-        Args:
-            model (str): The model to be used for completion.
-            messages (Messages): The messages to be used for generating completion.
-            stream (bool, optional): Flag to indicate if the response should be streamed. Defaults to False.
-
-        Yields:
-            CreateResult: Tokens or results from the completion.
-
-        Raises:
-            Exception: Any exception encountered during the completion process.
-        """
-        providers = [p for p in self.providers if stream and p.supports_stream] if stream else self.providers
-        if self.shuffle:
-            random.shuffle(providers)
-
-        self.exceptions = {}
-        started: bool = False
-        for provider in providers:
-            self.last_provider = provider
-            try:
-                if debug.logging:
-                    print(f"Using {provider.__name__} provider")
-                for token in provider.create_completion(model, messages, stream, **kwargs):
-                    yield token
-                    started = True
-                if started:
-                    return
-            except Exception as e:
-                self.exceptions[provider.__name__] = e
-                if debug.logging:
-                    print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
-                if started:
-                    raise e
-
-        self.raise_exceptions()
-
-    async def create_async(
-        self,
-        model: str,
-        messages: Messages,
-        **kwargs
-    ) -> str:
-        """
-        Asynchronously create a completion using available providers.
-
-        Args:
-            model (str): The model to be used for completion.
-            messages (Messages): The messages to be used for generating completion.
-
-        Returns:
-            str: The result of the asynchronous completion.
-
-        Raises:
-            Exception: Any exception encountered during the asynchronous completion process.
-        """
-        providers = self.providers
-        if self.shuffle:
-            random.shuffle(providers)
-
-        self.exceptions = {}
-        for provider in providers:
-            self.last_provider = provider
-            try:
-                return await asyncio.wait_for(
-                    provider.create_async(model, messages, **kwargs),
-                    timeout=kwargs.get("timeout", 60)
-                )
-            except Exception as e:
-                self.exceptions[provider.__name__] = e
-                if debug.logging:
-                    print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
-
-        self.raise_exceptions()
-
-    def raise_exceptions(self) -> None:
-        """
-        Raise a combined exception if any occurred during retries.
-
-        Raises:
-            RetryProviderError: If any provider encountered an exception.
-            RetryNoProviderError: If no provider is found.
-        """
-        if self.exceptions:
-            raise RetryProviderError("RetryProvider failed:\n" + "\n".join([
-                f"{p}: {exception.__class__.__name__}: {exception}" for p, exception in self.exceptions.items()
-            ]))
-
-        raise RetryNoProviderError("No provider found")
\ No newline at end of file
diff --git a/g4f/__init__.py b/g4f/__init__.py
index ec4a1743..6c8e100e 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -8,8 +8,8 @@ from .Provider import AsyncGeneratorProvider, ProviderUtils
 from .typing import Messages, CreateResult, AsyncResult, Union
 from .cookies import get_cookies, set_cookies
 from . import debug, version
-from .base_provider import BaseRetryProvider, ProviderType
-from .Provider.base_provider import ProviderModelMixin
+from .providers.types import BaseRetryProvider, ProviderType
+from .providers.base_provider import ProviderModelMixin
 
 def get_model_and_provider(model    : Union[Model, str],
                            provider : Union[ProviderType, str, None],
diff --git a/g4f/base_provider.py b/g4f/base_provider.py
deleted file mode 100644
index cc3451a2..00000000
--- a/g4f/base_provider.py
+++ /dev/null
@@ -1,117 +0,0 @@
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from typing import Union, List, Dict, Type
-from .typing import Messages, CreateResult
-
-class BaseProvider(ABC):
-    """
-    Abstract base class for a provider.
-
-    Attributes:
-        url (str): URL of the provider.
-        working (bool): Indicates if the provider is currently working.
-        needs_auth (bool): Indicates if the provider needs authentication.
-        supports_stream (bool): Indicates if the provider supports streaming.
-        supports_gpt_35_turbo (bool): Indicates if the provider supports GPT-3.5 Turbo.
-        supports_gpt_4 (bool): Indicates if the provider supports GPT-4.
-        supports_message_history (bool): Indicates if the provider supports message history.
-        params (str): List parameters for the provider.
-    """
-
-    url: str = None
-    working: bool = False
-    needs_auth: bool = False
-    supports_stream: bool = False
-    supports_gpt_35_turbo: bool = False
-    supports_gpt_4: bool = False
-    supports_message_history: bool = False
-    params: str
-
-    @classmethod
-    @abstractmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool,
-        **kwargs
-    ) -> CreateResult:
-        """
-        Create a completion with the given parameters.
-
-        Args:
-            model (str): The model to use.
-            messages (Messages): The messages to process.
-            stream (bool): Whether to use streaming.
-            **kwargs: Additional keyword arguments.
-
-        Returns:
-            CreateResult: The result of the creation process.
-        """
-        raise NotImplementedError()
-
-    @classmethod
-    @abstractmethod
-    async def create_async(
-        cls,
-        model: str,
-        messages: Messages,
-        **kwargs
-    ) -> str:
-        """
-        Asynchronously create a completion with the given parameters.
-
-        Args:
-            model (str): The model to use.
-            messages (Messages): The messages to process.
-            **kwargs: Additional keyword arguments.
-
-        Returns:
-            str: The result of the creation process.
-        """
-        raise NotImplementedError()
-
-    @classmethod
-    def get_dict(cls) -> Dict[str, str]:
-        """
-        Get a dictionary representation of the provider.
-
-        Returns:
-            Dict[str, str]: A dictionary with provider's details.
-        """
-        return {'name': cls.__name__, 'url': cls.url}
-
-class BaseRetryProvider(BaseProvider):
-    """
-    Base class for a provider that implements retry logic.
-
-    Attributes:
-        providers (List[Type[BaseProvider]]): List of providers to use for retries.
-        shuffle (bool): Whether to shuffle the providers list.
-        exceptions (Dict[str, Exception]): Dictionary of exceptions encountered.
-        last_provider (Type[BaseProvider]): The last provider used.
-    """
-
-    __name__: str = "RetryProvider"
-    supports_stream: bool = True
-
-    def __init__(
-        self,
-        providers: List[Type[BaseProvider]],
-        shuffle: bool = True
-    ) -> None:
-        """
-        Initialize the BaseRetryProvider.
-
-        Args:
-            providers (List[Type[BaseProvider]]): List of providers to use.
-            shuffle (bool): Whether to shuffle the providers list.
- """ - self.providers = providers - self.shuffle = shuffle - self.working = True - self.exceptions: Dict[str, Exception] = {} - self.last_provider: Type[BaseProvider] = None - -ProviderType = Union[Type[BaseProvider], BaseRetryProvider] \ No newline at end of file diff --git a/g4f/client.py b/g4f/client.py index a1494d47..4e5394b7 100644 --- a/g4f/client.py +++ b/g4f/client.py @@ -1,12 +1,14 @@ from __future__ import annotations import re +import os from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse from .typing import Union, Generator, Messages, ImageType -from .base_provider import BaseProvider, ProviderType +from .providers.types import BaseProvider, ProviderType from .image import ImageResponse as ImageProviderResponse -from .Provider import BingCreateImages, Gemini, OpenaiChat +from .Provider.BingCreateImages import BingCreateImages +from .Provider.needs_auth import Gemini, OpenaiChat from .errors import NoImageResponseError from . import get_model_and_provider @@ -43,7 +45,7 @@ def iter_response( yield ChatCompletionChunk(last_chunk, finish_reason) content += str(chunk) if max_tokens is not None and idx + 1 >= max_tokens: - finish_reason = "max_tokens" + finish_reason = "length" first = -1 word = None if stop is not None: @@ -69,7 +71,7 @@ def iter_response( if not stream: if response_format is not None and "type" in response_format: if response_format["type"] == "json_object": - response = read_json(response) + content = read_json(content) yield ChatCompletion(content, finish_reason) class Client(): @@ -89,13 +91,14 @@ class Client(): self.proxies: Proxies = proxies def get_proxy(self) -> Union[str, None]: - if isinstance(self.proxies, str) or self.proxies is None: + if isinstance(self.proxies, str): return self.proxies + elif self.proxies is None: + return os.environ.get("G4F_PROXY") elif "all" in self.proxies: return self.proxies["all"] elif "https" in self.proxies: return self.proxies["https"] - return None class Completions(): def __init__(self, client: Client, provider: ProviderType = None): @@ -123,7 +126,7 @@ class Completions(): stream, **kwargs ) - response = provider.create_completion(model, messages, stream=stream, **kwargs) + response = provider.create_completion(model, messages, stream=stream, proxy=self.client.get_proxy(), **kwargs) stop = [stop] if isinstance(stop, str) else stop response = iter_response(response, stream, response_format, max_tokens, stop) return response if stream else next(response) diff --git a/g4f/debug.py b/g4f/debug.py index 050bb0b2..69f7d55c 100644 --- a/g4f/debug.py +++ b/g4f/debug.py @@ -1,4 +1,4 @@ -from .base_provider import ProviderType +from .providers.types import ProviderType logging: bool = False version_check: bool = True diff --git a/g4f/defaults.py b/g4f/defaults.py deleted file mode 100644 index 6ae6d7eb..00000000 --- a/g4f/defaults.py +++ /dev/null @@ -1,13 +0,0 @@ -DEFAULT_HEADERS = { - 'Accept': '*/*', - 'Accept-Encoding': 'gzip, deflate, br', - 'Accept-Language': 'en-US', - 'Connection': 'keep-alive', - 'Sec-Ch-Ua': '"Not A(Brand";v="99", "Google Chrome";v="121", "Chromium";v="121"', - 'Sec-Ch-Ua-Mobile': '?0', - 'Sec-Ch-Ua-Platform': '"Windows"', - 'Sec-Fetch-Dest': 'empty', - 'Sec-Fetch-Mode': 'cors', - 'Sec-Fetch-Site': 'same-site', - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36' -} \ No newline at end of file diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py index 6847be34..9788e5f5 100644 --- 
+++ b/g4f/gui/server/backend.py
@@ -97,7 +97,7 @@ class Backend_Api:
             current_version = None
         return {
             "version": current_version,
-            "latest_version": version.get_latest_version(),
+            "latest_version": version.utils.latest_version,
         }
 
     def generate_title(self):
diff --git a/g4f/gui/server/internet.py b/g4f/gui/server/internet.py
index a6bfc885..e784e52d 100644
--- a/g4f/gui/server/internet.py
+++ b/g4f/gui/server/internet.py
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from aiohttp import ClientSession, ClientTimeout
 try:
-    from duckduckgo_search import DDGS
+    from duckduckgo_search.duckduckgo_search_async import AsyncDDGS
     from bs4 import BeautifulSoup
     has_requirements = True
 except ImportError:
@@ -30,7 +30,10 @@ class SearchResults():
             search += result.snippet
             search += f"\n\nSource: [[{idx}]]({result.url})"
         return search
-    
+
+    def __len__(self) -> int:
+        return len(self.results)
+
 class SearchResultEntry():
     def __init__(self, title: str, url: str, snippet: str, text: str = None):
         self.title = title
@@ -96,21 +99,20 @@ async def fetch_and_scrape(session: ClientSession, url: str, max_words: int = No
 async def search(query: str, n_results: int = 5, max_words: int = 2500, add_text: bool = True) -> SearchResults:
     if not has_requirements:
         raise MissingRequirementsError('Install "duckduckgo-search" and "beautifulsoup4" package')
-    with DDGS() as ddgs:
+    async with AsyncDDGS() as ddgs:
         results = []
-        for result in ddgs.text(
+        async for result in ddgs.text(
                 query,
                 region="wt-wt",
                 safesearch="moderate",
                 timelimit="y",
+                max_results=n_results
             ):
             results.append(SearchResultEntry(
                 result["title"],
                 result["href"],
                 result["body"]
             ))
-            if len(results) >= n_results:
-                break
 
     if add_text:
         requests = []
@@ -136,7 +138,6 @@ async def search(query: str, n_results: int = 5, max_words: int = 2500, add_text
 
     return SearchResults(formatted_results)
 
-
 def get_search_message(prompt) -> str:
     try:
         search_results = asyncio.run(search(prompt))
@@ -146,7 +147,6 @@ Instruction: Using the provided web search results, write a comprehensive reply to the user request.
 Make sure to cite sources using [[Number]](Url) notation after the reference. Example: [[0]](http://google.com)
-If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.
 
 User request:
 {prompt}
 
@@ -154,4 +154,4 @@ User request:
         return message
     except Exception as e:
         print("Couldn't do web search:", e)
-        return prompt
+        return prompt
\ No newline at end of file
diff --git a/g4f/image.py b/g4f/image.py
index 01d6ae50..6370a06f 100644
--- a/g4f/image.py
+++ b/g4f/image.py
@@ -11,7 +11,7 @@ try:
     has_requirements = True
 except ImportError:
     has_requirements = False
-    
+
 from .errors import MissingRequirementsError
 
 ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp', 'svg'}
@@ -28,9 +28,11 @@ def to_image(image: ImageType, is_svg: bool = False) -> Image:
     """
     if not has_requirements:
        raise MissingRequirementsError('Install "pillow" package for images')
+
    if isinstance(image, str):
        is_data_uri_an_image(image)
        image = extract_data_uri(image)
+
    if is_svg:
        try:
            import cairosvg
@@ -41,6 +43,7 @@ def to_image(image: ImageType, is_svg: bool = False) -> Image:
        buffer = BytesIO()
        cairosvg.svg2png(image, write_to=buffer)
        return open_image(buffer)
+
    if isinstance(image, bytes):
        is_accepted_format(image)
        return open_image(BytesIO(image))
@@ -48,6 +51,7 @@ def to_image(image: ImageType, is_svg: bool = False) -> Image:
        image = open_image(image)
        image.load()
        return image
+
    return image
 
 def is_allowed_extension(filename: str) -> bool:
@@ -200,17 +204,16 @@ def format_images_markdown(images: Union[str, list], alt: str, preview: Union[st
         str: The formatted markdown string.
     """
     if isinstance(images, str):
-        images = f"[![{alt}]({preview.replace('{image}', images) if preview else images})]({images})"
+        result = f"[![{alt}]({preview.replace('{image}', images) if preview else images})]({images})"
     else:
         if not isinstance(preview, list):
             preview = [preview.replace('{image}', image) if preview else image for image in images]
-        images = [
+        result = "\n".join(
             f"[![#{idx+1} {alt}]({preview[idx]})]({image})"
             for idx, image in enumerate(images)
-        ]
-        images = "\n".join(images)
+        )
     start_flag = "<!-- generated images start -->\n"
     end_flag = "<!-- generated images end -->\n"
-    return f"\n{start_flag}{images}\n{end_flag}\n"
+    return f"\n{start_flag}{result}\n{end_flag}\n"
 
 def to_bytes(image: ImageType) -> bytes:
     """
@@ -245,19 +248,19 @@ class ImageResponse:
         self.images = images
         self.alt = alt
         self.options = options
-    
+
     def __str__(self) -> str:
         return format_images_markdown(self.images, self.alt, self.get("preview"))
-    
+
     def get(self, key: str):
         return self.options.get(key)
-    
+
 class ImageRequest:
     def __init__(
         self,
         options: dict = {}
     ):
         self.options = options
-    
+
     def get(self, key: str):
         return self.options.get(key)
\ No newline at end of file
diff --git a/g4f/models.py b/g4f/models.py
index 3b4ca468..f5951a29 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -1,5 +1,7 @@
 from __future__ import annotations
+
 from dataclasses import dataclass
+
 from .Provider import RetryProvider, ProviderType
 from .Provider import (
     Chatgpt4Online,
diff --git a/g4f/providers/base_provider.py b/g4f/providers/base_provider.py
new file mode 100644
index 00000000..b8649ba5
--- /dev/null
+++ b/g4f/providers/base_provider.py
@@ -0,0 +1,280 @@
+from __future__ import annotations
+
+import sys
+import asyncio
+from asyncio import AbstractEventLoop
+from concurrent.futures import ThreadPoolExecutor
+from abc import abstractmethod
+from inspect import signature, Parameter
+from ..typing import CreateResult, AsyncResult, Messages, Union
+from .types import BaseProvider
+from ..errors import NestAsyncioError, ModelNotSupportedError
+from .. import debug
+
+if sys.version_info < (3, 10):
+    NoneType = type(None)
+else:
+    from types import NoneType
+
+# Set Windows event loop policy for better compatibility with asyncio and curl_cffi
+if sys.platform == 'win32':
+    if isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy):
+        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
+
+def get_running_loop() -> Union[AbstractEventLoop, None]:
+    try:
+        loop = asyncio.get_running_loop()
+        if not hasattr(loop.__class__, "_nest_patched"):
+            raise NestAsyncioError(
+                'Use "create_async" instead of "create" function in a running event loop. Or use "nest_asyncio" package.'
+            )
+        return loop
+    except RuntimeError:
+        pass
+
+class AbstractProvider(BaseProvider):
+    """
+    Abstract class for providing asynchronous functionality to derived classes.
+    """
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: Messages,
+        *,
+        loop: AbstractEventLoop = None,
+        executor: ThreadPoolExecutor = None,
+        **kwargs
+    ) -> str:
+        """
+        Asynchronously creates a result based on the given model and messages.
+
+        Args:
+            cls (type): The class on which this method is called.
+            model (str): The model to use for creation.
+            messages (Messages): The messages to process.
+            loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
+            executor (ThreadPoolExecutor, optional): The executor for running async tasks. Defaults to None.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            str: The created result as a string.
+        """
+        loop = loop or asyncio.get_running_loop()
+
+        def create_func() -> str:
+            return "".join(cls.create_completion(model, messages, False, **kwargs))
+
+        return await asyncio.wait_for(
+            loop.run_in_executor(executor, create_func),
+            timeout=kwargs.get("timeout")
+        )
+
+    @classmethod
+    @property
+    def params(cls) -> str:
+        """
+        Returns the parameters supported by the provider.
+
+        Args:
+            cls (type): The class on which this property is called.
+
+        Returns:
+            str: A string listing the supported parameters.
+        """
+        sig = signature(
+            cls.create_async_generator if issubclass(cls, AsyncGeneratorProvider) else
+            cls.create_async if issubclass(cls, AsyncProvider) else
+            cls.create_completion
+        )
+
+        def get_type_name(annotation: type) -> str:
+            return annotation.__name__ if hasattr(annotation, "__name__") else str(annotation)
+
+        args = ""
+        for name, param in sig.parameters.items():
+            if name in ("self", "kwargs") or (name == "stream" and not cls.supports_stream):
+                continue
+            args += f"\n    {name}"
+            args += f": {get_type_name(param.annotation)}" if param.annotation is not Parameter.empty else ""
+            args += f' = "{param.default}"' if param.default == "" else f" = {param.default}" if param.default is not Parameter.empty else ""
+
+        return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"
+
+
+class AsyncProvider(AbstractProvider):
+    """
+    Provides asynchronous functionality for creating completions.
+    """
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool = False,
+        **kwargs
+    ) -> CreateResult:
+        """
+        Creates a completion result synchronously.
+
+        Args:
+            cls (type): The class on which this method is called.
+            model (str): The model to use for creation.
+            messages (Messages): The messages to process.
+            stream (bool): Indicates whether to stream the results. Defaults to False.
+            loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            CreateResult: The result of the completion creation.
+        """
+        get_running_loop()
+        yield asyncio.run(cls.create_async(model, messages, **kwargs))
+
+    @staticmethod
+    @abstractmethod
+    async def create_async(
+        model: str,
+        messages: Messages,
+        **kwargs
+    ) -> str:
+        """
+        Abstract method for creating asynchronous results.
+
+        Args:
+            model (str): The model to use for creation.
+            messages (Messages): The messages to process.
+            **kwargs: Additional keyword arguments.
+
+        Raises:
+            NotImplementedError: If this method is not overridden in derived classes.
+
+        Returns:
+            str: The created result as a string.
+        """
+        raise NotImplementedError()
+
+
+class AsyncGeneratorProvider(AsyncProvider):
+    """
+    Provides asynchronous generator functionality for streaming results.
+    """
+    supports_stream = True
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool = True,
+        **kwargs
+    ) -> CreateResult:
+        """
+        Creates a streaming completion result synchronously.
+
+        Args:
+            cls (type): The class on which this method is called.
+            model (str): The model to use for creation.
+            messages (Messages): The messages to process.
+            stream (bool): Indicates whether to stream the results. Defaults to True.
+            loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            CreateResult: The result of the streaming completion creation.
+        """
+        loop = get_running_loop()
+        new_loop = False
+        if not loop:
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+            new_loop = True
+
+        generator = cls.create_async_generator(model, messages, stream=stream, **kwargs)
+        gen = generator.__aiter__()
+
+        # Fix for RuntimeError: async generator ignored GeneratorExit
+        async def await_callback(callback):
+            return await callback()
+
+        try:
+            while True:
+                yield loop.run_until_complete(await_callback(gen.__anext__))
+        except StopAsyncIteration:
+            ...
+        # Fix for: ResourceWarning: unclosed event loop
+        finally:
+            if new_loop:
+                loop.close()
+                asyncio.set_event_loop(None)
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: Messages,
+        **kwargs
+    ) -> str:
+        """
+        Asynchronously creates a result from a generator.
+
+        Args:
+            cls (type): The class on which this method is called.
+            model (str): The model to use for creation.
+            messages (Messages): The messages to process.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            str: The created result as a string.
+        """
+        return "".join([
+            chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)
+            if not isinstance(chunk, Exception)
+        ])
+
+    @staticmethod
+    @abstractmethod
+    async def create_async_generator(
+        model: str,
+        messages: Messages,
+        stream: bool = True,
+        **kwargs
+    ) -> AsyncResult:
+        """
+        Abstract method for creating an asynchronous generator.
+
+        Args:
+            model (str): The model to use for creation.
+            messages (Messages): The messages to process.
+            stream (bool): Indicates whether to stream the results. Defaults to True.
+            **kwargs: Additional keyword arguments.
+
+        Raises:
+            NotImplementedError: If this method is not overridden in derived classes.
+
+        Returns:
+            AsyncResult: An asynchronous generator yielding results.
+ """ + raise NotImplementedError() + +class ProviderModelMixin: + default_model: str + models: list[str] = [] + model_aliases: dict[str, str] = {} + + @classmethod + def get_models(cls) -> list[str]: + return cls.models + + @classmethod + def get_model(cls, model: str) -> str: + if not model: + model = cls.default_model + elif model in cls.model_aliases: + model = cls.model_aliases[model] + elif model not in cls.get_models(): + raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}") + debug.last_model = model + return model \ No newline at end of file diff --git a/g4f/providers/create_images.py b/g4f/providers/create_images.py new file mode 100644 index 00000000..29a2a041 --- /dev/null +++ b/g4f/providers/create_images.py @@ -0,0 +1,155 @@ +from __future__ import annotations + +import re +import asyncio + +from .. import debug +from ..typing import CreateResult, Messages +from .types import BaseProvider, ProviderType + +system_message = """ +You can generate images, pictures, photos or img with the DALL-E 3 image generator. +To generate an image with a prompt, do this: + + + +Never use own image links. Don't wrap it in backticks. +It is important to use a only a img tag with a prompt. + + +""" + +class CreateImagesProvider(BaseProvider): + """ + Provider class for creating images based on text prompts. + + This provider handles image creation requests embedded within message content, + using provided image creation functions. + + Attributes: + provider (ProviderType): The underlying provider to handle non-image related tasks. + create_images (callable): A function to create images synchronously. + create_images_async (callable): A function to create images asynchronously. + system_message (str): A message that explains the image creation capability. + include_placeholder (bool): Flag to determine whether to include the image placeholder in the output. + __name__ (str): Name of the provider. + url (str): URL of the provider. + working (bool): Indicates if the provider is operational. + supports_stream (bool): Indicates if the provider supports streaming. + """ + + def __init__( + self, + provider: ProviderType, + create_images: callable, + create_async: callable, + system_message: str = system_message, + include_placeholder: bool = True + ) -> None: + """ + Initializes the CreateImagesProvider. + + Args: + provider (ProviderType): The underlying provider. + create_images (callable): Function to create images synchronously. + create_async (callable): Function to create images asynchronously. + system_message (str, optional): System message to be prefixed to messages. Defaults to a predefined message. + include_placeholder (bool, optional): Whether to include image placeholders in the output. Defaults to True. + """ + self.provider = provider + self.create_images = create_images + self.create_images_async = create_async + self.system_message = system_message + self.include_placeholder = include_placeholder + self.__name__ = provider.__name__ + self.url = provider.url + self.working = provider.working + self.supports_stream = provider.supports_stream + + def create_completion( + self, + model: str, + messages: Messages, + stream: bool = False, + **kwargs + ) -> CreateResult: + """ + Creates a completion result, processing any image creation prompts found within the messages. + + Args: + model (str): The model to use for creation. + messages (Messages): The messages to process, which may contain image prompts. 
+            stream (bool, optional): Indicates whether to stream the results. Defaults to False.
+            **kwargs: Additional keyword arguments for the provider.
+
+        Yields:
+            CreateResult: Yields chunks of the processed messages, including image data if applicable.
+
+        Note:
+            This method processes messages to detect image creation prompts. When such a prompt is found,
+            it calls the synchronous image creation function and includes the resulting image in the output.
+        """
+        messages.insert(0, {"role": "system", "content": self.system_message})
+        buffer = ""
+        for chunk in self.provider.create_completion(model, messages, stream, **kwargs):
+            if isinstance(chunk, str) and buffer or "<" in chunk:
+                buffer += chunk
+                if ">" in buffer:
+                    match = re.search(r'<img data-prompt="(.*?)">', buffer)
+                    if match:
+                        placeholder, prompt = match.group(0), match.group(1)
+                        start, append = buffer.split(placeholder, 1)
+                        if start:
+                            yield start
+                        if self.include_placeholder:
+                            yield placeholder
+                        if debug.logging:
+                            print(f"Create images with prompt: {prompt}")
+                        yield from self.create_images(prompt)
+                        if append:
+                            yield append
+                    else:
+                        yield buffer
+                    buffer = ""
+            else:
+                yield chunk
+
+    async def create_async(
+        self,
+        model: str,
+        messages: Messages,
+        **kwargs
+    ) -> str:
+        """
+        Asynchronously creates a response, processing any image creation prompts found within the messages.
+
+        Args:
+            model (str): The model to use for creation.
+            messages (Messages): The messages to process, which may contain image prompts.
+            **kwargs: Additional keyword arguments for the provider.
+
+        Returns:
+            str: The processed response string, including asynchronously generated image data if applicable.
+
+        Note:
+            This method processes messages to detect image creation prompts. When such a prompt is found,
+            it calls the asynchronous image creation function and includes the resulting image in the output.
+        """
+        messages.insert(0, {"role": "system", "content": self.system_message})
+        response = await self.provider.create_async(model, messages, **kwargs)
+        matches = re.findall(r'(<img data-prompt="(.*?)">)', response)
+        results = []
+        placeholders = []
+        for placeholder, prompt in matches:
+            if placeholder not in placeholders:
+                if debug.logging:
+                    print(f"Create images with prompt: {prompt}")
+                results.append(self.create_images_async(prompt))
+                placeholders.append(placeholder)
+        results = await asyncio.gather(*results)
+        for idx, result in enumerate(results):
+            placeholder = placeholders[idx]
+            if self.include_placeholder:
+                result = placeholder + result
+            response = response.replace(placeholder, result)
+        return response
\ No newline at end of file
diff --git a/g4f/providers/helper.py b/g4f/providers/helper.py
new file mode 100644
index 00000000..49d033d1
--- /dev/null
+++ b/g4f/providers/helper.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+import random
+import secrets
+import string
+from aiohttp import BaseConnector
+
+from ..typing import Messages, Optional
+from ..errors import MissingRequirementsError
+
+def format_prompt(messages: Messages, add_special_tokens=False) -> str:
+    """
+    Format a series of messages into a single string, optionally adding special tokens.
+
+    Args:
+        messages (Messages): A list of message dictionaries, each containing 'role' and 'content'.
+        add_special_tokens (bool): Whether to add special formatting tokens.
+
+    Returns:
+        str: A formatted string containing all messages.
+ """ + if not add_special_tokens and len(messages) <= 1: + return messages[0]["content"] + formatted = "\n".join([ + f'{message["role"].capitalize()}: {message["content"]}' + for message in messages + ]) + return f"{formatted}\nAssistant:" + +def get_random_string(length: int = 10) -> str: + """ + Generate a random string of specified length, containing lowercase letters and digits. + + Args: + length (int, optional): Length of the random string to generate. Defaults to 10. + + Returns: + str: A random string of the specified length. + """ + return ''.join( + random.choice(string.ascii_lowercase + string.digits) + for _ in range(length) + ) + +def get_random_hex() -> str: + """ + Generate a random hexadecimal string of a fixed length. + + Returns: + str: A random hexadecimal string of 32 characters (16 bytes). + """ + return secrets.token_hex(16).zfill(32) + +def get_connector(connector: BaseConnector = None, proxy: str = None) -> Optional[BaseConnector]: + if proxy and not connector: + try: + from aiohttp_socks import ProxyConnector + connector = ProxyConnector.from_url(proxy) + except ImportError: + raise MissingRequirementsError('Install "aiohttp_socks" package for proxy support') + return connector \ No newline at end of file diff --git a/g4f/providers/retry_provider.py b/g4f/providers/retry_provider.py new file mode 100644 index 00000000..a7ab2881 --- /dev/null +++ b/g4f/providers/retry_provider.py @@ -0,0 +1,119 @@ +from __future__ import annotations + +import asyncio +import random + +from ..typing import CreateResult, Messages +from .types import BaseRetryProvider +from .. import debug +from ..errors import RetryProviderError, RetryNoProviderError + +class RetryProvider(BaseRetryProvider): + """ + A provider class to handle retries for creating completions with different providers. + + Attributes: + providers (list): A list of provider instances. + shuffle (bool): A flag indicating whether to shuffle providers before use. + exceptions (dict): A dictionary to store exceptions encountered during retries. + last_provider (BaseProvider): The last provider that was used. + """ + + def create_completion( + self, + model: str, + messages: Messages, + stream: bool = False, + **kwargs + ) -> CreateResult: + """ + Create a completion using available providers, with an option to stream the response. + + Args: + model (str): The model to be used for completion. + messages (Messages): The messages to be used for generating completion. + stream (bool, optional): Flag to indicate if the response should be streamed. Defaults to False. + + Yields: + CreateResult: Tokens or results from the completion. + + Raises: + Exception: Any exception encountered during the completion process. 
+ """ + providers = [p for p in self.providers if stream and p.supports_stream] if stream else self.providers + if self.shuffle: + random.shuffle(providers) + + self.exceptions = {} + started: bool = False + for provider in providers: + self.last_provider = provider + try: + if debug.logging: + print(f"Using {provider.__name__} provider") + for token in provider.create_completion(model, messages, stream, **kwargs): + yield token + started = True + if started: + return + except Exception as e: + self.exceptions[provider.__name__] = e + if debug.logging: + print(f"{provider.__name__}: {e.__class__.__name__}: {e}") + if started: + raise e + + self.raise_exceptions() + + async def create_async( + self, + model: str, + messages: Messages, + **kwargs + ) -> str: + """ + Asynchronously create a completion using available providers. + + Args: + model (str): The model to be used for completion. + messages (Messages): The messages to be used for generating completion. + + Returns: + str: The result of the asynchronous completion. + + Raises: + Exception: Any exception encountered during the asynchronous completion process. + """ + providers = self.providers + if self.shuffle: + random.shuffle(providers) + + self.exceptions = {} + for provider in providers: + self.last_provider = provider + try: + return await asyncio.wait_for( + provider.create_async(model, messages, **kwargs), + timeout=kwargs.get("timeout", 60) + ) + except Exception as e: + self.exceptions[provider.__name__] = e + if debug.logging: + print(f"{provider.__name__}: {e.__class__.__name__}: {e}") + + self.raise_exceptions() + + def raise_exceptions(self) -> None: + """ + Raise a combined exception if any occurred during retries. + + Raises: + RetryProviderError: If any provider encountered an exception. + RetryNoProviderError: If no provider is found. + """ + if self.exceptions: + raise RetryProviderError("RetryProvider failed:\n" + "\n".join([ + f"{p}: {exception.__class__.__name__}: {exception}" for p, exception in self.exceptions.items() + ])) + + raise RetryNoProviderError("No provider found") \ No newline at end of file diff --git a/g4f/providers/types.py b/g4f/providers/types.py new file mode 100644 index 00000000..7b11ec43 --- /dev/null +++ b/g4f/providers/types.py @@ -0,0 +1,117 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Union, List, Dict, Type +from ..typing import Messages, CreateResult + +class BaseProvider(ABC): + """ + Abstract base class for a provider. + + Attributes: + url (str): URL of the provider. + working (bool): Indicates if the provider is currently working. + needs_auth (bool): Indicates if the provider needs authentication. + supports_stream (bool): Indicates if the provider supports streaming. + supports_gpt_35_turbo (bool): Indicates if the provider supports GPT-3.5 Turbo. + supports_gpt_4 (bool): Indicates if the provider supports GPT-4. + supports_message_history (bool): Indicates if the provider supports message history. + params (str): List parameters for the provider. + """ + + url: str = None + working: bool = False + needs_auth: bool = False + supports_stream: bool = False + supports_gpt_35_turbo: bool = False + supports_gpt_4: bool = False + supports_message_history: bool = False + params: str + + @classmethod + @abstractmethod + def create_completion( + cls, + model: str, + messages: Messages, + stream: bool, + **kwargs + ) -> CreateResult: + """ + Create a completion with the given parameters. + + Args: + model (str): The model to use. 
+ messages (Messages): The messages to process. + stream (bool): Whether to use streaming. + **kwargs: Additional keyword arguments. + + Returns: + CreateResult: The result of the creation process. + """ + raise NotImplementedError() + + @classmethod + @abstractmethod + async def create_async( + cls, + model: str, + messages: Messages, + **kwargs + ) -> str: + """ + Asynchronously create a completion with the given parameters. + + Args: + model (str): The model to use. + messages (Messages): The messages to process. + **kwargs: Additional keyword arguments. + + Returns: + str: The result of the creation process. + """ + raise NotImplementedError() + + @classmethod + def get_dict(cls) -> Dict[str, str]: + """ + Get a dictionary representation of the provider. + + Returns: + Dict[str, str]: A dictionary with provider's details. + """ + return {'name': cls.__name__, 'url': cls.url} + +class BaseRetryProvider(BaseProvider): + """ + Base class for a provider that implements retry logic. + + Attributes: + providers (List[Type[BaseProvider]]): List of providers to use for retries. + shuffle (bool): Whether to shuffle the providers list. + exceptions (Dict[str, Exception]): Dictionary of exceptions encountered. + last_provider (Type[BaseProvider]): The last provider used. + """ + + __name__: str = "RetryProvider" + supports_stream: bool = True + + def __init__( + self, + providers: List[Type[BaseProvider]], + shuffle: bool = True + ) -> None: + """ + Initialize the BaseRetryProvider. + + Args: + providers (List[Type[BaseProvider]]): List of providers to use. + shuffle (bool): Whether to shuffle the providers list. + """ + self.providers = providers + self.shuffle = shuffle + self.working = True + self.exceptions: Dict[str, Exception] = {} + self.last_provider: Type[BaseProvider] = None + +ProviderType = Union[Type[BaseProvider], BaseRetryProvider] \ No newline at end of file diff --git a/g4f/requests.py b/g4f/requests.py deleted file mode 100644 index d7b5996b..00000000 --- a/g4f/requests.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import annotations - -from urllib.parse import urlparse - -try: - from curl_cffi.requests import Session - from .requests_curl_cffi import StreamResponse, StreamSession - has_curl_cffi = True -except ImportError: - from typing import Type as Session - from .requests_aiohttp import StreamResponse, StreamSession - has_curl_cffi = False - -from .webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies -from .errors import MissingRequirementsError -from .defaults import DEFAULT_HEADERS - -def get_args_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120) -> dict: - """ - Create a Session object using a WebDriver to handle cookies and headers. - - Args: - url (str): The URL to navigate to using the WebDriver. - webdriver (WebDriver, optional): The WebDriver instance to use. - proxy (str, optional): Proxy server to use for the Session. - timeout (int, optional): Timeout in seconds for the WebDriver. - - Returns: - Session: A Session object configured with cookies and headers from the WebDriver. 
- """ - with WebDriverSession(webdriver, "", proxy=proxy, virtual_display=False) as driver: - bypass_cloudflare(driver, url, timeout) - cookies = get_driver_cookies(driver) - user_agent = driver.execute_script("return navigator.userAgent") - parse = urlparse(url) - return { - 'cookies': cookies, - 'headers': { - **DEFAULT_HEADERS, - 'Authority': parse.netloc, - 'Origin': f'{parse.scheme}://{parse.netloc}', - 'Referer': url, - 'User-Agent': user_agent, - }, - } - -def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120) -> Session: - if not has_curl_cffi: - raise MissingRequirementsError('Install "curl_cffi" package') - args = get_args_from_browser(url, webdriver, proxy, timeout) - return Session( - **args, - proxies={"https": proxy, "http": proxy}, - timeout=timeout, - impersonate="chrome110" - ) \ No newline at end of file diff --git a/g4f/requests/__init__.py b/g4f/requests/__init__.py new file mode 100644 index 00000000..d278ffaf --- /dev/null +++ b/g4f/requests/__init__.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from urllib.parse import urlparse + +try: + from curl_cffi.requests import Session + from .curl_cffi import StreamResponse, StreamSession + has_curl_cffi = True +except ImportError: + from typing import Type as Session + from .aiohttp import StreamResponse, StreamSession + has_curl_cffi = False + +from ..webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies +from ..errors import MissingRequirementsError +from .defaults import DEFAULT_HEADERS + +def get_args_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120) -> dict: + """ + Create a Session object using a WebDriver to handle cookies and headers. + + Args: + url (str): The URL to navigate to using the WebDriver. + webdriver (WebDriver, optional): The WebDriver instance to use. + proxy (str, optional): Proxy server to use for the Session. + timeout (int, optional): Timeout in seconds for the WebDriver. + + Returns: + Session: A Session object configured with cookies and headers from the WebDriver. 
+ """ + with WebDriverSession(webdriver, "", proxy=proxy, virtual_display=False) as driver: + bypass_cloudflare(driver, url, timeout) + cookies = get_driver_cookies(driver) + user_agent = driver.execute_script("return navigator.userAgent") + parse = urlparse(url) + return { + 'cookies': cookies, + 'headers': { + **DEFAULT_HEADERS, + 'Authority': parse.netloc, + 'Origin': f'{parse.scheme}://{parse.netloc}', + 'Referer': url, + 'User-Agent': user_agent, + }, + } + +def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120) -> Session: + if not has_curl_cffi: + raise MissingRequirementsError('Install "curl_cffi" package') + args = get_args_from_browser(url, webdriver, proxy, timeout) + return Session( + **args, + proxies={"https": proxy, "http": proxy}, + timeout=timeout, + impersonate="chrome110" + ) \ No newline at end of file diff --git a/g4f/requests/aiohttp.py b/g4f/requests/aiohttp.py new file mode 100644 index 00000000..d9bd6541 --- /dev/null +++ b/g4f/requests/aiohttp.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +from aiohttp import ClientSession, ClientResponse, ClientTimeout +from typing import AsyncGenerator, Any + +from ..providers.helper import get_connector +from .defaults import DEFAULT_HEADERS + +class StreamResponse(ClientResponse): + async def iter_lines(self) -> AsyncGenerator[bytes, None]: + async for line in self.content: + yield line.rstrip(b"\r\n") + + async def json(self) -> Any: + return await super().json(content_type=None) + +class StreamSession(ClientSession): + def __init__(self, headers: dict = {}, timeout: int = None, proxies: dict = {}, impersonate = None, **kwargs): + if impersonate: + headers = { + **DEFAULT_HEADERS, + **headers + } + super().__init__( + **kwargs, + timeout=ClientTimeout(timeout) if timeout else None, + response_class=StreamResponse, + connector=get_connector(kwargs.get("connector"), proxies.get("https")), + headers=headers + ) \ No newline at end of file diff --git a/g4f/requests/curl_cffi.py b/g4f/requests/curl_cffi.py new file mode 100644 index 00000000..cfcdd63b --- /dev/null +++ b/g4f/requests/curl_cffi.py @@ -0,0 +1,77 @@ +from __future__ import annotations + +from curl_cffi.requests import AsyncSession, Response +from typing import AsyncGenerator, Any +from functools import partialmethod +import json + +class StreamResponse: + """ + A wrapper class for handling asynchronous streaming responses. + + Attributes: + inner (Response): The original Response object. 
+ """ + + def __init__(self, inner: Response) -> None: + """Initialize the StreamResponse with the provided Response object.""" + self.inner: Response = inner + + async def text(self) -> str: + """Asynchronously get the response text.""" + return await self.inner.atext() + + def raise_for_status(self) -> None: + """Raise an HTTPError if one occurred.""" + self.inner.raise_for_status() + + async def json(self, **kwargs) -> Any: + """Asynchronously parse the JSON response content.""" + return json.loads(await self.inner.acontent(), **kwargs) + + async def iter_lines(self) -> AsyncGenerator[bytes, None]: + """Asynchronously iterate over the lines of the response.""" + async for line in self.inner.aiter_lines(): + yield line + + async def iter_content(self) -> AsyncGenerator[bytes, None]: + """Asynchronously iterate over the response content.""" + async for chunk in self.inner.aiter_content(): + yield chunk + + async def __aenter__(self): + """Asynchronously enter the runtime context for the response object.""" + inner: Response = await self.inner + self.inner = inner + self.request = inner.request + self.status: int = inner.status_code + self.reason: str = inner.reason + self.ok: bool = inner.ok + self.headers = inner.headers + self.cookies = inner.cookies + return self + + async def __aexit__(self, *args): + """Asynchronously exit the runtime context for the response object.""" + await self.inner.aclose() + +class StreamSession(AsyncSession): + """ + An asynchronous session class for handling HTTP requests with streaming. + + Inherits from AsyncSession. + """ + + def request( + self, method: str, url: str, **kwargs + ) -> StreamResponse: + """Create and return a StreamResponse object for the given HTTP request.""" + return StreamResponse(super().request(method, url, stream=True, **kwargs)) + + # Defining HTTP methods as partial methods of the request method. 
+ head = partialmethod(request, "HEAD") + get = partialmethod(request, "GET") + post = partialmethod(request, "POST") + put = partialmethod(request, "PUT") + patch = partialmethod(request, "PATCH") + delete = partialmethod(request, "DELETE") diff --git a/g4f/requests/defaults.py b/g4f/requests/defaults.py new file mode 100644 index 00000000..6ae6d7eb --- /dev/null +++ b/g4f/requests/defaults.py @@ -0,0 +1,13 @@ +DEFAULT_HEADERS = { + 'Accept': '*/*', + 'Accept-Encoding': 'gzip, deflate, br', + 'Accept-Language': 'en-US', + 'Connection': 'keep-alive', + 'Sec-Ch-Ua': '"Not A(Brand";v="99", "Google Chrome";v="121", "Chromium";v="121"', + 'Sec-Ch-Ua-Mobile': '?0', + 'Sec-Ch-Ua-Platform': '"Windows"', + 'Sec-Fetch-Dest': 'empty', + 'Sec-Fetch-Mode': 'cors', + 'Sec-Fetch-Site': 'same-site', + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36' +} \ No newline at end of file diff --git a/g4f/requests_aiohttp.py b/g4f/requests_aiohttp.py deleted file mode 100644 index 0da8973b..00000000 --- a/g4f/requests_aiohttp.py +++ /dev/null @@ -1,30 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession, ClientResponse, ClientTimeout -from typing import AsyncGenerator, Any - -from .Provider.helper import get_connector -from .defaults import DEFAULT_HEADERS - -class StreamResponse(ClientResponse): - async def iter_lines(self) -> AsyncGenerator[bytes, None]: - async for line in self.content: - yield line.rstrip(b"\r\n") - - async def json(self) -> Any: - return await super().json(content_type=None) - -class StreamSession(ClientSession): - def __init__(self, headers: dict = {}, timeout: int = None, proxies: dict = {}, impersonate = None, **kwargs): - if impersonate: - headers = { - **DEFAULT_HEADERS, - **headers - } - super().__init__( - **kwargs, - timeout=ClientTimeout(timeout) if timeout else None, - response_class=StreamResponse, - connector=get_connector(kwargs.get("connector"), proxies.get("https")), - headers=headers - ) \ No newline at end of file diff --git a/g4f/requests_curl_cffi.py b/g4f/requests_curl_cffi.py deleted file mode 100644 index 64e41d65..00000000 --- a/g4f/requests_curl_cffi.py +++ /dev/null @@ -1,77 +0,0 @@ -from __future__ import annotations - -from curl_cffi.requests import AsyncSession, Response -from typing import AsyncGenerator, Any -from functools import partialmethod -import json - -class StreamResponse: - """ - A wrapper class for handling asynchronous streaming responses. - - Attributes: - inner (Response): The original Response object. 
- """ - - def __init__(self, inner: Response) -> None: - """Initialize the StreamResponse with the provided Response object.""" - self.inner: Response = inner - - async def text(self) -> str: - """Asynchronously get the response text.""" - return await self.inner.atext() - - def raise_for_status(self) -> None: - """Raise an HTTPError if one occurred.""" - self.inner.raise_for_status() - - async def json(self, **kwargs) -> Any: - """Asynchronously parse the JSON response content.""" - return json.loads(await self.inner.acontent(), **kwargs) - - async def iter_lines(self) -> AsyncGenerator[bytes, None]: - """Asynchronously iterate over the lines of the response.""" - async for line in self.inner.aiter_lines(): - yield line - - async def iter_content(self) -> AsyncGenerator[bytes, None]: - """Asynchronously iterate over the response content.""" - async for chunk in self.inner.aiter_content(): - yield chunk - - async def __aenter__(self): - """Asynchronously enter the runtime context for the response object.""" - inner: Response = await self.inner - self.inner = inner - self.request = inner.request - self.status_code: int = inner.status_code - self.reason: str = inner.reason - self.ok: bool = inner.ok - self.headers = inner.headers - self.cookies = inner.cookies - return self - - async def __aexit__(self, *args): - """Asynchronously exit the runtime context for the response object.""" - await self.inner.aclose() - -class StreamSession(AsyncSession): - """ - An asynchronous session class for handling HTTP requests with streaming. - - Inherits from AsyncSession. - """ - - def request( - self, method: str, url: str, **kwargs - ) -> StreamResponse: - """Create and return a StreamResponse object for the given HTTP request.""" - return StreamResponse(super().request(method, url, stream=True, **kwargs)) - - # Defining HTTP methods as partial methods of the request method. - head = partialmethod(request, "HEAD") - get = partialmethod(request, "GET") - post = partialmethod(request, "POST") - put = partialmethod(request, "PUT") - patch = partialmethod(request, "PATCH") - delete = partialmethod(request, "DELETE") diff --git a/g4f/version.py b/g4f/version.py index 63941baf..0d85a7f5 100644 --- a/g4f/version.py +++ b/g4f/version.py @@ -7,6 +7,9 @@ from importlib.metadata import version as get_package_version, PackageNotFoundEr from subprocess import check_output, CalledProcessError, PIPE from .errors import VersionNotFoundError +PACKAGE_NAME = "g4f" +GITHUB_REPOSITORY = "xtekky/gpt4free" + def get_pypi_version(package_name: str) -> str: """ Retrieves the latest version of a package from PyPI. @@ -45,25 +48,6 @@ def get_github_version(repo: str) -> str: except requests.RequestException as e: raise VersionNotFoundError(f"Failed to get GitHub release version: {e}") -def get_latest_version() -> str: - """ - Retrieves the latest release version of the 'g4f' package from PyPI or GitHub. - - Returns: - str: The latest release version of 'g4f'. - - Note: - The function first tries to fetch the version from PyPI. If the package is not found, - it retrieves the version from the GitHub repository. - """ - try: - # Is installed via package manager? - get_package_version("g4f") - return get_pypi_version("g4f") - except PackageNotFoundError: - # Else use Github version: - return get_github_version("xtekky/gpt4free") - class VersionUtils: """ Utility class for managing and comparing package versions of 'g4f'. 
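The two hunks on either side of this point replace the module-level `get_latest_version` with the new `PACKAGE_NAME` / `GITHUB_REPOSITORY` constants and an inlined lookup in `VersionUtils.latest_version`. A condensed, self-contained sketch of the resulting resolution order follows; the two fetch helpers paraphrase the `requests`-based ones kept in `version.py`, so treat their exact endpoints as an assumption rather than a quotation of the patch:

```python
from importlib.metadata import version as get_package_version, PackageNotFoundError
import requests

PACKAGE_NAME = "g4f"
GITHUB_REPOSITORY = "xtekky/gpt4free"

def get_pypi_version(package_name: str) -> str:
    # Latest release published on PyPI (mirrors the helper kept in version.py).
    return requests.get(f"https://pypi.org/pypi/{package_name}/json").json()["info"]["version"]

def get_github_version(repo: str) -> str:
    # Latest GitHub release tag (mirrors the helper kept in version.py).
    return requests.get(f"https://api.github.com/repos/{repo}/releases/latest").json()["tag_name"]

def latest_version() -> str:
    # New order: ask PyPI only when g4f was installed via a package manager,
    # otherwise fall back to the GitHub release tag.
    try:
        get_package_version(PACKAGE_NAME)
    except PackageNotFoundError:
        return get_github_version(GITHUB_REPOSITORY)
    return get_pypi_version(PACKAGE_NAME)

print(latest_version())
```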
@@ -82,7 +66,7 @@ class VersionUtils:
         """
         # Read from package manager
         try:
-            return get_package_version("g4f")
+            return get_package_version(PACKAGE_NAME)
         except PackageNotFoundError:
             pass
 
@@ -108,7 +92,12 @@ class VersionUtils:
         Returns:
             str: The latest version of 'g4f'.
         """
-        return get_latest_version()
+        # Is installed via package manager?
+        try:
+            get_package_version(PACKAGE_NAME)
+        except PackageNotFoundError:
+            return get_github_version(GITHUB_REPOSITORY)
+        return get_pypi_version(PACKAGE_NAME)
 
     def check_version(self) -> None:
         """
diff --git a/requirements.txt b/requirements.txt
index ecb69a11..a0a18af7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,11 +1,11 @@
 requests
 pycryptodome
-curl_cffi>=0.5.10
+curl_cffi>=0.6.0b9
 aiohttp
 certifi
 browser_cookie3
 PyExecJS
-duckduckgo-search
+duckduckgo-search>=4.4.3
 nest_asyncio
 werkzeug
 loguru
diff --git a/setup.py b/setup.py
index 4f3b0359..b866f3e9 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@ EXTRA_REQUIRE = {
         "py-arkose-generator", # openai
         "browser_cookie3",     # get_cookies
         "PyExecJS",            # GptForLove
-        "duckduckgo-search",   # internet.search
+        "duckduckgo-search>=4.4.3",# internet.search
         "beautifulsoup4",      # internet.search and bing.create_images
         "brotli",              # openai
         "platformdirs",        # webdriver
@@ -56,7 +56,7 @@ EXTRA_REQUIRE = {
     "gui": [
         "werkzeug", "flask",
         "beautifulsoup4", "pillow",
-        "duckduckgo-search",
+        "duckduckgo-search>=4.4.3",
         "browser_cookie3"
     ]
 }
--
cgit v1.2.3


From 1b4a86a857d9f9b77e0c7f0cd87468b627a250c8 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Thu, 22 Feb 2024 04:35:11 +0100
Subject: Update client docs, Enable some tests

---
 docs/client.md            | 23 +++++++++++++++++------
 etc/unittest/backend.py   | 32 +++++++++++++++++---------------
 g4f/gui/server/backend.py |  4 ++--
 3 files changed, 36 insertions(+), 23 deletions(-)

diff --git a/docs/client.md b/docs/client.md
index f2ba9bcd..6cc08ac3 100644
--- a/docs/client.md
+++ b/docs/client.md
@@ -2,13 +2,13 @@
 
 #### Introduction
 
-The G4F Client API introduces a new way to integrate advanced AI functionalities into your Python applications. This guide will help you transition from using the OpenAI client to the new G4F Client, offering compatibility with the existing OpenAI API alongside additional features.
+Welcome to the G4F Client API, a cutting-edge tool for seamlessly integrating advanced AI capabilities into your Python applications. This guide is designed to facilitate your transition from using the OpenAI client to the G4F Client, offering enhanced features while maintaining compatibility with the existing OpenAI API.
 
 #### Getting Started
 
 **Switching to G4F Client:**
 
-Replace the OpenAI client import statement in your Python code as follows:
+To begin using the G4F Client, simply update your import statement in your Python code:
 
 Old Import:
 ```python
@@ -20,11 +20,11 @@ from openai import OpenAI
 New Import:
 ```python
 from g4f.client import Client as OpenAI
 ```
 
-The G4F Client maintains the same API interface as OpenAI, ensuring a seamless transition.
+The G4F Client preserves the same familiar API interface as OpenAI, ensuring a smooth transition process.
 
-#### Initializing the Client
+### Initializing the Client
 
-To use the G4F Client, create an instance with customized providers:
+To utilize the G4F Client, create a new instance. Below is an example showcasing custom providers:
 
 ```python
 from g4f.client import Client
@@ -33,7 +33,18 @@ from g4f.Provider import BingCreateImages, OpenaiChat, Gemini
 client = Client(
     provider=OpenaiChat,
     image_provider=Gemini,
-    proxies=None
+    ...
+) +``` + +You also have the option to define a proxy in the client for all outgoing requests: + +```python +from g4f.client import Client + +client = Client( + proxies="http://user:pass@host", + ... ) ``` diff --git a/etc/unittest/backend.py b/etc/unittest/backend.py index 846c3554..c4ab219e 100644 --- a/etc/unittest/backend.py +++ b/etc/unittest/backend.py @@ -1,15 +1,15 @@ import unittest -# import asyncio +import asyncio from unittest.mock import MagicMock from .mocks import ProviderMock import g4f + try: from g4f.gui.server.backend import Backend_Api, get_error_message - # from g4f.gui.server.internet import search has_requirements = True except: has_requirements = False - + class TestBackendApi(unittest.TestCase): def setUp(self): @@ -18,25 +18,27 @@ class TestBackendApi(unittest.TestCase): self.app = MagicMock() self.api = Backend_Api(self.app) - # def test_version(self): - # response = self.api.get_version() - # self.assertIn("version", response) - # self.assertIn("latest_version", response) - + def test_version(self): + response = self.api.get_version() + self.assertIn("version", response) + self.assertIn("latest_version", response) + def test_get_models(self): response = self.api.get_models() self.assertIsInstance(response, list) self.assertTrue(len(response) > 0) - + def test_get_providers(self): response = self.api.get_providers() self.assertIsInstance(response, list) self.assertTrue(len(response) > 0) - - # def test_search(self): - # result = asyncio.run(search("Hello")) - # self.assertEqual(5, len(result)) - + + def test_search(self): + # Task was destroyed but it is pending! + from g4f.gui.server.internet import search + result = asyncio.run(search("Hello")) + self.assertEqual(5, len(result)) + class TestUtilityFunctions(unittest.TestCase): def setUp(self): @@ -48,6 +50,6 @@ class TestUtilityFunctions(unittest.TestCase): exception = Exception("Message") result = get_error_message(exception) self.assertEqual("ProviderMock: Exception: Message", result) - + if __name__ == '__main__': unittest.main() \ No newline at end of file diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py index 9788e5f5..454ed1c6 100644 --- a/g4f/gui/server/backend.py +++ b/g4f/gui/server/backend.py @@ -8,8 +8,6 @@ from g4f.image import is_allowed_extension, to_image from g4f.errors import VersionNotFoundError from g4f.Provider import __providers__ from g4f.Provider.bing.create_images import patch_provider -from .internet import get_search_message - class Backend_Api: """ @@ -157,6 +155,8 @@ class Backend_Api: if provider == "Bing": kwargs['web_search'] = True else: + # ResourceWarning: unclosed event loop + from .internet import get_search_message messages[-1]["content"] = get_search_message(messages[-1]["content"]) model = json_data.get('model') -- cgit v1.2.3 From 5071cd95334f8b7a4c57db2a2a8e00e8fde17773 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Thu, 22 Feb 2024 09:54:50 +0100 Subject: Add challenge_seeds in Phind --- g4f/Provider/Phind.py | 47 ++++++++++++++++++++++++++++++----------------- 1 file changed, 30 insertions(+), 17 deletions(-) diff --git a/g4f/Provider/Phind.py b/g4f/Provider/Phind.py index 746dcbcc..096cdd29 100644 --- a/g4f/Provider/Phind.py +++ b/g4f/Provider/Phind.py @@ -1,5 +1,7 @@ from __future__ import annotations +import re +import json from urllib import parse from datetime import datetime @@ -32,10 +34,18 @@ class Phind(AsyncGeneratorProvider): "Sec-Fetch-Site": "same-origin", } async with StreamSession( - impersonate="chrome110", + headers=headers, + 
impersonate="chrome", proxies={"https": proxy}, timeout=timeout ) as session: + url = "https://www.phind.com/search?home=true" + async with session.get(url) as response: + text = await response.text() + match = re.search(r'', text) + data = json.loads(match.group("json")) + challenge_seeds = data["props"]["pageProps"]["challengeSeeds"] + prompt = messages[-1]["content"] data = { "question": prompt, @@ -51,14 +61,13 @@ class Phind(AsyncGeneratorProvider): "language": "en-US", "detailed": True, "anonUserId": "", - "answerModel": "GPT-4" if model.startswith("gpt-4") else "Phind Model", + "answerModel": "GPT-4" if model.startswith("gpt-4") else "Phind-34B", "creativeMode": creative_mode, "customLinks": [] }, "context": "\n".join([message["content"] for message in messages if message["role"] == "system"]), } - data["challenge"] = generate_challenge(data) - + data["challenge"] = generate_challenge(data, **challenge_seeds) async with session.post(f"https://https.api.phind.com/infer/", headers=headers, json=data) as response: new_line = False async for line in response.iter_lines(): @@ -101,6 +110,18 @@ def deterministic_stringify(obj): items = sorted(obj.items(), key=lambda x: x[0]) return ','.join([f'{k}:{handle_value(v)}' for k, v in items if handle_value(v) is not None]) +def prng_general(seed, multiplier, addend, modulus): + a = seed * multiplier + addend + if a < 0: + return ((a%modulus)-modulus)/modulus + else: + return a%modulus/modulus + +def generate_challenge_seed(l): + I = deterministic_stringify(l) + d = parse.quote(I, safe='') + return simple_hash(d) + def simple_hash(s): d = 0 for char in s: @@ -111,16 +132,8 @@ def simple_hash(s): d -= 0x100000000 # Subtract 2**32 return d -def generate_challenge(obj): - deterministic_str = deterministic_stringify(obj) - encoded_str = parse.quote(deterministic_str, safe='') - - c = simple_hash(encoded_str) - a = (9301 * c + 49297) - b = 233280 - - # If negativ, we need a special logic - if a < 0: - return ((a%b)-b)/b - else: - return a%b/b \ No newline at end of file +def generate_challenge(obj, **kwargs): + return prng_general( + seed=generate_challenge_seed(obj), + **kwargs + ) \ No newline at end of file -- cgit v1.2.3 From 74397096b794631e718e7e5dfc7ed8517d0e42c2 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Fri, 23 Feb 2024 02:35:13 +0100 Subject: Use new client in inter api --- g4f/api/__init__.py | 199 +++++++++++++++++----------------------------------- g4f/client.py | 37 ++++++---- g4f/stubs.py | 85 ++++++++++++++++++---- 3 files changed, 159 insertions(+), 162 deletions(-) diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py index 3f0778a1..9033aafe 100644 --- a/g4f/api/__init__.py +++ b/g4f/api/__init__.py @@ -1,21 +1,27 @@ -import ast import logging -import time import json -import random -import string import uvicorn import nest_asyncio from fastapi import FastAPI, Response, Request -from fastapi.responses import StreamingResponse -from typing import List, Union, Any, Dict, AnyStr -#from ._tokenizer import tokenize +from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse +from pydantic import BaseModel +from typing import List import g4f -from .. 
import debug - -debug.logging = True +import g4f.debug +from g4f.client import Client +from g4f.typing import Messages + +class ChatCompletionsConfig(BaseModel): + messages: Messages + model: str + provider: str | None + stream: bool = False + temperature: float | None + max_tokens: int = None + stop: list[str] | str | None + access_token: str | None class Api: def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False, @@ -25,169 +31,82 @@ class Api: self.sentry = sentry self.list_ignored_providers = list_ignored_providers - self.app = FastAPI() + if debug: + g4f.debug.logging = True + self.client = Client() + nest_asyncio.apply() + self.app = FastAPI() - JSONObject = Dict[AnyStr, Any] - JSONArray = List[Any] - JSONStructure = Union[JSONArray, JSONObject] + self.routes() + def routes(self): @self.app.get("/") async def read_root(): - return Response(content=json.dumps({"info": "g4f API"}, indent=4), media_type="application/json") + return RedirectResponse("/v1", 302) @self.app.get("/v1") async def read_root_v1(): - return Response(content=json.dumps({"info": "Go to /v1/chat/completions or /v1/models."}, indent=4), media_type="application/json") + return HTMLResponse('g4f API: Go to ' + 'chat/completions ' + 'or models.') @self.app.get("/v1/models") async def models(): - model_list = [] - for model in g4f.Model.__all__(): - model_info = (g4f.ModelUtils.convert[model]) - model_list.append({ - 'id': model, + model_list = dict( + (model, g4f.ModelUtils.convert[model]) + for model in g4f.Model.__all__() + ) + model_list = [{ + 'id': model_id, 'object': 'model', 'created': 0, - 'owned_by': model_info.base_provider} - ) - return Response(content=json.dumps({ - 'object': 'list', - 'data': model_list}, indent=4), media_type="application/json") + 'owned_by': model.base_provider + } for model_id, model in model_list.items()] + return JSONResponse(model_list) @self.app.get("/v1/models/{model_name}") async def model_info(model_name: str): try: - model_info = (g4f.ModelUtils.convert[model_name]) - - return Response(content=json.dumps({ + model_info = g4f.ModelUtils.convert[model_name] + return JSONResponse({ 'id': model_name, 'object': 'model', 'created': 0, 'owned_by': model_info.base_provider - }, indent=4), media_type="application/json") + }) except: - return Response(content=json.dumps({"error": "The model does not exist."}, indent=4), media_type="application/json") + return JSONResponse({"error": "The model does not exist."}) @self.app.post("/v1/chat/completions") - async def chat_completions(request: Request, item: JSONStructure = None): - item_data = { - 'model': 'gpt-3.5-turbo', - 'stream': False, - } - - # item contains byte keys, and dict.get suppresses error - item_data.update({ - key.decode('utf-8') if isinstance(key, bytes) else key: str(value) - for key, value in (item or {}).items() - }) - # messages is str, need dict - if isinstance(item_data.get('messages'), str): - item_data['messages'] = ast.literal_eval(item_data.get('messages')) - - model = item_data.get('model') - stream = True if item_data.get("stream") == "True" else False - messages = item_data.get('messages') - provider = item_data.get('provider', '').replace('g4f.Provider.', '') - provider = provider if provider and provider != "Auto" else None - temperature = item_data.get('temperature') - + async def chat_completions(config: ChatCompletionsConfig = None, request: Request = None, provider: str = None): try: - response = g4f.ChatCompletion.create( - model=model, - stream=stream, - messages=messages, - 
temperature = temperature, - provider = provider, + config.provider = provider if config.provider is None else config.provider + if config.access_token is None and request is not None: + auth_header = request.headers.get("Authorization") + if auth_header is not None: + config.access_token = auth_header.split(None, 1)[-1] + + response = self.client.chat.completions.create( + **dict(config), ignored=self.list_ignored_providers ) except Exception as e: logging.exception(e) - content = json.dumps({ - "error": {"message": f"An error occurred while generating the response:\n{e}"}, - "model": model, - "provider": g4f.get_last_provider(True) - }) - return Response(content=content, status_code=500, media_type="application/json") - completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28)) - completion_timestamp = int(time.time()) - - if not stream: - #prompt_tokens, _ = tokenize(''.join([message['content'] for message in messages])) - #completion_tokens, _ = tokenize(response) - - json_data = { - 'id': f'chatcmpl-{completion_id}', - 'object': 'chat.completion', - 'created': completion_timestamp, - 'model': model, - 'provider': g4f.get_last_provider(True), - 'choices': [ - { - 'index': 0, - 'message': { - 'role': 'assistant', - 'content': response, - }, - 'finish_reason': 'stop', - } - ], - 'usage': { - 'prompt_tokens': 0, #prompt_tokens, - 'completion_tokens': 0, #completion_tokens, - 'total_tokens': 0, #prompt_tokens + completion_tokens, - }, - } - - return Response(content=json.dumps(json_data, indent=4), media_type="application/json") + return Response(content=format_exception(e, config), status_code=500, media_type="application/json") + + if not config.stream: + return JSONResponse(response.to_json()) def streaming(): try: for chunk in response: - completion_data = { - 'id': f'chatcmpl-{completion_id}', - 'object': 'chat.completion.chunk', - 'created': completion_timestamp, - 'model': model, - 'provider': g4f.get_last_provider(True), - 'choices': [ - { - 'index': 0, - 'delta': { - 'role': 'assistant', - 'content': chunk, - }, - 'finish_reason': None, - } - ], - } - yield f'data: {json.dumps(completion_data)}\n\n' - time.sleep(0.03) - end_completion_data = { - 'id': f'chatcmpl-{completion_id}', - 'object': 'chat.completion.chunk', - 'created': completion_timestamp, - 'model': model, - 'provider': g4f.get_last_provider(True), - 'choices': [ - { - 'index': 0, - 'delta': {}, - 'finish_reason': 'stop', - } - ], - } - yield f'data: {json.dumps(end_completion_data)}\n\n' + yield f"data: {json.dumps(chunk.to_json())}\n\n" except GeneratorExit: pass except Exception as e: logging.exception(e) - content = json.dumps({ - "error": {"message": f"An error occurred while generating the response:\n{e}"}, - "model": model, - "provider": g4f.get_last_provider(True), - }) - yield f'data: {content}' + yield f'data: {format_exception(e, config)}' return StreamingResponse(streaming(), media_type="text/event-stream") @@ -198,3 +117,11 @@ class Api: def run(self, ip): split_ip = ip.split(":") uvicorn.run(app=self.app, host=split_ip[0], port=int(split_ip[1]), use_colors=False) + +def format_exception(e: Exception, config: ChatCompletionsConfig) -> str: + last_provider = g4f.get_last_provider(True) + return json.dumps({ + "error": {"message": f"ChatCompletionsError: {e.__class__.__name__}: {e}"}, + "model": last_provider.get("model") if last_provider else config.model, + "provider": last_provider.get("name") if last_provider else config.provider + }) \ No newline at end of file diff --git 
a/g4f/client.py b/g4f/client.py index 4e5394b7..b44a5230 100644 --- a/g4f/client.py +++ b/g4f/client.py @@ -2,6 +2,9 @@ from __future__ import annotations import re import os +import time +import random +import string from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse from .typing import Union, Generator, Messages, ImageType @@ -10,10 +13,11 @@ from .image import ImageResponse as ImageProviderResponse from .Provider.BingCreateImages import BingCreateImages from .Provider.needs_auth import Gemini, OpenaiChat from .errors import NoImageResponseError -from . import get_model_and_provider +from . import get_model_and_provider, get_last_provider ImageProvider = Union[BaseProvider, object] Proxies = Union[dict, str] +IterResponse = Generator[ChatCompletion | ChatCompletionChunk, None, None] def read_json(text: str) -> dict: """ @@ -31,18 +35,16 @@ def read_json(text: str) -> dict: return text def iter_response( - response: iter, + response: iter[str], stream: bool, response_format: dict = None, max_tokens: int = None, stop: list = None -) -> Generator: +) -> IterResponse: content = "" finish_reason = None - last_chunk = None + completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28)) for idx, chunk in enumerate(response): - if last_chunk is not None: - yield ChatCompletionChunk(last_chunk, finish_reason) content += str(chunk) if max_tokens is not None and idx + 1 >= max_tokens: finish_reason = "length" @@ -63,16 +65,25 @@ def iter_response( if first != -1: finish_reason = "stop" if stream: - last_chunk = chunk + yield ChatCompletionChunk(chunk, None, completion_id, int(time.time())) if finish_reason is not None: break - if last_chunk is not None: - yield ChatCompletionChunk(last_chunk, finish_reason) - if not stream: + finish_reason = "stop" if finish_reason is None else finish_reason + if stream: + yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time())) + else: if response_format is not None and "type" in response_format: if response_format["type"] == "json_object": content = read_json(content) - yield ChatCompletion(content, finish_reason) + yield ChatCompletion(content, finish_reason, completion_id, int(time.time())) + +def iter_append_model_and_provider(response: IterResponse) -> IterResponse: + last_provider = None + for chunk in response: + last_provider = get_last_provider(True) if last_provider is None else last_provider + chunk.model = last_provider.get("model") + chunk.provider = last_provider.get("name") + yield chunk class Client(): proxies: Proxies = None @@ -113,7 +124,7 @@ class Completions(): stream: bool = False, response_format: dict = None, max_tokens: int = None, - stop: Union[list. 
str] = None, + stop: list[str] | str = None, **kwargs ) -> Union[ChatCompletion, Generator[ChatCompletionChunk]]: if max_tokens is not None: @@ -128,7 +139,7 @@ class Completions(): ) response = provider.create_completion(model, messages, stream=stream, proxy=self.client.get_proxy(), **kwargs) stop = [stop] if isinstance(stop, str) else stop - response = iter_response(response, stream, response_format, max_tokens, stop) + response = iter_append_model_and_provider(iter_response(response, stream, response_format, max_tokens, stop)) return response if stream else next(response) class Chat(): diff --git a/g4f/stubs.py b/g4f/stubs.py index 1cbbb134..b9934b8c 100644 --- a/g4f/stubs.py +++ b/g4f/stubs.py @@ -2,34 +2,93 @@ from __future__ import annotations class Model(): - def __getitem__(self, item): - return getattr(self, item) + ... class ChatCompletion(Model): - def __init__(self, content: str, finish_reason: str): - self.choices = [ChatCompletionChoice(ChatCompletionMessage(content, finish_reason))] + def __init__( + self, + content: str, + finish_reason: str, + completion_id: str = None, + created: int = None + ): + self.id: str = f"chatcmpl-{completion_id}" if completion_id else None + self.object: str = "chat.completion" + self.created: int = created + self.model: str = None + self.provider: str = None + self.choices = [ChatCompletionChoice(ChatCompletionMessage(content), finish_reason)] + self.usage: dict[str, int] = { + "prompt_tokens": 0, #prompt_tokens, + "completion_tokens": 0, #completion_tokens, + "total_tokens": 0, #prompt_tokens + completion_tokens, + } + + def to_json(self): + return { + **self.__dict__, + "choices": [choice.to_json() for choice in self.choices] + } class ChatCompletionChunk(Model): - def __init__(self, content: str, finish_reason: str): - self.choices = [ChatCompletionDeltaChoice(ChatCompletionDelta(content, finish_reason))] + def __init__( + self, + content: str, + finish_reason: str, + completion_id: str = None, + created: int = None + ): + self.id: str = f"chatcmpl-{completion_id}" if completion_id else None + self.object: str = "chat.completion.chunk" + self.created: int = created + self.model: str = None + self.provider: str = None + self.choices = [ChatCompletionDeltaChoice(ChatCompletionDelta(content), finish_reason)] + + def to_json(self): + return { + **self.__dict__, + "choices": [choice.to_json() for choice in self.choices] + } class ChatCompletionMessage(Model): - def __init__(self, content: str, finish_reason: str): + def __init__(self, content: str | None): + self.role = "assistant" self.content = content - self.finish_reason = finish_reason + + def to_json(self): + return self.__dict__ class ChatCompletionChoice(Model): - def __init__(self, message: ChatCompletionMessage): + def __init__(self, message: ChatCompletionMessage, finish_reason: str): + self.index = 0 self.message = message + self.finish_reason = finish_reason + + def to_json(self): + return { + **self.__dict__, + "message": self.message.to_json() + } class ChatCompletionDelta(Model): - def __init__(self, content: str, finish_reason: str): - self.content = content - self.finish_reason = finish_reason + def __init__(self, content: str | None): + if content is not None: + self.content = content + + def to_json(self): + return self.__dict__ class ChatCompletionDeltaChoice(Model): - def __init__(self, delta: ChatCompletionDelta): + def __init__(self, delta: ChatCompletionDelta, finish_reason: str | None): self.delta = delta + self.finish_reason = finish_reason + + def to_json(self): + 
return { + **self.__dict__, + "delta": self.delta.to_json() + } class Image(Model): url: str -- cgit v1.2.3 From d733930a2b1876340039d90f19ece81fab0d078d Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Fri, 23 Feb 2024 02:51:10 +0100 Subject: Fix unittests, use Union typing --- etc/unittest/client.py | 8 +++++--- g4f/api/__init__.py | 10 +++++----- g4f/client.py | 4 ++-- g4f/stubs.py | 10 +++++++--- 4 files changed, 19 insertions(+), 13 deletions(-) diff --git a/etc/unittest/client.py b/etc/unittest/client.py index 2bc00c2e..ec8aa4b7 100644 --- a/etc/unittest/client.py +++ b/etc/unittest/client.py @@ -35,13 +35,15 @@ class TestPassModel(unittest.TestCase): response = client.chat.completions.create(messages, "Hello", stream=True) for chunk in response: self.assertIsInstance(chunk, ChatCompletionChunk) - self.assertIsInstance(chunk.choices[0].delta.content, str) + if chunk.choices[0].delta.content is not None: + self.assertIsInstance(chunk.choices[0].delta.content, str) messages = [{'role': 'user', 'content': chunk} for chunk in ["You ", "You ", "Other", "?"]] response = client.chat.completions.create(messages, "Hello", stream=True, max_tokens=2) response = list(response) - self.assertEqual(len(response), 2) + self.assertEqual(len(response), 3) for chunk in response: - self.assertEqual(chunk.choices[0].delta.content, "You ") + if chunk.choices[0].delta.content is not None: + self.assertEqual(chunk.choices[0].delta.content, "You ") def test_stop(self): client = Client(provider=YieldProviderMock) diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py index 9033aafe..d1e8539f 100644 --- a/g4f/api/__init__.py +++ b/g4f/api/__init__.py @@ -6,7 +6,7 @@ import nest_asyncio from fastapi import FastAPI, Response, Request from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse from pydantic import BaseModel -from typing import List +from typing import List, Union import g4f import g4f.debug @@ -16,12 +16,12 @@ from g4f.typing import Messages class ChatCompletionsConfig(BaseModel): messages: Messages model: str - provider: str | None + provider: Union[str, None] stream: bool = False - temperature: float | None + temperature: Union[float, None] max_tokens: int = None - stop: list[str] | str | None - access_token: str | None + stop: Union[list[str], str, None] + access_token: Union[str, None] class Api: def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False, diff --git a/g4f/client.py b/g4f/client.py index b44a5230..023d53f6 100644 --- a/g4f/client.py +++ b/g4f/client.py @@ -17,7 +17,7 @@ from . import get_model_and_provider, get_last_provider ImageProvider = Union[BaseProvider, object] Proxies = Union[dict, str] -IterResponse = Generator[ChatCompletion | ChatCompletionChunk, None, None] +IterResponse = Generator[Union[ChatCompletion, ChatCompletionChunk], None, None] def read_json(text: str) -> dict: """ @@ -124,7 +124,7 @@ class Completions(): stream: bool = False, response_format: dict = None, max_tokens: int = None, - stop: list[str] | str = None, + stop: Union[list[str], str] = None, **kwargs ) -> Union[ChatCompletion, Generator[ChatCompletionChunk]]: if max_tokens is not None: diff --git a/g4f/stubs.py b/g4f/stubs.py index b9934b8c..49cf8a88 100644 --- a/g4f/stubs.py +++ b/g4f/stubs.py @@ -1,6 +1,8 @@ from __future__ import annotations +from typing import Union + class Model(): ... 
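The stub classes being touched here are plain-Python mirrors of the OpenAI response schema, which is what lets the interference API stream them with `json.dumps(chunk.to_json())`. A small usage sketch, assuming the module is importable as `g4f.stubs` as these diffs indicate; the completion id and timestamp are invented for illustration:

```python
from g4f.stubs import ChatCompletionChunk

# Build a streaming chunk the way iter_response in g4f/client.py does:
# positional content and finish_reason, then completion id and created time.
chunk = ChatCompletionChunk("Hello", None, "abc123", 1708646400)

# to_json() returns the OpenAI-style dict that the API emits as an SSE
# `data:` line; while streaming, finish_reason stays None and the delta
# carries only the content.
print(chunk.to_json())
# {'id': 'chatcmpl-abc123', 'object': 'chat.completion.chunk',
#  'created': 1708646400, 'model': None, 'provider': None,
#  'choices': [{'delta': {'content': 'Hello'}, 'finish_reason': None}]}
```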
@@ -52,7 +54,7 @@ class ChatCompletionChunk(Model): } class ChatCompletionMessage(Model): - def __init__(self, content: str | None): + def __init__(self, content: Union[str, None]): self.role = "assistant" self.content = content @@ -72,7 +74,9 @@ class ChatCompletionChoice(Model): } class ChatCompletionDelta(Model): - def __init__(self, content: str | None): + content: Union[str, None] = None + + def __init__(self, content: Union[str, None]): if content is not None: self.content = content @@ -80,7 +84,7 @@ class ChatCompletionDelta(Model): return self.__dict__ class ChatCompletionDeltaChoice(Model): - def __init__(self, delta: ChatCompletionDelta, finish_reason: str | None): + def __init__(self, delta: ChatCompletionDelta, finish_reason: Union[str, None]): self.delta = delta self.finish_reason = finish_reason -- cgit v1.2.3
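A closing worked example for the Phind change in the third commit above: the patch factors the removed hard-coded challenge math into `prng_general` (a linear congruential step), `generate_challenge_seed` (stringify, URL-quote, hash) and a parameterized `generate_challenge`, whose multiplier, addend and modulus now come from the page's `challengeSeeds`. The sketch below is hedged: the import path follows this patch, and the constants 9301 / 49297 / 233280 are the legacy values from the deleted `generate_challenge`, not live seeds.

```python
from g4f.Provider.Phind import generate_challenge

# A payload shaped like the `data` dict the provider builds before posting.
data = {"question": "Hello", "options": {"language": "en-US", "detailed": True}}

# With the legacy LCG parameters, the refactored chain reproduces the old
# behaviour: seed = simple_hash(quote(deterministic_stringify(data))), then
# seed * multiplier + addend, reduced by the modulus and divided by it so
# the result lands in (-1.0, 1.0).
challenge = generate_challenge(data, multiplier=9301, addend=49297, modulus=233280)
print(challenge)
```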