-rw-r--r--  README.md                                                                     |  1
-rw-r--r--  g4f/Provider/ChatgptFree.py                                                   | 66
-rw-r--r--  g4f/Provider/ChatgptLogin.py                                                  | 71
-rw-r--r--  g4f/Provider/GptChatly.py                                                     | 49
-rw-r--r--  g4f/Provider/__init__.py                                                      | 10
-rw-r--r--  g4f/Provider/deprecated/ChatForAi.py (renamed from g4f/Provider/ChatForAi.py) |  9
-rw-r--r--  g4f/Provider/deprecated/ChatgptLogin.py                                       | 74
-rw-r--r--  g4f/Provider/deprecated/Opchatgpts.py                                         | 73
-rw-r--r--  g4f/Provider/deprecated/__init__.py                                           |  2
-rw-r--r--  g4f/Provider/unfinished/ChatAiGpt.py                                          | 64
-rw-r--r--  g4f/Provider/unfinished/TalkAi.py                                             | 60
-rw-r--r--  g4f/Provider/unfinished/__init__.py                                           |  4
-rw-r--r--  g4f/__init__.py                                                               | 27
13 files changed, 417 insertions, 93 deletions
@@ -222,6 +222,7 @@ print(response)
 ##### Providers:
 ```py
 import g4f
+
 from g4f.Provider import (
     AItianhu,
     Acytoo,
diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py
new file mode 100644
index 00000000..7dee1e64
--- /dev/null
+++ b/g4f/Provider/ChatgptFree.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import re
+from aiohttp import ClientSession
+
+from ..typing import Messages
+from .base_provider import AsyncProvider
+from .helper import format_prompt
+
+
+class ChatgptFree(AsyncProvider):
+    url = "https://chatgptfree.ai"
+    supports_gpt_35_turbo = True
+    working = True
+    _post_id = None
+    _nonce = None
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> str:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Origin": cls.url,
+            "Alt-Used": "chatgptfree.ai",
+            "Connection": "keep-alive",
+            "Referer": f"{cls.url}/",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Pragma": "no-cache",
+            "Cache-Control": "no-cache",
+            "TE": "trailers"
+        }
+        async with ClientSession(headers=headers) as session:
+            if not cls._nonce:
+                async with session.get(f"{cls.url}/", proxy=proxy) as response:
+                    response.raise_for_status()
+                    response = await response.text()
+                    result = re.search(r'data-post-id="([0-9]+)"', response)
+                    if not result:
+                        raise RuntimeError("No post id found")
+                    cls._post_id = result.group(1)
+                    result = re.search(r'data-nonce="(.*?)"', response)
+                    if not result:
+                        raise RuntimeError("No nonce found")
+                    cls._nonce = result.group(1)
+            prompt = format_prompt(messages)
+            data = {
+                "_wpnonce": cls._nonce,
+                "post_id": cls._post_id,
+                "url": cls.url,
+                "action": "wpaicg_chat_shortcode_message",
+                "message": prompt,
+                "bot_id": "0"
+            }
+            async with session.post(cls.url + "/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
+                response.raise_for_status()
+                return (await response.json())["data"]
\ No newline at end of file
diff --git a/g4f/Provider/ChatgptLogin.py b/g4f/Provider/ChatgptLogin.py
new file mode 100644
index 00000000..1cf0698b
--- /dev/null
+++ b/g4f/Provider/ChatgptLogin.py
@@ -0,0 +1,71 @@
+from __future__ import annotations
+
+import re
+import time
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+
+class ChatgptLogin(AsyncGeneratorProvider):
+    url = "https://chatgptlogin.ai"
+    supports_gpt_35_turbo = True
+    working = True
+    _user_id = None
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": f"{cls.url}/chat/",
+            "Content-Type": "application/json",
+            "Origin": cls.url,
+            "Alt-Used": "chatgptlogin.ai",
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Pragma": "no-cache",
+            "Cache-Control": "no-cache"
+        }
+        async with ClientSession(headers=headers) as session:
+            if not cls._user_id:
+                async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
+                    response.raise_for_status()
+                    response = await response.text()
+                    result = re.search(r'<div id="USERID" style="display: none">(.*?)<\/div>', response)
+                    if not result:
+                        raise RuntimeError("No user id found")
+                    cls._user_id = result.group(1)
+            async with session.post(f"{cls.url}/chat/new_chat", json={"user_id": cls._user_id}, proxy=proxy) as response:
+                response.raise_for_status()
+                chat_id = (await response.json())["id_"]
+            if not chat_id:
+                raise RuntimeError("Could not create new chat")
+            prompt = format_prompt(messages)
+            data = {
+                "question": prompt,
+                "chat_id": chat_id,
+                "timestamp": int(time.time() * 1e3),
+            }
+            async with session.post(f"{cls.url}/chat/chat_api_stream", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for line in response.content:
+                    if line.startswith(b"data: "):
+                        content = json.loads(line[6:])["choices"][0]["delta"].get("content")
+                        if content:
+                            yield content
+            async with session.post(f"{cls.url}/chat/delete_chat", json={"chat_id": chat_id}, proxy=proxy) as response:
+                response.raise_for_status()
\ No newline at end of file
diff --git a/g4f/Provider/GptChatly.py b/g4f/Provider/GptChatly.py
new file mode 100644
index 00000000..1d9b76cf
--- /dev/null
+++ b/g4f/Provider/GptChatly.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import Messages
+from .base_provider import AsyncProvider
+
+
+class GptChatly(AsyncProvider):
+    url = "https://gptchatly.com"
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    working = True
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> str:
+        if model.startswith("gpt-4"):
+            chat_url = f"{cls.url}/fetch-gpt4-response"
+        else:
+            chat_url = f"{cls.url}/fetch-response"
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": f"{cls.url}/",
+            "Content-Type": "application/json",
+            "Origin": cls.url,
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Pragma": "no-cache",
+            "Cache-Control": "no-cache",
+            "TE": "trailers",
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "past_conversations": messages
+            }
+            async with session.post(chat_url, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                return (await response.json())["chatGPTResponse"]
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 50ac3181..86346851 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -9,15 +9,17 @@ from .AItianhu import AItianhu
 from .AItianhuSpace import AItianhuSpace
 from .Bing import Bing
 from .ChatBase import ChatBase
-from .ChatForAi import ChatForAi
 from .Chatgpt4Online import Chatgpt4Online
 from .ChatgptAi import ChatgptAi
 from .ChatgptDemo import ChatgptDemo
 from .ChatgptDuo import ChatgptDuo
+from .ChatgptFree import ChatgptFree
+from .ChatgptLogin import ChatgptLogin
 from .ChatgptX import ChatgptX
 from .Cromicle import Cromicle
 from .FreeGpt import FreeGpt
 from .GPTalk import GPTalk
+from .GptChatly import GptChatly
 from .GptForLove import GptForLove
 from .GptGo import GptGo
 from .GptGod import GptGod
@@ -59,6 +61,7 @@ class ProviderUtils:
         'ChatgptAi': ChatgptAi,
         'ChatgptDemo': ChatgptDemo,
         'ChatgptDuo': ChatgptDuo,
+        'ChatgptFree': ChatgptFree,
         'ChatgptLogin': ChatgptLogin,
         'ChatgptX': ChatgptX,
         'CodeLinkAva': CodeLinkAva,
@@ -70,6 +73,7 @@ class ProviderUtils:
         'Forefront': Forefront,
         'FreeGpt': FreeGpt,
         'GPTalk': GPTalk,
+        'GptChatly': GptChatly,
         'GetGpt': GetGpt,
         'GptForLove': GptForLove,
         'GptGo': GptGo,
@@ -121,6 +125,7 @@ __all__ = [
     'ChatgptAi',
     'ChatgptDemo',
     'ChatgptDuo',
+    'ChatgptFree',
     'ChatgptLogin',
     'ChatgptX',
     'Cromicle',
@@ -130,6 +135,7 @@ __all__ = [
     'Forefront',
     'FreeGpt',
     'GPTalk',
+    'GptChatly',
     'GptForLove',
     'GetGpt',
     'GptGo',
@@ -156,4 +162,4 @@ __all__ = [
     'FastGpt',
     'Wuguokai',
     'V50'
-]
\ No newline at end of file
+]
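For reference, a minimal usage sketch of the providers added above, called through the `g4f.ChatCompletion` API shown in the README snippet. The provider and model names come from this diff; the prompts are arbitrary examples.

```py
import g4f
from g4f.Provider import ChatgptFree, GptChatly

# ChatgptFree is a non-streaming AsyncProvider for gpt-3.5-turbo.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=ChatgptFree,
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)

# GptChatly also advertises gpt-4 support; gpt-4* models are routed
# to the /fetch-gpt4-response endpoint.
response = g4f.ChatCompletion.create(
    model="gpt-4",
    provider=GptChatly,
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
```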
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/deprecated/ChatForAi.py
index f2fe0335..ab4cd89c 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/deprecated/ChatForAi.py
@@ -1,14 +1,13 @@
 from __future__ import annotations
 
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider
 
 
 class ChatForAi(AsyncGeneratorProvider):
     url = "https://chatforai.com"
     supports_gpt_35_turbo = True
-    working = True
 
     @classmethod
     async def create_async_generator(
@@ -40,6 +39,8 @@ class ChatForAi(AsyncGeneratorProvider):
         async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
             response.raise_for_status()
             async for chunk in response.iter_content():
+                if b"https://chatforai.store" in chunk:
+                    raise RuntimeError(f"Response: {chunk.decode()}")
                 yield chunk.decode()
 
     @classmethod
diff --git a/g4f/Provider/deprecated/ChatgptLogin.py b/g4f/Provider/deprecated/ChatgptLogin.py
deleted file mode 100644
index 07f3b914..00000000
--- a/g4f/Provider/deprecated/ChatgptLogin.py
+++ /dev/null
@@ -1,74 +0,0 @@
-from __future__ import annotations
-
-import os, re
-from aiohttp import ClientSession
-
-from ..base_provider import AsyncProvider, format_prompt
-
-
-class ChatgptLogin(AsyncProvider):
-    url = "https://opchatgpts.net"
-    supports_gpt_35_turbo = True
-    working = True
-    _nonce = None
-
-    @classmethod
-    async def create_async(
-        cls,
-        model: str,
-        messages: list[dict[str, str]],
-        **kwargs
-    ) -> str:
-        headers = {
-            "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
-            "Accept" : "*/*",
-            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
-            "Origin" : "https://opchatgpts.net",
-            "Alt-Used" : "opchatgpts.net",
-            "Referer" : "https://opchatgpts.net/chatgpt-free-use/",
-            "Sec-Fetch-Dest" : "empty",
-            "Sec-Fetch-Mode" : "cors",
-            "Sec-Fetch-Site" : "same-origin",
-        }
-        async with ClientSession(
-            headers=headers
-        ) as session:
-            if not cls._nonce:
-                async with session.get(
-                    "https://opchatgpts.net/chatgpt-free-use/",
-                    params={"id": os.urandom(6).hex()},
-                ) as response:
-                    result = re.search(r'data-nonce="(.*?)"', await response.text())
-                    if not result:
-                        raise RuntimeError("No nonce value")
-                    cls._nonce = result.group(1)
-            data = {
-                "_wpnonce": cls._nonce,
-                "post_id": 28,
-                "url": "https://opchatgpts.net/chatgpt-free-use",
-                "action": "wpaicg_chat_shortcode_message",
-                "message": format_prompt(messages),
-                "bot_id": 0
-            }
-            async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response:
-                response.raise_for_status()
-                data = await response.json()
-                if "data" in data:
-                    return data["data"]
-                elif "msg" in data:
-                    raise RuntimeError(data["msg"])
-                else:
-                    raise RuntimeError(f"Response: {data}")
-
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Opchatgpts.py b/g4f/Provider/deprecated/Opchatgpts.py
index ab0d68c9..cc6a133c 100644
--- a/g4f/Provider/deprecated/Opchatgpts.py
+++ b/g4f/Provider/deprecated/Opchatgpts.py
@@ -1,7 +1,74 @@
 from __future__ import annotations
 
-from .ChatgptLogin import ChatgptLogin
+import os, re
+from aiohttp import ClientSession
 
+from ..base_provider import AsyncProvider, format_prompt
 
-class Opchatgpts(ChatgptLogin):
-    url = "https://opchatgpts.net"
\ No newline at end of file
+
+class Opchatgpts(AsyncProvider):
+    url = "https://opchatgpts.net"
+    supports_gpt_35_turbo = True
+    working = True
+    _nonce = None
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> str:
+        headers = {
+            "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "Accept" : "*/*",
+            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "Origin" : "https://opchatgpts.net",
+            "Alt-Used" : "opchatgpts.net",
+            "Referer" : "https://opchatgpts.net/chatgpt-free-use/",
+            "Sec-Fetch-Dest" : "empty",
+            "Sec-Fetch-Mode" : "cors",
+            "Sec-Fetch-Site" : "same-origin",
+        }
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            if not cls._nonce:
+                async with session.get(
+                    "https://opchatgpts.net/chatgpt-free-use/",
+                    params={"id": os.urandom(6).hex()},
+                ) as response:
+                    result = re.search(r'data-nonce="(.*?)"', await response.text())
+                    if not result:
+                        raise RuntimeError("No nonce value")
+                    cls._nonce = result.group(1)
+            data = {
+                "_wpnonce": cls._nonce,
+                "post_id": 28,
+                "url": "https://opchatgpts.net/chatgpt-free-use",
+                "action": "wpaicg_chat_shortcode_message",
+                "message": format_prompt(messages),
+                "bot_id": 0
+            }
+            async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response:
+                response.raise_for_status()
+                data = await response.json()
+                if "data" in data:
+                    return data["data"]
+                elif "msg" in data:
+                    raise RuntimeError(data["msg"])
+                else:
+                    raise RuntimeError(f"Response: {data}")
+
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index 5c66c87f..0644dc52 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -11,4 +11,4 @@ from .Equing import Equing
 from .Wuguokai import Wuguokai
 from .V50 import V50
 from .FastGpt import FastGpt
-from .ChatgptLogin import ChatgptLogin
\ No newline at end of file
+from .ChatForAi import ChatForAi
\ No newline at end of file
diff --git a/g4f/Provider/unfinished/ChatAiGpt.py b/g4f/Provider/unfinished/ChatAiGpt.py
new file mode 100644
index 00000000..8ade40c2
--- /dev/null
+++ b/g4f/Provider/unfinished/ChatAiGpt.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import re
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+
+
+class ChatAiGpt(AsyncGeneratorProvider):
+    url = "https://chataigpt.org"
+    supports_gpt_35_turbo = True
+    working = True
+    _nonce = None
+    _post_id = None
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Origin": cls.url,
+            "Alt-Used": cls.url,
+            "Connection": "keep-alive",
+            "Referer": cls.url,
+            "Pragma": "no-cache",
+            "Cache-Control": "no-cache",
+            "TE": "trailers",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+        }
+        async with ClientSession(headers=headers) as session:
+            if not cls._nonce:
+                async with session.get(f"{cls.url}/", proxy=proxy) as response:
+                    response.raise_for_status()
+                    response = await response.text()
+                    result = re.search(r'data-nonce=(.*?) data-post-id=([0-9]+)', response)
+                    if not result:
+                        raise RuntimeError("No nonce found")
+                    cls._nonce, cls._post_id = result.group(1), result.group(2)
+            prompt = format_prompt(messages)
+            data = {
+                "_wpnonce": cls._nonce,
+                "post_id": cls._post_id,
+                "url": cls.url,
+                "action": "wpaicg_chat_shortcode_message",
+                "message": prompt,
+                "bot_id": 0
+            }
+            async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content:
+                    if chunk:
+                        yield chunk.decode()
\ No newline at end of file
diff --git a/g4f/Provider/unfinished/TalkAi.py b/g4f/Provider/unfinished/TalkAi.py
new file mode 100644
index 00000000..a7f1dd84
--- /dev/null
+++ b/g4f/Provider/unfinished/TalkAi.py
@@ -0,0 +1,60 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+
+
+class TalkAi(AsyncGeneratorProvider):
+    url = "https://talkai.info"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        if not model:
+            model = "gpt-3.5-turbo"
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+            "Accept": "application/json",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": f"{cls.url}/de/chat/",
+            "content-type": "application/json",
+            "Origin": cls.url,
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Pragma": "no-cache",
+            "Cache-Control": "no-cache"
+        }
+        async with ClientSession(headers=headers) as session:
+            history = [{
+                "content": message["content"],
+                "from": "you" if message["role"] == "user" else "chatGPT"
+            } for message in messages]
+            data = {
+                "type": "chat",
+                "message": messages[-1]["content"],
+                "messagesHistory": history,
+                "model": model,
+                "max_tokens": 256,
+                "temperature": 1,
+                "top_p": 1,
+                "presence_penalty": 0,
+                "frequency_penalty": 0,
+                **kwargs
+            }
+            async with session.post(f"{cls.url}/de/chat/send2/", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content:
+                    if chunk:
+                        yield chunk.decode()
\ No newline at end of file
diff --git a/g4f/Provider/unfinished/__init__.py b/g4f/Provider/unfinished/__init__.py
index 8330b5e4..bf5ff9aa 100644
--- a/g4f/Provider/unfinished/__init__.py
+++ b/g4f/Provider/unfinished/__init__.py
@@ -1,3 +1,5 @@
 from .MikuChat import MikuChat
 from .PerplexityAi import PerplexityAi
-from .Komo import Komo
\ No newline at end of file
+from .Komo import Komo
+from .TalkAi import TalkAi
+from .ChatAiGpt import ChatAiGpt
\ No newline at end of file
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 1a696c6c..6f777e4c 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -1,13 +1,14 @@
 from __future__ import annotations
 from requests import get
 from g4f.models import Model, ModelUtils
-from .Provider import BaseProvider
-from .typing import Messages, CreateResult, Union
+from .Provider import BaseProvider, RetryProvider
+from .typing import Messages, CreateResult, Union, List
 from .debug import logging
 
 version = '0.1.6.2'
 version_check = True
 
+
 def check_pypi_version() -> None:
     try:
         response = get("https://pypi.org/pypi/g4f/json").json()
@@ -19,9 +20,11 @@ def check_pypi_version() -> None:
     except Exception as e:
         print(f'Failed to check g4f pypi version: {e}')
 
+
 def get_model_and_provider(model    : Union[Model, str],
                            provider : Union[type[BaseProvider], None],
-                           stream   : bool) -> tuple[Model, type[BaseProvider]]:
+                           stream   : bool,
+                           ignored  : List[str] = None) -> tuple[Model, type[BaseProvider]]:
 
     if isinstance(model, str):
         if model in ModelUtils.convert:
@@ -32,6 +35,9 @@ def get_model_and_provider(model    : Union[Model, str],
     if not provider:
         provider = model.best_provider
 
+    if isinstance(provider, RetryProvider) and ignored:
+        provider.providers = [p for p in provider.providers if p.__name__ not in ignored]
+
     if not provider:
         raise RuntimeError(f'No provider found for model: {model}')
 
@@ -46,15 +52,17 @@ def get_model_and_provider(model    : Union[Model, str],
 
     return model, provider
 
+
 class ChatCompletion:
     @staticmethod
     def create(model: Union[Model, str],
               messages : Messages,
              provider : Union[type[BaseProvider], None] = None,
              stream : bool = False,
-             auth : Union[str, None] = None, **kwargs) -> Union[CreateResult, str]:
+             auth : Union[str, None] = None,
+             ignored : List[str] = None, **kwargs) -> Union[CreateResult, str]:
 
-        model, provider = get_model_and_provider(model, provider, stream)
+        model, provider = get_model_and_provider(model, provider, stream, ignored)
 
         if provider.needs_auth and not auth:
             raise ValueError(
@@ -71,15 +79,17 @@ class ChatCompletion:
                            model    : Union[Model, str],
                            messages : Messages,
                            provider : Union[type[BaseProvider], None] = None,
-                           stream : bool = False, **kwargs) -> str:
+                           stream : bool = False,
+                           ignored : List[str] = None, **kwargs) -> str:
 
         if stream:
             raise ValueError(f'"create_async" does not support "stream" argument')
 
-        model, provider = get_model_and_provider(model, provider, False)
+        model, provider = get_model_and_provider(model, provider, False, ignored)
 
         return await provider.create_async(model.name, messages, **kwargs)
 
+
 class Completion:
     @staticmethod
     def create(
@@ -87,6 +97,7 @@ class Completion:
         prompt: str,
         provider: Union[type[BaseProvider], None] = None,
         stream: bool = False,
+        ignored : List[str] = None,
         **kwargs
     ) -> Union[CreateResult, str]:
 
@@ -102,7 +113,7 @@ class Completion:
         if model not in allowed_models:
             raise Exception(f'ValueError: Can\'t use {model} with Completion.create()')
 
-        model, provider = get_model_and_provider(model, provider, stream)
+        model, provider = get_model_and_provider(model, provider, stream, ignored)
 
         result = provider.create_completion(model.name, [{"role": "user", "content": prompt}], stream, **kwargs)
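For reference, a minimal sketch of the new `ignored` argument introduced above: when the resolved provider is a `RetryProvider`, providers whose class names appear in the list are filtered out before the request is dispatched. The provider names below are arbitrary examples.

```py
import g4f

# No explicit provider: the model's default RetryProvider is used,
# minus any providers named in `ignored`.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    ignored=["ChatgptLogin", "ChatgptFree"],
)
print(response)
```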