summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTekky <98614666+xtekky@users.noreply.github.com>2023-08-25 18:22:25 +0200
committerGitHub <noreply@github.com>2023-08-25 18:22:25 +0200
commit473ac1450ae09cd59c7dc818a4bb272ed5640610 (patch)
tree60052aad3746053eb4aec5b7e44d75404f4d11fe
parentRemove judge function (diff)
parent~ | Merge pull request #841 (diff)
downloadgpt4free-473ac1450ae09cd59c7dc818a4bb272ed5640610.tar
gpt4free-473ac1450ae09cd59c7dc818a4bb272ed5640610.tar.gz
gpt4free-473ac1450ae09cd59c7dc818a4bb272ed5640610.tar.bz2
gpt4free-473ac1450ae09cd59c7dc818a4bb272ed5640610.tar.lz
gpt4free-473ac1450ae09cd59c7dc818a4bb272ed5640610.tar.xz
gpt4free-473ac1450ae09cd59c7dc818a4bb272ed5640610.tar.zst
gpt4free-473ac1450ae09cd59c7dc818a4bb272ed5640610.zip
-rw-r--r--README.md9
-rw-r--r--g4f/Provider/Ails.py2
-rw-r--r--g4f/Provider/Bard.py22
-rw-r--r--g4f/Provider/Bing.py466
-rw-r--r--g4f/Provider/EasyChat.py8
-rw-r--r--g4f/Provider/Hugchat.py67
-rw-r--r--g4f/Provider/OpenaiChat.py74
-rw-r--r--g4f/Provider/Wuguokai.py65
-rw-r--r--g4f/Provider/You.py13
-rw-r--r--g4f/Provider/Yqcloud.py5
-rw-r--r--g4f/Provider/__init__.py6
-rw-r--r--g4f/Provider/base_provider.py85
-rw-r--r--testing/test_needs_auth.py95
13 files changed, 619 insertions, 298 deletions
diff --git a/README.md b/README.md
index 22db26cc..76c46ce5 100644
--- a/README.md
+++ b/README.md
@@ -299,6 +299,13 @@ if __name__ == "__main__":
<td><a href="https://github.com/HexyeDEV/Telegram-Chatbot-Gpt4Free/issues"><img alt="Issues" src="https://img.shields.io/github/issues/HexyeDEV/Telegram-Chatbot-Gpt4Free?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/HexyeDEV/Telegram-Chatbot-Gpt4Free/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/HexyeDEV/Telegram-Chatbot-Gpt4Free?style=flat-square&labelColor=343b41"/></a></td>
</tr>
+ <tr>
+ <td><a href="https://github.com/Lin-jun-xiang/action-translate-readme"><b>Action Translate Readme</b></a></td>
+ <td><a href="https://github.com/Lin-jun-xiang/action-translate-readme/stargazers"><img alt="Stars" src="https://img.shields.io/github/stars/Lin-jun-xiang/action-translate-readme?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/Lin-jun-xiang/action-translate-readme/network/members"><img alt="Forks" src="https://img.shields.io/github/forks/Lin-jun-xiang/action-translate-readme?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/Lin-jun-xiang/action-translate-readme/issues"><img alt="Issues" src="https://img.shields.io/github/issues/Lin-jun-xiang/action-translate-readme?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/Lin-jun-xiang/action-translate-readme/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/Lin-jun-xiang/action-translate-readme?style=flat-square&labelColor=343b41"/></a></td>
+ </tr>
</tbody>
</table>
@@ -387,4 +394,4 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
<a href="https://github.com/xtekky/gpt4free/stargazers">
<img width="500" alt="Star History Chart" src="https://api.star-history.com/svg?repos=xtekky/gpt4free&type=Date">
-</a> \ No newline at end of file
+</a>
diff --git a/g4f/Provider/Ails.py b/g4f/Provider/Ails.py
index 52b3745d..6665b677 100644
--- a/g4f/Provider/Ails.py
+++ b/g4f/Provider/Ails.py
@@ -72,6 +72,8 @@ class Ails(BaseProvider):
if b"content" in token:
completion_chunk = json.loads(token.decode().replace("data: ", ""))
token = completion_chunk["choices"][0]["delta"].get("content")
+ if "ai.ls" in token.lower() or "ai.ci" in token.lower():
+ raise Exception("Response Error: " + token)
if token != None:
yield token
diff --git a/g4f/Provider/Bard.py b/g4f/Provider/Bard.py
index cbe728cd..a8c7d13f 100644
--- a/g4f/Provider/Bard.py
+++ b/g4f/Provider/Bard.py
@@ -2,42 +2,26 @@ import json
import random
import re
-import browser_cookie3
from aiohttp import ClientSession
import asyncio
from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from .base_provider import AsyncProvider, get_cookies
-class Bard(BaseProvider):
+class Bard(AsyncProvider):
url = "https://bard.google.com"
needs_auth = True
working = True
@classmethod
- def create_completion(
- cls,
- model: str,
- messages: list[dict[str, str]],
- stream: bool,
- proxy: str = None,
- cookies: dict = {},
- **kwargs: Any,
- ) -> CreateResult:
- yield asyncio.run(cls.create_async(str, messages, proxy, cookies))
-
- @classmethod
async def create_async(
cls,
model: str,
messages: list[dict[str, str]],
proxy: str = None,
- cookies: dict = {},
+ cookies: dict = get_cookies(".google.com"),
**kwargs: Any,
) -> str:
- if not cookies:
- for cookie in browser_cookie3.load(domain_name='.google.com'):
- cookies[cookie.name] = cookie.value
formatted = "\n".join(
["%s: %s" % (message["role"], message["content"]) for message in messages]
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index 48b5477d..2c2e60ad 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -2,42 +2,39 @@ import asyncio
import json
import os
import random
-import ssl
-import uuid
import aiohttp
-import certifi
-import requests
-
-from ..typing import Any, AsyncGenerator, CreateResult, Tuple, Union
-from .base_provider import BaseProvider
+import asyncio
+from aiohttp import ClientSession
+from ..typing import Any, AsyncGenerator, CreateResult, Union
+from .base_provider import AsyncGeneratorProvider, get_cookies
-class Bing(BaseProvider):
+class Bing(AsyncGeneratorProvider):
url = "https://bing.com/chat"
+ needs_auth = True
+ working = True
supports_gpt_4 = True
-
+ supports_stream=True
+
@staticmethod
- def create_completion(
- model: str,
- messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any,
- ) -> CreateResult:
+ def create_async_generator(
+ model: str,
+ messages: list[dict[str, str]],
+ cookies: dict = get_cookies(".bing.com"),
+ **kwargs
+ ) -> AsyncGenerator:
if len(messages) < 2:
prompt = messages[0]["content"]
- context = False
+ context = None
else:
prompt = messages[-1]["content"]
- context = convert(messages[:-1])
+ context = create_context(messages[:-1])
- response = run(stream_generate(prompt, jailbreak, context))
- for token in response:
- yield token
+ return stream_generate(prompt, context, cookies)
-
-def convert(messages: list[dict[str, str]]):
+def create_context(messages: list[dict[str, str]]):
context = ""
for message in messages:
@@ -45,250 +42,43 @@ def convert(messages: list[dict[str, str]]):
return context
-
-jailbreak = {
- "optionsSets": [
- "saharasugg",
- "enablenewsfc",
- "clgalileo",
- "gencontentv3",
- "nlu_direct_response_filter",
- "deepleo",
- "disable_emoji_spoken_text",
- "responsible_ai_policy_235",
- "enablemm",
- "h3precise"
- # "harmonyv3",
- "dtappid",
- "cricinfo",
- "cricinfov2",
- "dv3sugg",
- "nojbfedge",
- ]
-}
-
-
-ssl_context = ssl.create_default_context()
-ssl_context.load_verify_locations(certifi.where())
-
-
-def _format(msg: dict[str, Any]) -> str:
- return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
-
-
-async def stream_generate(
- prompt: str,
- mode: dict[str, list[str]] = jailbreak,
- context: Union[bool, str] = False,
-):
- timeout = aiohttp.ClientTimeout(total=900)
- session = aiohttp.ClientSession(timeout=timeout)
-
- conversationId, clientId, conversationSignature = await create_conversation()
-
- wss = await session.ws_connect(
- "wss://sydney.bing.com/sydney/ChatHub",
- ssl=ssl_context,
- autoping=False,
- headers={
- "accept": "application/json",
- "accept-language": "en-US,en;q=0.9",
- "content-type": "application/json",
- "sec-ch-ua": '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
- "sec-ch-ua-arch": '"x86"',
- "sec-ch-ua-bitness": '"64"',
- "sec-ch-ua-full-version": '"109.0.1518.78"',
- "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-model": "",
- "sec-ch-ua-platform": '"Windows"',
- "sec-ch-ua-platform-version": '"15.0.0"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "x-ms-client-request-id": str(uuid.uuid4()),
- "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32",
- "Referer": "https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx",
- "Referrer-Policy": "origin-when-cross-origin",
- "x-forwarded-for": Defaults.ip_address,
- },
- )
-
- await wss.send_str(_format({"protocol": "json", "version": 1}))
- await wss.receive(timeout=900)
-
- argument: dict[str, Any] = {
- **mode,
+class Conversation():
+ def __init__(self, conversationId: str, clientId: str, conversationSignature: str) -> None:
+ self.conversationId = conversationId
+ self.clientId = clientId
+ self.conversationSignature = conversationSignature
+
+async def create_conversation(session: ClientSession) -> Conversation:
+ url = 'https://www.bing.com/turing/conversation/create'
+ async with await session.get(url) as response:
+ response = await response.json()
+ conversationId = response.get('conversationId')
+ clientId = response.get('clientId')
+ conversationSignature = response.get('conversationSignature')
+
+ if not conversationId or not clientId or not conversationSignature:
+ raise Exception('Failed to create conversation.')
+
+ return Conversation(conversationId, clientId, conversationSignature)
+
+async def list_conversations(session: ClientSession) -> list:
+ url = "https://www.bing.com/turing/conversation/chats"
+ async with session.get(url) as response:
+ response = await response.json()
+ return response["chats"]
+
+async def delete_conversation(session: ClientSession, conversation: Conversation) -> list:
+ url = "https://sydney.bing.com/sydney/DeleteSingleConversation"
+ json = {
+ "conversationId": conversation.conversationId,
+ "conversationSignature": conversation.conversationSignature,
+ "participant": {"id": conversation.clientId},
"source": "cib",
- "allowedMessageTypes": Defaults.allowedMessageTypes,
- "sliceIds": Defaults.sliceIds,
- "traceId": os.urandom(16).hex(),
- "isStartOfSession": True,
- "message": Defaults.location
- | {
- "author": "user",
- "inputMethod": "Keyboard",
- "text": prompt,
- "messageType": "Chat",
- },
- "conversationSignature": conversationSignature,
- "participant": {"id": clientId},
- "conversationId": conversationId,
- }
-
- if context:
- argument["previousMessages"] = [
- {
- "author": "user",
- "description": context,
- "contextType": "WebPage",
- "messageType": "Context",
- "messageId": "discover-web--page-ping-mriduna-----",
- }
- ]
-
- struct: dict[str, list[dict[str, Any]] | str | int] = {
- "arguments": [argument],
- "invocationId": "0",
- "target": "chat",
- "type": 4,
+ "optionsSets": ["autosave"]
}
-
- await wss.send_str(_format(struct))
-
- final = False
- draw = False
- resp_txt = ""
- result_text = ""
- resp_txt_no_link = ""
- cache_text = ""
-
- while not final:
- msg = await wss.receive(timeout=900)
- objects = msg.data.split(Defaults.delimiter) # type: ignore
-
- for obj in objects: # type: ignore
- if obj is None or not obj:
- continue
-
- response = json.loads(obj) # type: ignore
- if response.get("type") == 1 and response["arguments"][0].get(
- "messages",
- ):
- if not draw:
- if (
- response["arguments"][0]["messages"][0]["contentOrigin"]
- != "Apology"
- ) and not draw:
- resp_txt = result_text + response["arguments"][0]["messages"][
- 0
- ]["adaptiveCards"][0]["body"][0].get("text", "")
- resp_txt_no_link = result_text + response["arguments"][0][
- "messages"
- ][0].get("text", "")
-
- if response["arguments"][0]["messages"][0].get(
- "messageType",
- ):
- resp_txt = (
- resp_txt
- + response["arguments"][0]["messages"][0][
- "adaptiveCards"
- ][0]["body"][0]["inlines"][0].get("text")
- + "\n"
- )
- result_text = (
- result_text
- + response["arguments"][0]["messages"][0][
- "adaptiveCards"
- ][0]["body"][0]["inlines"][0].get("text")
- + "\n"
- )
-
- if cache_text.endswith(" "):
- final = True
- if wss and not wss.closed:
- await wss.close()
- if session and not session.closed:
- await session.close()
-
- yield (resp_txt.replace(cache_text, ""))
- cache_text = resp_txt
-
- elif response.get("type") == 2:
- if response["item"]["result"].get("error"):
- if wss and not wss.closed:
- await wss.close()
- if session and not session.closed:
- await session.close()
-
- raise Exception(
- f"{response['item']['result']['value']}: {response['item']['result']['message']}"
- )
-
- if draw:
- cache = response["item"]["messages"][1]["adaptiveCards"][0]["body"][
- 0
- ]["text"]
- response["item"]["messages"][1]["adaptiveCards"][0]["body"][0][
- "text"
- ] = (cache + resp_txt)
-
- if (
- response["item"]["messages"][-1]["contentOrigin"] == "Apology"
- and resp_txt
- ):
- response["item"]["messages"][-1]["text"] = resp_txt_no_link
- response["item"]["messages"][-1]["adaptiveCards"][0]["body"][0][
- "text"
- ] = resp_txt
-
- # print('Preserved the message from being deleted', file=sys.stderr)
-
- final = True
- if wss and not wss.closed:
- await wss.close()
- if session and not session.closed:
- await session.close()
-
-
-async def create_conversation() -> Tuple[str, str, str]:
- create = requests.get(
- "https://www.bing.com/turing/conversation/create",
- headers={
- "authority": "edgeservices.bing.com",
- "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "max-age=0",
- "sec-ch-ua": '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
- "sec-ch-ua-arch": '"x86"',
- "sec-ch-ua-bitness": '"64"',
- "sec-ch-ua-full-version": '"110.0.1587.69"',
- "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-model": '""',
- "sec-ch-ua-platform": '"Windows"',
- "sec-ch-ua-platform-version": '"15.0.0"',
- "sec-fetch-dest": "document",
- "sec-fetch-mode": "navigate",
- "sec-fetch-site": "none",
- "sec-fetch-user": "?1",
- "upgrade-insecure-requests": "1",
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69",
- "x-edge-shopping-flag": "1",
- "x-forwarded-for": Defaults.ip_address,
- },
- )
-
- conversationId = create.json().get("conversationId")
- clientId = create.json().get("clientId")
- conversationSignature = create.json().get("conversationSignature")
-
- if not conversationId or not clientId or not conversationSignature:
- raise Exception("Failed to create conversation.")
-
- return conversationId, clientId, conversationSignature
-
+ async with session.post(url, json=json) as response:
+ response = await response.json()
+ return response["result"]["value"] == "Success"
class Defaults:
delimiter = "\x1e"
@@ -309,9 +99,6 @@ class Defaults:
]
sliceIds = [
- # "222dtappid",
- # "225cricinfo",
- # "224locals0"
"winmuid3tf",
"osbsdusgreccf",
"ttstmout",
@@ -349,6 +136,151 @@ class Defaults:
],
}
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'max-age=0',
+ 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+ 'sec-ch-ua-arch': '"x86"',
+ 'sec-ch-ua-bitness': '"64"',
+ 'sec-ch-ua-full-version': '"110.0.1587.69"',
+ 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-model': '""',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-ch-ua-platform-version': '"15.0.0"',
+ 'sec-fetch-dest': 'document',
+ 'sec-fetch-mode': 'navigate',
+ 'sec-fetch-site': 'none',
+ 'sec-fetch-user': '?1',
+ 'upgrade-insecure-requests': '1',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
+ 'x-edge-shopping-flag': '1',
+ 'x-forwarded-for': ip_address,
+ }
+
+ optionsSets = [
+ 'saharasugg',
+ 'enablenewsfc',
+ 'clgalileo',
+ 'gencontentv3',
+ "nlu_direct_response_filter",
+ "deepleo",
+ "disable_emoji_spoken_text",
+ "responsible_ai_policy_235",
+ "enablemm",
+        "h3precise",
+ "dtappid",
+ "cricinfo",
+ "cricinfov2",
+ "dv3sugg",
+ "nojbfedge"
+ ]
+
+def format_message(message: dict) -> str:
+ return json.dumps(message, ensure_ascii=False) + Defaults.delimiter
+
+def create_message(conversation: Conversation, prompt: str, context: str=None) -> str:
+ struct = {
+ 'arguments': [
+ {
+ 'optionsSets': Defaults.optionsSets,
+ 'source': 'cib',
+ 'allowedMessageTypes': Defaults.allowedMessageTypes,
+ 'sliceIds': Defaults.sliceIds,
+ 'traceId': os.urandom(16).hex(),
+ 'isStartOfSession': True,
+ 'message': Defaults.location | {
+ 'author': 'user',
+ 'inputMethod': 'Keyboard',
+ 'text': prompt,
+ 'messageType': 'Chat'
+ },
+ 'conversationSignature': conversation.conversationSignature,
+ 'participant': {
+ 'id': conversation.clientId
+ },
+ 'conversationId': conversation.conversationId
+ }
+ ],
+ 'invocationId': '0',
+ 'target': 'chat',
+ 'type': 4
+ }
+
+ if context:
+ struct['arguments'][0]['previousMessages'] = [{
+ "author": "user",
+ "description": context,
+ "contextType": "WebPage",
+ "messageType": "Context",
+ "messageId": "discover-web--page-ping-mriduna-----"
+ }]
+ return format_message(struct)
+
+async def stream_generate(
+ prompt: str,
+ context: str=None,
+ cookies: dict=None
+ ):
+ async with ClientSession(
+ timeout=aiohttp.ClientTimeout(total=900),
+ cookies=cookies,
+ headers=Defaults.headers,
+ ) as session:
+ conversation = await create_conversation(session)
+ try:
+ async with session.ws_connect(
+ 'wss://sydney.bing.com/sydney/ChatHub',
+ autoping=False,
+ ) as wss:
+
+ await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
+ msg = await wss.receive(timeout=900)
+
+ await wss.send_str(create_message(conversation, prompt, context))
+
+ response_txt = ''
+ result_text = ''
+ returned_text = ''
+ final = False
+
+ while not final:
+ msg = await wss.receive(timeout=900)
+ objects = msg.data.split(Defaults.delimiter)
+ for obj in objects:
+ if obj is None or not obj:
+ continue
+
+ response = json.loads(obj)
+ if response.get('type') == 1 and response['arguments'][0].get('messages'):
+ message = response['arguments'][0]['messages'][0]
+ if (message['contentOrigin'] != 'Apology'):
+ response_txt = result_text + \
+ message['adaptiveCards'][0]['body'][0].get('text', '')
+
+ if message.get('messageType'):
+ inline_txt = message['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
+ response_txt += inline_txt + '\n'
+ result_text += inline_txt + '\n'
+
+ if returned_text.endswith(' '):
+ final = True
+ break
+
+ if response_txt.startswith(returned_text):
+ new = response_txt[len(returned_text):]
+ if new != "\n":
+ yield new
+ returned_text = response_txt
+ elif response.get('type') == 2:
+ result = response['item']['result']
+ if result.get('error'):
+ raise Exception(f"{result['value']}: {result['message']}")
+ final = True
+ break
+ finally:
+ await delete_conversation(session, conversation)
def run(generator: AsyncGenerator[Union[Any, str], Any]):
loop = asyncio.get_event_loop()
diff --git a/g4f/Provider/EasyChat.py b/g4f/Provider/EasyChat.py
index 2a61346c..3c6562f0 100644
--- a/g4f/Provider/EasyChat.py
+++ b/g4f/Provider/EasyChat.py
@@ -75,16 +75,18 @@ class EasyChat(BaseProvider):
if "choices" in json_data:
yield json_data["choices"][0]["message"]["content"]
else:
- yield Exception("No response from server")
+ raise Exception("No response from server")
else:
for chunk in response.iter_lines():
if b"content" in chunk:
- splitData = chunk.decode().split("data: ")
+ splitData = chunk.decode().split("data:")
if len(splitData) > 1:
yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
+ else:
+ continue
else:
- yield Exception(f"Error {response.status_code} from server")
+ raise Exception(f"Error {response.status_code} from server : {response.reason}")
@classmethod
diff --git a/g4f/Provider/Hugchat.py b/g4f/Provider/Hugchat.py
new file mode 100644
index 00000000..cedf8402
--- /dev/null
+++ b/g4f/Provider/Hugchat.py
@@ -0,0 +1,67 @@
+has_module = True
+try:
+ from hugchat.hugchat import ChatBot
+except ImportError:
+ has_module = False
+
+from .base_provider import BaseProvider, get_cookies
+from g4f.typing import CreateResult
+
+class Hugchat(BaseProvider):
+ url = "https://huggingface.co/chat/"
+ needs_auth = True
+ working = has_module
+ llms = ['OpenAssistant/oasst-sft-6-llama-30b-xor', 'meta-llama/Llama-2-70b-chat-hf']
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool = False,
+ proxy: str = None,
+ cookies: str = get_cookies(".huggingface.co"),
+ **kwargs
+ ) -> CreateResult:
+ bot = ChatBot(
+ cookies=cookies
+ )
+
+ if proxy and "://" not in proxy:
+ proxy = f"http://{proxy}"
+ bot.session.proxies = {"http": proxy, "https": proxy}
+
+ if model:
+ try:
+ if not isinstance(model, int):
+ model = cls.llms.index(model)
+ bot.switch_llm(model)
+ except:
+ raise RuntimeError(f"Model are not supported: {model}")
+
+ if len(messages) > 1:
+ formatted = "\n".join(
+ ["%s: %s" % (message["role"], message["content"]) for message in messages]
+ )
+ prompt = f"{formatted}\nAssistant:"
+ else:
+ prompt = messages.pop()["content"]
+
+ try:
+ yield bot.chat(prompt, **kwargs)
+ finally:
+ bot.delete_conversation(bot.current_conversation)
+ bot.current_conversation = ""
+ pass
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("proxy", "str"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/OpenaiChat.py b/g4f/Provider/OpenaiChat.py
new file mode 100644
index 00000000..cca258b3
--- /dev/null
+++ b/g4f/Provider/OpenaiChat.py
@@ -0,0 +1,74 @@
+has_module = True
+try:
+ from revChatGPT.V1 import AsyncChatbot
+except ImportError:
+ has_module = False
+from .base_provider import AsyncGeneratorProvider, get_cookies
+from ..typing import AsyncGenerator
+
+class OpenaiChat(AsyncGeneratorProvider):
+ url = "https://chat.openai.com"
+ needs_auth = True
+ working = has_module
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ supports_stream = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ proxy: str = None,
+ access_token: str = None,
+ cookies: dict = None,
+ **kwargs
+ ) -> AsyncGenerator:
+
+ config = {"access_token": access_token, "model": model}
+ if proxy:
+ if "://" not in proxy:
+ proxy = f"http://{proxy}"
+ config["proxy"] = proxy
+
+ bot = AsyncChatbot(
+ config=config
+ )
+
+ if not access_token:
+ cookies = cookies if cookies else get_cookies("chat.openai.com")
+ response = await bot.session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
+ access_token = response.json()["accessToken"]
+ bot.set_access_token(access_token)
+
+ if len(messages) > 1:
+ formatted = "\n".join(
+ ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
+ )
+ prompt = f"{formatted}\nAssistant:"
+ else:
+ prompt = messages.pop()["content"]
+
+ returned = None
+ async for message in bot.ask(prompt):
+ message = message["message"]
+ if returned:
+ if message.startswith(returned):
+ new = message[len(returned):]
+ if new:
+ yield new
+ else:
+ yield message
+ returned = message
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("proxy", "str"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Wuguokai.py b/g4f/Provider/Wuguokai.py
new file mode 100644
index 00000000..906283ad
--- /dev/null
+++ b/g4f/Provider/Wuguokai.py
@@ -0,0 +1,65 @@
+import random, requests, json
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Wuguokai(BaseProvider):
+ url = 'https://chat.wuguokai.xyz'
+ supports_gpt_35_turbo = True
+ supports_stream = False
+ needs_auth = False
+ working = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ base = ''
+ for message in messages:
+ base += '%s: %s\n' % (message['role'], message['content'])
+ base += 'assistant:'
+
+ headers = {
+ 'authority': 'ai-api.wuguokai.xyz',
+ 'accept': 'application/json, text/plain, */*',
+ 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat.wuguokai.xyz',
+ 'referer': 'https://chat.wuguokai.xyz/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-site',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
+ }
+ data ={
+ "prompt": base,
+ "options": {},
+ "userId": f"#/chat/{random.randint(1,99999999)}",
+ "usingContext": True
+ }
+ response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process", headers=headers, data=json.dumps(data),proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
+ _split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
+ if response.status_code == 200:
+ if len(_split) > 1:
+ yield _split[1].strip()
+ else:
+ yield _split[0].strip()
+ else:
+ raise Exception(f"Error: {response.status_code} {response.reason}")
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool")
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index cbd741ba..7ea699a2 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -20,7 +20,7 @@ class You(BaseProvider):
stream: bool,
**kwargs: Any,
) -> CreateResult:
- url_param = _create_url_param(messages)
+ url_param = _create_url_param(messages, kwargs.get("history", []))
headers = _create_header()
url = f"https://you.com/api/streamingSearch?{url_param}"
response = requests.get(
@@ -29,16 +29,19 @@ class You(BaseProvider):
impersonate="chrome107",
)
response.raise_for_status()
+
start = 'data: {"youChatToken": '
for line in response.content.splitlines():
line = line.decode('utf-8')
if line.startswith(start):
yield json.loads(line[len(start): -1])
-
-def _create_url_param(messages: list[dict[str, str]]):
- prompt = messages.pop()["content"]
- chat = _convert_chat(messages)
+def _create_url_param(messages: list[dict[str, str]], history: list[dict[str, str]]):
+ prompt = ""
+ for message in messages:
+ prompt += "%s: %s\n" % (message["role"], message["content"])
+ prompt += "assistant:"
+ chat = _convert_chat(history)
param = {"q": prompt, "domain": "youchat", "chat": chat}
return urllib.parse.urlencode(param)
diff --git a/g4f/Provider/Yqcloud.py b/g4f/Provider/Yqcloud.py
index a3147c2d..82ece2d5 100644
--- a/g4f/Provider/Yqcloud.py
+++ b/g4f/Provider/Yqcloud.py
@@ -35,7 +35,10 @@ def _create_header():
def _create_payload(messages: list[dict[str, str]]):
- prompt = messages[-1]["content"]
+ prompt = ""
+ for message in messages:
+ prompt += "%s: %s\n" % (message["role"], message["content"])
+ prompt += "assistant:"
return {
"prompt": prompt,
"network": True,
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index e27dee5d..7b2cd1e5 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -14,9 +14,11 @@ from .EasyChat import EasyChat
from .Forefront import Forefront
from .GetGpt import GetGpt
from .H2o import H2o
+from .Hugchat import Hugchat
from .Liaobots import Liaobots
from .Lockchat import Lockchat
from .Opchatgpts import Opchatgpts
+from .OpenaiChat import OpenaiChat
from .Raycast import Raycast
from .Theb import Theb
from .Vercel import Vercel
@@ -26,6 +28,7 @@ from .Yqcloud import Yqcloud
from .Equing import Equing
from .FastGpt import FastGpt
from .V50 import V50
+from .Wuguokai import Wuguokai
__all__ = [
"BaseProvider",
@@ -44,10 +47,12 @@ __all__ = [
"Forefront",
"GetGpt",
"H2o",
+ "Hugchat",
"Liaobots",
"Lockchat",
"Opchatgpts",
"Raycast",
+ "OpenaiChat",
"Theb",
"Vercel",
"Wewordle",
@@ -55,5 +60,6 @@ __all__ = [
"Yqcloud",
"Equing",
"FastGpt",
+    "Wuguokai",
"V50"
]
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index 98ad3514..56d79ee6 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -1,7 +1,11 @@
from abc import ABC, abstractmethod
-from ..typing import Any, CreateResult
+from ..typing import Any, CreateResult, AsyncGenerator, Union
+import browser_cookie3
+import asyncio
+from time import time
+import math
class BaseProvider(ABC):
url: str
@@ -30,4 +34,81 @@ class BaseProvider(ABC):
("stream", "bool"),
]
param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
+_cookies = {}
+
+def get_cookies(cookie_domain: str) -> dict:
+ if cookie_domain not in _cookies:
+ _cookies[cookie_domain] = {}
+ for cookie in browser_cookie3.load(cookie_domain):
+ _cookies[cookie_domain][cookie.name] = cookie.value
+ return _cookies[cookie_domain]
+
+
+class AsyncProvider(BaseProvider):
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool = False,
+ **kwargs: Any
+ ) -> CreateResult:
+ yield asyncio.run(cls.create_async(model, messages, **kwargs))
+
+ @staticmethod
+ @abstractmethod
+ async def create_async(
+ model: str,
+ messages: list[dict[str, str]],
+ **kwargs: Any,
+ ) -> str:
+ raise NotImplementedError()
+
+
+class AsyncGeneratorProvider(AsyncProvider):
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool = True,
+ **kwargs: Any
+ ) -> CreateResult:
+ if stream:
+ yield from run_generator(cls.create_async_generator(model, messages, **kwargs))
+ else:
+ yield from AsyncProvider.create_completion(cls=cls, model=model, messages=messages, **kwargs)
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ **kwargs: Any,
+ ) -> str:
+ chunks = [chunk async for chunk in cls.create_async_generator(model, messages, **kwargs)]
+ if chunks:
+ return "".join(chunks)
+
+ @staticmethod
+ @abstractmethod
+ def create_async_generator(
+ model: str,
+ messages: list[dict[str, str]],
+ ) -> AsyncGenerator:
+ raise NotImplementedError()
+
+
+def run_generator(generator: AsyncGenerator[Union[Any, str], Any]):
+ loop = asyncio.new_event_loop()
+ gen = generator.__aiter__()
+
+ while True:
+ try:
+ yield loop.run_until_complete(gen.__anext__())
+
+ except StopAsyncIteration:
+ break
diff --git a/testing/test_needs_auth.py b/testing/test_needs_auth.py
new file mode 100644
index 00000000..d44ed1df
--- /dev/null
+++ b/testing/test_needs_auth.py
@@ -0,0 +1,95 @@
+import sys
+from pathlib import Path
+import asyncio
+from time import time
+
+sys.path.append(str(Path(__file__).parent.parent))
+
+import g4f
+
+providers = [g4f.Provider.OpenaiChat, g4f.Provider.Bard, g4f.Provider.Bing]
+
+# Async support
+async def log_time_async(method: callable, **kwargs):
+ start = time()
+ result = await method(**kwargs)
+ secs = f"{round(time() - start, 2)} secs"
+ if result:
+ return " ".join([result, secs])
+ return secs
+
+def log_time_yield(method: callable, **kwargs):
+ start = time()
+ result = yield from method(**kwargs)
+ yield f" {round(time() - start, 2)} secs"
+
+def log_time(method: callable, **kwargs):
+ start = time()
+ result = method(**kwargs)
+ secs = f"{round(time() - start, 2)} secs"
+ if result:
+ return " ".join([result, secs])
+ return secs
+
+async def run_async():
+ responses = []
+ for provider in providers:
+ responses.append(log_time_async(
+ provider.create_async,
+ model=None,
+ messages=[{"role": "user", "content": "Hello"}],
+ log_time=True
+ ))
+ responses = await asyncio.gather(*responses)
+ for idx, provider in enumerate(providers):
+ print(f"{provider.__name__}:", responses[idx])
+print("Async Total:", asyncio.run(log_time_async(run_async)))
+
+# Streaming support:
+def run_stream():
+ for provider in providers:
+ print(f"{provider.__name__}: ", end="")
+ for response in log_time_yield(
+ provider.create_completion,
+ model=None,
+ messages=[{"role": "user", "content": "Hello"}],
+ ):
+ print(response, end="")
+ print()
+print("Stream Total:", log_time(run_stream))
+
+# No streaming support:
+def create_completion():
+ for provider in providers:
+ print(f"{provider.__name__}:", end=" ")
+ for response in log_time_yield(
+ g4f.Provider.Bard.create_completion,
+ model=None,
+ messages=[{"role": "user", "content": "Hello"}],
+ ):
+ print(response, end="")
+ print()
+print("No Stream Total:", log_time(create_completion))
+
+for response in g4f.Provider.Hugchat.create_completion(
+ model=None,
+ messages=[{"role": "user", "content": "Hello, tell about you."}],
+):
+ print("Hugchat:", response)
+
+"""
+OpenaiChat: Hello! How can I assist you today? 2.0 secs
+Bard: Hello! How can I help you today? 3.44 secs
+Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
+Async Total: 4.25 secs
+
+OpenaiChat: Hello! How can I assist you today? 1.85 secs
+Bard: Hello! How can I help you today? 3.38 secs
+Bing: Hello, this is Bing. How can I help? 😊 6.14 secs
+Stream Total: 11.37 secs
+
+OpenaiChat: Hello! How can I help you today? 3.28 secs
+Bard: Hello there! How can I help you today? 3.58 secs
+Bing: Hello! How can I help you today? 3.28 secs
+No Stream Total: 10.14 secs
+""" \ No newline at end of file