path: root/g4f/Provider/deprecated
author     H Lohaus <hlohaus@users.noreply.github.com>  2024-03-12 02:06:06 +0100
committer  GitHub <noreply@github.com>                  2024-03-12 02:06:06 +0100
commit     6ef282de3a3245acbfecd08ae48dba85ff91d031 (patch)
tree       0236c9678eea8f9c78ed7c09f3d86eaf3d7c691c /g4f/Provider/deprecated
parent     Update .gitignore (diff)
Diffstat (limited to 'g4f/Provider/deprecated')
-rw-r--r--  g4f/Provider/deprecated/AiAsk.py         46
-rw-r--r--  g4f/Provider/deprecated/AiChatOnline.py  59
-rw-r--r--  g4f/Provider/deprecated/ChatAnywhere.py  54
-rw-r--r--  g4f/Provider/deprecated/FakeGpt.py       91
-rw-r--r--  g4f/Provider/deprecated/GPTalk.py        87
-rw-r--r--  g4f/Provider/deprecated/GeekGpt.py       73
-rw-r--r--  g4f/Provider/deprecated/Hashnode.py      80
-rw-r--r--  g4f/Provider/deprecated/Ylokh.py         58
-rw-r--r--  g4f/Provider/deprecated/__init__.py      10
9 files changed, 557 insertions(+), 1 deletion(-)
diff --git a/g4f/Provider/deprecated/AiAsk.py b/g4f/Provider/deprecated/AiAsk.py
new file mode 100644
index 00000000..6ea5f3e0
--- /dev/null
+++ b/g4f/Provider/deprecated/AiAsk.py
@@ -0,0 +1,46 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+
+class AiAsk(AsyncGeneratorProvider):
+ url = "https://e.aiask.me"
+ supports_message_history = True
+ supports_gpt_35_turbo = True
+ working = False
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/plain, */*",
+ "origin": cls.url,
+ "referer": f"{cls.url}/chat",
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "continuous": True,
+ "id": "fRMSQtuHl91A4De9cCvKD",
+ "list": messages,
+ "models": "0",
+ "prompt": "",
+ "temperature": kwargs.get("temperature", 0.5),
+ "title": "",
+ }
+ buffer = ""
+ rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
+ async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ buffer += chunk.decode()
+ if not rate_limit.startswith(buffer):
+ yield buffer
+ buffer = ""
+ elif buffer == rate_limit:
+ raise RuntimeError("Rate limit reached") \ No newline at end of file
diff --git a/g4f/Provider/deprecated/AiChatOnline.py b/g4f/Provider/deprecated/AiChatOnline.py
new file mode 100644
index 00000000..e690f28e
--- /dev/null
+++ b/g4f/Provider/deprecated/AiChatOnline.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string
+
+class AiChatOnline(AsyncGeneratorProvider):
+ url = "https://aichatonline.org"
+ working = False
+ supports_gpt_35_turbo = True
+ supports_message_history = False
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+ "Accept": "text/event-stream",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}/chatgpt/chat/",
+ "Content-Type": "application/json",
+ "Origin": cls.url,
+ "Alt-Used": "aichatonline.org",
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "TE": "trailers"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "botId": "default",
+ "customId": None,
+ "session": get_random_string(16),
+ "chatId": get_random_string(),
+ "contextId": 7,
+ "messages": messages,
+ "newMessage": messages[-1]["content"],
+ "newImageId": None,
+ "stream": True
+ }
+ async with session.post(f"{cls.url}/chatgpt/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk.startswith(b"data: "):
+ data = json.loads(chunk[6:])
+ if data["type"] == "live":
+ yield data["data"]
+ elif data["type"] == "end":
+ break \ No newline at end of file
diff --git a/g4f/Provider/deprecated/ChatAnywhere.py b/g4f/Provider/deprecated/ChatAnywhere.py
new file mode 100644
index 00000000..d035eaf0
--- /dev/null
+++ b/g4f/Provider/deprecated/ChatAnywhere.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession, ClientTimeout
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+
+
+class ChatAnywhere(AsyncGeneratorProvider):
+ url = "https://chatanywhere.cn"
+ supports_gpt_35_turbo = True
+ supports_message_history = True
+ working = False
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ temperature: float = 0.5,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+ "Accept": "application/json, text/plain, */*",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "application/json",
+ "Referer": f"{cls.url}/",
+ "Origin": cls.url,
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Authorization": "",
+ "Connection": "keep-alive",
+ "TE": "trailers"
+ }
+ async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
+ data = {
+ "list": messages,
+ "id": "s1_qYuOLXjI3rEpc7WHfQ",
+ "title": messages[-1]["content"],
+ "prompt": "",
+ "temperature": temperature,
+ "models": "61490748",
+ "continuous": True
+ }
+ async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ if chunk:
+ yield chunk.decode() \ No newline at end of file
diff --git a/g4f/Provider/deprecated/FakeGpt.py b/g4f/Provider/deprecated/FakeGpt.py
new file mode 100644
index 00000000..99b6bb1a
--- /dev/null
+++ b/g4f/Provider/deprecated/FakeGpt.py
@@ -0,0 +1,91 @@
+from __future__ import annotations
+
+import uuid, time, random, json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_random_string
+
+
+class FakeGpt(AsyncGeneratorProvider):
+ url = "https://chat-shared2.zhile.io"
+ supports_gpt_35_turbo = True
+ working = False
+ _access_token = None
+ _cookie_jar = None
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "Accept-Language": "en-US",
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
+ "Referer": "https://chat-shared2.zhile.io/?v=2",
+ "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-ch-ua-mobile": "?0",
+ }
+ async with ClientSession(headers=headers, cookie_jar=cls._cookie_jar) as session:
+ if not cls._access_token:
+ async with session.get(f"{cls.url}/api/loads", params={"t": int(time.time())}, proxy=proxy) as response:
+ response.raise_for_status()
+ list = (await response.json())["loads"]
+ token_ids = [t["token_id"] for t in list]
+ data = {
+ "token_key": random.choice(token_ids),
+ "session_password": get_random_string()
+ }
+ async with session.post(f"{cls.url}/auth/login", data=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async with session.get(f"{cls.url}/api/auth/session", proxy=proxy) as response:
+ response.raise_for_status()
+ cls._access_token = (await response.json())["accessToken"]
+ cls._cookie_jar = session.cookie_jar
+ headers = {
+ "Content-Type": "application/json",
+ "Accept": "text/event-stream",
+ "X-Authorization": f"Bearer {cls._access_token}",
+ }
+ prompt = format_prompt(messages)
+ data = {
+ "action": "next",
+ "messages": [
+ {
+ "id": str(uuid.uuid4()),
+ "author": {"role": "user"},
+ "content": {"content_type": "text", "parts": [prompt]},
+ "metadata": {},
+ }
+ ],
+ "parent_message_id": str(uuid.uuid4()),
+ "model": "text-davinci-002-render-sha",
+ "plugin_ids": [],
+ "timezone_offset_min": -120,
+ "suggestions": [],
+ "history_and_training_disabled": True,
+ "arkose_token": "",
+ "force_paragen": False,
+ }
+ last_message = ""
+ async with session.post(f"{cls.url}/api/conversation", json=data, headers=headers, proxy=proxy) as response:
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ line = line[6:]
+ if line == b"[DONE]":
+ break
+ try:
+ line = json.loads(line)
+ if line["message"]["metadata"]["message_type"] == "next":
+ new_message = line["message"]["content"]["parts"][0]
+ yield new_message[len(last_message):]
+ last_message = new_message
+ except:
+ continue
+ if not last_message:
+ raise RuntimeError("No valid response") \ No newline at end of file
diff --git a/g4f/Provider/deprecated/GPTalk.py b/g4f/Provider/deprecated/GPTalk.py
new file mode 100644
index 00000000..5b36d37b
--- /dev/null
+++ b/g4f/Provider/deprecated/GPTalk.py
@@ -0,0 +1,87 @@
+from __future__ import annotations
+
+import secrets, time, json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+
+
+class GPTalk(AsyncGeneratorProvider):
+ url = "https://gptalk.net"
+ working = False
+ supports_gpt_35_turbo = True
+ _auth = None
+ used_times = 0
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ if not model:
+ model = "gpt-3.5-turbo"
+ timestamp = int(time.time())
+ headers = {
+ 'authority': 'gptalk.net',
+ 'accept': '*/*',
+ 'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6,nl;q=0.5,zh-CN;q=0.4,zh-TW;q=0.3,zh;q=0.2',
+ 'content-type': 'application/json',
+ 'origin': 'https://gptalk.net',
+ 'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
+ 'x-auth-appid': '2229',
+ 'x-auth-openid': '',
+ 'x-auth-platform': '',
+ 'x-auth-timestamp': f"{timestamp}",
+ }
+ async with ClientSession(headers=headers) as session:
+ if not cls._auth or cls._auth["expires_at"] < timestamp or cls.used_times == 5:
+ data = {
+ "fingerprint": secrets.token_hex(16).zfill(32),
+ "platform": "fingerprint"
+ }
+ async with session.post(f"{cls.url}/api/chatgpt/user/login", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ cls._auth = (await response.json())["data"]
+ cls.used_times = 0
+ data = {
+ "content": format_prompt(messages),
+ "accept": "stream",
+ "from": 1,
+ "model": model,
+ "is_mobile": 0,
+ "user_agent": headers["user-agent"],
+ "is_open_ctx": 0,
+ "prompt": "",
+ "roid": 111,
+ "temperature": 0,
+ "ctx_msg_count": 3,
+ "created_at": timestamp
+ }
+ headers = {
+ 'authorization': f'Bearer {cls._auth["token"]}',
+ }
+ async with session.post(f"{cls.url}/api/chatgpt/chatapi/text", json=data, headers=headers, proxy=proxy) as response:
+ response.raise_for_status()
+ token = (await response.json())["data"]["token"]
+ cls.used_times += 1
+ last_message = ""
+ async with session.get(f"{cls.url}/api/chatgpt/chatapi/stream", params={"token": token}, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ if line.startswith(b"data: [DONE]"):
+ break
+ message = json.loads(line[6:-1])["content"]
+ yield message[len(last_message):]
+ last_message = message
diff --git a/g4f/Provider/deprecated/GeekGpt.py b/g4f/Provider/deprecated/GeekGpt.py
new file mode 100644
index 00000000..7a460083
--- /dev/null
+++ b/g4f/Provider/deprecated/GeekGpt.py
@@ -0,0 +1,73 @@
+from __future__ import annotations
+import requests, json
+
+from ..base_provider import AbstractProvider
+from ...typing import CreateResult, Messages
+from json import dumps
+
+
+class GeekGpt(AbstractProvider):
+    url = 'https://chat.geekgpt.org'
+    working = False
+    supports_message_history = True
+    supports_stream = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        **kwargs
+    ) -> CreateResult:
+        if not model:
+            model = "gpt-3.5-turbo"
+        json_data = {
+            'messages': messages,
+            'model': model,
+            'temperature': kwargs.get('temperature', 0.9),
+            'presence_penalty': kwargs.get('presence_penalty', 0),
+            'top_p': kwargs.get('top_p', 1),
+            'frequency_penalty': kwargs.get('frequency_penalty', 0),
+            'stream': True
+        }
+
+        data = dumps(json_data, separators=(',', ':'))
+
+        headers = {
+            'authority': 'ai.fakeopen.com',
+            'accept': '*/*',
+            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'authorization': 'Bearer pk-this-is-a-real-free-pool-token-for-everyone',
+            'content-type': 'application/json',
+            'origin': 'https://chat.geekgpt.org',
+            'referer': 'https://chat.geekgpt.org/',
+            'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'cross-site',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+        }
+
+        response = requests.post("https://ai.fakeopen.com/v1/chat/completions",
+                                 headers=headers, data=data, stream=True)
+        response.raise_for_status()
+
+        for chunk in response.iter_lines():
+            if b'content' in chunk:
+                json_data = chunk.decode().replace("data: ", "")
+
+                if json_data == "[DONE]":
+                    break
+
+                try:
+                    content = json.loads(json_data)["choices"][0]["delta"].get("content")
+                except Exception as e:
+                    raise RuntimeError(f'error | {e} :', json_data)
+
+                if content:
+                    yield content
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Hashnode.py b/g4f/Provider/deprecated/Hashnode.py
new file mode 100644
index 00000000..c2c0ffb7
--- /dev/null
+++ b/g4f/Provider/deprecated/Hashnode.py
@@ -0,0 +1,80 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_hex
+
+class SearchTypes():
+    quick = "quick"
+    code = "code"
+    websearch = "websearch"
+
+class Hashnode(AsyncGeneratorProvider):
+    url = "https://hashnode.com"
+    working = False
+    supports_message_history = True
+    supports_gpt_35_turbo = True
+    _sources = []
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        search_type: str = SearchTypes.websearch,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": f"{cls.url}/rix",
+            "Content-Type": "application/json",
+            "Origin": cls.url,
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Pragma": "no-cache",
+            "Cache-Control": "no-cache",
+            "TE": "trailers",
+        }
+        async with ClientSession(headers=headers) as session:
+            prompt = messages[-1]["content"]
+            cls._sources = []
+            if search_type == "websearch":
+                async with session.post(
+                    f"{cls.url}/api/ai/rix/search",
+                    json={"prompt": prompt},
+                    proxy=proxy,
+                ) as response:
+                    response.raise_for_status()
+                    cls._sources = (await response.json())["result"]
+            data = {
+                "chatId": get_random_hex(),
+                "history": messages,
+                "prompt": prompt,
+                "searchType": search_type,
+                "urlToScan": None,
+                "searchResults": cls._sources,
+            }
+            async with session.post(
+                f"{cls.url}/api/ai/rix/completion",
+                json=data,
+                proxy=proxy,
+            ) as response:
+                response.raise_for_status()
+                async for chunk in response.content.iter_any():
+                    if chunk:
+                        yield chunk.decode()
+
+    @classmethod
+    def get_sources(cls) -> list:
+        return [{
+            "title": source["name"],
+            "url": source["url"]
+        } for source in cls._sources]
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Ylokh.py b/g4f/Provider/deprecated/Ylokh.py
new file mode 100644
index 00000000..dbff4602
--- /dev/null
+++ b/g4f/Provider/deprecated/Ylokh.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+import json
+
+from ...requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+
+class Ylokh(AsyncGeneratorProvider):
+ url = "https://chat.ylokh.xyz"
+ working = False
+ supports_message_history = True
+ supports_gpt_35_turbo = True
+
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = True,
+ proxy: str = None,
+ timeout: int = 120,
+ **kwargs
+ ) -> AsyncResult:
+ model = model if model else "gpt-3.5-turbo"
+ headers = {"Origin": cls.url, "Referer": f"{cls.url}/"}
+ data = {
+ "messages": messages,
+ "model": model,
+ "temperature": 1,
+ "presence_penalty": 0,
+ "top_p": 1,
+ "frequency_penalty": 0,
+ "allow_fallback": True,
+ "stream": stream,
+ **kwargs
+ }
+ async with StreamSession(
+ headers=headers,
+ proxies={"https": proxy},
+ timeout=timeout
+ ) as session:
+ async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
+ response.raise_for_status()
+ if stream:
+ async for line in response.iter_lines():
+ line = line.decode()
+ if line.startswith("data: "):
+ if line.startswith("data: [DONE]"):
+ break
+ line = json.loads(line[6:])
+ content = line["choices"][0]["delta"].get("content")
+ if content:
+ yield content
+ else:
+ chat = await response.json()
+ yield chat["choices"][0]["message"].get("content") \ No newline at end of file
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index 8ec5f2fc..f6b4a1d9 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -23,4 +23,12 @@ from .Opchatgpts import Opchatgpts
 from .Yqcloud import Yqcloud
 from .Aichat import Aichat
 from .Berlin import Berlin
-from .Phind import Phind
\ No newline at end of file
+from .Phind import Phind
+from .AiAsk import AiAsk
+from .AiChatOnline import AiChatOnline
+from .ChatAnywhere import ChatAnywhere
+from .FakeGpt import FakeGpt
+from .GeekGpt import GeekGpt
+from .GPTalk import GPTalk
+from .Hashnode import Hashnode
+from .Ylokh import Ylokh
\ No newline at end of file
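
Note: the providers added in this commit share the same entry points — the async classes implement create_async_generator and GeekGpt implements create_completion. A minimal sketch of the call pattern, assuming the import path introduced in the updated __init__.py; every class here is marked working = False, so the upstream endpoints are expected to reject the request and the snippet only illustrates how such a provider is driven:

    import asyncio
    from g4f.Provider.deprecated import AiAsk

    async def main() -> None:
        messages = [{"role": "user", "content": "Hello"}]
        # create_async_generator yields decoded response chunks as they arrive
        async for chunk in AiAsk.create_async_generator(model="gpt-3.5-turbo", messages=messages):
            print(chunk, end="")

    asyncio.run(main())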