Diffstat (limited to 'g4f/Provider/deprecated')
-rw-r--r--  g4f/Provider/deprecated/AiService.py      36
-rw-r--r--  g4f/Provider/deprecated/CodeLinkAva.py    64
-rw-r--r--  g4f/Provider/deprecated/DfeHub.py         77
-rw-r--r--  g4f/Provider/deprecated/EasyChat.py      111
-rw-r--r--  g4f/Provider/deprecated/Equing.py         81
-rw-r--r--  g4f/Provider/deprecated/FastGpt.py        87
-rw-r--r--  g4f/Provider/deprecated/Forefront.py      40
-rw-r--r--  g4f/Provider/deprecated/GetGpt.py         88
-rw-r--r--  g4f/Provider/deprecated/Lockchat.py       64
-rw-r--r--  g4f/Provider/deprecated/Opchatgpts.py      8
-rw-r--r--  g4f/Provider/deprecated/PerplexityAi.py  101
-rw-r--r--  g4f/Provider/deprecated/V50.py            67
-rw-r--r--  g4f/Provider/deprecated/Wewordle.py       65
-rw-r--r--  g4f/Provider/deprecated/Wuguokai.py       63
-rw-r--r--  g4f/Provider/deprecated/__init__.py       14
15 files changed, 966 insertions(+), 0 deletions(-)
diff --git a/g4f/Provider/deprecated/AiService.py b/g4f/Provider/deprecated/AiService.py
new file mode 100644
index 00000000..9b41e3c8
--- /dev/null
+++ b/g4f/Provider/deprecated/AiService.py
@@ -0,0 +1,36 @@
+from __future__ import annotations
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
+
+
+class AiService(BaseProvider):
+ url = "https://aiservice.vercel.app/"
+ working = False
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
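+        # Flatten the chat history into a single "role: content" transcript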
+ base = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
+ base += "\nassistant: "
+
+ headers = {
+ "accept": "*/*",
+ "content-type": "text/plain;charset=UTF-8",
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "Referer": "https://aiservice.vercel.app/chat",
+ }
+ data = {"input": base}
+ url = "https://aiservice.vercel.app/api/chat/answer"
+ response = requests.post(url, headers=headers, json=data)
+ response.raise_for_status()
+ yield response.json()["data"]
diff --git a/g4f/Provider/deprecated/CodeLinkAva.py b/g4f/Provider/deprecated/CodeLinkAva.py
new file mode 100644
index 00000000..8407ebb9
--- /dev/null
+++ b/g4f/Provider/deprecated/CodeLinkAva.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncGenerator
+from ..base_provider import AsyncGeneratorProvider
+
+
+class CodeLinkAva(AsyncGeneratorProvider):
+ url = "https://ava-ai-ef611.web.app"
+ supports_gpt_35_turbo = True
+ working = False
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ **kwargs
+ ) -> AsyncGenerator:
+ headers = {
+ "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+ "Accept" : "*/*",
+ "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "Origin" : cls.url,
+ "Referer" : cls.url + "/",
+ "Sec-Fetch-Dest" : "empty",
+ "Sec-Fetch-Mode" : "cors",
+ "Sec-Fetch-Site" : "same-origin",
+ }
+ async with ClientSession(
+ headers=headers
+ ) as session:
+ data = {
+ "messages": messages,
+ "temperature": 0.6,
+ "stream": True,
+ **kwargs
+ }
+ async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
+ response.raise_for_status()
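+                # Parse the OpenAI-style SSE stream: payload lines are prefixed with "data: "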
+ async for line in response.content:
+ line = line.decode()
+ if line.startswith("data: "):
+ if line.startswith("data: [DONE]"):
+ break
+                        line = json.loads(line[6:])
+ content = line["choices"][0]["delta"].get("content")
+ if content:
+ yield content
+
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("temperature", "float"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/deprecated/DfeHub.py b/g4f/Provider/deprecated/DfeHub.py
new file mode 100644
index 00000000..4ea7501f
--- /dev/null
+++ b/g4f/Provider/deprecated/DfeHub.py
@@ -0,0 +1,77 @@
+from __future__ import annotations
+
+import json
+import re
+import time
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
+
+
+class DfeHub(BaseProvider):
+ url = "https://chat.dfehub.com/"
+ supports_stream = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ headers = {
+ "authority" : "chat.dfehub.com",
+ "accept" : "*/*",
+ "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "content-type" : "application/json",
+ "origin" : "https://chat.dfehub.com",
+ "referer" : "https://chat.dfehub.com/",
+ "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ "sec-ch-ua-mobile" : "?0",
+ "sec-ch-ua-platform": '"macOS"',
+ "sec-fetch-dest" : "empty",
+ "sec-fetch-mode" : "cors",
+ "sec-fetch-site" : "same-origin",
+ "user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ "x-requested-with" : "XMLHttpRequest",
+ }
+
+ json_data = {
+ "messages" : messages,
+ "model" : "gpt-3.5-turbo",
+ "temperature" : kwargs.get("temperature", 0.5),
+ "presence_penalty" : kwargs.get("presence_penalty", 0),
+ "frequency_penalty" : kwargs.get("frequency_penalty", 0),
+ "top_p" : kwargs.get("top_p", 1),
+ "stream" : True
+ }
+
+ response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions",
+ headers=headers, json=json_data, timeout=3)
+
+ for chunk in response.iter_lines():
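+            # A "detail" chunk reports rate limiting: sleep for the advertised delay, then retry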
+ if b"detail" in chunk:
+ delay = re.findall(r"\d+\.\d+", chunk.decode())
+ delay = float(delay[-1])
+ time.sleep(delay)
+                yield from DfeHub.create_completion(model, messages, stream, **kwargs)
+                return
+ if b"content" in chunk:
+ data = json.loads(chunk.decode().split("data: ")[1])
+ yield (data["choices"][0]["delta"]["content"])
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("temperature", "float"),
+ ("presence_penalty", "int"),
+ ("frequency_penalty", "int"),
+ ("top_p", "int"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/deprecated/EasyChat.py b/g4f/Provider/deprecated/EasyChat.py
new file mode 100644
index 00000000..ffe9a785
--- /dev/null
+++ b/g4f/Provider/deprecated/EasyChat.py
@@ -0,0 +1,111 @@
+from __future__ import annotations
+
+import json
+import random
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
+
+
+class EasyChat(BaseProvider):
+ url: str = "https://free.easychat.work"
+ supports_stream = True
+ supports_gpt_35_turbo = True
+ working = False
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ active_servers = [
+ "https://chat10.fastgpt.me",
+ "https://chat9.fastgpt.me",
+ "https://chat1.fastgpt.me",
+ "https://chat2.fastgpt.me",
+ "https://chat3.fastgpt.me",
+ "https://chat4.fastgpt.me",
+ "https://gxos1h1ddt.fastgpt.me"
+ ]
+
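+        # Pick a mirror: honor an explicit "active_server" index, otherwise choose one at random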
+        server = active_servers[kwargs.get("active_server", random.randint(0, len(active_servers) - 1))]
+        headers = {
+            "authority"         : server.replace("https://", ""),
+            "accept"            : "text/event-stream",
+            "accept-language"   : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa;q=0.2",
+            "content-type"      : "application/json",
+            "origin"            : server,
+            "referer"           : f"{server}/",
+ 'plugins' : '0',
+ 'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+ 'sec-ch-ua-mobile' : '?0',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-fetch-dest' : 'empty',
+ 'sec-fetch-mode' : 'cors',
+ 'sec-fetch-site' : 'same-origin',
+ 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
+ 'usesearch' : 'false',
+ 'x-requested-with' : 'XMLHttpRequest'
+ }
+
+ json_data = {
+ "messages" : messages,
+ "stream" : stream,
+ "model" : model,
+ "temperature" : kwargs.get("temperature", 0.5),
+ "presence_penalty" : kwargs.get("presence_penalty", 0),
+ "frequency_penalty" : kwargs.get("frequency_penalty", 0),
+ "top_p" : kwargs.get("top_p", 1)
+ }
+
+ session = requests.Session()
+ # init cookies from server
+ session.get(f"{server}/")
+
+ response = session.post(f"{server}/api/openai/v1/chat/completions",
+ headers=headers, json=json_data, stream=stream)
+
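+        # Non-streaming requests return a single JSON body; streaming requests return "data:" SSE lines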
+        if response.status_code != 200:
+            raise Exception(f"Error {response.status_code} from server: {response.reason}")
+
+        if not stream:
+            json_data = response.json()
+            if "choices" in json_data:
+                yield json_data["choices"][0]["message"]["content"]
+            else:
+                raise Exception("No response from server")
+        else:
+            for chunk in response.iter_lines():
+                if b"content" in chunk:
+                    split_data = chunk.decode().split("data:")
+                    if len(split_data) > 1:
+                        yield json.loads(split_data[1])["choices"][0]["delta"]["content"]
+
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("temperature", "float"),
+ ("presence_penalty", "int"),
+ ("frequency_penalty", "int"),
+ ("top_p", "int"),
+ ("active_server", "int"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/deprecated/Equing.py b/g4f/Provider/deprecated/Equing.py
new file mode 100644
index 00000000..794274f2
--- /dev/null
+++ b/g4f/Provider/deprecated/Equing.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+import json
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
+
+
+class Equing(BaseProvider):
+ url: str = 'https://next.eqing.tech/'
+ working = False
+ supports_stream = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = False
+
+    @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ headers = {
+ 'authority' : 'next.eqing.tech',
+ 'accept' : 'text/event-stream',
+ 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'cache-control' : 'no-cache',
+ 'content-type' : 'application/json',
+ 'origin' : 'https://next.eqing.tech',
+ 'plugins' : '0',
+ 'pragma' : 'no-cache',
+ 'referer' : 'https://next.eqing.tech/',
+ 'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
+ 'sec-ch-ua-mobile' : '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest' : 'empty',
+ 'sec-fetch-mode' : 'cors',
+ 'sec-fetch-site' : 'same-origin',
+ 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
+ 'usesearch' : 'false',
+ 'x-requested-with' : 'XMLHttpRequest'
+ }
+
+ json_data = {
+ 'messages' : messages,
+ 'stream' : stream,
+ 'model' : model,
+ 'temperature' : kwargs.get('temperature', 0.5),
+ 'presence_penalty' : kwargs.get('presence_penalty', 0),
+ 'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+ 'top_p' : kwargs.get('top_p', 1),
+ }
+
+ response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
+ headers=headers, json=json_data, stream=stream)
+
+ if not stream:
+ yield response.json()["choices"][0]["message"]["content"]
+ return
+
+        for line in response.iter_lines():
+            if line and b'content' in line:
+                line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+                token = line_json['choices'][0]['delta'].get('content')
+                if token:
+                    yield token
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/deprecated/FastGpt.py b/g4f/Provider/deprecated/FastGpt.py
new file mode 100644
index 00000000..65efa29d
--- /dev/null
+++ b/g4f/Provider/deprecated/FastGpt.py
@@ -0,0 +1,87 @@
+from __future__ import annotations
+
+import json
+import random
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
+
+
+class FastGpt(BaseProvider):
+ url: str = 'https://chat9.fastgpt.me/'
+ working = False
+ needs_auth = False
+ supports_stream = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = False
+
+    @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ headers = {
+ 'authority' : 'chat9.fastgpt.me',
+ 'accept' : 'text/event-stream',
+ 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'cache-control' : 'no-cache',
+ 'content-type' : 'application/json',
+ 'origin' : 'https://chat9.fastgpt.me',
+ 'plugins' : '0',
+ 'pragma' : 'no-cache',
+ 'referer' : 'https://chat9.fastgpt.me/',
+ 'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
+ 'sec-ch-ua-mobile' : '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest' : 'empty',
+ 'sec-fetch-mode' : 'cors',
+ 'sec-fetch-site' : 'same-origin',
+ 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
+ 'usesearch' : 'false',
+ 'x-requested-with' : 'XMLHttpRequest',
+ }
+
+ json_data = {
+ 'messages' : messages,
+ 'stream' : stream,
+ 'model' : model,
+ 'temperature' : kwargs.get('temperature', 0.5),
+ 'presence_penalty' : kwargs.get('presence_penalty', 0),
+ 'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+ 'top_p' : kwargs.get('top_p', 1),
+ }
+
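+        # Spread requests across the known mirror subdomains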
+ subdomain = random.choice([
+ 'jdaen979ew',
+ 'chat9'
+ ])
+
+ response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
+ headers=headers, json=json_data, stream=stream)
+
+ for line in response.iter_lines():
+ if line:
+ try:
+ if b'content' in line:
+ line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+ token = line_json['choices'][0]['delta'].get('content')
+ if token:
+ yield token
+            except Exception:
+ continue
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/deprecated/Forefront.py b/g4f/Provider/deprecated/Forefront.py
new file mode 100644
index 00000000..2f807e91
--- /dev/null
+++ b/g4f/Provider/deprecated/Forefront.py
@@ -0,0 +1,40 @@
+from __future__ import annotations
+
+import json
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
+
+
+class Forefront(BaseProvider):
+ url = "https://forefront.com"
+ supports_stream = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ json_data = {
+ "text" : messages[-1]["content"],
+ "action" : "noauth",
+ "id" : "",
+ "parentId" : "",
+ "workspaceId" : "",
+ "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
+ "model" : "gpt-4",
+ "messages" : messages[:-1] if len(messages) > 1 else [],
+ "internetMode" : "auto",
+ }
+
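+        # The free-chat endpoint streams "data: {json}" lines, each carrying a "delta" text fragment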
+ response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
+ json=json_data, stream=True)
+
+ response.raise_for_status()
+ for token in response.iter_lines():
+ if b"delta" in token:
+ yield json.loads(token.decode().split("data: ")[1])["delta"]
diff --git a/g4f/Provider/deprecated/GetGpt.py b/g4f/Provider/deprecated/GetGpt.py
new file mode 100644
index 00000000..a5de1d29
--- /dev/null
+++ b/g4f/Provider/deprecated/GetGpt.py
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+import json
+import os
+import uuid
+
+import requests
+from Crypto.Cipher import AES
+
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
+
+
+class GetGpt(BaseProvider):
+ url = 'https://chat.getgpt.world/'
+ supports_stream = True
+ working = False
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ headers = {
+ 'Content-Type' : 'application/json',
+ 'Referer' : 'https://chat.getgpt.world/',
+ 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+ }
+
+ data = json.dumps(
+ {
+ 'messages' : messages,
+ 'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+ 'max_tokens' : kwargs.get('max_tokens', 4000),
+ 'model' : 'gpt-3.5-turbo',
+ 'presence_penalty' : kwargs.get('presence_penalty', 0),
+ 'temperature' : kwargs.get('temperature', 1),
+ 'top_p' : kwargs.get('top_p', 1),
+ 'stream' : True,
+ 'uuid' : str(uuid.uuid4())
+ }
+ )
+
+ res = requests.post('https://chat.getgpt.world/api/chat/stream',
+ headers=headers, json={'signature': _encrypt(data)}, stream=True)
+
+ res.raise_for_status()
+ for line in res.iter_lines():
+ if b'content' in line:
+ line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+ yield (line_json['choices'][0]['delta']['content'])
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ('model', 'str'),
+ ('messages', 'list[dict[str, str]]'),
+ ('stream', 'bool'),
+ ('temperature', 'float'),
+ ('presence_penalty', 'int'),
+ ('frequency_penalty', 'int'),
+ ('top_p', 'int'),
+ ('max_tokens', 'int'),
+ ]
+ param = ', '.join([': '.join(p) for p in params])
+ return f'g4f.provider.{cls.__name__} supports: ({param})'
+
+
+def _encrypt(e: str):
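+    # AES-128-CBC with a random 16-character hex key (t) and IV (n); both are
+    # appended to the hex ciphertext so the server can recover them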
+ t = os.urandom(8).hex().encode('utf-8')
+ n = os.urandom(8).hex().encode('utf-8')
+ r = e.encode('utf-8')
+
+ cipher = AES.new(t, AES.MODE_CBC, n)
+ ciphertext = cipher.encrypt(_pad_data(r))
+
+ return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
+
+
+def _pad_data(data: bytes) -> bytes:
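+    # PKCS#7: pad with N bytes each of value N, up to the AES block size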
+ block_size = AES.block_size
+ padding_size = block_size - len(data) % block_size
+ padding = bytes([padding_size] * padding_size)
+
+ return data + padding
diff --git a/g4f/Provider/deprecated/Lockchat.py b/g4f/Provider/deprecated/Lockchat.py
new file mode 100644
index 00000000..4bd7c5fe
--- /dev/null
+++ b/g4f/Provider/deprecated/Lockchat.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import json
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
+
+
+class Lockchat(BaseProvider):
+ url: str = "http://supertest.lockchat.app"
+ supports_stream = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ temperature = float(kwargs.get("temperature", 0.7))
+ payload = {
+ "temperature": temperature,
+ "messages" : messages,
+ "model" : model,
+ "stream" : True,
+ }
+
+ headers = {
+ "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
+ }
+ response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
+ json=payload, headers=headers, stream=True)
+
+ response.raise_for_status()
+ for token in response.iter_lines():
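+            # Lockchat intermittently claims gpt-4 does not exist; retry when that error appears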
+ if b"The model: `gpt-4` does not exist" in token:
+ print("error, retrying...")
+ Lockchat.create_completion(
+ model = model,
+ messages = messages,
+ stream = stream,
+ temperature = temperature,
+ **kwargs)
+
+ if b"content" in token:
+ token = json.loads(token.decode("utf-8").split("data: ")[1])
+ token = token["choices"][0]["delta"].get("content")
+ if token:
+ yield (token)
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("temperature", "float"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/deprecated/Opchatgpts.py b/g4f/Provider/deprecated/Opchatgpts.py
new file mode 100644
index 00000000..3bfb96f1
--- /dev/null
+++ b/g4f/Provider/deprecated/Opchatgpts.py
@@ -0,0 +1,8 @@
+from __future__ import annotations
+
+from ..ChatgptLogin import ChatgptLogin
+
+
+class Opchatgpts(ChatgptLogin):
+ url = "https://opchatgpts.net"
+    working = True
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/PerplexityAi.py b/g4f/Provider/deprecated/PerplexityAi.py
new file mode 100644
index 00000000..f4f71712
--- /dev/null
+++ b/g4f/Provider/deprecated/PerplexityAi.py
@@ -0,0 +1,101 @@
+from __future__ import annotations
+
+import json
+import time
+import base64
+from curl_cffi.requests import AsyncSession
+
+from ..base_provider import AsyncProvider, format_prompt
+
+
+class PerplexityAi(AsyncProvider):
+ url = "https://www.perplexity.ai"
+ working = False
+ supports_gpt_35_turbo = True
+ _sources = []
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ proxy: str = None,
+ **kwargs
+ ) -> str:
+ url = cls.url + "/socket.io/?EIO=4&transport=polling"
+ headers = {
+ "Referer": f"{cls.url}/"
+ }
+ async with AsyncSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107") as session:
+ url_session = "https://www.perplexity.ai/api/auth/session"
+ response = await session.get(url_session)
+ response.raise_for_status()
+
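+            # Engine.IO polling handshake: the first GET returns the session id ("sid") used by every later request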
+ response = await session.get(url, params={"t": timestamp()})
+ response.raise_for_status()
+ sid = json.loads(response.text[1:])["sid"]
+
+ response = await session.get(url, params={"t": timestamp(), "sid": sid})
+ response.raise_for_status()
+
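+            # "40" is the socket.io namespace-connect packet; authenticate as an anonymous user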
+ data = '40{"jwt":"anonymous-ask-user"}'
+ response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
+ response.raise_for_status()
+
+ response = await session.get(url, params={"t": timestamp(), "sid": sid})
+ response.raise_for_status()
+
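+            # "42" + ack id "4": emit the "perplexity_ask" event with the prompt and query options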
+ data = "424" + json.dumps([
+ "perplexity_ask",
+ format_prompt(messages),
+ {
+ "version":"2.1",
+ "source":"default",
+ "language":"en",
+ "timezone": time.tzname[0],
+ "search_focus":"internet",
+ "mode":"concise"
+ }
+ ])
+ response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
+ response.raise_for_status()
+
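+            # Long-poll until the matching ack ("43" + ack id "4") arrives with the answer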
+ while True:
+ response = await session.get(url, params={"t": timestamp(), "sid": sid})
+ response.raise_for_status()
+ for line in response.text.splitlines():
+ if line.startswith("434"):
+ result = json.loads(json.loads(line[3:])[0]["text"])
+
+ cls._sources = [{
+ "title": source["name"],
+ "url": source["url"],
+ "snippet": source["snippet"]
+ } for source in result["web_results"]]
+
+ return result["answer"]
+
+ @classmethod
+ def get_sources(cls):
+ return cls._sources
+
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("proxy", "str"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
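+# Engine.IO cache-buster: seconds since a fixed offset, packed into 4 bytes and base64-encoded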
+def timestamp() -> str:
+    return base64.urlsafe_b64encode(int(time.time()-1407782612).to_bytes(4, 'big')).decode()
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/V50.py b/g4f/Provider/deprecated/V50.py
new file mode 100644
index 00000000..9a8b032c
--- /dev/null
+++ b/g4f/Provider/deprecated/V50.py
@@ -0,0 +1,67 @@
+from __future__ import annotations
+
+import uuid
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
+
+
+class V50(BaseProvider):
+ url = 'https://p5.v50.ltd'
+ supports_gpt_35_turbo = True
+ supports_stream = False
+ needs_auth = False
+ working = False
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
+ conversation += "\nassistant: "
+
+ payload = {
+ "prompt" : conversation,
+ "options" : {},
+ "systemMessage" : ".",
+ "temperature" : kwargs.get("temperature", 0.4),
+ "top_p" : kwargs.get("top_p", 0.4),
+ "model" : model,
+ "user" : str(uuid.uuid4())
+ }
+
+ headers = {
+ 'authority' : 'p5.v50.ltd',
+ 'accept' : 'application/json, text/plain, */*',
+ 'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+ 'content-type' : 'application/json',
+ 'origin' : 'https://p5.v50.ltd',
+ 'referer' : 'https://p5.v50.ltd/',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-fetch-dest' : 'empty',
+ 'sec-fetch-mode' : 'cors',
+ 'sec-fetch-site' : 'same-origin',
+ 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
+ }
+        response = requests.post("https://p5.v50.ltd/api/chat-process",
+                                 json=payload, headers=headers, proxies=kwargs.get("proxy", {}))
+
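+        # Responses containing the fk1.v50.ltd link are error pages, not completions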
+ if "https://fk1.v50.ltd" not in response.text:
+ yield response.text
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("temperature", "float"),
+ ("top_p", "int"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/deprecated/Wewordle.py b/g4f/Provider/deprecated/Wewordle.py
new file mode 100644
index 00000000..c30887fb
--- /dev/null
+++ b/g4f/Provider/deprecated/Wewordle.py
@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+import random, string, time
+from aiohttp import ClientSession
+
+from ..base_provider import AsyncProvider
+
+
+class Wewordle(AsyncProvider):
+ url = "https://wewordle.org"
+ working = False
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ proxy: str = None,
+ **kwargs
+ ) -> str:
+
+ headers = {
+ "accept" : "*/*",
+ "pragma" : "no-cache",
+ "Content-Type" : "application/json",
+ "Connection" : "keep-alive"
+ }
+
+ _user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))
+ _app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))
+ _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
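+        # The endpoint expects a RevenueCat-style anonymous subscriber record alongside the messages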
+ data = {
+ "user" : _user_id,
+ "messages" : messages,
+ "subscriber": {
+ "originalPurchaseDate" : None,
+ "originalApplicationVersion" : None,
+ "allPurchaseDatesMillis" : {},
+ "entitlements" : {"active": {}, "all": {}},
+ "allPurchaseDates" : {},
+ "allExpirationDatesMillis" : {},
+ "allExpirationDates" : {},
+ "originalAppUserId" : f"$RCAnonymousID:{_app_id}",
+ "latestExpirationDate" : None,
+ "requestDate" : _request_date,
+ "latestExpirationDateMillis" : None,
+ "nonSubscriptionTransactions" : [],
+ "originalPurchaseDateMillis" : None,
+ "managementURL" : None,
+ "allPurchasedProductIdentifiers": [],
+ "firstSeen" : _request_date,
+ "activeSubscriptions" : [],
+ }
+ }
+
+
+ async with ClientSession(
+ headers=headers
+ ) as session:
+ async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response:
+ response.raise_for_status()
+ content = (await response.json())["message"]["content"]
+ if content:
+                    return content
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Wuguokai.py b/g4f/Provider/deprecated/Wuguokai.py
new file mode 100644
index 00000000..311131cf
--- /dev/null
+++ b/g4f/Provider/deprecated/Wuguokai.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+import random
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider, format_prompt
+
+
+class Wuguokai(BaseProvider):
+ url = 'https://chat.wuguokai.xyz'
+ supports_gpt_35_turbo = True
+ working = False
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ headers = {
+ 'authority': 'ai-api.wuguokai.xyz',
+ 'accept': 'application/json, text/plain, */*',
+ 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat.wuguokai.xyz',
+ 'referer': 'https://chat.wuguokai.xyz/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-site',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
+ }
+        data = {
+ "prompt": format_prompt(messages),
+ "options": {},
+ "userId": f"#/chat/{random.randint(1,99999999)}",
+ "usingContext": True
+ }
+        response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process",
+                                 headers=headers, timeout=3, json=data, proxies=kwargs.get("proxy", {}))
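+        # The answer may be preceded by a boilerplate retry notice; keep only the text after the marker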
+ _split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
+ if response.status_code == 200:
+ if len(_split) > 1:
+ yield _split[1].strip()
+ else:
+ yield _split[0].strip()
+ else:
+ raise Exception(f"Error: {response.status_code} {response.reason}")
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool")
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
new file mode 100644
index 00000000..8d22a3c3
--- /dev/null
+++ b/g4f/Provider/deprecated/__init__.py
@@ -0,0 +1,14 @@
+from .AiService import AiService
+from .CodeLinkAva import CodeLinkAva
+from .DfeHub import DfeHub
+from .EasyChat import EasyChat
+from .Forefront import Forefront
+from .GetGpt import GetGpt
+from .Opchatgpts import Opchatgpts
+from .Lockchat import Lockchat
+from .PerplexityAi import PerplexityAi
+from .Wewordle import Wewordle
+from .Equing import Equing
+from .Wuguokai import Wuguokai
+from .V50 import V50
+from .FastGpt import FastGpt
\ No newline at end of file