author     xtekky <98614666+xtekky@users.noreply.github.com>  2023-05-24 18:30:41 +0200
committer  GitHub <noreply@github.com>  2023-05-24 18:30:41 +0200
commit     362f9877cedd92681dec32997f9ea555d0f8d228 (patch)
tree       2c53994b4f4796067e458d0fea0786270b129cde
parent     Merge pull request #590 from ezerinz/main (diff)
parent     add gptworldai (diff)
-rw-r--r--  gpt4free/gptworldAi/README.md    |  25
-rw-r--r--  gpt4free/gptworldAi/__init__.py  | 103
-rw-r--r--  gpt4free/hpgptai/README.md       |  39
-rw-r--r--  gpt4free/hpgptai/__init__.py     |  84
-rw-r--r--  testing/gptworldai_test.py       |  18
-rw-r--r--  testing/hpgptai_test.py          |  41
6 files changed, 310 insertions, 0 deletions
diff --git a/gpt4free/gptworldAi/README.md b/gpt4free/gptworldAi/README.md
new file mode 100644
index 00000000..a6b07f86
--- /dev/null
+++ b/gpt4free/gptworldAi/README.md
@@ -0,0 +1,25 @@
+# gptworldAi
+Written by [hp_mzx](https://github.com/hpsj).
+
+## Examples:
+### Completion:
+```python
+for chunk in gptworldAi.Completion.create("你是谁", "127.0.0.1:7890"):
+    print(chunk, end="", flush=True)
+print()
+```
+
+### Chat Completion:
+Supports conversation context.
+```python
+message = []
+while True:
+    prompt = input("请输入问题:")
+    message.append({"role": "user", "content": prompt})
+    text = ""
+    for chunk in gptworldAi.ChatCompletion.create(message, '127.0.0.1:7890'):
+        text = text + chunk
+        print(chunk, end="", flush=True)
+    print()
+    message.append({"role": "assistant", "content": text})
+```
\ No newline at end of file
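The second positional argument in both examples is an optional HTTP proxy given as `host:port`; the provider prefixes it with `http://` for both the http and https schemes, and it can be omitted entirely. A minimal no-proxy sketch, assuming the module is importable from the `gpt4free` package in your environment:

```python
# Minimal no-proxy sketch; the import path is an assumption about how the
# gpt4free package is laid out on your sys.path.
from gpt4free import gptworldAi

for chunk in gptworldAi.Completion.create("Who are you?"):
    print(chunk, end="", flush=True)
print()
```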
diff --git a/gpt4free/gptworldAi/__init__.py b/gpt4free/gptworldAi/__init__.py
new file mode 100644
index 00000000..a729fdf8
--- /dev/null
+++ b/gpt4free/gptworldAi/__init__.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+"""
+@Time : 2023/5/23 13:37
+@Auth : Hp_mzx
+@File :__init__.py
+@IDE :PyCharm
+"""
+import json
+import random
+import binascii
+import requests
+import Crypto.Cipher.AES as AES
+from fake_useragent import UserAgent
+
+class ChatCompletion:
+    @staticmethod
+    def create(messages: list, proxy: str = None):
+        url = "https://chat.getgpt.world/api/chat/stream"
+        headers = {
+            "Content-Type": "application/json",
+            "Referer": "https://chat.getgpt.world/",
+            'user-agent': UserAgent().random,
+        }
+        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None
+        data = json.dumps({
+            "messages": messages,
+            "frequency_penalty": 0,
+            "max_tokens": 4000,
+            "model": "gpt-3.5-turbo",
+            "presence_penalty": 0,
+            "temperature": 1,
+            "top_p": 1,
+            "stream": True
+        })
+        signature = ChatCompletion.encrypt(data)
+        res = requests.post(url, headers=headers, data=json.dumps({"signature": signature}), proxies=proxies, stream=True)
+        res.raise_for_status()
+        for chunk in res.iter_content(chunk_size=None):
+            datas = chunk.decode('utf-8').split('data: ')
+            for data in datas:
+                if not data or "[DONE]" in data:
+                    continue
+                data_json = json.loads(data)
+                content = data_json['choices'][0]['delta'].get('content')
+                if content:
+                    yield content
+
+
+    @staticmethod
+    def random_token(e):
+        token = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+        n = len(token)
+        return "".join([token[random.randint(0, n - 1)] for i in range(e)])
+
+    @staticmethod
+    def encrypt(e):
+        t = ChatCompletion.random_token(16).encode('utf-8')
+        n = ChatCompletion.random_token(16).encode('utf-8')
+        r = e.encode('utf-8')
+        cipher = AES.new(t, AES.MODE_CBC, n)
+        ciphertext = cipher.encrypt(ChatCompletion.__pad_data(r))
+        return binascii.hexlify(ciphertext).decode('utf-8') + t.decode('utf-8') + n.decode('utf-8')
+
+    @staticmethod
+    def __pad_data(data: bytes) -> bytes:
+        block_size = AES.block_size
+        padding_size = block_size - len(data) % block_size
+        padding = bytes([padding_size] * padding_size)
+        return data + padding
+
+
+class Completion:
+    @staticmethod
+    def create(prompt: str, proxy: str = None):
+        return ChatCompletion.create([
+            {
+                "content": "You are ChatGPT, a large language model trained by OpenAI.\nCarefully heed the user's instructions. \nRespond using Markdown.",
+                "role": "system"
+            },
+            {"role": "user", "content": prompt}
+        ], proxy)
+
+
+if __name__ == '__main__':
+    # single completion
+    text = ""
+    for chunk in Completion.create("你是谁", "127.0.0.1:7890"):
+        text = text + chunk
+        print(chunk, end="", flush=True)
+    print()
+
+
+    # chat completion
+    message = []
+    while True:
+        prompt = input("请输入问题:")
+        message.append({"role": "user", "content": prompt})
+        text = ""
+        for chunk in ChatCompletion.create(message, '127.0.0.1:7890'):
+            text = text + chunk
+            print(chunk, end="", flush=True)
+        print()
+        message.append({"role": "assistant", "content": text})
\ No newline at end of file
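The request body is not posted as-is: `encrypt()` AES-CBC-encrypts the JSON payload under a random 16-character key and IV (both produced by `random_token`), hex-encodes the ciphertext, and appends the key and IV in plaintext; that whole string is sent as the `signature` field. A short sketch of the inverse, purely to make the layout explicit (`decrypt_signature` is a hypothetical helper, not part of this commit):

```python
# Hypothetical inverse of ChatCompletion.encrypt, for illustration only.
# Layout produced by encrypt(): hex(ciphertext) + 16-char key + 16-char IV.
import binascii
import Crypto.Cipher.AES as AES


def decrypt_signature(signature: str) -> str:
    iv = signature[-16:].encode("utf-8")       # last 16 chars: IV
    key = signature[-32:-16].encode("utf-8")   # preceding 16 chars: key
    ciphertext = binascii.unhexlify(signature[:-32])
    plaintext = AES.new(key, AES.MODE_CBC, iv).decrypt(ciphertext)
    pad = plaintext[-1]                        # __pad_data adds PKCS#7-style padding
    return plaintext[:-pad].decode("utf-8")
```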
diff --git a/gpt4free/hpgptai/README.md b/gpt4free/hpgptai/README.md
new file mode 100644
index 00000000..2735902f
--- /dev/null
+++ b/gpt4free/hpgptai/README.md
@@ -0,0 +1,39 @@
+# HpgptAI
+Written by [hp_mzx](https://github.com/hpsj).
+
+## Examples:
+### Completion:
+```python
+res = hpgptai.Completion.create("你是谁", "127.0.0.1:7890")
+print(res["reply"])
+```
+
+### Chat Completion:
+Supports conversation context.
+```python
+messages = [
+    {
+        "content": "你是谁",
+        "html": "你是谁",
+        "id": hpgptai.ChatCompletion.randomStr(),
+        "role": "user",
+        "who": "User: ",
+    },
+    {
+        "content": "我是一位AI助手,专门为您提供各种服务和支持。我可以回答您的问题,帮助您解决问题,提供相关信息,并执行一些任务。请随时告诉我您需要什么帮助。",
+        "html": "我是一位AI助手,专门为您提供各种服务和支持。我可以回答您的问题,帮助您解决问题,提供相关信息,并执行一些任务。请随时告诉我您需要什么帮助。",
+        "id": hpgptai.ChatCompletion.randomStr(),
+        "role": "assistant",
+        "who": "AI: ",
+    },
+    {
+        "content": "我上一句问的是什么?",
+        "html": "我上一句问的是什么?",
+        "id": hpgptai.ChatCompletion.randomStr(),
+        "role": "user",
+        "who": "User: ",
+    },
+]
+res = hpgptai.ChatCompletion.create(messages, proxy="127.0.0.1:7890")
+print(res["reply"])
+```
\ No newline at end of file
diff --git a/gpt4free/hpgptai/__init__.py b/gpt4free/hpgptai/__init__.py
new file mode 100644
index 00000000..c8772a19
--- /dev/null
+++ b/gpt4free/hpgptai/__init__.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+"""
+@Time : 2023/5/22 14:04
+@Auth : Hp_mzx
+@File :__init__.py
+@IDE :PyCharm
+"""
+import json
+import requests
+import random
+import string
+
+class ChatCompletion:
+    @staticmethod
+    def create(
+        messages: list,
+        context: str = "Converse as if you were an AI assistant. Be friendly, creative.",
+        restNonce: str = "9d6d743bd3",
+        proxy: str = None
+    ):
+        url = "https://chatgptlogin.ac/wp-json/ai-chatbot/v1/chat"
+        headers = {
+            "Content-Type": "application/json",
+            "X-Wp-Nonce": restNonce
+        }
+        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None
+        data = {
+            "env": "chatbot",
+            "session": "N/A",
+            "prompt": ChatCompletion.__build_prompt(context, messages),
+            "context": context,
+            "messages": messages,
+            "newMessage": messages[-1]["content"],
+            "userName": "<div class=\"mwai-name-text\">User:</div>",
+            "aiName": "<div class=\"mwai-name-text\">AI:</div>",
+            "model": "gpt-3.5-turbo",
+            "temperature": 0.8,
+            "maxTokens": 1024,
+            "maxResults": 1,
+            "apiKey": "",
+            "service": "openai",
+            "embeddingsIndex": "",
+            "stop": "",
+            "clientId": ChatCompletion.randomStr(),
+        }
+        res = requests.post(url=url, data=json.dumps(data), headers=headers, proxies=proxies)
+        if res.status_code == 200:
+            return res.json()
+        return res.text
+
+
+    @staticmethod
+    def randomStr():
+        return ''.join(random.choices(string.ascii_lowercase + string.digits, k=34))[:11]
+
+    @classmethod
+    def __build_prompt(cls, context: str, message: list, isCasuallyFineTuned=False, last=15):
+        prompt = context + '\n\n' if context else ''
+        message = message[-last:]
+        if isCasuallyFineTuned:
+            lastLine = message[-1]
+            prompt = lastLine["content"]
+            return prompt
+        conversation = [x["who"] + x["content"] for x in message]
+        prompt += '\n'.join(conversation)
+        prompt += '\n' + "AI: "
+        return prompt
+
+
+
+
+class Completion:
+    @staticmethod
+    def create(prompt: str, proxy: str):
+        messages = [
+            {
+                "content": prompt,
+                "html": prompt,
+                "id": ChatCompletion.randomStr(),
+                "role": "user",
+                "who": "User: ",
+            },
+        ]
+        return ChatCompletion.create(messages=messages, proxy=proxy)
\ No newline at end of file
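Unlike gptworldAi, this backend takes a single flattened prompt: `__build_prompt` prepends the context, joins the `who`-prefixed contents of the last 15 messages, and leaves a trailing `"AI: "` for the model to complete. A small sketch of the resulting string, reaching through Python's name mangling purely for illustration (the sample messages are invented, and the import assumes `hpgptai` is importable the same way the bundled tests import it):

```python
# Illustration of the prompt string __build_prompt assembles.
# The private classmethod is reached via its name-mangled form.
import hpgptai

context = "Converse as if you were an AI assistant. Be friendly, creative."
messages = [
    {"who": "User: ", "content": "Hello"},
    {"who": "AI: ", "content": "Hi, how can I help?"},
    {"who": "User: ", "content": "What did I just say?"},
]
prompt = hpgptai.ChatCompletion._ChatCompletion__build_prompt(context, messages)
print(prompt)
# Converse as if you were an AI assistant. Be friendly, creative.
#
# User: Hello
# AI: Hi, how can I help?
# User: What did I just say?
# AI:
```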
diff --git a/testing/gptworldai_test.py b/testing/gptworldai_test.py
new file mode 100644
index 00000000..3dfb32ce
--- /dev/null
+++ b/testing/gptworldai_test.py
@@ -0,0 +1,18 @@
+import gptworldAi
+
+# single completion
+for chunk in gptworldAi.Completion.create("你是谁", "127.0.0.1:7890"):
+    print(chunk, end="", flush=True)
+print()
+
+# chat completion
+message = []
+while True:
+    prompt = input("请输入问题:")
+    message.append({"role": "user", "content": prompt})
+    text = ""
+    for chunk in gptworldAi.ChatCompletion.create(message, '127.0.0.1:7890'):
+        text = text + chunk
+        print(chunk, end="", flush=True)
+    print()
+    message.append({"role": "assistant", "content": text})
diff --git a/testing/hpgptai_test.py b/testing/hpgptai_test.py
new file mode 100644
index 00000000..cdd146dd
--- /dev/null
+++ b/testing/hpgptai_test.py
@@ -0,0 +1,41 @@
+import hpgptai
+
+# single completion
+res = hpgptai.Completion.create("你是谁", "127.0.0.1:7890")
+print(res["reply"])
+
+
+# chat completion
+messages = [
+    {
+        "content": "你是谁",
+        "html": "你是谁",
+        "id": hpgptai.ChatCompletion.randomStr(),
+        "role": "user",
+        "who": "User: ",
+    },
+    {
+        "content": "我是一位AI助手,专门为您提供各种服务和支持。我可以回答您的问题,帮助您解决问题,提供相关信息,并执行一些任务。请随时告诉我您需要什么帮助。",
+        "html": "我是一位AI助手,专门为您提供各种服务和支持。我可以回答您的问题,帮助您解决问题,提供相关信息,并执行一些任务。请随时告诉我您需要什么帮助。",
+        "id": hpgptai.ChatCompletion.randomStr(),
+        "role": "assistant",
+        "who": "AI: ",
+    },
+    {
+        "content": "我上一句问的是什么?",
+        "html": "我上一句问的是什么?",
+        "id": hpgptai.ChatCompletion.randomStr(),
+        "role": "user",
+        "who": "User: ",
+    },
+]
+res = hpgptai.ChatCompletion.create(messages, proxy="127.0.0.1:7890")
+print(res["reply"])
+
+
+
+
+
+
+
+