summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorhp256 <971748116@qq.com>2023-05-23 11:03:26 +0200
committerhp256 <971748116@qq.com>2023-05-23 11:03:26 +0200
commitf545f4b479d3d8732828f758efaac20152b4c104 (patch)
tree2c3ae6ce2d7c16c3c00010e72a3d9c8cb54c832e
parentadd hpgptai (diff)
downloadgpt4free-f545f4b479d3d8732828f758efaac20152b4c104.tar
gpt4free-f545f4b479d3d8732828f758efaac20152b4c104.tar.gz
gpt4free-f545f4b479d3d8732828f758efaac20152b4c104.tar.bz2
gpt4free-f545f4b479d3d8732828f758efaac20152b4c104.tar.lz
gpt4free-f545f4b479d3d8732828f758efaac20152b4c104.tar.xz
gpt4free-f545f4b479d3d8732828f758efaac20152b4c104.tar.zst
gpt4free-f545f4b479d3d8732828f758efaac20152b4c104.zip
-rw-r--r--gpt4free/gptworldAi/README.md25
-rw-r--r--gpt4free/gptworldAi/__init__.py103
-rw-r--r--testing/gptworldai_test.py18
3 files changed, 146 insertions, 0 deletions
diff --git a/gpt4free/gptworldAi/README.md b/gpt4free/gptworldAi/README.md
new file mode 100644
index 00000000..a6b07f86
--- /dev/null
+++ b/gpt4free/gptworldAi/README.md
@@ -0,0 +1,25 @@
+# gptworldAi
+Written by [hp_mzx](https://github.com/hpsj).
+
+## Examples:
+### Completion:
+```python
+for chunk in gptworldAi.Completion.create("你是谁", "127.0.0.1:7890"):
+    print(chunk, end="", flush=True)
+print()
+```
+
+### Chat Completion:
+Supports conversation context across turns.
+```python
+message = []
+while True:
+ prompt = input("请输入问题:")
+ message.append({"role": "user","content": prompt})
+ text = ""
+ for chunk in gptworldAi.ChatCompletion.create(message,'127.0.0.1:7890'):
+ text = text+chunk
+ print(chunk, end="", flush=True)
+ print()
+ message.append({"role": "assistant", "content": text})
+``` \ No newline at end of file
diff --git a/gpt4free/gptworldAi/__init__.py b/gpt4free/gptworldAi/__init__.py
new file mode 100644
index 00000000..a729fdf8
--- /dev/null
+++ b/gpt4free/gptworldAi/__init__.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+"""
+@Time : 2023/5/23 13:37
+@Auth : Hp_mzx
+@File :__init__.py.py
+@IDE :PyCharm
+"""
+import json
+import random
+import binascii
+import requests
+import Crypto.Cipher.AES as AES
+from fake_useragent import UserAgent
+
class ChatCompletion:
    """Streaming chat client for chat.getgpt.world.

    The endpoint expects the JSON request body to be AES-CBC encrypted with a
    random 16-byte key and IV; both are appended in the clear to the hex
    ciphertext and sent as the ``signature`` field.
    """

    @staticmethod
    def create(messages: list, proxy: str = None):
        """Stream assistant reply fragments for the given conversation.

        :param messages: list of ``{"role": ..., "content": ...}`` dicts.
        :param proxy: optional ``host:port`` HTTP proxy (no scheme prefix).
        :yields: str content deltas as they arrive from the server.
        :raises requests.HTTPError: if the server returns an error status.
        """
        url = "https://chat.getgpt.world/api/chat/stream"
        headers = {
            "Content-Type": "application/json",
            "Referer": "https://chat.getgpt.world/",
            'user-agent': UserAgent().random,
        }
        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None
        data = json.dumps({
            "messages": messages,
            "frequency_penalty": 0,
            "max_tokens": 4000,
            "model": "gpt-3.5-turbo",
            "presence_penalty": 0,
            "temperature": 1,
            "top_p": 1,
            "stream": True
        })
        signature = ChatCompletion.encrypt(data)
        res = requests.post(url, headers=headers, data=json.dumps({"signature": signature}),
                            proxies=proxies, stream=True)
        # Fail fast, once — the original called raise_for_status() on every
        # iteration inside the streaming loop.
        res.raise_for_status()
        # iter_lines() yields one complete SSE line per iteration.  The
        # original used iter_content(chunk_size=None), which can split a JSON
        # event across two chunks and crash json.loads on the fragment.
        for line in res.iter_lines():
            if not line:
                continue
            payload = line.decode('utf-8')
            # SSE frames are "data: <json>"; strip the prefix if present.
            if payload.startswith('data: '):
                payload = payload[len('data: '):]
            if not payload or "[DONE]" in payload:
                continue
            event = json.loads(payload)
            content = event['choices'][0]['delta'].get('content')
            if content:
                yield content

    @staticmethod
    def random_token(e):
        """Return *e* random alphanumeric characters (request key/IV material)."""
        token = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
        # random.choices replaces the manual randint-per-character loop.
        return "".join(random.choices(token, k=e))

    @staticmethod
    def encrypt(e):
        """Encrypt string *e* with AES-CBC under a fresh random key/IV.

        Returns ``hex(ciphertext) + key + iv`` — the key and IV travel in the
        clear as the last 32 characters, matching the site's JS obfuscation.
        """
        t = ChatCompletion.random_token(16).encode('utf-8')  # AES-128 key
        n = ChatCompletion.random_token(16).encode('utf-8')  # IV
        r = e.encode('utf-8')
        cipher = AES.new(t, AES.MODE_CBC, n)
        ciphertext = cipher.encrypt(ChatCompletion.__pad_data(r))
        return binascii.hexlify(ciphertext).decode('utf-8') + t.decode('utf-8') + n.decode('utf-8')

    @staticmethod
    def __pad_data(data: bytes) -> bytes:
        """PKCS#7-pad *data* to a whole number of AES blocks."""
        block_size = AES.block_size
        padding_size = block_size - len(data) % block_size
        padding = bytes([padding_size] * padding_size)
        return data + padding
+
+
class Completion:
    """Convenience wrapper: single prompt instead of a message list."""

    @staticmethod
    def create(prompt: str, proxy: str = None):
        """Stream the assistant's answer to a single *prompt*.

        Prepends the standard system message and delegates to
        ``ChatCompletion.create``; returns its generator unchanged.
        """
        system_message = {
            "content": "You are ChatGPT, a large language model trained by OpenAI.\nCarefully heed the user's instructions. \nRespond using Markdown.",
            "role": "system"
        }
        user_message = {"role": "user", "content": prompt}
        return ChatCompletion.create([system_message, user_message], proxy)
+
+
if __name__ == '__main__':
    # Demo 1: one-shot completion, streamed to stdout as it arrives.
    answer = ""
    for piece in Completion.create("你是谁", "127.0.0.1:7890"):
        answer += piece
        print(piece, end="", flush=True)
    print()

    # Demo 2: interactive chat that accumulates the conversation history so
    # each new question is answered with full context.
    history = []
    while True:
        question = input("请输入问题:")
        history.append({"role": "user", "content": question})
        reply = ""
        for piece in ChatCompletion.create(history, '127.0.0.1:7890'):
            reply += piece
            print(piece, end="", flush=True)
        print()
        history.append({"role": "assistant", "content": reply})
diff --git a/testing/gptworldai_test.py b/testing/gptworldai_test.py
new file mode 100644
index 00000000..3dfb32ce
--- /dev/null
+++ b/testing/gptworldai_test.py
@@ -0,0 +1,18 @@
import gptworldAi

# One-shot completion streamed to stdout.
for piece in gptworldAi.Completion.create("你是谁", "127.0.0.1:7890"):
    print(piece, end="", flush=True)
print()

# Interactive chat loop: history carries the context between turns.
history = []
while True:
    question = input("请输入问题:")
    history.append({"role": "user", "content": question})
    answer = ""
    for piece in gptworldAi.ChatCompletion.create(history, '127.0.0.1:7890'):
        answer += piece
        print(piece, end="", flush=True)
    print()
    history.append({"role": "assistant", "content": answer})