author     t.me/xtekky <98614666+xtekky@users.noreply.github.com>    2023-04-30 13:13:56 +0200
committer  GitHub <noreply@github.com>                               2023-04-30 13:13:56 +0200
commit     ef9127d876914813868536597926e2a7dd37bbf7 (patch)
tree       2dc9e9cf81b78af88f7279146578856ec8af0768
parent     fixed: bug fix on GUI (diff)
parent     Merge pull request #304 from taiyi747/main (diff)
-rw-r--r--   gpt4free/forefront/__init__.py     6
-rw-r--r--   gpt4free/quora/__init__.py         8
-rw-r--r--   gpt4free/quora/backup-mail.py     37
-rw-r--r--   gpt4free/theb/__init__.py         11
-rw-r--r--   gpt4free/you/__init__.py           4
-rw-r--r--   gui/query_methods.py              29
-rw-r--r--   gui/streamlit_chat_app.py          5
-rw-r--r--   testing/usesless_test.py          13
-rw-r--r--   unfinished/usesless/README.md     23
-rw-r--r--   unfinished/usesless/__init__.py   51
10 files changed, 168 insertions, 19 deletions
diff --git a/gpt4free/forefront/__init__.py b/gpt4free/forefront/__init__.py
index f0ca1a15..aa78cfa7 100644
--- a/gpt4free/forefront/__init__.py
+++ b/gpt4free/forefront/__init__.py
@@ -98,12 +98,15 @@ class StreamingCompletion:
action_type='new',
default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
model='gpt-4',
+ proxy=None
) -> Generator[ForeFrontResponse, None, None]:
if not token:
raise Exception('Token is required!')
if not chat_id:
chat_id = str(uuid4())
+ proxies = { 'http': 'http://' + proxy, 'https': 'http://' + proxy } if proxy else None
+
headers = {
'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
'accept': '*/*',
@@ -135,6 +138,7 @@ class StreamingCompletion:
for chunk in post(
'https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
headers=headers,
+ proxies=proxies,
json=json_data,
stream=True,
).iter_lines():
@@ -169,6 +173,7 @@ class Completion:
action_type='new',
default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
model='gpt-4',
+ proxy=None
) -> ForeFrontResponse:
text = ''
final_response = None
@@ -179,6 +184,7 @@ class Completion:
action_type=action_type,
default_persona=default_persona,
model=model,
+ proxy=proxy
):
if response:
final_response = response
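
The hunks above thread an optional `proxy` argument through forefront's `StreamingCompletion.create` and `Completion.create`; the value is expanded into a requests-style proxies dict with `http://` prepended, so it should be passed as `host:port` without a scheme. A minimal usage sketch, with a placeholder proxy address that is not part of the commit:

```python
from gpt4free import forefront

token = forefront.Account.create(logging=False)
response = forefront.Completion.create(
    token=token,
    prompt='hello world',
    model='gpt-4',
    proxy='127.0.0.1:8080',  # placeholder; forwarded as {'http': 'http://...', 'https': 'http://...'}
)
print(response.text)
```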
diff --git a/gpt4free/quora/__init__.py b/gpt4free/quora/__init__.py
index f548ff41..afbfb68d 100644
--- a/gpt4free/quora/__init__.py
+++ b/gpt4free/quora/__init__.py
@@ -187,7 +187,7 @@ class Account:
enable_bot_creation: bool = False,
):
client = TLS(client_identifier='chrome110')
- client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None
+ client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else {}
mail_client = Emailnator()
mail_address = mail_client.get_mail()
@@ -293,10 +293,13 @@ class StreamingCompletion:
custom_model: bool = None,
prompt: str = 'hello world',
token: str = '',
+ proxy: Optional[str] = None
) -> Generator[PoeResponse, None, None]:
_model = MODELS[model] if not custom_model else custom_model
+ proxies = { 'http': 'http://' + proxy, 'https': 'http://' + proxy } if proxy else False
client = PoeClient(token)
+ client.proxy = proxies
for chunk in client.send_message(_model, prompt):
yield PoeResponse(
@@ -330,10 +333,13 @@ class Completion:
custom_model: str = None,
prompt: str = 'hello world',
token: str = '',
+ proxy: Optional[str] = None
) -> PoeResponse:
_model = MODELS[model] if not custom_model else custom_model
+ proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
client = PoeClient(token)
+ client.proxy = proxies
chunk = None
for response in client.send_message(_model, prompt):
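
The quora (Poe) provider follows the same pattern: both `StreamingCompletion.create` and `Completion.create` take an optional `proxy` and attach the resulting proxies dict to the `PoeClient`. A short sketch mirroring the call added to `gui/query_methods.py` further down; the proxy address is a placeholder:

```python
from gpt4free import quora

token = quora.Account.create(logging=False, enable_bot_creation=True, proxy='127.0.0.1:8080')
response = quora.Completion.create(model='gpt-4', prompt='hello world', token=token, proxy='127.0.0.1:8080')
print(response.text)
```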
diff --git a/gpt4free/quora/backup-mail.py b/gpt4free/quora/backup-mail.py
new file mode 100644
index 00000000..0a2a5e94
--- /dev/null
+++ b/gpt4free/quora/backup-mail.py
@@ -0,0 +1,37 @@
+from requests import Session
+from time import sleep
+from json import loads
+from re import findall
+class Mail:
+ def __init__(self) -> None:
+ self.client = Session()
+ self.client.post("https://etempmail.com/")
+ self.cookies = {'acceptcookie': 'true'}
+ self.cookies["ci_session"] = self.client.cookies.get_dict()["ci_session"]
+ self.email = None
+ def get_mail(self):
+ respone=self.client.post("https://etempmail.com/getEmailAddress")
+ #cookies
+ self.cookies["lisansimo"] = eval(respone.text)["recover_key"]
+ self.email = eval(respone.text)["address"]
+ return self.email
+ def get_message(self):
+ print("Waiting for message...")
+ while True:
+ sleep(5)
+ respone=self.client.post("https://etempmail.com/getInbox")
+ mail_token=loads(respone.text)
+ print(self.client.cookies.get_dict())
+ if len(mail_token) == 1:
+ break
+
+ params = {'id': '1',}
+ self.mail_context = self.client.post("https://etempmail.com/getInbox",params=params)
+ self.mail_context = eval(self.mail_context.text)[0]["body"]
+ return self.mail_context
+ #,cookies=self.cookies
+ def get_verification_code(self):
+ message = self.mail_context
+ code = findall(r';">(\d{6,7})</div>', message)[0]
+ print(f"Verification code: {code}")
+ return code \ No newline at end of file
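
`backup-mail.py` adds a small etempmail.com client intended as a fallback mail provider. A sketch of how it could be driven; because the file name contains a hyphen it cannot be imported with a plain `import`, so this loads it via `importlib` (that wiring is an assumption, not something the commit does):

```python
import importlib.util

spec = importlib.util.spec_from_file_location("backup_mail", "gpt4free/quora/backup-mail.py")
backup_mail = importlib.util.module_from_spec(spec)
spec.loader.exec_module(backup_mail)

mail = backup_mail.Mail()
print(mail.get_mail())               # disposable address from etempmail.com
mail.get_message()                   # polls the inbox every 5 seconds until a message arrives
print(mail.get_verification_code())  # 6-7 digit code parsed from the message body
```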
diff --git a/gpt4free/theb/__init__.py b/gpt4free/theb/__init__.py
index 96053877..75a15068 100644
--- a/gpt4free/theb/__init__.py
+++ b/gpt4free/theb/__init__.py
@@ -2,7 +2,7 @@ from json import loads
from queue import Queue, Empty
from re import findall
from threading import Thread
-from typing import Generator
+from typing import Generator, Optional
from curl_cffi import requests
from fake_useragent import UserAgent
@@ -19,7 +19,7 @@ class Completion:
stream_completed = False
@staticmethod
- def request(prompt: str):
+ def request(prompt: str, proxy: Optional[str]=None):
headers = {
'authority': 'chatbot.theb.ai',
'content-type': 'application/json',
@@ -27,9 +27,12 @@ class Completion:
'user-agent': UserAgent().random,
}
+ proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None
+
requests.post(
'https://chatbot.theb.ai/api/chat-process',
headers=headers,
+ proxies=proxies,
content_callback=Completion.handle_stream_response,
json={'prompt': prompt, 'options': {}},
)
@@ -37,8 +40,8 @@ class Completion:
Completion.stream_completed = True
@staticmethod
- def create(prompt: str) -> Generator[str, None, None]:
- Thread(target=Completion.request, args=[prompt]).start()
+ def create(prompt: str, proxy: Optional[str]=None) -> Generator[str, None, None]:
+ Thread(target=Completion.request, args=[prompt, proxy]).start()
while not Completion.stream_completed or not Completion.message_queue.empty():
try:
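
theb's `Completion.create` now accepts the same optional `proxy` and forwards it to the background `request` thread. A minimal streaming sketch with a placeholder proxy:

```python
from gpt4free import theb

# Pass host:port only; theb prepends 'http://' when building the proxies dict.
for token in theb.Completion.create(prompt='hello world', proxy='127.0.0.1:8080'):
    print(token, end='', flush=True)
```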
diff --git a/gpt4free/you/__init__.py b/gpt4free/you/__init__.py
index 97b48464..d084a842 100644
--- a/gpt4free/you/__init__.py
+++ b/gpt4free/you/__init__.py
@@ -30,12 +30,16 @@ class Completion:
include_links: bool = False,
detailed: bool = False,
debug: bool = False,
+ proxy: Optional[str] = None
) -> PoeResponse:
if chat is None:
chat = []
+ proxies = { 'http': 'http://' + proxy, 'https': 'http://' + proxy } if proxy else {}
+
client = Session(client_identifier='chrome_108')
client.headers = Completion.__get_headers()
+ client.proxies = proxies
response = client.get(
f'https://you.com/api/streamingSearch',
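
For you.com, the proxies dict is attached to the TLS `Session` before the streaming search request. A sketch with a placeholder address (note the GUI change below also switches to reading `result.text`):

```python
from gpt4free import you

result = you.Completion.create(prompt='hello world', proxy='127.0.0.1:8080')
print(result.text)
```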
diff --git a/gui/query_methods.py b/gui/query_methods.py
index 6225453b..2d6adacd 100644
--- a/gui/query_methods.py
+++ b/gui/query_methods.py
@@ -1,5 +1,6 @@
import os
import sys
+from typing import Optional
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
@@ -7,14 +8,14 @@ from gpt4free import quora, forefront, theb, you
import random
-def query_forefront(question: str) -> str:
+def query_forefront(question: str, proxy: Optional[str] = None) -> str:
# create an account
- token = forefront.Account.create(logging=False)
+ token = forefront.Account.create(logging=False, proxy=proxy)
response = ""
# get a response
try:
- return forefront.Completion.create(token=token, prompt='hello world', model='gpt-4').text
+ return forefront.Completion.create(token=token, prompt='hello world', model='gpt-4', proxy=proxy).text
except Exception as e:
# Return error message if an exception occurs
return (
@@ -22,16 +23,16 @@ def query_forefront(question: str) -> str:
)
-def query_quora(question: str) -> str:
- token = quora.Account.create(logging=False, enable_bot_creation=True)
- return quora.Completion.create(model='gpt-4', prompt=question, token=token).text
+def query_quora(question: str, proxy: Optional[str] = None) -> str:
+ token = quora.Account.create(logging=False, enable_bot_creation=True, proxy=proxy)
+ return quora.Completion.create(model='gpt-4', prompt=question, token=token, proxy=proxy).text
-def query_theb(question: str) -> str:
+def query_theb(question: str, proxy: Optional[str] = None) -> str:
# Set cloudflare clearance cookie and get answer from GPT-4 model
response = ""
try:
- return ''.join(theb.Completion.create(prompt=question))
+ return ''.join(theb.Completion.create(prompt=question, proxy=proxy))
except Exception as e:
# Return error message if an exception occurs
@@ -40,11 +41,11 @@ def query_theb(question: str) -> str:
)
-def query_you(question: str) -> str:
+def query_you(question: str, proxy: Optional[str] = None) -> str:
# Set cloudflare clearance cookie and get answer from GPT-4 model
try:
- result = you.Completion.create(prompt=question)
- return result["response"]
+ result = you.Completion.create(prompt=question, proxy=proxy)
+ return result.text
except Exception as e:
# Return error message if an exception occurs
@@ -66,11 +67,11 @@ avail_query_methods = {
}
-def query(user_input: str, selected_method: str = "Random") -> str:
+def query(user_input: str, selected_method: str = "Random", proxy: Optional[str] = None) -> str:
# If a specific query method is selected (not "Random") and the method is in the dictionary, try to call it
if selected_method != "Random" and selected_method in avail_query_methods:
try:
- return avail_query_methods[selected_method](user_input)
+ return avail_query_methods[selected_method](user_input, proxy=proxy)
except Exception as e:
print(f"Error with {selected_method}: {e}")
return "😵 Sorry, some error occurred please try again."
@@ -89,7 +90,7 @@ def query(user_input: str, selected_method: str = "Random") -> str:
chosen_query_name = [k for k, v in avail_query_methods.items() if v == chosen_query][0]
try:
# Try to call the chosen method with the user input
- result = chosen_query(user_input)
+ result = chosen_query(user_input, proxy=proxy)
success = True
except Exception as e:
print(f"Error with {chosen_query_name}: {e}")
diff --git a/gui/streamlit_chat_app.py b/gui/streamlit_chat_app.py
index cd8c652e..fc5c8d8e 100644
--- a/gui/streamlit_chat_app.py
+++ b/gui/streamlit_chat_app.py
@@ -75,8 +75,10 @@ user_input = input_placeholder.text_input(
)
submit_button = st.button("Submit")
+
if (user_input and user_input != st.session_state['input_text']) or submit_button:
output = query(user_input, st.session_state['query_method'])
+
escaped_output = output.encode('utf-8').decode('unicode-escape')
st.session_state.current_conversation['user_inputs'].append(user_input)
@@ -95,6 +97,9 @@ if st.sidebar.button("New Conversation"):
st.session_state['query_method'] = st.sidebar.selectbox("Select API:", options=avail_query_methods, index=0)
+# Proxy
+st.session_state['proxy'] = st.sidebar.text_input("Proxy: ")
+
# Sidebar
st.sidebar.header("Conversation History")
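
The sidebar change stores the proxy string in `st.session_state['proxy']`, but the `query(...)` call earlier in this hunk does not yet forward it. One way the two could be wired together (not part of this commit):

```python
# Hypothetical follow-up: pass the sidebar proxy through to query().
output = query(
    user_input,
    st.session_state['query_method'],
    proxy=st.session_state.get('proxy') or None,  # empty string falls back to a direct connection
)
```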
diff --git a/testing/usesless_test.py b/testing/usesless_test.py
new file mode 100644
index 00000000..e2e35547
--- /dev/null
+++ b/testing/usesless_test.py
@@ -0,0 +1,13 @@
+import usesless
+
+question1 = "Who won the world series in 2020?"
+req = usesless.Completion.create(prompt=question1)
+answer = req["text"]
+message_id = req["parentMessageId"]
+
+question2 = "Where was it played?"
+req2 = usesless.Completion.create(prompt=question2, parentMessageId=message_id)
+answer2 = req2["text"]
+
+print(answer)
+print(answer2)
diff --git a/unfinished/usesless/README.md b/unfinished/usesless/README.md
new file mode 100644
index 00000000..13e9df8c
--- /dev/null
+++ b/unfinished/usesless/README.md
@@ -0,0 +1,23 @@
+ai.usesless.com
+
+to do:
+
+- use random user agent in header
+- make the code better I guess (?)
+
+### Example: `usesless` <a name="example-usesless"></a>
+
+```python
+import usesless
+
+message_id = ""
+while True:
+ prompt = input("Question: ")
+ if prompt == "!stop":
+ break
+
+ req = usesless.Completion.create(prompt=prompt, parentMessageId=message_id)
+
+ print(f"Answer: {req['text']}")
+ message_id = req["id"]
+```
diff --git a/unfinished/usesless/__init__.py b/unfinished/usesless/__init__.py
new file mode 100644
index 00000000..6f9a47ef
--- /dev/null
+++ b/unfinished/usesless/__init__.py
@@ -0,0 +1,51 @@
+import requests
+import json
+
+
+class Completion:
+ headers = {
+ "authority": "ai.usesless.com",
+ "accept": "application/json, text/plain, */*",
+ "accept-language": "en-US,en;q=0.5",
+ "cache-control": "no-cache",
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/112.0",
+ }
+
+ @staticmethod
+ def create(
+ systemMessage: str = "You are a helpful assistant",
+ prompt: str = "",
+ parentMessageId: str = "",
+ presence_penalty: float = 1,
+ temperature: float = 1,
+ model: str = "gpt-3.5-turbo",
+ ):
+ json_data = {
+ "openaiKey": "",
+ "prompt": prompt,
+ "options": {
+ "parentMessageId": parentMessageId,
+ "systemMessage": systemMessage,
+ "completionParams": {
+ "presence_penalty": presence_penalty,
+ "temperature": temperature,
+ "model": model,
+ },
+ },
+ }
+
+ url = "https://ai.usesless.com/api/chat-process"
+ request = requests.post(url, headers=Completion.headers, json=json_data)
+ content = request.content
+ response = Completion.__response_to_json(content)
+ return response
+
+ @classmethod
+ def __response_to_json(cls, text) -> dict:
+ text = str(text.decode("utf-8"))
+ split_text = text.rsplit("\n", 1)[1]
+ to_json = json.loads(split_text)
+ return to_json
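
`__response_to_json` relies on the chat-process endpoint streaming one JSON object per line, with the final line carrying the complete answer. A tiny illustration of that parsing step; the sample body is fabricated for demonstration only:

```python
import json

raw = b'{"text": "Hel"}\n{"text": "Hello!", "id": "abc123", "parentMessageId": "abc123"}'
last_line = raw.decode("utf-8").rsplit("\n", 1)[1]  # keep only the final, complete JSON line
print(json.loads(last_line)["text"])  # -> Hello!
```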