author     t.me/xtekky <98614666+xtekky@users.noreply.github.com>  2023-04-27 17:28:36 +0200
committer  GitHub <noreply@github.com>  2023-04-27 17:28:36 +0200
commit     bbb4d69a933ff90e33e072ecba32519db7a22612 (patch)
tree       6fec52831117ee43fe7141a153b7e123ff09d469
parent     Merge pull request #201 from DanielShemesh/patch-4 (diff)
parent     Reformat code using PyCharm (diff)
-rw-r--r--  forefront/__init__.py                185
-rw-r--r--  forefront/mail.py                     25
-rw-r--r--  forefront/typing.py                   37
-rw-r--r--  phind/__init__.py                    226
-rw-r--r--  quora/__init__.py                     56
-rw-r--r--  quora/api.py                          62
-rw-r--r--  testing/forefront_test.py              7
-rw-r--r--  testing/phind_test.py                 30
-rw-r--r--  testing/poe_account_create_test.py    88
-rw-r--r--  testing/poe_test.py                   14
-rw-r--r--  testing/quora_test_2.py               17
-rw-r--r--  testing/sqlchat_test.py                7
-rw-r--r--  testing/t3nsor_test.py                 5
-rw-r--r--  testing/writesonic_test.py            34
-rw-r--r--  unfinished/bard/__init__.py           31
-rw-r--r--  unfinished/bard/typings.py             2
-rw-r--r--  unfinished/bing/__ini__.py            39
-rw-r--r--  unfinished/cocalc/__init__.py          1
-rw-r--r--  unfinished/cocalc/cocalc_test.py       5
-rw-r--r--  unfinished/easyai/main.py              5
-rw-r--r--  unfinished/gptbz/__init__.py           4
-rw-r--r--  unfinished/openai/__ini__.py           3
-rw-r--r--  unfinished/openaihosted/__init__.py    5
-rw-r--r--  unfinished/openprompt/create.py       20
-rw-r--r--  unfinished/openprompt/mail.py          6
-rw-r--r--  unfinished/openprompt/main.py          5
-rw-r--r--  unfinished/openprompt/test.py          5
-rw-r--r--  unfinished/t3nsor/__init__.py        131
-rw-r--r--  unfinished/test.py                     8
-rw-r--r--  unfinished/theb.ai/__init__.py        45
-rw-r--r--  unfinished/vercelai/v2.py              6
-rw-r--r--  unfinished/writesonic/__init__.py    195
-rw-r--r--  you/__init__.py                       26
33 files changed, 672 insertions, 663 deletions
diff --git a/forefront/__init__.py b/forefront/__init__.py
index 9899d991..44f5d44d 100644
--- a/forefront/__init__.py
+++ b/forefront/__init__.py
@@ -1,51 +1,54 @@
-from tls_client import Session
-from forefront.mail import Mail
-from time import time, sleep
+from json import loads
from re import match
-from forefront.typing import ForeFrontResponse
+from time import time, sleep
from uuid import uuid4
+
from requests import post
-from json import loads
+from tls_client import Session
+
+from forefront.mail import Mail
+from forefront.typing import ForeFrontResponse
class Account:
@staticmethod
- def create(proxy = None, logging = False):
-
+ def create(proxy=None, logging=False):
+
proxies = {
'http': 'http://' + proxy,
- 'https': 'http://' + proxy } if proxy else False
-
+ 'https': 'http://' + proxy} if proxy else False
+
start = time()
- mail = Mail(proxies)
- mail_token = None
- mail_adress = mail.get_mail()
-
- #print(mail_adress)
-
- client = Session(client_identifier='chrome110')
+ mail = Mail(proxies)
+ mail_token = None
+ mail_adress = mail.get_mail()
+
+ # print(mail_adress)
+
+ client = Session(client_identifier='chrome110')
client.proxies = proxies
client.headers = {
"origin": "https://accounts.forefront.ai",
- "user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
+ "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
}
-
- response = client.post('https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
- data = {
- "email_address": mail_adress
- }
- )
-
+
+ response = client.post('https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
+ data={
+ "email_address": mail_adress
+ }
+ )
+
trace_token = response.json()['response']['id']
if logging: print(trace_token)
- response = client.post(f"https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6",
- data = {
- "strategy" : "email_code",
+ response = client.post(
+ f"https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6",
+ data={
+ "strategy": "email_code",
}
- )
-
+ )
+
if logging: print(response.text)
if not 'sign_up_attempt' in response.text:
@@ -59,89 +62,91 @@ class Account:
if mail_token:
break
-
+
if logging: print(mail_token)
-
- response = client.post(f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4', data = {
- 'code': mail_token,
- 'strategy': 'email_code'
- })
-
+
+ response = client.post(
+ f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4',
+ data={
+ 'code': mail_token,
+ 'strategy': 'email_code'
+ })
+
if logging: print(response.json())
-
- token = response.json()['client']['sessions'][0]['last_active_token']['jwt']
-
+
+ token = response.json()['client']['sessions'][0]['last_active_token']['jwt']
+
with open('accounts.txt', 'a') as f:
f.write(f'{mail_adress}:{token}\n')
-
+
if logging: print(time() - start)
-
+
return token
class StreamingCompletion:
@staticmethod
def create(
- token = None,
- chatId = None,
- prompt = '',
- actionType = 'new',
- defaultPersona = '607e41fe-95be-497e-8e97-010a59b2e2c0', # default
- model = 'gpt-4') -> ForeFrontResponse:
-
+ token=None,
+ chatId=None,
+ prompt='',
+ actionType='new',
+ defaultPersona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
+ model='gpt-4') -> ForeFrontResponse:
+
if not token: raise Exception('Token is required!')
if not chatId: chatId = str(uuid4())
-
+
headers = {
- 'authority' : 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
- 'accept' : '*/*',
- 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'authorization' : 'Bearer ' + token,
- 'cache-control' : 'no-cache',
- 'content-type' : 'application/json',
- 'origin' : 'https://chat.forefront.ai',
- 'pragma' : 'no-cache',
- 'referer' : 'https://chat.forefront.ai/',
- 'sec-ch-ua' : '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
- 'sec-ch-ua-mobile' : '?0',
+ 'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'authorization': 'Bearer ' + token,
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat.forefront.ai',
+ 'pragma': 'no-cache',
+ 'referer': 'https://chat.forefront.ai/',
+ 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest' : 'empty',
- 'sec-fetch-mode' : 'cors',
- 'sec-fetch-site' : 'cross-site',
- 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}
json_data = {
- 'text' : prompt,
- 'action' : actionType,
- 'parentId' : chatId,
- 'workspaceId' : chatId,
- 'messagePersona' : defaultPersona,
- 'model' : model
+ 'text': prompt,
+ 'action': actionType,
+ 'parentId': chatId,
+ 'workspaceId': chatId,
+ 'messagePersona': defaultPersona,
+ 'model': model
}
for chunk in post('https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
- headers=headers, json=json_data, stream=True).iter_lines():
-
+ headers=headers, json=json_data, stream=True).iter_lines():
+
if b'finish_reason":null' in chunk:
- data = loads(chunk.decode('utf-8').split('data: ')[1])
+ data = loads(chunk.decode('utf-8').split('data: ')[1])
token = data['choices'][0]['delta'].get('content')
-
+
if token != None:
yield ForeFrontResponse({
- 'id' : chatId,
- 'object' : 'text_completion',
- 'created': int(time()),
- 'model' : model,
- 'choices': [{
- 'text' : token,
- 'index' : 0,
- 'logprobs' : None,
- 'finish_reason' : 'stop'
- }],
- 'usage': {
- 'prompt_tokens' : len(prompt),
- 'completion_tokens' : len(token),
- 'total_tokens' : len(prompt) + len(token)
- }
- })
\ No newline at end of file
+ 'id': chatId,
+ 'object': 'text_completion',
+ 'created': int(time()),
+ 'model': model,
+ 'choices': [{
+ 'text': token,
+ 'index': 0,
+ 'logprobs': None,
+ 'finish_reason': 'stop'
+ }],
+ 'usage': {
+ 'prompt_tokens': len(prompt),
+ 'completion_tokens': len(token),
+ 'total_tokens': len(prompt) + len(token)
+ }
+ })
diff --git a/forefront/mail.py b/forefront/mail.py
index 64694e74..41c2a647 100644
--- a/forefront/mail.py
+++ b/forefront/mail.py
@@ -1,6 +1,8 @@
-from requests import Session
-from string import ascii_letters
from random import choices
+from string import ascii_letters
+
+from requests import Session
+
class Mail:
def __init__(self, proxies: dict = None) -> None:
@@ -23,27 +25,27 @@ class Mail:
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-GB,en-US;q=0.9,en;q=0.8"
}
-
+
def get_mail(self) -> str:
token = ''.join(choices(ascii_letters, k=14)).lower()
- init = self.client.post("https://api.mail.tm/accounts", json={
- "address" : f"{token}@bugfoo.com",
+ init = self.client.post("https://api.mail.tm/accounts", json={
+ "address": f"{token}@bugfoo.com",
"password": token
})
-
+
if init.status_code == 201:
- resp = self.client.post("https://api.mail.tm/token", json = {
+ resp = self.client.post("https://api.mail.tm/token", json={
**init.json(),
"password": token
})
-
+
self.client.headers['authorization'] = 'Bearer ' + resp.json()['token']
-
+
return f"{token}@bugfoo.com"
-
+
else:
raise Exception("Failed to create email")
-
+
def fetch_inbox(self):
return self.client.get(f"https://api.mail.tm/messages").json()["hydra:member"]
@@ -52,4 +54,3 @@ class Mail:
def get_message_content(self, message_id: str):
return self.get_message(message_id)["text"]
-
diff --git a/forefront/typing.py b/forefront/typing.py
index 0fff6b18..a11ac49f 100644
--- a/forefront/typing.py
+++ b/forefront/typing.py
@@ -2,12 +2,12 @@ class ForeFrontResponse:
class Completion:
class Choices:
def __init__(self, choice: dict) -> None:
- self.text = choice['text']
- self.content = self.text.encode()
- self.index = choice['index']
- self.logprobs = choice['logprobs']
- self.finish_reason = choice['finish_reason']
-
+ self.text = choice['text']
+ self.content = self.text.encode()
+ self.index = choice['index']
+ self.logprobs = choice['logprobs']
+ self.finish_reason = choice['finish_reason']
+
def __repr__(self) -> str:
return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
@@ -16,22 +16,21 @@ class ForeFrontResponse:
class Usage:
def __init__(self, usage_dict: dict) -> None:
- self.prompt_tokens = usage_dict['prompt_tokens']
- self.completion_tokens = usage_dict['completion_tokens']
- self.total_tokens = usage_dict['total_tokens']
+ self.prompt_tokens = usage_dict['prompt_tokens']
+ self.completion_tokens = usage_dict['completion_tokens']
+ self.total_tokens = usage_dict['total_tokens']
def __repr__(self):
return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
-
+
def __init__(self, response_dict: dict) -> None:
-
- self.response_dict = response_dict
- self.id = response_dict['id']
- self.object = response_dict['object']
- self.created = response_dict['created']
- self.model = response_dict['model']
- self.completion = self.Completion(response_dict['choices'])
- self.usage = self.Usage(response_dict['usage'])
+ self.response_dict = response_dict
+ self.id = response_dict['id']
+ self.object = response_dict['object']
+ self.created = response_dict['created']
+ self.model = response_dict['model']
+ self.completion = self.Completion(response_dict['choices'])
+ self.usage = self.Usage(response_dict['usage'])
def json(self) -> dict:
- return self.response_dict
\ No newline at end of file
+ return self.response_dict
diff --git a/phind/__init__.py b/phind/__init__.py
index e7009d67..863360cb 100644
--- a/phind/__init__.py
+++ b/phind/__init__.py
@@ -1,27 +1,25 @@
+from datetime import datetime
+from queue import Queue, Empty
+from threading import Thread
+from time import time
from urllib.parse import quote
-from time import time
-from datetime import datetime
-from queue import Queue, Empty
-from threading import Thread
-from re import findall
from curl_cffi.requests import post
cf_clearance = ''
-user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
+user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
+
class PhindResponse:
-
class Completion:
-
class Choices:
def __init__(self, choice: dict) -> None:
- self.text = choice['text']
- self.content = self.text.encode()
- self.index = choice['index']
- self.logprobs = choice['logprobs']
- self.finish_reason = choice['finish_reason']
-
+ self.text = choice['text']
+ self.content = self.text.encode()
+ self.index = choice['index']
+ self.logprobs = choice['logprobs']
+ self.finish_reason = choice['finish_reason']
+
def __repr__(self) -> str:
return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
@@ -30,34 +28,33 @@ class PhindResponse:
class Usage:
def __init__(self, usage_dict: dict) -> None:
- self.prompt_tokens = usage_dict['prompt_tokens']
- self.completion_tokens = usage_dict['completion_tokens']
- self.total_tokens = usage_dict['total_tokens']
+ self.prompt_tokens = usage_dict['prompt_tokens']
+ self.completion_tokens = usage_dict['completion_tokens']
+ self.total_tokens = usage_dict['total_tokens']
def __repr__(self):
return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
-
+
def __init__(self, response_dict: dict) -> None:
-
- self.response_dict = response_dict
- self.id = response_dict['id']
- self.object = response_dict['object']
- self.created = response_dict['created']
- self.model = response_dict['model']
- self.completion = self.Completion(response_dict['choices'])
- self.usage = self.Usage(response_dict['usage'])
+ self.response_dict = response_dict
+ self.id = response_dict['id']
+ self.object = response_dict['object']
+ self.created = response_dict['created']
+ self.model = response_dict['model']
+ self.completion = self.Completion(response_dict['choices'])
+ self.usage = self.Usage(response_dict['usage'])
def json(self) -> dict:
return self.response_dict
class Search:
- def create(prompt: str, actualSearch: bool = True, language: str = 'en') -> dict: # None = no search
+ def create(prompt: str, actualSearch: bool = True, language: str = 'en') -> dict: # None = no search
if user_agent == '':
raise ValueError('user_agent must be set, refer to documentation')
- if cf_clearance == '' :
+ if cf_clearance == '':
raise ValueError('cf_clearance must be set, refer to documentation')
-
+
if not actualSearch:
return {
'_type': 'SearchResponse',
@@ -75,7 +72,7 @@ class Search:
}
}
}
-
+
headers = {
'authority': 'www.phind.com',
'accept': '*/*',
@@ -91,8 +88,8 @@ class Search:
'sec-fetch-site': 'same-origin',
'user-agent': user_agent
}
-
- return post('https://www.phind.com/api/bing/search', headers = headers, json = {
+
+ return post('https://www.phind.com/api/bing/search', headers=headers, json={
'q': prompt,
'userRankList': {},
'browserLanguage': language}).json()['rawBingResults']
@@ -100,45 +97,45 @@ class Search:
class Completion:
def create(
- model = 'gpt-4',
- prompt: str = '',
- results: dict = None,
- creative: bool = False,
- detailed: bool = False,
- codeContext: str = '',
- language: str = 'en') -> PhindResponse:
-
- if user_agent == '' :
+ model='gpt-4',
+ prompt: str = '',
+ results: dict = None,
+ creative: bool = False,
+ detailed: bool = False,
+ codeContext: str = '',
+ language: str = 'en') -> PhindResponse:
+
+ if user_agent == '':
raise ValueError('user_agent must be set, refer to documentation')
- if cf_clearance == '' :
+ if cf_clearance == '':
raise ValueError('cf_clearance must be set, refer to documentation')
-
+
if results is None:
- results = Search.create(prompt, actualSearch = True)
-
+ results = Search.create(prompt, actualSearch=True)
+
if len(codeContext) > 2999:
raise ValueError('codeContext must be less than 3000 characters')
-
+
models = {
- 'gpt-4' : 'expert',
- 'gpt-3.5-turbo' : 'intermediate',
+ 'gpt-4': 'expert',
+ 'gpt-3.5-turbo': 'intermediate',
'gpt-3.5': 'intermediate',
}
-
+
json_data = {
- 'question' : prompt,
- 'bingResults' : results, #response.json()['rawBingResults'],
- 'codeContext' : codeContext,
+ 'question': prompt,
+ 'bingResults': results, # response.json()['rawBingResults'],
+ 'codeContext': codeContext,
'options': {
- 'skill' : models[model],
- 'date' : datetime.now().strftime("%d/%m/%Y"),
+ 'skill': models[model],
+ 'date': datetime.now().strftime("%d/%m/%Y"),
'language': language,
'detailed': detailed,
'creative': creative
}
}
-
+
headers = {
'authority': 'www.phind.com',
'accept': '*/*',
@@ -155,50 +152,51 @@ class Completion:
'sec-fetch-site': 'same-origin',
'user-agent': user_agent
}
-
+
completion = ''
- response = post('https://www.phind.com/api/infer/answer', headers = headers, json = json_data, timeout=99999, impersonate='chrome110')
+ response = post('https://www.phind.com/api/infer/answer', headers=headers, json=json_data, timeout=99999,
+ impersonate='chrome110')
for line in response.text.split('\r\n\r\n'):
completion += (line.replace('data: ', ''))
-
+
return PhindResponse({
- 'id' : f'cmpl-1337-{int(time())}',
- 'object' : 'text_completion',
- 'created': int(time()),
- 'model' : models[model],
+ 'id': f'cmpl-1337-{int(time())}',
+ 'object': 'text_completion',
+ 'created': int(time()),
+ 'model': models[model],
'choices': [{
- 'text' : completion,
- 'index' : 0,
- 'logprobs' : None,
- 'finish_reason' : 'stop'
- }],
+ 'text': completion,
+ 'index': 0,
+ 'logprobs': None,
+ 'finish_reason': 'stop'
+ }],
'usage': {
- 'prompt_tokens' : len(prompt),
- 'completion_tokens' : len(completion),
- 'total_tokens' : len(prompt) + len(completion)
+ 'prompt_tokens': len(prompt),
+ 'completion_tokens': len(completion),
+ 'total_tokens': len(prompt) + len(completion)
}
})
-
+
class StreamingCompletion:
- message_queue = Queue()
+ message_queue = Queue()
stream_completed = False
-
+
def request(model, prompt, results, creative, detailed, codeContext, language) -> None:
-
+
models = {
- 'gpt-4' : 'expert',
- 'gpt-3.5-turbo' : 'intermediate',
+ 'gpt-4': 'expert',
+ 'gpt-3.5-turbo': 'intermediate',
'gpt-3.5': 'intermediate',
}
json_data = {
- 'question' : prompt,
- 'bingResults' : results,
- 'codeContext' : codeContext,
+ 'question': prompt,
+ 'bingResults': results,
+ 'codeContext': codeContext,
'options': {
- 'skill' : models[model],
- 'date' : datetime.now().strftime("%d/%m/%Y"),
+ 'skill': models[model],
+ 'date': datetime.now().strftime("%d/%m/%Y"),
'language': language,
'detailed': detailed,
'creative': creative
@@ -221,65 +219,65 @@ class StreamingCompletion:
'sec-fetch-site': 'same-origin',
'user-agent': user_agent
}
-
- response = post('https://www.phind.com/api/infer/answer',
- headers = headers, json = json_data, timeout=99999, impersonate='chrome110', content_callback=StreamingCompletion.handle_stream_response)
+ response = post('https://www.phind.com/api/infer/answer',
+ headers=headers, json=json_data, timeout=99999, impersonate='chrome110',
+ content_callback=StreamingCompletion.handle_stream_response)
StreamingCompletion.stream_completed = True
@staticmethod
def create(
- model : str = 'gpt-4',
- prompt : str = '',
- results : dict = None,
- creative : bool = False,
- detailed : bool = False,
- codeContext : str = '',
- language : str = 'en'):
-
+ model: str = 'gpt-4',
+ prompt: str = '',
+ results: dict = None,
+ creative: bool = False,
+ detailed: bool = False,
+ codeContext: str = '',
+ language: str = 'en'):
+
if user_agent == '':
raise ValueError('user_agent must be set, refer to documentation')
- if cf_clearance == '' :
+ if cf_clearance == '':
raise ValueError('cf_clearance must be set, refer to documentation')
-
+
if results is None:
- results = Search.create(prompt, actualSearch = True)
-
+ results = Search.create(prompt, actualSearch=True)
+
if len(codeContext) > 2999:
raise ValueError('codeContext must be less than 3000 characters')
-
- Thread(target = StreamingCompletion.request, args = [
+
+ Thread(target=StreamingCompletion.request, args=[
model, prompt, results, creative, detailed, codeContext, language]).start()
-
+
while StreamingCompletion.stream_completed != True or not StreamingCompletion.message_queue.empty():
try:
chunk = StreamingCompletion.message_queue.get(timeout=0)
if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
chunk = b'data: \n\n\r\n\r\n'
-
+
chunk = chunk.decode()
-
+
chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\n\r\n\r\n')
chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
-
+
yield PhindResponse({
- 'id' : f'cmpl-1337-{int(time())}',
- 'object' : 'text_completion',
- 'created': int(time()),
- 'model' : model,
+ 'id': f'cmpl-1337-{int(time())}',
+ 'object': 'text_completion',
+ 'created': int(time()),
+ 'model': model,
'choices': [{
- 'text' : chunk,
- 'index' : 0,
- 'logprobs' : None,
- 'finish_reason' : 'stop'
- }],
+ 'text': chunk,
+ 'index': 0,
+ 'logprobs': None,
+ 'finish_reason': 'stop'
+ }],
'usage': {
- 'prompt_tokens' : len(prompt),
- 'completion_tokens' : len(chunk),
- 'total_tokens' : len(prompt) + len(chunk)
+ 'prompt_tokens': len(prompt),
+ 'completion_tokens': len(chunk),
+ 'total_tokens': len(prompt) + len(chunk)
}
})
diff --git a/quora/__init__.py b/quora/__init__.py
index d0ed302f..cd5ec8f9 100644
--- a/quora/__init__.py
+++ b/quora/__init__.py
@@ -116,11 +116,11 @@ class ModelResponse:
class Model:
@staticmethod
def create(
- token: str,
- model: str = 'gpt-3.5-turbo', # claude-instant
- system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
- description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
- handle: str = None,
+ token: str,
+ model: str = 'gpt-3.5-turbo', # claude-instant
+ system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
+ description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
+ handle: str = None,
) -> ModelResponse:
models = {
'gpt-3.5-turbo': 'chinchilla',
@@ -202,9 +202,9 @@ class Model:
class Account:
@staticmethod
def create(
- proxy: Optional[str] = None,
- logging: bool = False,
- enable_bot_creation: bool = False,
+ proxy: Optional[str] = None,
+ logging: bool = False,
+ enable_bot_creation: bool = False,
):
client = TLS(client_identifier='chrome110')
client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None
@@ -309,10 +309,10 @@ class Account:
class StreamingCompletion:
@staticmethod
def create(
- model: str = 'gpt-4',
- custom_model: bool = None,
- prompt: str = 'hello world',
- token: str = '',
+ model: str = 'gpt-4',
+ custom_model: bool = None,
+ prompt: str = 'hello world',
+ token: str = '',
):
_model = MODELS[model] if not custom_model else custom_model
@@ -344,10 +344,10 @@ class StreamingCompletion:
class Completion:
def create(
- model: str = 'gpt-4',
- custom_model: str = None,
- prompt: str = 'hello world',
- token: str = '',
+ model: str = 'gpt-4',
+ custom_model: str = None,
+ prompt: str = 'hello world',
+ token: str = '',
):
models = {
'sage': 'capybara',
@@ -389,12 +389,12 @@ class Completion:
class Poe:
def __init__(
- self,
- model: str = 'ChatGPT',
- driver: str = 'firefox',
- download_driver: bool = False,
- driver_path: Optional[str] = None,
- cookie_path: str = './quora/cookie.json',
+ self,
+ model: str = 'ChatGPT',
+ driver: str = 'firefox',
+ download_driver: bool = False,
+ driver_path: Optional[str] = None,
+ cookie_path: str = './quora/cookie.json',
):
# validating the model
if model and model not in MODELS:
@@ -473,12 +473,12 @@ class Poe:
return response
def create_bot(
- self,
- name: str,
- /,
- prompt: str = '',
- base_model: str = 'ChatGPT',
- description: str = '',
+ self,
+ name: str,
+ /,
+ prompt: str = '',
+ base_model: str = 'ChatGPT',
+ description: str = '',
) -> None:
if base_model not in MODELS:
raise RuntimeError('Sorry, the base_model you provided does not exist. Please check and try again.')
diff --git a/quora/api.py b/quora/api.py
index 42814f2c..697f6663 100644
--- a/quora/api.py
+++ b/quora/api.py
@@ -384,7 +384,7 @@ class Client:
continue
# update info about response
- message["text_new"] = message["text"][len(last_text) :]
+ message["text_new"] = message["text"][len(last_text):]
last_text = message["text"]
message_id = message["messageId"]
@@ -456,21 +456,21 @@ class Client:
logger.info(f"No more messages left to delete.")
def create_bot(
- self,
- handle,
- prompt="",
- base_model="chinchilla",
- description="",
- intro_message="",
- api_key=None,
- api_bot=False,
- api_url=None,
- prompt_public=True,
- pfp_url=None,
- linkification=False,
- markdown_rendering=True,
- suggested_replies=False,
- private=False,
+ self,
+ handle,
+ prompt="",
+ base_model="chinchilla",
+ description="",
+ intro_message="",
+ api_key=None,
+ api_bot=False,
+ api_url=None,
+ prompt_public=True,
+ pfp_url=None,
+ linkification=False,
+ markdown_rendering=True,
+ suggested_replies=False,
+ private=False,
):
result = self.send_query(
"PoeBotCreateMutation",
@@ -499,21 +499,21 @@ class Client:
return data
def edit_bot(
- self,
- bot_id,
- handle,
- prompt="",
- base_model="chinchilla",
- description="",
- intro_message="",
- api_key=None,
- api_url=None,
- private=False,
- prompt_public=True,
- pfp_url=None,
- linkification=False,
- markdown_rendering=True,
- suggested_replies=False,
+ self,
+ bot_id,
+ handle,
+ prompt="",
+ base_model="chinchilla",
+ description="",
+ intro_message="",
+ api_key=None,
+ api_url=None,
+ private=False,
+ prompt_public=True,
+ pfp_url=None,
+ linkification=False,
+ markdown_rendering=True,
+ suggested_replies=False,
):
result = self.send_query(
"PoeBotEditMutation",
diff --git a/testing/forefront_test.py b/testing/forefront_test.py
index b5c682b8..4d5f4bc1 100644
--- a/testing/forefront_test.py
+++ b/testing/forefront_test.py
@@ -5,7 +5,6 @@ token = forefront.Account.create(logging=True)
print(token)
# get a response
-for response in forefront.StreamingCompletion.create(token = token,
- prompt = 'hello world', model='gpt-4'):
-
- print(response.completion.choices[0].text, end = '')
\ No newline at end of file
+for response in forefront.StreamingCompletion.create(token=token,
+ prompt='hello world', model='gpt-4'):
+ print(response.completion.choices[0].text, end='')
diff --git a/testing/phind_test.py b/testing/phind_test.py
index 2f2560a1..ed8ff65e 100644
--- a/testing/phind_test.py
+++ b/testing/phind_test.py
@@ -2,18 +2,19 @@ import phind
# set cf_clearance cookie ( not needed at the moment)
phind.cf_clearance = 'MDzwnr3ZWk_ap8u.iwwMR5F3WccfOkhUy_zGNDpcF3s-1682497341-0-160'
-phind.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
+phind.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
prompt = 'hello world'
# normal completion
result = phind.Completion.create(
- model = 'gpt-4',
- prompt = prompt,
- results = phind.Search.create(prompt, actualSearch = False), # create search (set actualSearch to False to disable internet)
- creative = False,
- detailed = False,
- codeContext = '') # up to 3000 chars of code
+ model='gpt-4',
+ prompt=prompt,
+ results=phind.Search.create(prompt, actualSearch=False),
+ # create search (set actualSearch to False to disable internet)
+ creative=False,
+ detailed=False,
+ codeContext='') # up to 3000 chars of code
print(result.completion.choices[0].text)
@@ -22,11 +23,12 @@ prompt = 'who won the quatar world cup'
# help needed: not getting newlines from the stream, please submit a PR if you know how to fix this
# stream completion
for result in phind.StreamingCompletion.create(
- model = 'gpt-4',
- prompt = prompt,
- results = phind.Search.create(prompt, actualSearch = True), # create search (set actualSearch to False to disable internet)
- creative = False,
- detailed = False,
- codeContext = ''): # up to 3000 chars of code
+ model='gpt-4',
+ prompt=prompt,
+ results=phind.Search.create(prompt, actualSearch=True),
+ # create search (set actualSearch to False to disable internet)
+ creative=False,
+ detailed=False,
+ codeContext=''): # up to 3000 chars of code
- print(result.completion.choices[0].text, end='', flush=True)
\ No newline at end of file
+ print(result.completion.choices[0].text, end='', flush=True)
diff --git a/testing/poe_account_create_test.py b/testing/poe_account_create_test.py
index 627520ca..5d435b1f 100644
--- a/testing/poe_account_create_test.py
+++ b/testing/poe_account_create_test.py
@@ -1,90 +1,90 @@
-from requests import Session
-from tls_client import Session as TLS
-from json import dumps
from hashlib import md5
-from time import sleep
+from json import dumps
from re import findall
-from pypasser import reCaptchaV3
+
+from tls_client import Session as TLS
+from twocaptcha import TwoCaptcha
+
from quora import extract_formkey
from quora.mail import Emailnator
-from twocaptcha import TwoCaptcha
solver = TwoCaptcha('72747bf24a9d89b4dcc1b24875efd358')
+
class Account:
def create(proxy: None or str = None, logging: bool = False, enable_bot_creation: bool = False):
- client = TLS(client_identifier='chrome110')
+ client = TLS(client_identifier='chrome110')
client.proxies = {
'http': f'http://{proxy}',
'https': f'http://{proxy}'} if proxy else None
- mail_client = Emailnator()
- mail_address = mail_client.get_mail()
+ mail_client = Emailnator()
+ mail_address = mail_client.get_mail()
if logging: print('email', mail_address)
client.headers = {
- 'authority' : 'poe.com',
- 'accept' : '*/*',
+ 'authority': 'poe.com',
+ 'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'content-type' : 'application/json',
- 'origin' : 'https://poe.com',
- 'poe-formkey' : 'null',
- 'poe-tag-id' : 'null',
- 'poe-tchannel' : 'null',
- 'referer' : 'https://poe.com/login',
- 'sec-ch-ua' : '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
- 'sec-ch-ua-mobile' : '?0',
+ 'content-type': 'application/json',
+ 'origin': 'https://poe.com',
+ 'poe-formkey': 'null',
+ 'poe-tag-id': 'null',
+ 'poe-tchannel': 'null',
+ 'referer': 'https://poe.com/login',
+ 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
- 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
}
- client.headers["poe-formkey"] = extract_formkey(client.get('https://poe.com/login').text)
+ client.headers["poe-formkey"] = extract_formkey(client.get('https://poe.com/login').text)
client.headers["poe-tchannel"] = client.get('https://poe.com/api/settings').json()['tchannelData']['channel']
- #token = reCaptchaV3('https://www.recaptcha.net/recaptcha/enterprise/anchor?ar=1&k=6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG&co=aHR0cHM6Ly9wb2UuY29tOjQ0Mw..&hl=en&v=4PnKmGB9wRHh1i04o7YUICeI&size=invisible&cb=bi6ivxoskyal')
+ # token = reCaptchaV3('https://www.recaptcha.net/recaptcha/enterprise/anchor?ar=1&k=6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG&co=aHR0cHM6Ly9wb2UuY29tOjQ0Mw..&hl=en&v=4PnKmGB9wRHh1i04o7YUICeI&size=invisible&cb=bi6ivxoskyal')
token = solver.recaptcha(sitekey='6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG',
- url = 'https://poe.com/login?redirect_url=%2F',
- version = 'v3',
- enterprise = 1,
- invisible = 1,
- action = 'login',)['code']
+ url='https://poe.com/login?redirect_url=%2F',
+ version='v3',
+ enterprise=1,
+ invisible=1,
+ action='login', )['code']
- payload = dumps(separators = (',', ':'), obj = {
+ payload = dumps(separators=(',', ':'), obj={
'queryName': 'MainSignupLoginSection_sendVerificationCodeMutation_Mutation',
'variables': {
- 'emailAddress' : mail_address,
- 'phoneNumber' : None,
+ 'emailAddress': mail_address,
+ 'phoneNumber': None,
'recaptchaToken': token
},
'query': 'mutation MainSignupLoginSection_sendVerificationCodeMutation_Mutation(\n $emailAddress: String\n $phoneNumber: String\n $recaptchaToken: String\n) {\n sendVerificationCode(verificationReason: login, emailAddress: $emailAddress, phoneNumber: $phoneNumber, recaptchaToken: $recaptchaToken) {\n status\n errorMessage\n }\n}\n',
})
base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
- client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()
-
+ client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()
+
print(dumps(client.headers, indent=4))
-
+
response = client.post('https://poe.com/api/gql_POST', data=payload)
-
+
if 'automated_request_detected' in response.text:
print('please try using a proxy / wait for fix')
-
+
if 'Bad Request' in response.text:
- if logging: print('bad request, retrying...' , response.json())
+ if logging: print('bad request, retrying...', response.json())
quit()
- if logging: print('send_code' ,response.json())
-
+ if logging: print('send_code', response.json())
+
mail_content = mail_client.get_message()
- mail_token = findall(r';">(\d{6,7})</div>', mail_content)[0]
+ mail_token = findall(r';">(\d{6,7})</div>', mail_content)[0]
if logging: print('code', mail_token)
- payload = dumps(separators = (',', ':'), obj={
+ payload = dumps(separators=(',', ':'), obj={
"queryName": "SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation",
"variables": {
"verificationCode": str(mail_token),
@@ -95,10 +95,10 @@ class Account:
})
base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
- client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()
+ client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()
- response = client.post('https://poe.com/api/gql_POST', data = payload)
+ response = client.post('https://poe.com/api/gql_POST', data=payload)
if logging: print('verify_code', response.json())
-
-Account.create(proxy = 'xtekky:wegwgwegwed_streaming-1@geo.iproyal.com:12321', logging = True)
\ No newline at end of file
+
+Account.create(proxy='xtekky:wegwgwegwed_streaming-1@geo.iproyal.com:12321', logging=True)
diff --git a/testing/poe_test.py b/testing/poe_test.py
index 122f19c7..8d527879 100644
--- a/testing/poe_test.py
+++ b/testing/poe_test.py
@@ -1,13 +1,13 @@
-import quora
from time import sleep
-token = quora.Account.create(proxy = None,logging = True)
+import quora
+
+token = quora.Account.create(proxy=None, logging=True)
print('token', token)
sleep(2)
-for response in quora.StreamingCompletion.create(model = 'gpt-3.5-turbo',
- prompt = 'hello world',
- token = token):
-
- print(response.completion.choices[0].text, end="", flush=True) \ No newline at end of file
+for response in quora.StreamingCompletion.create(model='gpt-3.5-turbo',
+ prompt='hello world',
+ token=token):
+ print(response.completion.choices[0].text, end="", flush=True)
diff --git a/testing/quora_test_2.py b/testing/quora_test_2.py
index c51b8478..d5316946 100644
--- a/testing/quora_test_2.py
+++ b/testing/quora_test_2.py
@@ -1,18 +1,17 @@
import quora
-token = quora.Account.create(logging = True, enable_bot_creation=True)
+token = quora.Account.create(logging=True, enable_bot_creation=True)
model = quora.Model.create(
- token = token,
- model = 'gpt-3.5-turbo', # or claude-instant-v1.0
- system_prompt = 'you are ChatGPT a large language model ...'
+ token=token,
+ model='gpt-3.5-turbo', # or claude-instant-v1.0
+ system_prompt='you are ChatGPT a large language model ...'
)
print(model.name)
for response in quora.StreamingCompletion.create(
- custom_model = model.name,
- prompt ='hello world',
- token = token):
-
- print(response.completion.choices[0].text)
\ No newline at end of file
+ custom_model=model.name,
+ prompt='hello world',
+ token=token):
+ print(response.completion.choices[0].text)
diff --git a/testing/sqlchat_test.py b/testing/sqlchat_test.py
index 28e9d2e3..577d85a6 100644
--- a/testing/sqlchat_test.py
+++ b/testing/sqlchat_test.py
@@ -1,7 +1,6 @@
import sqlchat
for response in sqlchat.StreamCompletion.create(
- prompt = 'write python code to reverse a string',
- messages = []):
-
- print(response.completion.choices[0].text, end='')
\ No newline at end of file
+ prompt='write python code to reverse a string',
+ messages=[]):
+ print(response.completion.choices[0].text, end='')
diff --git a/testing/t3nsor_test.py b/testing/t3nsor_test.py
index eb8e2ae8..1506a1b4 100644
--- a/testing/t3nsor_test.py
+++ b/testing/t3nsor_test.py
@@ -1,7 +1,6 @@
import t3nsor
for response in t3nsor.StreamCompletion.create(
- prompt = 'write python code to reverse a string',
- messages = []):
-
+ prompt='write python code to reverse a string',
+ messages=[]):
print(response.completion.choices[0].text)
diff --git a/testing/writesonic_test.py b/testing/writesonic_test.py
index e652877d..5c68bbe0 100644
--- a/testing/writesonic_test.py
+++ b/testing/writesonic_test.py
@@ -2,29 +2,29 @@
import writesonic
# create account (3-4s)
-account = writesonic.Account.create(logging = True)
+account = writesonic.Account.create(logging=True)
# with loging:
- # 2023-04-06 21:50:25 INFO __main__ -> register success : '{"id":"51aa0809-3053-44f7-922a...' (2s)
- # 2023-04-06 21:50:25 INFO __main__ -> id : '51aa0809-3053-44f7-922a-2b85d8d07edf'
- # 2023-04-06 21:50:25 INFO __main__ -> token : 'eyJhbGciOiJIUzI1NiIsInR5cCI6Ik...'
- # 2023-04-06 21:50:28 INFO __main__ -> got key : '194158c4-d249-4be0-82c6-5049e869533c' (2s)
+# 2023-04-06 21:50:25 INFO __main__ -> register success : '{"id":"51aa0809-3053-44f7-922a...' (2s)
+# 2023-04-06 21:50:25 INFO __main__ -> id : '51aa0809-3053-44f7-922a-2b85d8d07edf'
+# 2023-04-06 21:50:25 INFO __main__ -> token : 'eyJhbGciOiJIUzI1NiIsInR5cCI6Ik...'
+# 2023-04-06 21:50:28 INFO __main__ -> got key : '194158c4-d249-4be0-82c6-5049e869533c' (2s)
# simple completion
response = writesonic.Completion.create(
- api_key = account.key,
- prompt = 'hello world'
+ api_key=account.key,
+ prompt='hello world'
)
-print(response.completion.choices[0].text) # Hello! How may I assist you today?
+print(response.completion.choices[0].text) # Hello! How may I assist you today?
# conversation
response = writesonic.Completion.create(
- api_key = account.key,
- prompt = 'what is my name ?',
- enable_memory = True,
- history_data = [
+ api_key=account.key,
+ prompt='what is my name ?',
+ enable_memory=True,
+ history_data=[
{
'is_sent': True,
'message': 'my name is Tekky'
@@ -36,14 +36,14 @@ response = writesonic.Completion.create(
]
)
-print(response.completion.choices[0].text) # Your name is Tekky.
+print(response.completion.choices[0].text) # Your name is Tekky.
# enable internet
response = writesonic.Completion.create(
- api_key = account.key,
- prompt = 'who won the quatar world cup ?',
- enable_google_results = True
+ api_key=account.key,
+ prompt='who won the quatar world cup ?',
+ enable_google_results=True
)
-print(response.completion.choices[0].text) # Argentina won the 2022 FIFA World Cup tournament held in Qatar ...
\ No newline at end of file
+print(response.completion.choices[0].text) # Argentina won the 2022 FIFA World Cup tournament held in Qatar ...
diff --git a/unfinished/bard/__init__.py b/unfinished/bard/__init__.py
index ef8980d7..f1d68b92 100644
--- a/unfinished/bard/__init__.py
+++ b/unfinished/bard/__init__.py
@@ -1,12 +1,12 @@
-from requests import Session
-from re import search
-from random import randint
from json import dumps, loads
-from urllib.parse import urlencode
-from dotenv import load_dotenv
from os import getenv
+from random import randint
+from re import search
+from urllib.parse import urlencode
from bard.typings import BardResponse
+from dotenv import load_dotenv
+from requests import Session
load_dotenv()
token = getenv('1psid')
@@ -62,16 +62,17 @@ class Completion:
'rt': 'c',
})
- response = client.post(f'https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate?{params}',
- data={
- 'at': snlm0e,
- 'f.req': dumps([None, dumps([
- [prompt],
- None,
- [conversation_id, response_id, choice_id],
- ])])
- }
- )
+ response = client.post(
+ f'https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate?{params}',
+ data={
+ 'at': snlm0e,
+ 'f.req': dumps([None, dumps([
+ [prompt],
+ None,
+ [conversation_id, response_id, choice_id],
+ ])])
+ }
+ )
chat_data = loads(response.content.splitlines()[3])[0][2]
if not chat_data:
diff --git a/unfinished/bard/typings.py b/unfinished/bard/typings.py
index ddf803b6..75b73bf9 100644
--- a/unfinished/bard/typings.py
+++ b/unfinished/bard/typings.py
@@ -1,4 +1,4 @@
-from typing import Dict, List, Optional, Union
+from typing import Dict, List, Union
class BardResponse:
diff --git a/unfinished/bing/__ini__.py b/unfinished/bing/__ini__.py
index 508b1067..1e4fd149 100644
--- a/unfinished/bing/__ini__.py
+++ b/unfinished/bing/__ini__.py
@@ -1,14 +1,12 @@
# Import necessary libraries
-from requests import get
-from browser_cookie3 import edge, chrome
-from ssl import create_default_context
-from certifi import where
-from uuid import uuid4
-from random import randint
+import asyncio
from json import dumps, loads
+from ssl import create_default_context
-import asyncio
import websockets
+from browser_cookie3 import edge
+from certifi import where
+from requests import get
# Set up SSL context
ssl_context = create_default_context()
@@ -28,14 +26,14 @@ def get_token():
class AsyncCompletion:
async def create(
- prompt: str = 'hello world',
- optionSets: list = [
- 'deepleo',
- 'enable_debug_commands',
- 'disable_emoji_spoken_text',
- 'enablemm',
- 'h3relaxedimg'
- ],
+ prompt: str = 'hello world',
+ optionSets: list = [
+ 'deepleo',
+ 'enable_debug_commands',
+ 'disable_emoji_spoken_text',
+ 'enablemm',
+ 'h3relaxedimg'
+ ],
token: str = get_token()):
"""Create a connection to Bing AI and send the prompt."""
@@ -83,7 +81,7 @@ class AsyncCompletion:
continue
response = loads(obj)
- if response.get('type') == 1 and response['arguments'][0].get('messages',):
+ if response.get('type') == 1 and response['arguments'][0].get('messages', ):
response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get(
'text')
@@ -99,11 +97,12 @@ class AsyncCompletion:
async def run():
"""Run the async completion and print the result."""
async for value in AsyncCompletion.create(
- prompt='summarize cinderella with each word beginning with a consecutive letter of the alphabet, a-z',
- optionSets=[
- "galileo",
- ]
+ prompt='summarize cinderella with each word beginning with a consecutive letter of the alphabet, a-z',
+ optionSets=[
+ "galileo",
+ ]
):
print(value, end='', flush=True)
+
asyncio.run(run())
diff --git a/unfinished/cocalc/__init__.py b/unfinished/cocalc/__init__.py
index 3ad9937a..2b73fc9e 100644
--- a/unfinished/cocalc/__init__.py
+++ b/unfinished/cocalc/__init__.py
@@ -6,7 +6,6 @@ class Completion:
system_prompt=("ASSUME I HAVE FULL ACCESS TO COCALC. ENCLOSE MATH IN $. "
"INCLUDE THE LANGUAGE DIRECTLY AFTER THE TRIPLE BACKTICKS "
"IN ALL MARKDOWN CODE BLOCKS. How can I do the following using CoCalc?")) -> str:
-
# Initialize a session with custom headers
session = self._initialize_session()
diff --git a/unfinished/cocalc/cocalc_test.py b/unfinished/cocalc/cocalc_test.py
index 0e1a7b3b..d6266518 100644
--- a/unfinished/cocalc/cocalc_test.py
+++ b/unfinished/cocalc/cocalc_test.py
@@ -1,8 +1,7 @@
import cocalc
-
response = cocalc.Completion.create(
- prompt = 'hello world'
+ prompt='hello world'
)
-print(response)
\ No newline at end of file
+print(response)
diff --git a/unfinished/easyai/main.py b/unfinished/easyai/main.py
index 07adfd72..451adb3b 100644
--- a/unfinished/easyai/main.py
+++ b/unfinished/easyai/main.py
@@ -1,7 +1,8 @@
# Import necessary libraries
-from requests import get
-from os import urandom
from json import loads
+from os import urandom
+
+from requests import get
# Generate a random session ID
sessionId = urandom(10).hex()
diff --git a/unfinished/gptbz/__init__.py b/unfinished/gptbz/__init__.py
index 44bfcd19..e95d5716 100644
--- a/unfinished/gptbz/__init__.py
+++ b/unfinished/gptbz/__init__.py
@@ -1,6 +1,8 @@
-import websockets
from json import dumps, loads
+import websockets
+
+
# Define the asynchronous function to test the WebSocket connection
diff --git a/unfinished/openai/__ini__.py b/unfinished/openai/__ini__.py
index 71ec4623..f0894e1b 100644
--- a/unfinished/openai/__ini__.py
+++ b/unfinished/openai/__ini__.py
@@ -1,7 +1,8 @@
# Import required libraries
-from tls_client import Session
from uuid import uuid4
+
from browser_cookie3 import chrome
+from tls_client import Session
class OpenAIChat:
diff --git a/unfinished/openaihosted/__init__.py b/unfinished/openaihosted/__init__.py
index e857b475..ba4d3982 100644
--- a/unfinished/openaihosted/__init__.py
+++ b/unfinished/openaihosted/__init__.py
@@ -1,7 +1,8 @@
-import requests
import json
import re
+import requests
+
headers = {
'authority': 'openai.a2hosted.com',
'accept': 'text/event-stream',
@@ -13,10 +14,12 @@ headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.0.0',
}
+
def create_query_param(conversation):
encoded_conversation = json.dumps(conversation)
return encoded_conversation.replace(" ", "%20").replace('"', '%22').replace("'", "%27")
+
user_input = input("Enter your message: ")
data = [
diff --git a/unfinished/openprompt/create.py b/unfinished/openprompt/create.py
index 6ccb66f4..c968c162 100644
--- a/unfinished/openprompt/create.py
+++ b/unfinished/openprompt/create.py
@@ -1,9 +1,9 @@
-from requests import post, get
from json import dumps
-#from mail import MailClient
-from time import sleep
+# from mail import MailClient
from re import findall
+from requests import post, get
+
html = get('https://developermail.com/mail/')
print(html.cookies.get('mailboxId'))
email = findall(r'mailto:(.*)">', html.text)[0]
@@ -15,9 +15,9 @@ headers = {
}
json_data = {
- 'email' : email,
+ 'email': email,
'password': 'T4xyt4Yn6WWQ4NC',
- 'data' : {},
+ 'data': {},
'gotrue_meta_security': {},
}
@@ -27,20 +27,20 @@ print(response.json())
# email_link = None
# while not email_link:
# sleep(1)
-
+
# mails = mailbox.getmails()
# print(mails)
quit()
-url = input("Enter the url: ")
+url = input("Enter the url: ")
response = get(url, allow_redirects=False)
# https://openprompt.co/#access_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8&expires_in=604800&refresh_token=_Zp8uXIA2InTDKYgo8TCqA&token_type=bearer&type=signup
-redirect = response.headers.get('location')
-access_token = redirect.split('&')[0].split('=')[1]
+redirect = response.headers.get('location')
+access_token = redirect.split('&')[0].split('=')[1]
refresh_token = redirect.split('&')[2].split('=')[1]
supabase_auth_token = dumps([access_token, refresh_token, None, None, None], separators=(',', ':'))
@@ -61,4 +61,4 @@ json_data = {
response = post('https://openprompt.co/api/chat2', cookies=cookies, json=json_data, stream=True)
for chunk in response.iter_content(chunk_size=1024):
- print(chunk)
\ No newline at end of file
+ print(chunk)
diff --git a/unfinished/openprompt/mail.py b/unfinished/openprompt/mail.py
index 082ac9fb..1130e7df 100644
--- a/unfinished/openprompt/mail.py
+++ b/unfinished/openprompt/mail.py
@@ -1,6 +1,8 @@
-import requests
import email
+import requests
+
+
class MailClient:
def __init__(self):
@@ -106,4 +108,4 @@ class MailClient:
client = MailClient()
client.newtoken()
-print(client.getmails())
\ No newline at end of file
+print(client.getmails())
diff --git a/unfinished/openprompt/main.py b/unfinished/openprompt/main.py
index 2fa4508e..e68a3b63 100644
--- a/unfinished/openprompt/main.py
+++ b/unfinished/openprompt/main.py
@@ -30,8 +30,7 @@ json_data = {
],
}
-response = requests.post('https://openprompt.co/api/chat2', cookies=cookies, headers=headers, json=json_data, stream=True)
+response = requests.post('https://openprompt.co/api/chat2', cookies=cookies, headers=headers, json=json_data,
+ stream=True)
for chunk in response.iter_content(chunk_size=1024):
print(chunk)
-
-
diff --git a/unfinished/openprompt/test.py b/unfinished/openprompt/test.py
index d178462e..65319cb6 100644
--- a/unfinished/openprompt/test.py
+++ b/unfinished/openprompt/test.py
@@ -1,7 +1,6 @@
access_token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV'
-supabase_auth_token= '%5B%22eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8%22%2C%22_Zp8uXIA2InTDKYgo8TCqA%22%2Cnull%2Cnull%2Cnull%5D'
-
+supabase_auth_token = '%5B%22eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8%22%2C%22_Zp8uXIA2InTDKYgo8TCqA%22%2Cnull%2Cnull%2Cnull%5D'
idk = [
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8",
- "_Zp8uXIA2InTDKYgo8TCqA",None,None,None] \ No newline at end of file
+ "_Zp8uXIA2InTDKYgo8TCqA", None, None, None]
diff --git a/unfinished/t3nsor/__init__.py b/unfinished/t3nsor/__init__.py
index aec45dcf..9b588e98 100644
--- a/unfinished/t3nsor/__init__.py
+++ b/unfinished/t3nsor/__init__.py
@@ -1,5 +1,6 @@
+from time import time
+
from requests import post
-from time import time
headers = {
'authority': 'www.t3nsor.tech',
@@ -19,18 +20,17 @@ headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}
+
class T3nsorResponse:
-
class Completion:
-
class Choices:
def __init__(self, choice: dict) -> None:
- self.text = choice['text']
- self.content = self.text.encode()
- self.index = choice['index']
- self.logprobs = choice['logprobs']
- self.finish_reason = choice['finish_reason']
-
+ self.text = choice['text']
+ self.content = self.text.encode()
+ self.index = choice['index']
+ self.logprobs = choice['logprobs']
+ self.finish_reason = choice['finish_reason']
+
def __repr__(self) -> str:
return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
@@ -39,99 +39,98 @@ class T3nsorResponse:
class Usage:
def __init__(self, usage_dict: dict) -> None:
- self.prompt_tokens = usage_dict['prompt_chars']
- self.completion_tokens = usage_dict['completion_chars']
- self.total_tokens = usage_dict['total_chars']
+ self.prompt_tokens = usage_dict['prompt_chars']
+ self.completion_tokens = usage_dict['completion_chars']
+ self.total_tokens = usage_dict['total_chars']
def __repr__(self):
return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
-
+
def __init__(self, response_dict: dict) -> None:
-
- self.response_dict = response_dict
- self.id = response_dict['id']
- self.object = response_dict['object']
- self.created = response_dict['created']
- self.model = response_dict['model']
- self.completion = self.Completion(response_dict['choices'])
- self.usage = self.Usage(response_dict['usage'])
+ self.response_dict = response_dict
+ self.id = response_dict['id']
+ self.object = response_dict['object']
+ self.created = response_dict['created']
+ self.model = response_dict['model']
+ self.completion = self.Completion(response_dict['choices'])
+ self.usage = self.Usage(response_dict['usage'])
def json(self) -> dict:
return self.response_dict
+
class Completion:
model = {
'model': {
- 'id' : 'gpt-3.5-turbo',
- 'name' : 'Default (GPT-3.5)'
+ 'id': 'gpt-3.5-turbo',
+ 'name': 'Default (GPT-3.5)'
}
}
def create(
- prompt: str = 'hello world',
- messages: list = []) -> T3nsorResponse:
-
- response = post('https://www.t3nsor.tech/api/chat', headers = headers, json = Completion.model | {
- 'messages' : messages,
- 'key' : '',
- 'prompt' : prompt
+ prompt: str = 'hello world',
+ messages: list = []) -> T3nsorResponse:
+ response = post('https://www.t3nsor.tech/api/chat', headers=headers, json=Completion.model | {
+ 'messages': messages,
+ 'key': '',
+ 'prompt': prompt
})
return T3nsorResponse({
- 'id' : f'cmpl-1337-{int(time())}',
- 'object' : 'text_completion',
- 'created': int(time()),
- 'model' : Completion.model,
+ 'id': f'cmpl-1337-{int(time())}',
+ 'object': 'text_completion',
+ 'created': int(time()),
+ 'model': Completion.model,
'choices': [{
- 'text' : response.text,
- 'index' : 0,
- 'logprobs' : None,
- 'finish_reason' : 'stop'
- }],
+ 'text': response.text,
+ 'index': 0,
+ 'logprobs': None,
+ 'finish_reason': 'stop'
+ }],
'usage': {
- 'prompt_chars' : len(prompt),
- 'completion_chars' : len(response.text),
- 'total_chars' : len(prompt) + len(response.text)
+ 'prompt_chars': len(prompt),
+ 'completion_chars': len(response.text),
+ 'total_chars': len(prompt) + len(response.text)
}
})
+
class StreamCompletion:
model = {
'model': {
- 'id' : 'gpt-3.5-turbo',
- 'name' : 'Default (GPT-3.5)'
+ 'id': 'gpt-3.5-turbo',
+ 'name': 'Default (GPT-3.5)'
}
}
def create(
- prompt: str = 'hello world',
- messages: list = []) -> T3nsorResponse:
-
+ prompt: str = 'hello world',
+ messages: list = []) -> T3nsorResponse:
print('t3nsor api is down, this may not work, refer to another module')
- response = post('https://www.t3nsor.tech/api/chat', headers = headers, stream = True, json = Completion.model | {
- 'messages' : messages,
- 'key' : '',
- 'prompt' : prompt
+ response = post('https://www.t3nsor.tech/api/chat', headers=headers, stream=True, json=Completion.model | {
+ 'messages': messages,
+ 'key': '',
+ 'prompt': prompt
})
-
- for chunk in response.iter_content(chunk_size = 2046):
+
+ for chunk in response.iter_content(chunk_size=2046):
yield T3nsorResponse({
- 'id' : f'cmpl-1337-{int(time())}',
- 'object' : 'text_completion',
- 'created': int(time()),
- 'model' : Completion.model,
-
+ 'id': f'cmpl-1337-{int(time())}',
+ 'object': 'text_completion',
+ 'created': int(time()),
+ 'model': Completion.model,
+
'choices': [{
- 'text' : chunk.decode(),
- 'index' : 0,
- 'logprobs' : None,
- 'finish_reason' : 'stop'
+ 'text': chunk.decode(),
+ 'index': 0,
+ 'logprobs': None,
+ 'finish_reason': 'stop'
}],
-
+
'usage': {
- 'prompt_chars' : len(prompt),
- 'completion_chars' : len(chunk.decode()),
- 'total_chars' : len(prompt) + len(chunk.decode())
+ 'prompt_chars': len(prompt),
+ 'completion_chars': len(chunk.decode()),
+ 'total_chars': len(prompt) + len(chunk.decode())
}
})
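
For reference, a short usage sketch of the t3nsor client after this reformat; it is illustrative only and not part of the commit. It assumes the package is importable as t3nsor (in this tree it lives under unfinished/) and that the module-level headers dict used by Completion.create is defined earlier in the file. The module itself warns that the t3nsor API may be down.

    import t3nsor

    # One-shot completion: returns a T3nsorResponse whose json() method
    # exposes the OpenAI-style dict built above.
    resp = t3nsor.Completion.create(prompt='hello world', messages=[])
    print(resp.json()['choices'][0]['text'])

    # Streamed completion: StreamCompletion.create yields one T3nsorResponse
    # per received chunk.
    for chunk in t3nsor.StreamCompletion.create(prompt='hello world', messages=[]):
        print(chunk.json()['choices'][0]['text'], end='', flush=True)
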
diff --git a/unfinished/test.py b/unfinished/test.py
index 93e39cc9..a5f038c5 100644
--- a/unfinished/test.py
+++ b/unfinished/test.py
@@ -1,12 +1,8 @@
-import gptbz
-import asyncio
-
-
# asyncio.run(gptbz.test())
import requests
image = '/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAAoALQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigDkZP+EhS4W0k1S+VntQPtEWmRsgkNwBu4ZsHYQNvTbls5BA6DS7uW6S6E0VwjQ3UsQM0Pl71DZUrydy4IAbvg8CsTx3DbHQLi4uVs9scWzdd+dsAaWI4PlfNjKjpzkDtmpoNSgbWYpLR7Ty5bq5trw/vd3nIowBxtzti53Y6fKT3z2djra56fNbv07HR1z13ZRX/jDyby0+02f9nfdmsEeHd5o/5anndwPkxjjPWuhrh9Mvra88RLqccmnOHtvLEqfaN+1r1lUcjbg4PbO4H+Cqk+hnRi9ZI29E0uC2N1eG3Am+13DITZRwuqlsYG0ZYEKCGJywwT2AtWTapcW1vcPPCiyrE5ils2SRQV+dW/ecMT/3zgj5utZtpdwL4e190e02W9xeb9vm7FOWY78/NnnJ28f3ahkgtptD8JRlbMos9s8QPnbcrEzDy/4sgDjzOMdeaSZbi23f8vmbfn6hBFuktmuWWPJWCNELNuxgbpcDj1Pbr2qJ9bMVyIZNK1JVLyr5qwB1AjUNu+Uk4bovGSRjAqCTwdoElv5B02MReT5G1HZfk8zzMcEfx81YlsJ7NJX0tolZzNK8dyZJA8jDIwd3yjcBkAHjOAM09SP3b/q36mkjiSNXAYBgCNykH8QeRWdfaw1ldSW66XqN0UgE++3iBRsvt2BiQN/8WPQZqharF9oN5osVml1NLbLqUbmUFY/L4CrgYYKy4yoGM5xjhlnc2OoeMrfULV7aQXGkExyYlErJ5oPQ/Jtye/zZ9qLgqaTba0NyzvPtizH7NcQeVM8OJ49u/acbl9VPY96s1geFjF/xOhF9m41Wfd9n8z73BO7f/Fzzt+X0q7c6mWvRY2DwSXcUsQuUff8Auo2ySflB+YqrYyQOmTyARPQmVP32kLqF1cbmsrJZkuni3rcfZ98UfzKvJJUE4JOM5wpODwDl3Meuf2rHbRatcBJXuj5iachjhUovlBmZudrNkEZ3HIOMGlhREhbS9He2a8MO6a4fzmGDMQ3zAk5yZ8DzMgj0yRuWdha2CzLawrEJpnnkx/G7HLMfc0bl3VNf5pff/kVLS8uxFHHJZ3s5Xyo2mZI4y2VBZyN44B6gDrwAcVZ069Go2EV2Le5t/MBPlXMZjkXnGGU9OlULSdbfTt8LWy5mt0JAkK4YRLjnnODx26Z71TXULEWn/CUWDwmxeDbM4WbkCXJbaB23SnlM5PUDNF7CcObZf12OlpCcDoTz2oVlcZVgRkjIPccGo7hgsSk7ceYg+bP94elUYpamda64915GdH1SESxiTM0KjZmTZtbDHB53Y/u89eK1qw4xD9l0mIC3wLdCg/eYwHh+73x0+9znb71uUkXUSWyCiiimZhRRRQBieL5Hj8LXjxySxuNmGivFtWHzr0lbhfx69O9MvHdZpbKKWYnUluNji+VGikVFULHnkdGbjO05JHPEviyF5/DF7HGkjuQpCx2i3THDA8RNw3Tv069qR0kk0i4uFilF3bSXTwE2a+YGzIAUQnnIPByN46kbjUPc6YNKC9X+SLtjeB9Mt5ZyqzbI1lQzK5R2C/KWGAT8w6dcjHUVzemSyxeCba9e5uWfzIgxl1aOTgXPebGw5BwR3ACdalna8+0R3Kx3nk6jc2MvkjTI2MH97zDnI+4uWOSny4z2Lqxmt/hytvHHIZhFHJsj0yJnyXDEfZ87M9cjPB56ik2y4xSsu7XcnjMsejeJszXBZZrgozaihZAYwQFfGIQM8Bvu9ehrTKuJtOg3y5gKs/8ApAy2Y5B846uMj8Tz/CaqzROH1C3EchW6uHGRZIVx9nHXs4yPvN1PydBV2Lc+u3eUkCJBDtZoAFJzJna/VjgjI/h/4EaaM5PS/wDXRF+iiirOcy7RZE8RanukmKPFA6q9yHVfvg7Y+qfd5J4Y9OhrJ8Nm4FxYJNNdORaXCsJtTS4yVnAyQoG5sfxfw/dPJrUslmGt6rcymQxM0MMStahMALk4cfM65c9cBSGA7mqmi2k9t/ZZuDJJKbSdpHNjHEdzyRvhtv3G5PyjIbBJOVqDpurP5d+zGWtzeLdahZQLNK895PiV7+N/IURKQQMEqNzKAm1tucnggG4Fkhs4INNuJL145oEuHa7BcIAuWOQRkrhiAFzkkEE8rNDJPczWtnG1rG7yfapvsqESsY1AIJPP3hztbPllTjHKvpv2CWKbTUSHdJCk8cVtH+8jUFOSNpGAynOTgJgL1BNRNxf9fmWNGa3fR7U2ty9zDswJZJxMzHvlwSCc5BwccVerBZ3tLf8Atqyguvsxt/n02OyUSsxk3FsHa24bnyM4ycgE9d1WDDIz1I5BHQ471SM6i1uY8cjjSIWLyFjLbDJu1J5Mefn6HryP4snH3hRdmTS5f7T82aS2WBY5Y5LpVjX94Pn+YYzhmydw4UDB4wio/wDY8K+XLuE1qcfY1B4MWfk6DHOT/Bg4+6K1zGkkHlSoroy7WVlGCCOQRSsU5JGUrPo96EZ5p7O7mmmlubm7XFqQoYIobB2fK3Aztwe3TQvX2QKQSMyxDiQJ1dR1P8u/TvWb5bWty2m3KTXlvqMs7Ky2ieVbqVBKSEcHJL4JB3ZwfeLfcQRnTpY7mT7PLZiOdbJSkillzgA44KMScLsBBAOBkuNxu0/6epcQv9s0+LfJzauxBuVJJDRckdXPJ+YcDJH8QrTrN2sNcsxsk2LZyjd9nXaCWj439VPH3R
wcZ/hFaVNGc+gUUUUyAooooAxfFVxZxeG9RS7ltVQ25ytwzbCCQBkJ82MkD5eeah0G7tYLi/sZJrKO4fUbjy4oncM/SQ5D9Ww4J25Xniiis2/eO2FNOhf1/CxmamsEGp2+nzx2CwxajYyWKN9o3KdpX+Ebd2I2287ePm973i3UdMg0W+0y4mtUkNqJPKuBJ5ewuEBYx8gbiBxz+FFFS3ZM1p01OdNN/wBaFfVtU0qHxHplx9qsSkEl2853SvIjxwjdtCZXIX7wbt05q7YJdS6nc6vYxWEtpfi2KS+bKsjQhCSWBBG4bhtAAyCcmiinF3k0RWgqdKMl1VvxZfM2s+VkWFh5nl5x9tfG/djGfK6bec468Y/irN1CeUCeHXbrTItPc3O6GN5PNltxHx0I+YKXLYB42455ooqpaIwo2lO1rE1rZjUYrcCO2Giw/Zp7BYzKrkKu4bh8oAB2EA56HIz0u3uxL+1kbygQpQFt2fmki4GOOuOvfHbNFFPpcTu6nKFpsTU75V8oNJKXIXduOI4hk54zjHTjGO+a0KKKaM59PQxLqNNBMuoQpDFYJEfPQLISp8zcWAXIxh5CcLnOMnHQaFNKkkvtOFoli0k9xqP32Zn24LIFyM7kwRg98c5yUVL3No6xTfV2/IrxyW0vh21kQ2phaexKn97s5aErj+LPTbnj7u7+KujoopxZNZW+9/oQXdpBfWk1rcxiSGVGjdSSMhgQeRyOCRxWOtvbXU0Ol6mIHksJbea0IMoJYISGy3U5ST+JuB83uUUMVJuz121JnaL/AITOBSYPOGnyEA7/ADdvmJnH8G3IHX5s4xxmtmiihdRVFZR9AoooqjI//9k='
-response = requests.get('https://ocr.holey.cc/ncku?base64_str=%s' % image) #.split('base64,')[1])
-print(response.content) \ No newline at end of file
+response = requests.get('https://ocr.holey.cc/ncku?base64_str=%s' % image) # .split('base64,')[1])
+print(response.content)
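
A hedged aside on the OCR call above, not part of the commit: base64 text contains '+' and '/' characters that a plain '%s' interpolation leaves unescaped in the query string, so an alternative is to let requests build and percent-encode the query.

    import requests

    # Same request as in test.py, but with requests handling the encoding of
    # the base64 payload in the query string.
    response = requests.get('https://ocr.holey.cc/ncku', params={'base64_str': image})
    print(response.content)
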
diff --git a/unfinished/theb.ai/__init__.py b/unfinished/theb.ai/__init__.py
index a90a32f5..e6bcb8c0 100644
--- a/unfinished/theb.ai/__init__.py
+++ b/unfinished/theb.ai/__init__.py
@@ -1,46 +1,49 @@
-from curl_cffi import requests
-from json import loads
-from re import findall
-from threading import Thread
-from queue import Queue, Empty
+from json import loads
+from queue import Queue, Empty
+from re import findall
+from threading import Thread
+
+from curl_cffi import requests
+
class Completion:
# experimental
part1 = '{"role":"assistant","id":"chatcmpl'
part2 = '"},"index":0,"finish_reason":null}]}}'
regex = rf'{part1}(.*){part2}'
-
- timer = None
- message_queue = Queue()
+
+ timer = None
+ message_queue = Queue()
stream_completed = False
-
+
def request():
headers = {
- 'authority' : 'chatbot.theb.ai',
+ 'authority': 'chatbot.theb.ai',
'content-type': 'application/json',
- 'origin' : 'https://chatbot.theb.ai',
- 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
+ 'origin': 'https://chatbot.theb.ai',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}
- requests.post('https://chatbot.theb.ai/api/chat-process', headers=headers, content_callback=Completion.handle_stream_response,
- json = {
- 'prompt' : 'hello world',
- 'options': {}
- }
- )
+ requests.post('https://chatbot.theb.ai/api/chat-process', headers=headers,
+ content_callback=Completion.handle_stream_response,
+ json={
+ 'prompt': 'hello world',
+ 'options': {}
+ }
+ )
Completion.stream_completed = True
@staticmethod
def create():
Thread(target=Completion.request).start()
-
+
while Completion.stream_completed != True or not Completion.message_queue.empty():
try:
message = Completion.message_queue.get(timeout=0.01)
for message in findall(Completion.regex, message):
yield loads(Completion.part1 + message + Completion.part2)
-
+
except Empty:
pass
@@ -48,10 +51,12 @@ class Completion:
def handle_stream_response(response):
Completion.message_queue.put(response.decode())
+
def start():
for message in Completion.create():
yield message['delta']
+
if __name__ == '__main__':
for message in start():
print(message)
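
A small consumption sketch for the streaming client above, not part of the commit. It relies only on start() as defined here and assumes each yielded delta is a plain text fragment, which is what the __main__ print loop suggests.

    # Join the streamed deltas into one reply string instead of printing them
    # one by one.
    reply = ''.join(delta for delta in start())
    print(reply)
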
diff --git a/unfinished/vercelai/v2.py b/unfinished/vercelai/v2.py
index d1a4ad05..176ee342 100644
--- a/unfinished/vercelai/v2.py
+++ b/unfinished/vercelai/v2.py
@@ -1,6 +1,5 @@
import requests
-
token = requests.get('https://play.vercel.ai/openai.jpeg', headers={
'authority': 'play.vercel.ai',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
@@ -15,7 +14,7 @@ headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
}
-for chunk in requests.post('https://play.vercel.ai/api/generate', headers=headers, stream = True, json = {
+for chunk in requests.post('https://play.vercel.ai/api/generate', headers=headers, stream=True, json={
'prompt': 'hi',
'model': 'openai:gpt-3.5-turbo',
'temperature': 0.7,
@@ -25,5 +24,4 @@ for chunk in requests.post('https://play.vercel.ai/api/generate', headers=header
'frequencyPenalty': 1,
'presencePenalty': 1,
'stopSequences': []}).iter_lines():
-
- print(chunk) \ No newline at end of file
+ print(chunk)
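
A hedged variant of the print loop above, not part of the commit: iter_lines() yields raw bytes plus empty keep-alive lines, so decoding the non-empty chunks prints readable text instead of b'...' representations. stream_response stands in for the same requests.post(...) call made in v2.py.

    for chunk in stream_response.iter_lines():
        if chunk:  # skip keep-alive blank lines
            print(chunk.decode(errors='replace'), flush=True)
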
diff --git a/unfinished/writesonic/__init__.py b/unfinished/writesonic/__init__.py
index 7df6f393..ce684912 100644
--- a/unfinished/writesonic/__init__.py
+++ b/unfinished/writesonic/__init__.py
@@ -1,29 +1,33 @@
-from requests import Session
-from names import get_first_name, get_last_name
-from random import choice
-from requests import post
-from time import time
-from colorama import Fore, init; init()
+from random import choice
+from time import time
+
+from colorama import Fore, init
+from names import get_first_name, get_last_name
+from requests import Session
+from requests import post
+
+init()
+
class logger:
@staticmethod
def info(string) -> print:
import datetime
now = datetime.datetime.now()
- return print(f"{Fore.CYAN}{now.strftime('%Y-%m-%d %H:%M:%S')} {Fore.BLUE}INFO {Fore.MAGENTA}__main__ -> {Fore.RESET}{string}")
+ return print(
+ f"{Fore.CYAN}{now.strftime('%Y-%m-%d %H:%M:%S')} {Fore.BLUE}INFO {Fore.MAGENTA}__main__ -> {Fore.RESET}{string}")
+
class SonicResponse:
-
class Completion:
-
class Choices:
def __init__(self, choice: dict) -> None:
- self.text = choice['text']
- self.content = self.text.encode()
- self.index = choice['index']
- self.logprobs = choice['logprobs']
- self.finish_reason = choice['finish_reason']
-
+ self.text = choice['text']
+ self.content = self.text.encode()
+ self.index = choice['index']
+ self.logprobs = choice['logprobs']
+ self.finish_reason = choice['finish_reason']
+
def __repr__(self) -> str:
return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
@@ -32,127 +36,128 @@ class SonicResponse:
class Usage:
def __init__(self, usage_dict: dict) -> None:
- self.prompt_tokens = usage_dict['prompt_chars']
- self.completion_tokens = usage_dict['completion_chars']
- self.total_tokens = usage_dict['total_chars']
+ self.prompt_tokens = usage_dict['prompt_chars']
+ self.completion_tokens = usage_dict['completion_chars']
+ self.total_tokens = usage_dict['total_chars']
def __repr__(self):
return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
-
+
def __init__(self, response_dict: dict) -> None:
-
- self.response_dict = response_dict
- self.id = response_dict['id']
- self.object = response_dict['object']
- self.created = response_dict['created']
- self.model = response_dict['model']
- self.completion = self.Completion(response_dict['choices'])
- self.usage = self.Usage(response_dict['usage'])
+ self.response_dict = response_dict
+ self.id = response_dict['id']
+ self.object = response_dict['object']
+ self.created = response_dict['created']
+ self.model = response_dict['model']
+ self.completion = self.Completion(response_dict['choices'])
+ self.usage = self.Usage(response_dict['usage'])
def json(self) -> dict:
return self.response_dict
-
+
+
class Account:
session = Session()
session.headers = {
- "connection" : "keep-alive",
- "sec-ch-ua" : "\"Not_A Brand\";v=\"99\", \"Google Chrome\";v=\"109\", \"Chromium\";v=\"109\"",
- "accept" : "application/json, text/plain, */*",
- "content-type" : "application/json",
- "sec-ch-ua-mobile" : "?0",
- "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
+ "connection": "keep-alive",
+ "sec-ch-ua": "\"Not_A Brand\";v=\"99\", \"Google Chrome\";v=\"109\", \"Chromium\";v=\"109\"",
+ "accept": "application/json, text/plain, */*",
+ "content-type": "application/json",
+ "sec-ch-ua-mobile": "?0",
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
"sec-ch-ua-platform": "\"Windows\"",
- "sec-fetch-site" : "same-origin",
- "sec-fetch-mode" : "cors",
- "sec-fetch-dest" : "empty",
+ "sec-fetch-site": "same-origin",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-dest": "empty",
# "accept-encoding" : "gzip, deflate, br",
- "accept-language" : "en-GB,en-US;q=0.9,en;q=0.8",
- "cookie" : ""
+ "accept-language": "en-GB,en-US;q=0.9,en;q=0.8",
+ "cookie": ""
}
-
+
@staticmethod
def get_user():
password = f'0opsYouGoTme@1234'
- f_name = get_first_name()
- l_name = get_last_name()
- hosts = ['gmail.com', 'protonmail.com', 'proton.me', 'outlook.com']
-
+ f_name = get_first_name()
+ l_name = get_last_name()
+ hosts = ['gmail.com', 'protonmail.com', 'proton.me', 'outlook.com']
+
return {
- "email" : f"{f_name.lower()}.{l_name.lower()}@{choice(hosts)}",
- "password" : password,
- "confirm_password" : password,
- "full_name" : f'{f_name} {l_name}'
+ "email": f"{f_name.lower()}.{l_name.lower()}@{choice(hosts)}",
+ "password": password,
+ "confirm_password": password,
+ "full_name": f'{f_name} {l_name}'
}
@staticmethod
def create(logging: bool = False):
while True:
try:
- user = Account.get_user()
- start = time()
- response = Account.session.post("https://app.writesonic.com/api/session-login", json = user | {
- "utmParams" : "{}",
- "visitorId" : "0",
- "locale" : "en",
- "userAgent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
- "signInWith" : "password",
- "request_type" : "signup",
+ user = Account.get_user()
+ start = time()
+ response = Account.session.post("https://app.writesonic.com/api/session-login", json=user | {
+ "utmParams": "{}",
+ "visitorId": "0",
+ "locale": "en",
+ "userAgent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
+ "signInWith": "password",
+ "request_type": "signup",
})
-
+
if logging:
logger.info(f"\x1b[31mregister success\x1b[0m : '{response.text[:30]}...' ({int(time() - start)}s)")
logger.info(f"\x1b[31mid\x1b[0m : '{response.json()['id']}'")
logger.info(f"\x1b[31mtoken\x1b[0m : '{response.json()['token'][:30]}...'")
-
+
start = time()
- response = Account.session.post("https://api.writesonic.com/v1/business/set-business-active", headers={"authorization": "Bearer " + response.json()['token']})
+ response = Account.session.post("https://api.writesonic.com/v1/business/set-business-active",
+ headers={"authorization": "Bearer " + response.json()['token']})
key = response.json()["business"]["api_key"]
if logging: logger.info(f"\x1b[31mgot key\x1b[0m : '{key}' ({int(time() - start)}s)")
return Account.AccountResponse(user['email'], user['password'], key)
-
+
except Exception as e:
if logging: logger.info(f"\x1b[31merror\x1b[0m : '{e}'")
continue
-
+
class AccountResponse:
def __init__(self, email, password, key):
- self.email = email
+ self.email = email
self.password = password
- self.key = key
-
+ self.key = key
+
class Completion:
def create(
- api_key: str,
- prompt: str,
- enable_memory: bool = False,
- enable_google_results: bool = False,
- history_data: list = []) -> SonicResponse:
-
- response = post('https://api.writesonic.com/v2/business/content/chatsonic?engine=premium', headers = {"X-API-KEY": api_key},
- json = {
- "enable_memory" : enable_memory,
- "enable_google_results" : enable_google_results,
- "input_text" : prompt,
- "history_data" : history_data}).json()
+ api_key: str,
+ prompt: str,
+ enable_memory: bool = False,
+ enable_google_results: bool = False,
+ history_data: list = []) -> SonicResponse:
+ response = post('https://api.writesonic.com/v2/business/content/chatsonic?engine=premium',
+ headers={"X-API-KEY": api_key},
+ json={
+ "enable_memory": enable_memory,
+ "enable_google_results": enable_google_results,
+ "input_text": prompt,
+ "history_data": history_data}).json()
return SonicResponse({
- 'id' : f'cmpl-premium-{int(time())}',
- 'object' : 'text_completion',
- 'created': int(time()),
- 'model' : 'premium',
-
- 'choices': [{
- 'text' : response['message'],
- 'index' : 0,
- 'logprobs' : None,
- 'finish_reason' : 'stop'
- }],
-
- 'usage': {
- 'prompt_chars' : len(prompt),
- 'completion_chars' : len(response['message']),
- 'total_chars' : len(prompt) + len(response['message'])
- }
- }) \ No newline at end of file
+ 'id': f'cmpl-premium-{int(time())}',
+ 'object': 'text_completion',
+ 'created': int(time()),
+ 'model': 'premium',
+
+ 'choices': [{
+ 'text': response['message'],
+ 'index': 0,
+ 'logprobs': None,
+ 'finish_reason': 'stop'
+ }],
+
+ 'usage': {
+ 'prompt_chars': len(prompt),
+ 'completion_chars': len(response['message']),
+ 'total_chars': len(prompt) + len(response['message'])
+ }
+ })
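
For reference, an end-to-end usage sketch of the writesonic client after this reformat; it is illustrative only and not part of the commit. It assumes the package is importable as writesonic (in this tree it sits under unfinished/); Account.create registers a throwaway account and returns its email, password and API key.

    import writesonic

    # Register a throwaway account and grab its API key.
    account = writesonic.Account.create(logging=True)

    # Ask ChatSonic for a completion and read the text back out of the
    # OpenAI-style dict that SonicResponse.json() returns.
    response = writesonic.Completion.create(api_key=account.key, prompt='hello world')
    print(response.json()['choices'][0]['text'])
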
diff --git a/you/__init__.py b/you/__init__.py
index 397600bd..6925e79f 100644
--- a/you/__init__.py
+++ b/you/__init__.py
@@ -9,19 +9,19 @@ from tls_client import Session
class Completion:
@staticmethod
def create(
- prompt: str,
- page: int = 1,
- count: int = 10,
- safe_search: str = 'Moderate',
- on_shopping_page: bool = False,
- mkt: str = '',
- response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
- domain: str = 'youchat',
- query_trace_id: str = None,
- chat: list = None,
- include_links: bool = False,
- detailed: bool = False,
- debug: bool = False,
+ prompt: str,
+ page: int = 1,
+ count: int = 10,
+ safe_search: str = 'Moderate',
+ on_shopping_page: bool = False,
+ mkt: str = '',
+ response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
+ domain: str = 'youchat',
+ query_trace_id: str = None,
+ chat: list = None,
+ include_links: bool = False,
+ detailed: bool = False,
+ debug: bool = False,
) -> dict:
if chat is None:
chat = []
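
Finally, a short usage sketch of the you.com wrapper, illustrative only and not part of the commit. Completion.create is annotated as returning a dict; the keys of that dict are assembled further down the file and are not shown in this hunk, so the result is printed as-is.

    import you

    # All parameters except prompt have defaults, so a minimal call is enough.
    result = you.Completion.create(prompt='hello world')
    print(result)
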