From 5db58fd87f230fbe5bae599bb4b120ab42cad3be Mon Sep 17 00:00:00 2001 From: "t.me/xtekky" <98614666+xtekky@users.noreply.github.com> Date: Sat, 24 Jun 2023 03:47:00 +0200 Subject: gpt4free v2, first release --- g4f/.v1/unfinished/bard/README.md | 2 + g4f/.v1/unfinished/bard/__init__.py | 93 ++++++++++++++++++++++ g4f/.v1/unfinished/bard/typings.py | 54 +++++++++++++ g4f/.v1/unfinished/bing/README.md | 2 + g4f/.v1/unfinished/bing/__ini__.py | 108 +++++++++++++++++++++++++ g4f/.v1/unfinished/chatpdf/__init__.py | 82 +++++++++++++++++++ g4f/.v1/unfinished/gptbz/README.md | 4 + g4f/.v1/unfinished/gptbz/__init__.py | 46 +++++++++++ g4f/.v1/unfinished/openprompt/README.md | 5 ++ g4f/.v1/unfinished/openprompt/create.py | 64 +++++++++++++++ g4f/.v1/unfinished/openprompt/mail.py | 111 ++++++++++++++++++++++++++ g4f/.v1/unfinished/openprompt/main.py | 36 +++++++++ g4f/.v1/unfinished/openprompt/test.py | 6 ++ g4f/.v1/unfinished/t3nsor/README.md | 44 +++++++++++ g4f/.v1/unfinished/t3nsor/__init__.py | 136 ++++++++++++++++++++++++++++++++ 15 files changed, 793 insertions(+) create mode 100644 g4f/.v1/unfinished/bard/README.md create mode 100644 g4f/.v1/unfinished/bard/__init__.py create mode 100644 g4f/.v1/unfinished/bard/typings.py create mode 100644 g4f/.v1/unfinished/bing/README.md create mode 100644 g4f/.v1/unfinished/bing/__ini__.py create mode 100644 g4f/.v1/unfinished/chatpdf/__init__.py create mode 100644 g4f/.v1/unfinished/gptbz/README.md create mode 100644 g4f/.v1/unfinished/gptbz/__init__.py create mode 100644 g4f/.v1/unfinished/openprompt/README.md create mode 100644 g4f/.v1/unfinished/openprompt/create.py create mode 100644 g4f/.v1/unfinished/openprompt/mail.py create mode 100644 g4f/.v1/unfinished/openprompt/main.py create mode 100644 g4f/.v1/unfinished/openprompt/test.py create mode 100644 g4f/.v1/unfinished/t3nsor/README.md create mode 100644 g4f/.v1/unfinished/t3nsor/__init__.py (limited to 'g4f/.v1/unfinished') diff --git a/g4f/.v1/unfinished/bard/README.md 
from json import dumps, loads
from os import getenv
from random import randint
from re import search
from urllib.parse import urlencode

from bard.typings import BardResponse
from dotenv import load_dotenv
from requests import Session

load_dotenv()
token = getenv('1psid')  # value of the __Secure-1PSID cookie for bard.google.com
proxy = getenv('proxy')  # optional "host:port" forwarded to requests

# Prompt prefixes used to emulate a sampling temperature, since the private
# Bard endpoint does not expose a temperature parameter directly.
temperatures = {
    0: "Generate text strictly following known patterns, with no creativity.",
    0.1: "Produce text adhering closely to established patterns, allowing minimal creativity.",
    0.2: "Create text with modest deviations from familiar patterns, injecting a slight creative touch.",
    0.3: "Craft text with a mild level of creativity, deviating somewhat from common patterns.",
    0.4: "Formulate text balancing creativity and recognizable patterns for coherent results.",
    0.5: "Generate text with a moderate level of creativity, allowing for a mix of familiarity and novelty.",
    0.6: "Compose text with an increased emphasis on creativity, while partially maintaining familiar patterns.",
    0.7: "Produce text favoring creativity over typical patterns for more original results.",
    0.8: "Create text heavily focused on creativity, with limited concern for familiar patterns.",
    0.9: "Craft text with a strong emphasis on unique and inventive ideas, largely ignoring established patterns.",
    1: "Generate text with maximum creativity, disregarding any constraints of known patterns or structures."
}


class Completion:
    """Minimal client for Bard's private StreamGenerate endpoint."""

    @staticmethod
    def create(
            prompt: str = 'hello world',
            temperature: float = None,
            conversation_id: str = '',
            response_id: str = '',
            choice_id: str = '') -> BardResponse:
        """Send `prompt` to Bard and return the parsed response.

        :param prompt: user prompt text.
        :param temperature: one of the keys of `temperatures`, or None to
            send the prompt unmodified.
        :param conversation_id/response_id/choice_id: continuation ids from
            a previous BardResponse (empty strings start a new conversation).
        :raises KeyError: if `temperature` is not a key of `temperatures`.
        """
        # Bug fix: 0 is a legitimate temperature key, so test against None
        # explicitly instead of relying on truthiness.
        if temperature is not None:
            prompt = f'''settings: follow these settings for your response: [temperature: {temperature} - {temperatures[temperature]}] | prompt : {prompt}'''

        client = Session()
        client.proxies = {
            'http': f'http://{proxy}',
            'https': f'http://{proxy}'} if proxy else None

        client.headers = {
            'authority': 'bard.google.com',
            'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
            'origin': 'https://bard.google.com',
            'referer': 'https://bard.google.com/',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
            'x-same-domain': '1',
            'cookie': f'__Secure-1PSID={token}'
        }

        # The landing page embeds a per-session token (SNlM0e) that must be
        # echoed back as `at` in the POST below.
        snlm0e = search(r'SNlM0e\":\"(.*?)\"',
                        client.get('https://bard.google.com/').text).group(1)

        params = urlencode({
            'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
            '_reqid': randint(1111, 9999),
            'rt': 'c',
        })

        response = client.post(
            f'https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate?{params}',
            data={
                'at': snlm0e,
                'f.req': dumps([None, dumps([
                    [prompt],
                    None,
                    [conversation_id, response_id, choice_id],
                ])])
            }
        )

        # The 4th line of the streamed body carries the chat payload.
        chat_data = loads(response.content.splitlines()[3])[0][2]
        if not chat_data:
            print('error, retrying')
            # Bug fix: propagate the retried result; the original discarded
            # it and fell through to return None.
            return Completion.create(prompt, temperature,
                                     conversation_id, response_id, choice_id)

        json_chat_data = loads(chat_data)
        results = {
            'content': json_chat_data[0][0],
            'conversation_id': json_chat_data[1][0],
            'response_id': json_chat_data[1][1],
            'factualityQueries': json_chat_data[3],
            'textQuery': json_chat_data[2][0] if json_chat_data[2] is not None else '',
            'choices': [{'id': i[0], 'content': i[1]} for i in json_chat_data[4]],
        }

        return BardResponse(results)
from typing import Dict, List, Union


class BardResponse:
    """Parsed, attribute-style view over the raw Bard response dictionary."""

    class BardChoice:
        """One answer draft offered by Bard."""

        def __init__(self, choice_dict: Dict[str, str]) -> None:
            """Keep the choice id and the first entry of its content list."""
            self.id = choice_dict.get('id')
            self.content = choice_dict.get('content')[0]

        def __repr__(self) -> str:
            """Debug-friendly representation of this choice."""
            return f"BardChoice(id={self.id}, content={self.content})"

    def __init__(self, json_dict: Dict[str, Union[str, List]]) -> None:
        """Extract the interesting fields from the raw response dict.

        :param json_dict: dictionary built from the Bard JSON payload.
        """
        self.json = json_dict

        self.content = json_dict.get('content')
        self.conversation_id = json_dict.get('conversation_id')
        self.response_id = json_dict.get('response_id')
        self.factuality_queries = json_dict.get('factualityQueries', [])
        self.text_query = json_dict.get('textQuery', [])
        self.choices = [self.BardChoice(raw)
                        for raw in json_dict.get('choices', [])]

    def __repr__(self) -> str:
        """Debug-friendly representation of the whole response."""
        return f"BardResponse(conversation_id={self.conversation_id}, response_id={self.response_id}, content={self.content})"

    def filter_choices(self, keyword: str) -> List['BardChoice']:
        """Return the choices whose content contains `keyword`, ignoring case.

        :param keyword: substring to look for in each choice's content.
        :return: matching BardChoice objects, in their original order.
        """
        needle = keyword.lower()
        matches = []
        for candidate in self.choices:
            if needle in candidate.content.lower():
                matches.append(candidate)
        return matches
# Reverse-engineered client for the Bing AI (Sydney) chat websocket.
import asyncio
from json import dumps, loads
from ssl import create_default_context


def format(msg: dict) -> str:
    """Serialise `msg` as JSON and append the 0x1e record separator used by
    the Sydney websocket protocol.  (Name kept for compatibility even though
    it shadows the builtin.)"""
    return dumps(msg) + '\x1e'


def get_token():
    """Read the `_U` auth cookie for bing.com from the local Edge browser.

    :raises KeyError: if no `_U` cookie is present.
    """
    from browser_cookie3 import edge  # lazy: heavy, optional dependency
    cookies = {c.name: c.value for c in edge(domain_name='bing.com')}
    return cookies['_U']


def _ssl_context():
    """Build an SSL context trusting the certifi CA bundle (built lazily so
    importing this module has no side effects)."""
    from certifi import where  # lazy: optional dependency
    ctx = create_default_context()
    ctx.load_verify_locations(where())
    return ctx


class AsyncCompletion:
    async def create(
            prompt: str = 'hello world',
            optionSets: list = None,
            token: str = None):
        """Stream completion text chunks for `prompt` from Bing AI.

        Bug fixes vs. the original: `optionSets` no longer uses a shared
        mutable default, and the auth token is fetched lazily — the old
        default argument `token=get_token()` hit the browser cookie store
        at import time.

        :param prompt: user prompt text.
        :param optionSets: Sydney option flags; defaults to the standard set.
        :param token: `_U` cookie value; read from the local browser if None.
        """
        import websockets
        from requests import get

        if optionSets is None:
            optionSets = [
                'deepleo',
                'enable_debug_commands',
                'disable_emoji_spoken_text',
                'enablemm',
                'h3relaxedimg'
            ]
        if token is None:
            token = get_token()

        # Ask the edge service for a fresh conversation.
        create = get('https://edgeservices.bing.com/edgesvc/turing/conversation/create',
                     headers={
                         'host': 'edgeservices.bing.com',
                         'authority': 'edgeservices.bing.com',
                         'cookie': f'_U={token}',
                         'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
                     }
                     )

        conversationId = create.json()['conversationId']
        clientId = create.json()['clientId']
        conversationSignature = create.json()['conversationSignature']

        wss = await websockets.connect('wss://sydney.bing.com/sydney/ChatHub', max_size=None, ssl=_ssl_context(),
                                       extra_headers={
                                           # Add necessary headers
                                       }
                                       )

        # Handshake: JSON sub-protocol, version 1.
        await wss.send(format({'protocol': 'json', 'version': 1}))
        await wss.recv()

        # TODO(review): the request payload is still unfinished upstream —
        # conversationId / clientId / conversationSignature must be wired in.
        struct = {
            # Add necessary message structure
        }

        await wss.send(format(struct))

        # Frames arrive 0x1e-delimited; type 1 carries partial text,
        # type 2 signals completion.
        base_string = ''
        final = False
        while not final:
            objects = str(await wss.recv()).split('\x1e')
            for obj in objects:
                if obj is None or obj == '':
                    continue

                response = loads(obj)
                if response.get('type') == 1 and response['arguments'][0].get('messages', ):
                    response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get(
                        'text')

                    # Each frame repeats the full text so far; yield the delta.
                    yield (response_text.replace(base_string, ''))
                    base_string = response_text

                elif response.get('type') == 2:
                    final = True

        await wss.close()


async def run():
    """Demo: stream one completion and print it as it arrives."""
    async for value in AsyncCompletion.create(
            prompt='summarize cinderella with each word beginning with a consecutive letter of the alphabet, a-z',
            optionSets=[
                "galileo",
            ]
    ):
        print(value, end='', flush=True)


if __name__ == '__main__':
    # Bug fix: guard the demo so importing this module has no side effects.
    asyncio.run(run())
import json

from queue import Queue, Empty
from threading import Thread
from json import loads
from re import findall


class Completion:
    """Unfinished chatpdf.com client: `create` spawns a thread running
    `request` and yields streamed message deltas pulled from a shared queue."""

    # Shared state between the request thread and the consumer in `create`.
    # Bug fix: these were referenced throughout the class but never defined,
    # so every call failed with AttributeError.
    stream_completed = False
    message_queue = Queue()

    # TODO(review): placeholder stream framing — the real delta regex and
    # JSON wrappers must be confirmed against the live endpoint once
    # streaming is actually implemented.
    regex = r'"delta":"(.*?)"'
    part1 = '{"delta":"'
    part2 = '"}'

    @staticmethod
    def request(prompt: str):
        '''TODO: some sort of authentication + upload PDF from URL or local file
        Then you should get the atoken and chat ID
        '''
        import requests  # lazy: network dependency, not needed to import the module

        token = "your_token_here"
        chat_id = "your_chat_id_here"

        url = "https://chat-pr4yueoqha-ue.a.run.app/"

        payload = json.dumps({
            "v": 2,
            "chatSession": {
                "type": "join",
                "chatId": chat_id
            },
            "history": [
                {
                    "id": "VNsSyJIq_0",
                    "author": "p_if2GPSfyN8hjDoA7unYe",
                    "msg": "<start>",
                    "time": 1682672009270
                },
                {
                    "id": "Zk8DRUtx_6",
                    "author": "uplaceholder",
                    "msg": prompt,
                    "time": 1682672181339
                }
            ]
        })

        # TODO: fix headers, use random user-agent, streaming response, etc
        headers = {
            'authority': 'chat-pr4yueoqha-ue.a.run.app',
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
            'atoken': token,
            'content-type': 'application/json',
            'origin': 'https://www.chatpdf.com',
            'referer': 'https://www.chatpdf.com/',
            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'cross-site',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
        }

        response = requests.request(
            "POST", url, headers=headers, data=payload).text
        Completion.stream_completed = True
        return {'response': response}

    @staticmethod
    def create(prompt: str):
        """Yield streamed completion deltas for `prompt`."""
        # Bug fix: reset the flag so `create` can be called more than once.
        Completion.stream_completed = False
        Thread(target=Completion.request, args=[prompt]).start()

        # Drain the queue until the request thread reports completion AND
        # the queue is empty (deltas may still arrive after the flag flips).
        while not Completion.stream_completed or not Completion.message_queue.empty():
            try:
                message = Completion.message_queue.get(timeout=0.01)
                for message in findall(Completion.regex, message):
                    yield loads(Completion.part1 + message + Completion.part2)['delta']

            except Empty:
                pass

    @staticmethod
    def handle_stream_response(response):
        """Decode a raw streamed chunk and hand it to the consumer queue."""
        Completion.message_queue.put(response.decode())
from json import dumps, loads

import websockets


# Asynchronous probe of the chat.gpt.bz websocket endpoint.
async def test():
    """Open the conversation websocket, send a fixed prompt, and print
    every frame until the server signals end-of-feed."""
    async with websockets.connect('wss://chatgpt.func.icu/conversation+ws') as wss:

        request_body = {
            'content_type': 'text',
            'engine': 'chat-gpt',
            'parts': ['hello world'],
            'options': {}
        }

        # Compact JSON, exactly as the web client sends it.
        await wss.send(dumps(obj=request_body, separators=(',', ':')))

        done = None
        while not done:
            try:
                frame = loads(await wss.recv())

                # Dump the whole frame for inspection.
                print(frame)

                # `eof` flags the last frame of the conversation.
                done = frame.get('eof')
                if not done:
                    print(frame['content']['parts'][0])

            except websockets.ConnectionClosed:
                # Server hung up — stop listening.
                break
# Scratch signup flow for openprompt.co using a developermail.com throwaway inbox.
from json import dumps
# from mail import MailClient
from re import findall

from requests import post, get

# Grab a disposable mailbox and pull its address out of the page markup.
landing = get('https://developermail.com/mail/')
print(landing.cookies.get('mailboxId'))
email = findall(r'mailto:(.*)">', landing.text)[0]

headers = {
    'apikey': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InVzanNtdWZ1emRjcnJjZXVobnlqIiwicm9sZSI6ImFub24iLCJpYXQiOjE2NzgyODYyMzYsImV4cCI6MTk5Mzg2MjIzNn0.2MQ9Lkh-gPqQwV08inIgqozfbYm5jdYWtf-rn-wfQ7U',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
    'x-client-info': '@supabase/auth-helpers-nextjs@0.5.6',
}

signup_payload = {
    'email': email,
    'password': 'T4xyt4Yn6WWQ4NC',
    'data': {},
    'gotrue_meta_security': {},
}

signup_response = post('https://usjsmufuzdcrrceuhnyj.supabase.co/auth/v1/signup',
                       headers=headers, json=signup_payload)
print(signup_response.json())

# TODO: poll the throwaway mailbox for the confirmation link instead of
# stopping here (the MailClient import above is the intended tool).
# email_link = None
# while not email_link:
#     sleep(1)
#     mails = mailbox.getmails()
#     print(mails)

quit()

# --- everything below is unreachable until the mail polling above works ---

url = input("Enter the url: ")
redirect_response = get(url, allow_redirects=False)

# The confirmation URL redirects to a fragment of the form:
# #access_token=...&expires_in=...&refresh_token=...&token_type=bearer&type=signup
redirect = redirect_response.headers.get('location')
access_token = redirect.split('&')[0].split('=')[1]
refresh_token = redirect.split('&')[2].split('=')[1]

# The site stores [access, refresh, null, null, null] in one cookie.
supabase_auth_token = dumps([access_token, refresh_token, None, None, None],
                            separators=(',', ':'))
print(supabase_auth_token)

cookies = {
    'supabase-auth-token': supabase_auth_token
}

chat_payload = {
    'messages': [
        {
            'role': 'user',
            'content': 'how do I reverse a string in python?'
        }
    ]
}

chat_response = post('https://openprompt.co/api/chat2',
                     cookies=cookies, json=chat_payload, stream=True)
for chunk in chat_response.iter_content(chunk_size=1024):
    print(chunk)
import email
from json import dumps

import requests


class MailClient:
    """Thin wrapper over the developermail.com temporary-mailbox REST API.

    Typical flow: create() -> getmailids() -> getmails()/getmail() -> destroy().
    """

    def __init__(self):
        self.username = None   # mailbox name, set by create()
        self.token = None      # X-MailboxToken for authenticated calls
        self.raw = None
        self.mailids = None    # cached result of getmailids()
        self.mails = None
        self.mail = None

    def create(self, force=False):
        """Create a mailbox, unless one already exists for this client.

        :param force: accepted for API compatibility but currently unused.
        :return: dict with the mailbox 'username' and 'token'.
        """
        headers = {
            'accept': 'application/json',
        }

        if not self.username:
            self.response = requests.put(
                'https://www.developermail.com/api/v1/mailbox', headers=headers)
            self.response = self.response.json()
            self.username = self.response['result']['name']
            self.token = self.response['result']['token']

        return {'username': self.username, 'token': self.token}

    def destroy(self):
        """Delete the mailbox and forget the local credentials."""
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }
        self.response = requests.delete(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}', headers=headers)
        self.response = self.response.json()
        self.username = None
        self.token = None
        return self.response

    def newtoken(self):
        """Rotate the mailbox token (requires an existing mailbox)."""
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }
        self.response = requests.put(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}/token', headers=headers)
        self.response = self.response.json()
        self.token = self.response['result']['token']
        return {'username': self.username, 'token': self.token}

    def getmailids(self):
        """Fetch and cache the list of message ids in the mailbox."""
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }

        self.response = requests.get(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}', headers=headers)
        self.response = self.response.json()
        self.mailids = self.response['result']
        return self.mailids

    def getmails(self, mailids: list = None):
        """Fetch full messages for `mailids` (defaults to the cached id list)."""
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
            'Content-Type': 'application/json',
        }

        if mailids is None:
            mailids = self.mailids

        # Bug fix: the endpoint expects a JSON array; str(list) produced
        # single-quoted, invalid JSON.
        data = dumps(mailids)

        self.response = requests.post(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}/messages', headers=headers, data=data)
        self.response = self.response.json()
        self.mails = self.response['result']
        return self.mails

    def getmail(self, mailid: str, raw=False):
        """Fetch one message; parsed via `email` unless `raw` is True."""
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }
        self.response = requests.get(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}/messages/{mailid}', headers=headers)
        self.response = self.response.json()
        self.mail = self.response['result']
        if raw is False:
            self.mail = email.message_from_string(self.mail)
        return self.mail

    def delmail(self, mailid: str):
        """Delete one message from the mailbox."""
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }
        self.response = requests.delete(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}/messages/{mailid}', headers=headers)
        self.response = self.response.json()
        return self.response


if __name__ == '__main__':
    # Demo, guarded so importing this module has no side effects.
    client = MailClient()
    # Bug fix: a mailbox must be created before token/message calls —
    # the original called newtoken()/getmails() on a non-existent mailbox.
    client.create()
    client.getmailids()
    print(client.getmails())
import requests

# Hard-coded session cookie captured from a logged-in browser session.
# TODO(review): this token expires — regenerate it via the signup flow in
# create.py before running.
cookies = {
    'supabase-auth-token': '["eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk1NzQyLCJzdWIiOiJlOGExOTdiNS03YTAxLTQ3MmEtODQ5My1mNGUzNTNjMzIwNWUiLCJlbWFpbCI6InFlY3RncHZhamlibGNjQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTA5NDJ9XSwic2Vzc2lvbl9pZCI6IjIwNTg5MmE5LWU5YTAtNDk2Yi1hN2FjLWEyMWVkMTkwZDA4NCJ9.o7UgHpiJMfa6W-UKCSCnAncIfeOeiHz-51sBmokg0MA","RtPKeb7KMMC9Dn2fZOfiHA",null,null,null]',
}

# Browser-mimicking request headers for openprompt.co.
headers = {
    'authority': 'openprompt.co',
    'accept': '*/*',
    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
    'content-type': 'application/json',
    'origin': 'https://openprompt.co',
    'referer': 'https://openprompt.co/ChatGPT',
    'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}

json_data = {
    'messages': [
        {
            'role': 'user',
            'content': 'hello world',
        },
    ],
}

# Robustness fix: the original request had no timeout and could hang forever
# if the service stalled.
response = requests.post('https://openprompt.co/api/chat2', cookies=cookies, headers=headers,
                         json=json_data, stream=True, timeout=30)
for chunk in response.iter_content(chunk_size=1024):
    print(chunk)
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8", + "_Zp8uXIA2InTDKYgo8TCqA", None, None, None] diff --git a/g4f/.v1/unfinished/t3nsor/README.md b/g4f/.v1/unfinished/t3nsor/README.md new file mode 100644 index 00000000..2790bf6e --- /dev/null +++ b/g4f/.v1/unfinished/t3nsor/README.md @@ -0,0 +1,44 @@ +### note: currently patched + +### Example: `t3nsor` (use like openai pypi package) + +```python +# Import t3nsor +import t3nsor + +# t3nsor.Completion.create +# t3nsor.StreamCompletion.create + +[...] + +``` + +#### Example Chatbot +```python +messages = [] + +while True: + user = input('you: ') + + t3nsor_cmpl = t3nsor.Completion.create( + prompt = user, + messages = messages + ) + + print('gpt:', t3nsor_cmpl.completion.choices[0].text) + + messages.extend([ + {'role': 'user', 'content': user }, + {'role': 'assistant', 'content': t3nsor_cmpl.completion.choices[0].text} + ]) +``` + +#### Streaming Response: + +```python +for response in t3nsor.StreamCompletion.create( + prompt = 'write python code to reverse a string', + messages = []): + + print(response.completion.choices[0].text) +``` diff --git a/g4f/.v1/unfinished/t3nsor/__init__.py b/g4f/.v1/unfinished/t3nsor/__init__.py new file mode 100644 index 00000000..9b588e98 --- /dev/null +++ b/g4f/.v1/unfinished/t3nsor/__init__.py @@ -0,0 +1,136 @@ +from time import time + +from requests import post + +headers = { + 'authority': 'www.t3nsor.tech', + 'accept': '*/*', + 'accept-language': 
from time import time

# Browser-mimicking headers for www.t3nsor.tech.
headers = {
    'authority': 'www.t3nsor.tech',
    'accept': '*/*',
    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
    'cache-control': 'no-cache',
    'content-type': 'application/json',
    'origin': 'https://www.t3nsor.tech',
    'pragma': 'no-cache',
    'referer': 'https://www.t3nsor.tech/',
    'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}


class T3nsorResponse:
    """OpenAI-completion-shaped wrapper around a raw t3nsor reply dict."""

    class Completion:
        class Choices:
            def __init__(self, choice: dict) -> None:
                self.text = choice['text']
                self.content = self.text.encode()
                self.index = choice['index']
                self.logprobs = choice['logprobs']
                self.finish_reason = choice['finish_reason']

            def __repr__(self) -> str:
                return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''

        def __init__(self, choices: dict) -> None:
            self.choices = [self.Choices(choice) for choice in choices]

    class Usage:
        # t3nsor reports character counts; they are exposed under the
        # OpenAI-style *_tokens attribute names.
        def __init__(self, usage_dict: dict) -> None:
            self.prompt_tokens = usage_dict['prompt_chars']
            self.completion_tokens = usage_dict['completion_chars']
            self.total_tokens = usage_dict['total_chars']

        def __repr__(self):
            return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''

    def __init__(self, response_dict: dict) -> None:
        self.response_dict = response_dict
        self.id = response_dict['id']
        self.object = response_dict['object']
        self.created = response_dict['created']
        self.model = response_dict['model']
        self.completion = self.Completion(response_dict['choices'])
        self.usage = self.Usage(response_dict['usage'])

    def json(self) -> dict:
        """Return the raw response dictionary."""
        return self.response_dict


class Completion:
    model = {
        'model': {
            'id': 'gpt-3.5-turbo',
            'name': 'Default (GPT-3.5)'
        }
    }

    @staticmethod
    def create(
            prompt: str = 'hello world',
            messages: list = None) -> T3nsorResponse:
        """POST `prompt` plus chat history to t3nsor and wrap the reply.

        Bug fix: `messages` no longer uses a shared mutable default.
        """
        from requests import post  # lazy: network dependency

        response = post('https://www.t3nsor.tech/api/chat', headers=headers, json=Completion.model | {
            'messages': messages if messages is not None else [],
            'key': '',
            'prompt': prompt
        })

        return T3nsorResponse({
            'id': f'cmpl-1337-{int(time())}',
            'object': 'text_completion',
            'created': int(time()),
            'model': Completion.model,
            'choices': [{
                'text': response.text,
                'index': 0,
                'logprobs': None,
                'finish_reason': 'stop'
            }],
            'usage': {
                'prompt_chars': len(prompt),
                'completion_chars': len(response.text),
                'total_chars': len(prompt) + len(response.text)
            }
        })


class StreamCompletion:
    model = {
        'model': {
            'id': 'gpt-3.5-turbo',
            'name': 'Default (GPT-3.5)'
        }
    }

    @staticmethod
    def create(
            prompt: str = 'hello world',
            messages: list = None) -> T3nsorResponse:
        """Yield one T3nsorResponse per streamed chunk from t3nsor.

        Bug fixes: `messages` no longer uses a shared mutable default, and
        the payload references StreamCompletion's own `model` (the original
        reached into Completion.model; the values are identical).
        """
        from requests import post  # lazy: network dependency

        print('t3nsor api is down, this may not work, refer to another module')

        response = post('https://www.t3nsor.tech/api/chat', headers=headers, stream=True, json=StreamCompletion.model | {
            'messages': messages if messages is not None else [],
            'key': '',
            'prompt': prompt
        })

        for chunk in response.iter_content(chunk_size=2046):
            yield T3nsorResponse({
                'id': f'cmpl-1337-{int(time())}',
                'object': 'text_completion',
                'created': int(time()),
                'model': StreamCompletion.model,

                'choices': [{
                    'text': chunk.decode(),
                    'index': 0,
                    'logprobs': None,
                    'finish_reason': 'stop'
                }],

                'usage': {
                    'prompt_chars': len(prompt),
                    'completion_chars': len(chunk.decode()),
                    'total_chars': len(prompt) + len(chunk.decode())
                }
            })