author    | t.me/xtekky <98614666+xtekky@users.noreply.github.com> | 2023-04-29 00:54:35 +0200
committer | t.me/xtekky <98614666+xtekky@users.noreply.github.com> | 2023-04-29 00:54:35 +0200
commit    | dc912e0fc985c0d8b17bb29a27bb6ee62dfb7bc8 (patch)
tree      | 28161f968e3dc2f824bcc3b4884439e34ae373c7 /unfinished/vercelai
parent    | Merge pull request #241 from ethanx40/main (diff)
Diffstat (limited to 'unfinished/vercelai')
-rw-r--r-- | unfinished/vercelai/__init__.py      | 41
-rw-r--r-- | unfinished/vercelai/test.js          | 33
-rw-r--r-- | unfinished/vercelai/test.py          | 67
-rw-r--r-- | unfinished/vercelai/token.py         |  0
-rw-r--r-- | unfinished/vercelai/v2.py            | 27
-rw-r--r-- | unfinished/vercelai/vercelai_test.py |  5
6 files changed, 146 insertions, 27 deletions
diff --git a/unfinished/vercelai/__init__.py b/unfinished/vercelai/__init__.py
new file mode 100644
index 00000000..1dcb5b39
--- /dev/null
+++ b/unfinished/vercelai/__init__.py
@@ -0,0 +1,41 @@
+import requests
+
+class Completion:
+    def create(prompt: str,
+               model: str = 'openai:gpt-3.5-turbo',
+               temperature: float = 0.7,
+               max_tokens: int = 200,
+               top_p: float = 1,
+               top_k: int = 1,
+               frequency_penalty: float = 1,
+               presence_penalty: float = 1,
+               stopSequences: list = []):
+
+        token = requests.get('https://play.vercel.ai/openai.jpeg', headers={
+            'authority': 'play.vercel.ai',
+            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'referer': 'https://play.vercel.ai/',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'}).text.replace('=', '')
+
+        print(token)
+
+        headers = {
+            'authority': 'play.vercel.ai',
+            'custom-encoding': token,
+            'origin': 'https://play.vercel.ai',
+            'referer': 'https://play.vercel.ai/',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
+        }
+
+        for chunk in requests.post('https://play.vercel.ai/api/generate', headers=headers, stream=True, json={
+                'prompt': prompt,
+                'model': model,
+                'temperature': temperature,
+                'maxTokens': max_tokens,
+                'topK': top_k,
+                'topP': top_p,
+                'frequencyPenalty': frequency_penalty,
+                'presencePenalty': presence_penalty,
+                'stopSequences': stopSequences}).iter_lines():
+
+            yield (chunk)
\ No newline at end of file
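The new __init__.py streams the raw response lines from /api/generate and yields them as bytes. A minimal usage sketch (not part of this commit; it assumes the unfinished/vercelai directory is importable as `vercelai` and that the playground endpoint still answers unauthenticated requests):

# Hedged usage sketch (not in the commit): consuming the generator added in
# __init__.py. Assumes `import vercelai` resolves to this package and that
# play.vercel.ai still serves this endpoint.
import vercelai

for chunk in vercelai.Completion.create('write one sentence about diffs'):
    # Each chunk is a raw bytes line from the HTTP stream; decode before printing.
    print(chunk.decode('utf-8', errors='replace'), end='', flush=True)
print()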
diff --git a/unfinished/vercelai/test.js b/unfinished/vercelai/test.js
new file mode 100644
index 00000000..0f822cfd
--- /dev/null
+++ b/unfinished/vercelai/test.js
@@ -0,0 +1,33 @@
+(async () => {
+
+    let response = await fetch("https://play.vercel.ai/openai.jpeg", {
+        "headers": {
+            "accept": "*/*",
+            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "sec-ch-ua": "\"Chromium\";v=\"112\", \"Google Chrome\";v=\"112\", \"Not:A-Brand\";v=\"99\"",
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": "\"macOS\"",
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin"
+        },
+        "referrer": "https://play.vercel.ai/",
+        "referrerPolicy": "strict-origin-when-cross-origin",
+        "body": null,
+        "method": "GET",
+        "mode": "cors",
+        "credentials": "omit"
+    });
+
+
+    let data = JSON.parse(atob(await response.text()))
+    let ret = eval("(".concat(data.c, ")(data.a)"));
+
+    botPreventionToken = btoa(JSON.stringify({
+        r: ret,
+        t: data.t
+    }))
+
+    console.log(botPreventionToken);
+
+})()
\ No newline at end of file
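test.js solves the bot-prevention challenge in a JavaScript runtime: the base64 body of openai.jpeg decodes to an object with fields c, a and t, where c is a function source to be evaluated with argument a, and the result is re-encoded as btoa(JSON.stringify({ r, t })) for the custom-encoding header. A hedged sketch of the same flow driven from Python, delegating the evaluation to a local Node.js binary (the `node` dependency and the assumption that the endpoint still returns this shape are mine, not the commit's):

# Hedged sketch (not in the commit): solve the openai.jpeg challenge from
# Python by letting Node.js evaluate the JavaScript function it contains.
# Assumes a `node` binary on PATH and a base64 JSON body with fields c, a, t.
import json
import subprocess
from base64 import b64decode, b64encode

import requests

raw = requests.get('https://play.vercel.ai/openai.jpeg',
                   headers={'referer': 'https://play.vercel.ai/'}).text
challenge = json.loads(b64decode(raw))

# Evaluate (challenge.c)(challenge.a) in Node and print the result as JSON.
script = ('const d = %s; '
          'console.log(JSON.stringify(eval("(" + d.c + ")")(d.a)));'
          % json.dumps(challenge))
solved = json.loads(subprocess.run(['node', '-e', script], capture_output=True,
                                   text=True, check=True).stdout)

# Same re-encoding as test.js: btoa(JSON.stringify({ r: ret, t: data.t })).
custom_encoding = b64encode(json.dumps({'r': solved, 't': challenge['t']},
                                       separators=(',', ':')).encode()).decode()
print(custom_encoding)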
diff --git a/unfinished/vercelai/test.py b/unfinished/vercelai/test.py
new file mode 100644
index 00000000..318e71c3
--- /dev/null
+++ b/unfinished/vercelai/test.py
@@ -0,0 +1,67 @@
+import requests
+from base64 import b64decode, b64encode
+from json import loads
+from json import dumps
+
+headers = {
+    'Accept': '*/*',
+    'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8',
+    'Connection': 'keep-alive',
+    'Referer': 'https://play.vercel.ai/',
+    'Sec-Fetch-Dest': 'empty',
+    'Sec-Fetch-Mode': 'cors',
+    'Sec-Fetch-Site': 'same-origin',
+    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
+    'sec-ch-ua': '"Chromium";v="110", "Google Chrome";v="110", "Not:A-Brand";v="99"',
+    'sec-ch-ua-mobile': '?0',
+    'sec-ch-ua-platform': '"macOS"',
+}
+
+response = requests.get('https://play.vercel.ai/openai.jpeg', headers=headers)
+
+token_data = loads(b64decode(response.text))
+print(token_data)
+
+raw_token = {
+    'a': token_data['a'] * .1 * .2,
+    't': token_data['t']
+}
+
+print(raw_token)
+
+new_token = b64encode(dumps(raw_token, separators=(',', ':')).encode()).decode()
+print(new_token)
+
+import requests
+
+headers = {
+    'authority': 'play.vercel.ai',
+    'accept': '*/*',
+    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+    'content-type': 'application/json',
+    'custom-encoding': new_token,
+    'origin': 'https://play.vercel.ai',
+    'referer': 'https://play.vercel.ai/',
+    'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+    'sec-ch-ua-mobile': '?0',
+    'sec-ch-ua-platform': '"macOS"',
+    'sec-fetch-dest': 'empty',
+    'sec-fetch-mode': 'cors',
+    'sec-fetch-site': 'same-origin',
+    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
+}
+
+json_data = {
+    'prompt': 'hello\n',
+    'model': 'openai:gpt-3.5-turbo',
+    'temperature': 0.7,
+    'maxTokens': 200,
+    'topK': 1,
+    'topP': 1,
+    'frequencyPenalty': 1,
+    'presencePenalty': 1,
+    'stopSequences': [],
+}
+
+response = requests.post('https://play.vercel.ai/api/generate', headers=headers, json=json_data)
+print(response.text)
\ No newline at end of file
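test.py runs the two requests as a flat script, with the `a * .1 * .2` transform standing in for the real challenge solution. A hedged sketch of the same calls factored into reusable functions (the function names, the shared Session and the timeouts are additions of mine; the transform remains the commit's placeholder and may not produce a token the server accepts):

# Hedged refactor sketch (not in the commit): the two requests from test.py,
# factored into functions. Names, Session reuse and timeouts are mine; the
# `a * .1 * .2` transform is the commit's own guess.
from base64 import b64decode, b64encode
from json import dumps, loads

import requests

UA = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 '
      '(KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36')


def get_token(session: requests.Session) -> str:
    # Fetch the base64-encoded challenge and re-encode it the way test.py does.
    data = loads(b64decode(session.get(
        'https://play.vercel.ai/openai.jpeg',
        headers={'referer': 'https://play.vercel.ai/', 'user-agent': UA},
        timeout=30).text))
    return b64encode(dumps({'a': data['a'] * .1 * .2, 't': data['t']},
                           separators=(',', ':')).encode()).decode()


def generate(prompt: str) -> str:
    session = requests.Session()
    headers = {
        'content-type': 'application/json',
        'custom-encoding': get_token(session),
        'origin': 'https://play.vercel.ai',
        'referer': 'https://play.vercel.ai/',
        'user-agent': UA,
    }
    payload = {'prompt': prompt, 'model': 'openai:gpt-3.5-turbo',
               'temperature': 0.7, 'maxTokens': 200, 'topK': 1, 'topP': 1,
               'frequencyPenalty': 1, 'presencePenalty': 1, 'stopSequences': []}
    return session.post('https://play.vercel.ai/api/generate',
                        headers=headers, json=payload, timeout=30).text


if __name__ == '__main__':
    print(generate('hello\n'))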
diff --git a/unfinished/vercelai/token.py b/unfinished/vercelai/token.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/unfinished/vercelai/token.py
diff --git a/unfinished/vercelai/v2.py b/unfinished/vercelai/v2.py
deleted file mode 100644
index 176ee342..00000000
--- a/unfinished/vercelai/v2.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import requests
-
-token = requests.get('https://play.vercel.ai/openai.jpeg', headers={
-    'authority': 'play.vercel.ai',
-    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-    'referer': 'https://play.vercel.ai/',
-    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'}).text + '.'
-
-headers = {
-    'authority': 'play.vercel.ai',
-    'custom-encoding': token,
-    'origin': 'https://play.vercel.ai',
-    'referer': 'https://play.vercel.ai/',
-    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
-}
-
-for chunk in requests.post('https://play.vercel.ai/api/generate', headers=headers, stream=True, json={
-    'prompt': 'hi',
-    'model': 'openai:gpt-3.5-turbo',
-    'temperature': 0.7,
-    'maxTokens': 200,
-    'topK': 1,
-    'topP': 1,
-    'frequencyPenalty': 1,
-    'presencePenalty': 1,
-    'stopSequences': []}).iter_lines():
-    print(chunk)
diff --git a/unfinished/vercelai/vercelai_test.py b/unfinished/vercelai/vercelai_test.py
new file mode 100644
index 00000000..24cbe0bc
--- /dev/null
+++ b/unfinished/vercelai/vercelai_test.py
@@ -0,0 +1,5 @@
+import vercelai
+
+for token in vercelai.Completion.create('summarize the gnu gpl 1.0'):
+    print(token, end='', flush=True)
+