path: root/gpt4free
author     t.me/xtekky <98614666+xtekky@users.noreply.github.com>  2023-04-30 23:09:25 +0200
committer  t.me/xtekky <98614666+xtekky@users.noreply.github.com>  2023-04-30 23:09:25 +0200
commit     b4aadbbac588edff8902fa802024779f51a5ae39 (patch)
tree       4985f76ccfe8ee4b2200c591d629f5ac8be3d96c /gpt4free
parent     forefront takedown (diff)
download   gpt4free-b4aadbbac588edff8902fa802024779f51a5ae39.tar
           gpt4free-b4aadbbac588edff8902fa802024779f51a5ae39.tar.gz
           gpt4free-b4aadbbac588edff8902fa802024779f51a5ae39.tar.bz2
           gpt4free-b4aadbbac588edff8902fa802024779f51a5ae39.tar.lz
           gpt4free-b4aadbbac588edff8902fa802024779f51a5ae39.tar.xz
           gpt4free-b4aadbbac588edff8902fa802024779f51a5ae39.tar.zst
           gpt4free-b4aadbbac588edff8902fa802024779f51a5ae39.zip
Diffstat (limited to 'gpt4free')
-rw-r--r--   gpt4free/forefront/README.md    |  13
-rw-r--r--   gpt4free/forefront/__init__.py  | 194
-rw-r--r--   gpt4free/forefront/typing.py    |  25
3 files changed, 232 insertions, 0 deletions
diff --git a/gpt4free/forefront/README.md b/gpt4free/forefront/README.md
new file mode 100644
index 00000000..35ba9897
--- /dev/null
+++ b/gpt4free/forefront/README.md
@@ -0,0 +1,13 @@
+### Example: `forefront` (use it like the openai PyPI package) <a name="example-forefront"></a>
+
+```python
+from gpt4free import forefront
+# create an account
+token = forefront.Account.create(logging=False)
+print(token)
+# get a response
+for response in forefront.StreamingCompletion.create(token=token,
+                                                      prompt='hello world', model='gpt-4'):
+    print(response.choices[0].text, end='')
+print("")
+```
\ No newline at end of file
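The README above documents only the streaming call; the same patch also adds a blocking `Completion` class (see `__init__.py` below). A minimal sketch of how that variant might be used, mirroring the README example; the prompt string is illustrative:

```python
from gpt4free import forefront

# Account.create returns a JWT string on success (or an error string on failure).
token = forefront.Account.create(logging=False)

# Completion.create drains StreamingCompletion internally and returns a single
# ForeFrontResponse whose .text holds the concatenated output.
response = forefront.Completion.create(token=token, prompt='hello world', model='gpt-4')
print(response.text)
```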
diff --git a/gpt4free/forefront/__init__.py b/gpt4free/forefront/__init__.py
new file mode 100644
index 00000000..969b33b8
--- /dev/null
+++ b/gpt4free/forefront/__init__.py
@@ -0,0 +1,194 @@
+from json import loads
+from re import findall
+from time import time, sleep
+from typing import Generator, Optional
+from uuid import uuid4
+
+from fake_useragent import UserAgent
+from requests import post
+from pymailtm import MailTm, Message
+from tls_client import Session
+
+from .typing import ForeFrontResponse
+
+
+class Account:
+    @staticmethod
+    def create(proxy: Optional[str] = None, logging: bool = False):
+        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None
+
+        start = time()
+
+        mail_client = MailTm().get_account()
+        mail_address = mail_client.address
+
+        client = Session(client_identifier='chrome110')
+        client.proxies = proxies
+        client.headers = {
+            'origin': 'https://accounts.forefront.ai',
+            'user-agent': UserAgent().random,
+        }
+
+        response = client.post(
+            'https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.38.4',
+            data={'email_address': mail_address},
+        )
+
+        try:
+            trace_token = response.json()['response']['id']
+            if logging:
+                print(trace_token)
+        except KeyError:
+            return 'Failed to create account!'
+
+        response = client.post(
+            f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.38.4',
+            data={
+                'strategy': 'email_link',
+                'redirect_url': 'https://accounts.forefront.ai/sign-up/verify'
+            },
+        )
+
+        if logging:
+            print(response.text)
+
+        if 'sign_up_attempt' not in response.text:
+            return 'Failed to create account!'
+
+        while True:
+            sleep(1)
+            new_message: Message = mail_client.wait_for_message()
+            if logging:
+                print(new_message.data['id'])
+
+            verification_links = findall(r'https:\/\/clerk\.forefront\.ai\/v1\/verify\?token=\w.+', new_message.text)
+            if verification_links:
+                verification_url = verification_links[0]
+                break
+
+        if logging:
+            print(verification_url)
+
+        response = client.get(verification_url)
+
+        response = client.get('https://clerk.forefront.ai/v1/client?_clerk_js_version=4.38.4')
+
+        token = response.json()['response']['sessions'][0]['last_active_token']['jwt']
+
+        with open('accounts.txt', 'a') as f:
+            f.write(f'{mail_address}:{token}\n')
+
+        if logging:
+            print(time() - start)
+
+        return token
+
+
+class StreamingCompletion:
+    @staticmethod
+    def create(
+        token=None,
+        chat_id=None,
+        prompt='',
+        action_type='new',
+        default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
+        model='gpt-4',
+        proxy=None
+    ) -> Generator[ForeFrontResponse, None, None]:
+        if not token:
+            raise Exception('Token is required!')
+        if not chat_id:
+            chat_id = str(uuid4())
+
+        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None
+
+        headers = {
+            'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
+            'accept': '*/*',
+            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'authorization': 'Bearer ' + token,
+            'cache-control': 'no-cache',
+            'content-type': 'application/json',
+            'origin': 'https://chat.forefront.ai',
+            'pragma': 'no-cache',
+            'referer': 'https://chat.forefront.ai/',
+            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'cross-site',
+            'user-agent': UserAgent().random,
+        }
+
+        json_data = {
+            'text': prompt,
+            'action': action_type,
+            'parentId': chat_id,
+            'workspaceId': chat_id,
+            'messagePersona': default_persona,
+            'model': model,
+        }
+
+        for chunk in post(
+            'https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
+            headers=headers,
+            proxies=proxies,
+            json=json_data,
+            stream=True,
+        ).iter_lines():
+            if b'finish_reason":null' in chunk:
+                data = loads(chunk.decode('utf-8').split('data: ')[1])
+                delta = data['choices'][0]['delta'].get('content')  # renamed from `token` to avoid shadowing the auth token
+
+                if delta is not None:
+                    yield ForeFrontResponse(
+                        **{
+                            'id': chat_id,
+                            'object': 'text_completion',
+                            'created': int(time()),
+                            'text': delta,
+                            'model': model,
+                            'choices': [{'text': delta, 'index': 0, 'logprobs': None, 'finish_reason': 'stop'}],
+                            'usage': {
+                                'prompt_tokens': len(prompt),
+                                'completion_tokens': len(delta),
+                                'total_tokens': len(prompt) + len(delta),
+                            },
+                        }
+                    )
+
+
+class Completion:
+    @staticmethod
+    def create(
+        token=None,
+        chat_id=None,
+        prompt='',
+        action_type='new',
+        default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
+        model='gpt-4',
+        proxy=None
+    ) -> ForeFrontResponse:
+        text = ''
+        final_response = None
+        for response in StreamingCompletion.create(
+            token=token,
+            chat_id=chat_id,
+            prompt=prompt,
+            action_type=action_type,
+            default_persona=default_persona,
+            model=model,
+            proxy=proxy
+        ):
+            if response:
+                final_response = response
+                text += response.text
+
+        if final_response:
+            final_response.text = text
+        else:
+            raise Exception('Unable to get a response, please try again')
+
+        return final_response
+
\ No newline at end of file
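Both `Account.create` and the completion classes above accept an optional `proxy` argument and prepend `http://` to it themselves, so callers pass a bare `host:port`. A rough sketch of routing the whole flow through one proxy; the proxy address and prompt are placeholders:

```python
from gpt4free import forefront

PROXY = '127.0.0.1:8080'  # hypothetical proxy; 'http://' is added inside create()

token = forefront.Account.create(proxy=PROXY, logging=True)
if token == 'Failed to create account!':  # Account.create signals failure via this string
    raise SystemExit(token)

# Each yielded chunk is a ForeFrontResponse carrying one delta of text.
for chunk in forefront.StreamingCompletion.create(token=token, prompt='ping', proxy=PROXY):
    print(chunk.choices[0].text, end='', flush=True)
print()
```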
diff --git a/gpt4free/forefront/typing.py b/gpt4free/forefront/typing.py
new file mode 100644
index 00000000..a9025419
--- /dev/null
+++ b/gpt4free/forefront/typing.py
@@ -0,0 +1,25 @@
+from typing import Any, List
+from pydantic import BaseModel
+
+
+class Choice(BaseModel):
+    text: str
+    index: int
+    logprobs: Any
+    finish_reason: str
+
+
+class Usage(BaseModel):
+    prompt_tokens: int
+    completion_tokens: int
+    total_tokens: int
+
+
+class ForeFrontResponse(BaseModel):
+    id: str
+    object: str
+    created: int
+    model: str
+    choices: List[Choice]
+    usage: Usage
+    text: str
\ No newline at end of file
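For reference, a sketch of the payload shape that `StreamingCompletion.create` constructs against these models. The field values are illustrative, and the example assumes pydantic v1 (whose `.json()` method is used for serialization):

```python
from time import time
from gpt4free.forefront.typing import ForeFrontResponse

response = ForeFrontResponse(
    id='6f0c7e3e-0000-0000-0000-000000000000',  # stands in for the chat_id uuid4 used in __init__.py
    object='text_completion',
    created=int(time()),
    model='gpt-4',
    text='Hello',
    choices=[{'text': 'Hello', 'index': 0, 'logprobs': None, 'finish_reason': 'stop'}],
    usage={'prompt_tokens': 11, 'completion_tokens': 5, 'total_tokens': 16},
)
print(response.json(indent=2))  # nested dicts are coerced into Choice and Usage models
```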