author     t.me/xtekky <98614666+xtekky@users.noreply.github.com>  2023-06-24 03:47:00 +0200
committer  t.me/xtekky <98614666+xtekky@users.noreply.github.com>  2023-06-24 03:47:00 +0200
commit     5db58fd87f230fbe5bae599bb4b120ab42cad3be (patch)
tree       770be13bca77c5d04dfe3265f378431df788706f /g4f/Provider
parent     Merge pull request #664 from LopeKinz/main (diff)
Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/Provider.py                 15
-rw-r--r--  g4f/Provider/Providers/Aichat.py         44
-rw-r--r--  g4f/Provider/Providers/Ails.py           91
-rw-r--r--  g4f/Provider/Providers/Bard.py           74
-rw-r--r--  g4f/Provider/Providers/Bing.py          350
-rw-r--r--  g4f/Provider/Providers/ChatgptAi.py      51
-rw-r--r--  g4f/Provider/Providers/ChatgptLogin.py   96
-rw-r--r--  g4f/Provider/Providers/DeepAi.py         46
-rw-r--r--  g4f/Provider/Providers/Forefront.py      30
-rw-r--r--  g4f/Provider/Providers/GetGpt.py         57
-rw-r--r--  g4f/Provider/Providers/H2o.py           106
-rw-r--r--  g4f/Provider/Providers/Liaobots.py       52
-rw-r--r--  g4f/Provider/Providers/Lockchat.py       32
-rw-r--r--  g4f/Provider/Providers/Theb.py           28
-rw-r--r--  g4f/Provider/Providers/Vercel.py        162
-rw-r--r--  g4f/Provider/Providers/You.py            24
-rw-r--r--  g4f/Provider/Providers/Yqcloud.py        37
-rw-r--r--  g4f/Provider/Providers/helpers/theb.py   48
-rw-r--r--  g4f/Provider/Providers/helpers/you.py    79
-rw-r--r--  g4f/Provider/__init__.py                 20
20 files changed, 1442 insertions, 0 deletions
diff --git a/g4f/Provider/Provider.py b/g4f/Provider/Provider.py
new file mode 100644
index 00000000..12c23333
--- /dev/null
+++ b/g4f/Provider/Provider.py
@@ -0,0 +1,15 @@
+import os
+from ..typing import sha256, Dict, get_type_hints
+
+url = None
+model = None
+supports_stream = False
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ return
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join(
+ [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
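+
+# `params` is assembled by introspection: the positional parameter names of
+# _create_completion are read from its code object and paired with their type
+# hints, so each provider module advertises a string along the lines of
+#     g4f.Providers.Provider supports: (model: str, messages: list, stream: bool)
+# (illustrative output; the exact text depends on the module's file name and hints)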
diff --git a/g4f/Provider/Providers/Aichat.py b/g4f/Provider/Providers/Aichat.py
new file mode 100644
index 00000000..e4fde8c3
--- /dev/null
+++ b/g4f/Provider/Providers/Aichat.py
@@ -0,0 +1,44 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://chat-gpt.org/chat'
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ base = ''
+ for message in messages:
+ base += '%s: %s\n' % (message['role'], message['content'])
+ base += 'assistant:'
+
+
+ headers = {
+ 'authority': 'chat-gpt.org',
+ 'accept': '*/*',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat-gpt.org',
+ 'pragma': 'no-cache',
+ 'referer': 'https://chat-gpt.org/chat',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+ }
+
+ json_data = {
+        'message': base,
+ 'temperature': 1,
+ 'presence_penalty': 0,
+ 'top_p': 1,
+ 'frequency_penalty': 0
+ }
+
+ response = requests.post('https://chat-gpt.org/api/text', headers=headers, json=json_data)
+ yield response.json()['message']
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Ails.py b/g4f/Provider/Providers/Ails.py
new file mode 100644
index 00000000..1a14b2e9
--- /dev/null
+++ b/g4f/Provider/Providers/Ails.py
@@ -0,0 +1,91 @@
+import os
+import time
+import json
+import uuid
+import random
+import hashlib
+import requests
+
+from ...typing import sha256, Dict, get_type_hints
+from datetime import datetime
+
+url: str = 'https://ai.ls'
+model: str = 'gpt-3.5-turbo'
+supports_stream = True
+needs_auth = False
+
+class Utils:
+ def hash(json_data: Dict[str, str]) -> sha256:
+
+ secretKey: bytearray = bytearray([79, 86, 98, 105, 91, 84, 80, 78, 123, 83,
+ 35, 41, 99, 123, 51, 54, 37, 57, 63, 103, 59, 117, 115, 108, 41, 67, 76])
+
+ base_string: str = '%s:%s:%s:%s' % (
+ json_data['t'],
+ json_data['m'],
+ 'WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf',
+ len(json_data['m'])
+ )
+
+ return hashlib.sha256(base_string.encode()).hexdigest()
+
+ def format_timestamp(timestamp: int) -> str:
+
+ e = timestamp
+ n = e % 10
+ r = n + 1 if n % 2 == 0 else n
+ return str(e - n + r)
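+
+    # hash() signs a request: sha256 over "t:m:<static secret>:len(m)", where t is
+    # the timestamp and m the latest message (the secretKey bytearray above appears
+    # to be unused). format_timestamp() forces the millisecond timestamp's last
+    # digit to be odd, apparently mirroring a checksum quirk of the site's own JS.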
+
+
+def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False, **kwargs):
+
+ headers = {
+ 'authority': 'api.caipacity.com',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'authorization': 'Bearer free',
+ 'client-id': str(uuid.uuid4()),
+ 'client-v': '0.1.217',
+ 'content-type': 'application/json',
+ 'origin': 'https://ai.ls',
+ 'referer': 'https://ai.ls/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+ }
+
+ params = {
+ 'full': 'false',
+ }
+
+ timestamp = Utils.format_timestamp(int(time.time() * 1000))
+
+ sig = {
+ 'd': datetime.now().strftime('%Y-%m-%d'),
+ 't': timestamp,
+ 's': Utils.hash({
+ 't': timestamp,
+ 'm': messages[-1]['content']})}
+
+ json_data = json.dumps(separators=(',', ':'), obj={
+ 'model': 'gpt-3.5-turbo',
+ 'temperature': 0.6,
+ 'stream': True,
+ 'messages': messages} | sig)
+
+ response = requests.post('https://api.caipacity.com/v1/chat/completions',
+ headers=headers, data=json_data, stream=True)
+
+ for token in response.iter_lines():
+ if b'content' in token:
+ completion_chunk = json.loads(token.decode().replace('data: ', ''))
+ token = completion_chunk['choices'][0]['delta'].get('content')
+            if token is not None:
+ yield token
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Bard.py b/g4f/Provider/Providers/Bard.py
new file mode 100644
index 00000000..4c37c4b7
--- /dev/null
+++ b/g4f/Provider/Providers/Bard.py
@@ -0,0 +1,74 @@
+import os, requests, json, browser_cookie3, re, random
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://bard.google.com'
+model = ['Palm2']
+supports_stream = False
+needs_auth = True
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
+ domain_name='.google.com')}['__Secure-1PSID']
+
+ formatted = '\n'.join([
+ '%s: %s' % (message['role'], message['content']) for message in messages
+ ])
+ prompt = f'{formatted}\nAssistant:'
+
+    proxy = kwargs.get('proxy', False)
+    if not proxy:
+        print('warning: no proxy was given; Google Bard is blocked in many countries, so the request may fail')
+
+ snlm0e = None
+ conversation_id = None
+ response_id = None
+ choice_id = None
+
+ client = requests.Session()
+ client.proxies = {
+ 'http': f'http://{proxy}',
+ 'https': f'http://{proxy}'} if proxy else None
+
+ client.headers = {
+ 'authority': 'bard.google.com',
+ 'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
+ 'origin': 'https://bard.google.com',
+ 'referer': 'https://bard.google.com/',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+ 'x-same-domain': '1',
+ 'cookie': f'__Secure-1PSID={psid}'
+ }
+
+ snlm0e = re.search(r'SNlM0e\":\"(.*?)\"',
+ client.get('https://bard.google.com/').text).group(1) if not snlm0e else snlm0e
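+
+    # SNlM0e appears to be a per-session anti-CSRF token embedded in the Bard
+    # homepage HTML; it is scraped here and echoed back as the 'at' form field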
+
+ params = {
+ 'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
+ '_reqid': random.randint(1111, 9999),
+ 'rt': 'c'
+ }
+
+ data = {
+ 'at': snlm0e,
+ 'f.req': json.dumps([None, json.dumps([[prompt], None, [conversation_id, response_id, choice_id]])])}
+
+ intents = '.'.join([
+ 'assistant',
+ 'lamda',
+ 'BardFrontendService'
+ ])
+
+ response = client.post(f'https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate',
+ data=data, params=params)
+
+ chat_data = json.loads(response.content.splitlines()[3])[0][2]
+ if chat_data:
+ json_chat_data = json.loads(chat_data)
+
+ yield json_chat_data[0][0]
+
+ else:
+ yield 'error'
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Bing.py b/g4f/Provider/Providers/Bing.py
new file mode 100644
index 00000000..1d33cda5
--- /dev/null
+++ b/g4f/Provider/Providers/Bing.py
@@ -0,0 +1,350 @@
+import os
+import json
+import random
+import uuid
+import ssl
+import certifi
+import aiohttp
+import asyncio
+
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://bing.com/chat'
+model = ['gpt-4']
+supports_stream = True
+needs_auth = False
+
+ssl_context = ssl.create_default_context()
+ssl_context.load_verify_locations(certifi.where())
+
+
+class optionsSets:
+ optionSet: dict = {
+ 'tone': str,
+ 'optionsSets': list
+ }
+
+ jailbreak: dict = {
+ "optionsSets": [
+ 'saharasugg',
+ 'enablenewsfc',
+ 'clgalileo',
+ 'gencontentv3',
+ "nlu_direct_response_filter",
+ "deepleo",
+ "disable_emoji_spoken_text",
+ "responsible_ai_policy_235",
+ "enablemm",
+            "h3precise",  # a missing comma here would silently concatenate with "dtappid"
+ # "harmonyv3",
+ "dtappid",
+ "cricinfo",
+ "cricinfov2",
+ "dv3sugg",
+ "nojbfedge"
+ ]
+ }
+
+
+class Defaults:
+ delimiter = '\x1e'
+ ip_address = f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
+
+ allowedMessageTypes = [
+ 'Chat',
+ 'Disengaged',
+ 'AdsQuery',
+ 'SemanticSerp',
+ 'GenerateContentQuery',
+ 'SearchQuery',
+ 'ActionRequest',
+ 'Context',
+ 'Progress',
+ 'AdsQuery',
+ 'SemanticSerp'
+ ]
+
+ sliceIds = [
+
+ # "222dtappid",
+ # "225cricinfo",
+ # "224locals0"
+
+ 'winmuid3tf',
+ 'osbsdusgreccf',
+ 'ttstmout',
+ 'crchatrev',
+ 'winlongmsgtf',
+ 'ctrlworkpay',
+ 'norespwtf',
+ 'tempcacheread',
+ 'temptacache',
+ '505scss0',
+ '508jbcars0',
+ '515enbotdets0',
+ '5082tsports',
+ '515vaoprvs',
+ '424dagslnv1s0',
+ 'kcimgattcf',
+ '427startpms0'
+ ]
+
+ location = {
+ 'locale': 'en-US',
+ 'market': 'en-US',
+ 'region': 'US',
+ 'locationHints': [
+ {
+ 'country': 'United States',
+ 'state': 'California',
+ 'city': 'Los Angeles',
+ 'timezoneoffset': 8,
+ 'countryConfidence': 8,
+ 'Center': {
+ 'Latitude': 34.0536909,
+ 'Longitude': -118.242766
+ },
+ 'RegionType': 2,
+ 'SourceType': 1
+ }
+ ],
+ }
+
+
+def _format(msg: dict) -> str:
+ return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
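+
+# ChatHub frames are JSON objects terminated by the ASCII record separator
+# (0x1e), matching the SignalR JSON hub protocol spoken by Bing's websocket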
+
+
+async def create_conversation():
+ for _ in range(5):
+ create = requests.get('https://www.bing.com/turing/conversation/create',
+ headers={
+ 'authority': 'edgeservices.bing.com',
+ 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'max-age=0',
+ 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+ 'sec-ch-ua-arch': '"x86"',
+ 'sec-ch-ua-bitness': '"64"',
+ 'sec-ch-ua-full-version': '"110.0.1587.69"',
+ 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-model': '""',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-ch-ua-platform-version': '"15.0.0"',
+ 'sec-fetch-dest': 'document',
+ 'sec-fetch-mode': 'navigate',
+ 'sec-fetch-site': 'none',
+ 'sec-fetch-user': '?1',
+ 'upgrade-insecure-requests': '1',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
+ 'x-edge-shopping-flag': '1',
+ 'x-forwarded-for': Defaults.ip_address
+ })
+
+        data = create.json()
+        conversationId = data.get('conversationId')
+        clientId = data.get('clientId')
+        conversationSignature = data.get('conversationSignature')
+
+        if conversationId and clientId and conversationSignature:
+            return conversationId, clientId, conversationSignature
+
+        # only give up once the last of the five attempts has failed
+        if _ == 4:
+            raise Exception('Failed to create conversation.')
+
+
+async def stream_generate(prompt: str, mode: optionsSets.optionSet = optionsSets.jailbreak, context: bool or str = False):
+ timeout = aiohttp.ClientTimeout(total=900)
+ session = aiohttp.ClientSession(timeout=timeout)
+
+ conversationId, clientId, conversationSignature = await create_conversation()
+
+ wss = await session.ws_connect('wss://sydney.bing.com/sydney/ChatHub', ssl=ssl_context, autoping=False,
+ headers={
+ 'accept': 'application/json',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'content-type': 'application/json',
+ 'sec-ch-ua': '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
+ 'sec-ch-ua-arch': '"x86"',
+ 'sec-ch-ua-bitness': '"64"',
+ 'sec-ch-ua-full-version': '"109.0.1518.78"',
+ 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-model': '',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-ch-ua-platform-version': '"15.0.0"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'x-ms-client-request-id': str(uuid.uuid4()),
+ 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
+ 'Referer': 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx',
+ 'Referrer-Policy': 'origin-when-cross-origin',
+ 'x-forwarded-for': Defaults.ip_address
+ })
+
+ await wss.send_str(_format({'protocol': 'json', 'version': 1}))
+ await wss.receive(timeout=900)
+
+ struct = {
+ 'arguments': [
+ {
+ **mode,
+ 'source': 'cib',
+ 'allowedMessageTypes': Defaults.allowedMessageTypes,
+ 'sliceIds': Defaults.sliceIds,
+ 'traceId': os.urandom(16).hex(),
+ 'isStartOfSession': True,
+ 'message': Defaults.location | {
+ 'author': 'user',
+ 'inputMethod': 'Keyboard',
+ 'text': prompt,
+ 'messageType': 'Chat'
+ },
+ 'conversationSignature': conversationSignature,
+ 'participant': {
+ 'id': clientId
+ },
+ 'conversationId': conversationId
+ }
+ ],
+ 'invocationId': '0',
+ 'target': 'chat',
+ 'type': 4
+ }
+
+ if context:
+ struct['arguments'][0]['previousMessages'] = [
+ {
+ "author": "user",
+ "description": context,
+ "contextType": "WebPage",
+ "messageType": "Context",
+ "messageId": "discover-web--page-ping-mriduna-----"
+ }
+ ]
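+
+        # prior turns are passed along as a single WebPage "Context" message,
+        # since each websocket session otherwise starts as a fresh conversation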
+
+ await wss.send_str(_format(struct))
+
+ final = False
+ draw = False
+ resp_txt = ''
+ result_text = ''
+ resp_txt_no_link = ''
+ cache_text = ''
+
+ while not final:
+ msg = await wss.receive(timeout=900)
+ objects = msg.data.split(Defaults.delimiter)
+
+ for obj in objects:
+            if not obj:
+ continue
+
+ response = json.loads(obj)
+ if response.get('type') == 1 and response['arguments'][0].get('messages',):
+ if not draw:
+ if (response['arguments'][0]['messages'][0]['contentOrigin'] != 'Apology') and not draw:
+ resp_txt = result_text + \
+ response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get(
+ 'text', '')
+ resp_txt_no_link = result_text + \
+ response['arguments'][0]['messages'][0].get(
+ 'text', '')
+
+ if response['arguments'][0]['messages'][0].get('messageType',):
+ resp_txt = (
+ resp_txt
+ + response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
+ + '\n'
+ )
+ result_text = (
+ result_text
+ + response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
+ + '\n'
+ )
+
+ if cache_text.endswith(' '):
+ final = True
+ if wss and not wss.closed:
+ await wss.close()
+ if session and not session.closed:
+ await session.close()
+
+ yield (resp_txt.replace(cache_text, ''))
+ cache_text = resp_txt
+
+ elif response.get('type') == 2:
+ if response['item']['result'].get('error'):
+ if wss and not wss.closed:
+ await wss.close()
+ if session and not session.closed:
+ await session.close()
+
+ raise Exception(
+ f"{response['item']['result']['value']}: {response['item']['result']['message']}")
+
+ if draw:
+ cache = response['item']['messages'][1]['adaptiveCards'][0]['body'][0]['text']
+ response['item']['messages'][1]['adaptiveCards'][0]['body'][0]['text'] = (
+ cache + resp_txt)
+
+ if (response['item']['messages'][-1]['contentOrigin'] == 'Apology' and resp_txt):
+ response['item']['messages'][-1]['text'] = resp_txt_no_link
+ response['item']['messages'][-1]['adaptiveCards'][0]['body'][0]['text'] = resp_txt
+
+ # print('Preserved the message from being deleted', file=sys.stderr)
+
+ final = True
+ if wss and not wss.closed:
+ await wss.close()
+ if session and not session.closed:
+ await session.close()
+
+
+def run(generator):
+ loop = asyncio.get_event_loop()
+ gen = generator.__aiter__()
+
+ while True:
+ try:
+ next_val = loop.run_until_complete(gen.__anext__())
+ yield next_val
+
+ except StopAsyncIteration:
+ break
+
+ #print('Done')
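+
+# run() bridges the async generator from stream_generate() into the synchronous
+# generator interface the provider exposes, pulling one chunk at a time via
+# __anext__() on a shared event loop (note that asyncio.get_event_loop() is
+# deprecated for this pattern in newer Python versions)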
+
+
+def convert(messages):
+ context = ""
+
+ for message in messages:
+ context += "[%s](#message)\n%s\n\n" % (message['role'],
+ message['content'])
+
+ return context
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ if len(messages) < 2:
+ prompt = messages[0]['content']
+ context = False
+
+ else:
+ prompt = messages[-1]['content']
+ context = convert(messages[:-1])
+
+ response = run(stream_generate(prompt, optionsSets.jailbreak, context))
+ for token in response:
+ yield (token)
+
+ #print('Done')
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join(
+ [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/ChatgptAi.py b/g4f/Provider/Providers/ChatgptAi.py
new file mode 100644
index 00000000..00d4cf6f
--- /dev/null
+++ b/g4f/Provider/Providers/ChatgptAi.py
@@ -0,0 +1,51 @@
+import os
+import requests, re
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://chatgpt.ai/gpt-4/'
+model = ['gpt-4']
+supports_stream = False
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ chat = ''
+ for message in messages:
+ chat += '%s: %s\n' % (message['role'], message['content'])
+ chat += 'assistant: '
+
+ response = requests.get('https://chatgpt.ai/gpt-4/')
+
+ nonce, post_id, _, bot_id = re.findall(r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width', response.text)[0]
+
+ headers = {
+ 'authority': 'chatgpt.ai',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'cache-control': 'no-cache',
+ 'origin': 'https://chatgpt.ai',
+ 'pragma': 'no-cache',
+ 'referer': 'https://chatgpt.ai/gpt-4/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+ }
+ data = {
+ '_wpnonce': nonce,
+ 'post_id': post_id,
+ 'url': 'https://chatgpt.ai/gpt-4',
+ 'action': 'wpaicg_chat_shortcode_message',
+ 'message': chat,
+ 'bot_id': bot_id
+ }
+
+ response = requests.post('https://chatgpt.ai/wp-admin/admin-ajax.php',
+ headers=headers, data=data)
+
+ yield (response.json()['data'])
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/ChatgptLogin.py b/g4f/Provider/Providers/ChatgptLogin.py
new file mode 100644
index 00000000..9551d15d
--- /dev/null
+++ b/g4f/Provider/Providers/ChatgptLogin.py
@@ -0,0 +1,96 @@
+import os
+from ...typing import sha256, Dict, get_type_hints
+import requests
+import re
+import base64
+
+url = 'https://chatgptlogin.ac'
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ def get_nonce():
+ res = requests.get('https://chatgptlogin.ac/use-chatgpt-free/', headers={
+ "Referer": "https://chatgptlogin.ac/use-chatgpt-free/",
+ "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
+ })
+
+ src = re.search(r'class="mwai-chat mwai-chatgpt">.*<span>Send</span></button></div></div></div> <script defer src="(.*?)">', res.text).group(1)
+ decoded_string = base64.b64decode(src.split(",")[-1]).decode('utf-8')
+ return re.search(r"let restNonce = '(.*?)';", decoded_string).group(1)
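+
+    # the page embeds its chat script as a base64 data-URI; decoding it and
+    # grepping for restNonce recovers the WordPress REST nonce required by the
+    # x-wp-nonce header below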
+
+ def transform(messages: list) -> list:
+ def html_encode(string: str) -> str:
+ table = {
+ '"': '&quot;',
+ "'": '&#39;',
+ '&': '&amp;',
+ '>': '&gt;',
+ '<': '&lt;',
+ '\n': '<br>',
+ '\t': '&nbsp;&nbsp;&nbsp;&nbsp;',
+ ' ': '&nbsp;'
+ }
+
+ for key in table:
+ string = string.replace(key, table[key])
+
+ return string
+
+ return [{
+ 'id': os.urandom(6).hex(),
+ 'role': message['role'],
+ 'content': message['content'],
+ 'who': 'AI: ' if message['role'] == 'assistant' else 'User: ',
+ 'html': html_encode(message['content'])} for message in messages]
+
+ headers = {
+ 'authority': 'chatgptlogin.ac',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'content-type': 'application/json',
+ 'origin': 'https://chatgptlogin.ac',
+ 'referer': 'https://chatgptlogin.ac/use-chatgpt-free/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+ 'x-wp-nonce': get_nonce()
+ }
+
+ conversation = transform(messages)
+
+ json_data = {
+ 'env': 'chatbot',
+ 'session': 'N/A',
+ 'prompt': 'Converse as if you were an AI assistant. Be friendly, creative.',
+ 'context': 'Converse as if you were an AI assistant. Be friendly, creative.',
+ 'messages': conversation,
+ 'newMessage': messages[-1]['content'],
+ 'userName': '<div class="mwai-name-text">User:</div>',
+ 'aiName': '<div class="mwai-name-text">AI:</div>',
+ 'model': 'gpt-3.5-turbo',
+ 'temperature': 0.8,
+ 'maxTokens': 1024,
+ 'maxResults': 1,
+ 'apiKey': '',
+ 'service': 'openai',
+ 'embeddingsIndex': '',
+ 'stop': '',
+ 'clientId': os.urandom(6).hex()
+ }
+
+ response = requests.post('https://chatgptlogin.ac/wp-json/ai-chatbot/v1/chat',
+ headers=headers, json=json_data)
+
+ return response.json()['reply']
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join(
+ [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/DeepAi.py b/g4f/Provider/Providers/DeepAi.py
new file mode 100644
index 00000000..02b08120
--- /dev/null
+++ b/g4f/Provider/Providers/DeepAi.py
@@ -0,0 +1,46 @@
+import os
+import json
+import random
+import hashlib
+import requests
+
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://deepai.org'
+model = ['gpt-3.5-turbo']
+supports_stream = True
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ def md5(text: str) -> str:
+ return hashlib.md5(text.encode()).hexdigest()[::-1]
+
+
+ def get_api_key(user_agent: str) -> str:
+ part1 = str(random.randint(0, 10**11))
+ part2 = md5(user_agent + md5(user_agent + md5(user_agent + part1 + "x")))
+
+ return f"tryit-{part1}-{part2}"
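+
+    # the guest api-key is derived entirely client-side: a random number joined to
+    # a triple-nested md5 of the user agent, each digest reversed ([::-1]); the
+    # endpoint apparently accepts any key that matches this "tryit-" scheme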
+
+ user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
+
+ headers = {
+ "api-key": get_api_key(user_agent),
+ "user-agent": user_agent
+ }
+
+ files = {
+ "chat_style": (None, "chat"),
+ "chatHistory": (None, json.dumps(messages))
+ }
+
+ r = requests.post("https://api.deepai.org/chat_response", headers=headers, files=files, stream=True)
+
+    r.raise_for_status()
+
+    for chunk in r.iter_content(chunk_size=None):
+        yield chunk.decode()
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join(
+ [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/Forefront.py b/g4f/Provider/Providers/Forefront.py
new file mode 100644
index 00000000..e7e89831
--- /dev/null
+++ b/g4f/Provider/Providers/Forefront.py
@@ -0,0 +1,30 @@
+import os
+import json
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://forefront.com'
+model = ['gpt-3.5-turbo']
+supports_stream = True
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ json_data = {
+ 'text': messages[-1]['content'],
+ 'action': 'noauth',
+ 'id': '',
+ 'parentId': '',
+ 'workspaceId': '',
+ 'messagePersona': '607e41fe-95be-497e-8e97-010a59b2e2c0',
+ 'model': 'gpt-4',
+ 'messages': messages[:-1] if len(messages) > 1 else [],
+ 'internetMode': 'auto'
+ }
+ response = requests.post( 'https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat',
+ json=json_data, stream=True)
+ for token in response.iter_lines():
+ if b'delta' in token:
+ token = json.loads(token.decode().split('data: ')[1])['delta']
+ yield (token)
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/GetGpt.py b/g4f/Provider/Providers/GetGpt.py
new file mode 100644
index 00000000..56a121f6
--- /dev/null
+++ b/g4f/Provider/Providers/GetGpt.py
@@ -0,0 +1,57 @@
+import os
+import json
+import uuid
+import requests
+from Crypto.Cipher import AES
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://chat.getgpt.world/'
+model = ['gpt-3.5-turbo']
+supports_stream = True
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ def encrypt(e):
+ t = os.urandom(8).hex().encode('utf-8')
+ n = os.urandom(8).hex().encode('utf-8')
+ r = e.encode('utf-8')
+ cipher = AES.new(t, AES.MODE_CBC, n)
+ ciphertext = cipher.encrypt(pad_data(r))
+ return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
+
+ def pad_data(data: bytes) -> bytes:
+ block_size = AES.block_size
+ padding_size = block_size - len(data) % block_size
+ padding = bytes([padding_size] * padding_size)
+ return data + padding
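+
+    # the request body is AES-CBC encrypted: key (t) and IV (n) are random
+    # 16-hex-char strings, the plaintext is PKCS#7-padded, and 'signature' is
+    # ciphertext-hex + key + IV concatenated, so the server can slice the last
+    # 32 characters back off to decrypt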
+
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Referer': 'https://chat.getgpt.world/',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
+ }
+
+ data = json.dumps({
+ 'messages': messages,
+ 'frequency_penalty': kwargs.get('frequency_penalty', 0),
+ 'max_tokens': kwargs.get('max_tokens', 4000),
+ 'model': 'gpt-3.5-turbo',
+ 'presence_penalty': kwargs.get('presence_penalty', 0),
+ 'temperature': kwargs.get('temperature', 1),
+ 'top_p': kwargs.get('top_p', 1),
+ 'stream': True,
+ 'uuid': str(uuid.uuid4())
+ })
+
+ res = requests.post('https://chat.getgpt.world/api/chat/stream',
+ headers=headers, json={'signature': encrypt(data)}, stream=True)
+
+ for line in res.iter_lines():
+ if b'content' in line:
+ line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+ yield (line_json['choices'][0]['delta']['content'])
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+ '(%s)' % ', '.join(
+ [f'{name}: {get_type_hints(_create_completion)[name].__name__}' for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/H2o.py b/g4f/Provider/Providers/H2o.py
new file mode 100644
index 00000000..eabf94e2
--- /dev/null
+++ b/g4f/Provider/Providers/H2o.py
@@ -0,0 +1,106 @@
+from requests import Session
+from uuid import uuid4
+from json import loads
+import os
+import json
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://gpt-gm.h2o.ai'
+model = ['falcon-40b', 'falcon-7b', 'llama-13b']
+supports_stream = True
+needs_auth = False
+
+models = {
+ 'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
+ 'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
+ 'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b'
+}
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    conversation = 'instruction: this is a conversation between a user and an AI assistant, respond to the latest message, referring to the conversation if needed\n'
+ for message in messages:
+ conversation += '%s: %s\n' % (message['role'], message['content'])
+ conversation += 'assistant:'
+
+ client = Session()
+ client.headers = {
+ 'authority': 'gpt-gm.h2o.ai',
+ 'origin': 'https://gpt-gm.h2o.ai',
+ 'referer': 'https://gpt-gm.h2o.ai/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-fetch-dest': 'document',
+ 'sec-fetch-mode': 'navigate',
+ 'sec-fetch-site': 'same-origin',
+ 'sec-fetch-user': '?1',
+ 'upgrade-insecure-requests': '1',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+ }
+
+ client.get('https://gpt-gm.h2o.ai/')
+ response = client.post('https://gpt-gm.h2o.ai/settings', data={
+ 'ethicsModalAccepted': 'true',
+ 'shareConversationsWithModelAuthors': 'true',
+ 'ethicsModalAcceptedAt': '',
+ 'activeModel': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
+ 'searchEnabled': 'true',
+ })
+
+ headers = {
+ 'authority': 'gpt-gm.h2o.ai',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'origin': 'https://gpt-gm.h2o.ai',
+ 'referer': 'https://gpt-gm.h2o.ai/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+ }
+
+ json_data = {
+ 'model': models[model]
+ }
+
+ response = client.post('https://gpt-gm.h2o.ai/conversation',
+ headers=headers, json=json_data)
+ conversationId = response.json()['conversationId']
+
+
+ completion = client.post(f'https://gpt-gm.h2o.ai/conversation/{conversationId}', stream=True, json = {
+ 'inputs': conversation,
+ 'parameters': {
+ 'temperature': kwargs.get('temperature', 0.4),
+ 'truncate': kwargs.get('truncate', 2048),
+ 'max_new_tokens': kwargs.get('max_new_tokens', 1024),
+ 'do_sample': kwargs.get('do_sample', True),
+ 'repetition_penalty': kwargs.get('repetition_penalty', 1.2),
+ 'return_full_text': kwargs.get('return_full_text', False)
+ },
+ 'stream': True,
+ 'options': {
+ 'id': kwargs.get('id', str(uuid4())),
+ 'response_id': kwargs.get('response_id', str(uuid4())),
+ 'is_retry': False,
+ 'use_cache': False,
+ 'web_search_id': ''
+ }
+ })
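+
+    # the conversation endpoint streams SSE-style "data: {...}" lines; each JSON
+    # chunk carries token.text, and '<|endoftext|>' marks the end of the reply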
+
+ for line in completion.iter_lines():
+ if b'data' in line:
+ line = loads(line.decode('utf-8').replace('data:', ''))
+ token = line['token']['text']
+
+ if token == '<|endoftext|>':
+ break
+ else:
+ yield (token)
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Liaobots.py b/g4f/Provider/Providers/Liaobots.py
new file mode 100644
index 00000000..76b13c31
--- /dev/null
+++ b/g4f/Provider/Providers/Liaobots.py
@@ -0,0 +1,52 @@
+import os, uuid, requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://liaobots.com'
+model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = True
+needs_auth = True
+
+models = {
+ 'gpt-4': {
+ "id":"gpt-4",
+ "name":"GPT-4",
+ "maxLength":24000,
+ "tokenLimit":8000
+ },
+ 'gpt-3.5-turbo': {
+ "id":"gpt-3.5-turbo",
+ "name":"GPT-3.5",
+ "maxLength":12000,
+ "tokenLimit":4000
+ },
+}
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+ headers = {
+ 'authority': 'liaobots.com',
+ 'content-type': 'application/json',
+ 'origin': 'https://liaobots.com',
+ 'referer': 'https://liaobots.com/',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
+ 'x-auth-code': kwargs.get('auth')
+ }
+
+ json_data = {
+ 'conversationId': str(uuid.uuid4()),
+ 'model': models[model],
+ 'messages': messages,
+ 'key': '',
+ 'prompt': "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+ }
+
+ response = requests.post('https://liaobots.com/api/chat',
+ headers=headers, json=json_data, stream=True)
+
+ for token in response.iter_content(chunk_size=2046):
+ yield (token.decode('utf-8'))
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Lockchat.py b/g4f/Provider/Providers/Lockchat.py
new file mode 100644
index 00000000..d97bc67b
--- /dev/null
+++ b/g4f/Provider/Providers/Lockchat.py
@@ -0,0 +1,32 @@
+import requests
+import os
+import json
+from ...typing import sha256, Dict, get_type_hints
+url = 'http://super.lockchat.app'
+model = ['gpt-4', 'gpt-3.5-turbo']
+supports_stream = True
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
+
+ payload = {
+        "temperature": temperature,
+ "messages": messages,
+ "model": model,
+ "stream": True,
+ }
+ headers = {
+ "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
+ }
+ response = requests.post("http://super.lockchat.app/v1/chat/completions?auth=FnMNPlwZEnGFqvEc9470Vw==",
+ json=payload, headers=headers, stream=True)
+    for token in response.iter_lines():
+        if b'The model: `gpt-4` does not exist' in token:
+            print('error, retrying...')
+            # re-stream the retried completion; calling the generator without
+            # iterating it would silently discard the retry's output
+            yield from _create_completion(model=model, messages=messages, stream=stream, temperature=temperature, **kwargs)
+            return
+        if b"content" in token:
+            token = json.loads(token.decode('utf-8').split('data: ')[1])['choices'][0]['delta'].get('content')
+            if token: yield (token)
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Theb.py b/g4f/Provider/Providers/Theb.py
new file mode 100644
index 00000000..aa43ebc5
--- /dev/null
+++ b/g4f/Provider/Providers/Theb.py
@@ -0,0 +1,28 @@
+import os
+import json
+import time
+import subprocess
+
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://theb.ai'
+model = ['gpt-3.5-turbo']
+supports_stream = True
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+ path = os.path.dirname(os.path.realpath(__file__))
+ config = json.dumps({
+ 'messages': messages,
+ 'model': model}, separators=(',', ':'))
+
+ cmd = ['python3', f'{path}/helpers/theb.py', config]
+
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+ for line in iter(p.stdout.readline, b''):
+ yield line.decode('utf-8')
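+
+    # the request itself lives in helpers/theb.py: curl_cffi only exposes the
+    # stream through a content_callback, so the helper prints chunks to stdout
+    # and this generator re-streams the child process's output line by line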
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Vercel.py b/g4f/Provider/Providers/Vercel.py
new file mode 100644
index 00000000..e5df9cf0
--- /dev/null
+++ b/g4f/Provider/Providers/Vercel.py
@@ -0,0 +1,162 @@
+import os
+import json
+import base64
+import execjs
+import queue
+import threading
+
+from curl_cffi import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://play.vercel.ai'
+supports_stream = True
+needs_auth = False
+
+models = {
+ 'claude-instant-v1': 'anthropic:claude-instant-v1',
+ 'claude-v1': 'anthropic:claude-v1',
+ 'alpaca-7b': 'replicate:replicate/alpaca-7b',
+ 'stablelm-tuned-alpha-7b': 'replicate:stability-ai/stablelm-tuned-alpha-7b',
+ 'bloom': 'huggingface:bigscience/bloom',
+ 'bloomz': 'huggingface:bigscience/bloomz',
+ 'flan-t5-xxl': 'huggingface:google/flan-t5-xxl',
+ 'flan-ul2': 'huggingface:google/flan-ul2',
+ 'gpt-neox-20b': 'huggingface:EleutherAI/gpt-neox-20b',
+ 'oasst-sft-4-pythia-12b-epoch-3.5': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
+ 'santacoder': 'huggingface:bigcode/santacoder',
+ 'command-medium-nightly': 'cohere:command-medium-nightly',
+ 'command-xlarge-nightly': 'cohere:command-xlarge-nightly',
+ 'code-cushman-001': 'openai:code-cushman-001',
+ 'code-davinci-002': 'openai:code-davinci-002',
+ 'gpt-3.5-turbo': 'openai:gpt-3.5-turbo',
+ 'text-ada-001': 'openai:text-ada-001',
+ 'text-babbage-001': 'openai:text-babbage-001',
+ 'text-curie-001': 'openai:text-curie-001',
+ 'text-davinci-002': 'openai:text-davinci-002',
+ 'text-davinci-003': 'openai:text-davinci-003'
+}
+model = models.keys()
+
+vercel_models = {'anthropic:claude-instant-v1': {'id': 'anthropic:claude-instant-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-instant-v1'}, 'anthropic:claude-v1': {'id': 'anthropic:claude-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-v1'}, 'replicate:replicate/alpaca-7b': {'id': 'replicate:replicate/alpaca-7b', 'provider': 'replicate', 'providerHumanName': 'Replicate', 'makerHumanName': 'Stanford', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '2014ee1247354f2e81c0b3650d71ca715bc1e610189855f134c30ecb841fae21', 'name': 'alpaca-7b'}, 'replicate:stability-ai/stablelm-tuned-alpha-7b': {'id': 'replicate:stability-ai/stablelm-tuned-alpha-7b', 'provider': 'replicate', 'makerHumanName': 'StabilityAI', 'providerHumanName': 'Replicate', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '4a9a32b4fd86c2d047f1d271fa93972683ec6ef1cf82f402bd021f267330b50b', 'name': 'stablelm-tuned-alpha-7b'}, 'huggingface:bigscience/bloom': {'id': 'huggingface:bigscience/bloom', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': "Do NOT talk to Bloom as an entity, it's not a chatbot but a webpage/blog/article completion model. For the best results: mimic a few words of a webpage similar to the content you want to generate. Start a sentence as if YOU were writing a blog, webpage, math post, coding article and Bloom will generate a coherent follow-up.", 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloom'}, 'huggingface:bigscience/bloomz': {'id': 'huggingface:bigscience/bloomz', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': 'We recommend using the model to perform tasks expressed in natural language. For example, given the prompt "Translate to English: Je t\'aime.", the model will most likely answer "I love you.".', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloomz'}, 'huggingface:google/flan-t5-xxl': {'id': 'huggingface:google/flan-t5-xxl', 'provider': 'huggingface', 'makerHumanName': 'Google', 'providerHumanName': 'HuggingFace', 'name': 'flan-t5-xxl', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}}, 'huggingface:google/flan-ul2': {'id': 'huggingface:google/flan-ul2', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'Google', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'flan-ul2'}, 'huggingface:EleutherAI/gpt-neox-20b': {'id': 'huggingface:EleutherAI/gpt-neox-20b', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'EleutherAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-neox-20b'}, 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'OpenAssistant', 'parameters': {'maximumLength': {'value': 200, 'range': [50, 1024]}, 'typicalP': {'value': 0.2, 'range': [0.1, 0.99]}, 'repetitionPenalty': {'value': 1, 'range': [0.1, 2]}}, 'name': 'oasst-sft-4-pythia-12b-epoch-3.5'}, 'huggingface:bigcode/santacoder': {
+ 'id': 'huggingface:bigcode/santacoder', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigCode', 'instructions': 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. # the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'santacoder'}, 'cohere:command-medium-nightly': {'id': 'cohere:command-medium-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-medium-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'cohere:command-xlarge-nightly': {'id': 'cohere:command-xlarge-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-xlarge-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:gpt-4': {'id': 'openai:gpt-4', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'gpt-4', 'minBillingTier': 'pro', 'parameters': {'temperature': {'value': 0.7, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:code-cushman-001': {'id': 'openai:code-cushman-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-cushman-001'}, 'openai:code-davinci-002': {'id': 'openai:code-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-davinci-002'}, 'openai:gpt-3.5-turbo': {'id': 'openai:gpt-3.5-turbo', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.7, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-3.5-turbo'}, 'openai:text-ada-001': {'id': 'openai:text-ada-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-ada-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-babbage-001': {'id': 'openai:text-babbage-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-babbage-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-curie-001': {'id': 'openai:text-curie-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-curie-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-002': {'id': 'openai:text-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-002', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-003': {'id': 'openai:text-davinci-003', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-003', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}}
+
+
+# based on https://github.com/ading2210/vercel-llm-api // modified
+class Client:
+ def __init__(self):
+ self.session = requests.Session()
+ self.headers = {
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110 Safari/537.36',
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
+ 'Accept-Encoding': 'gzip, deflate, br',
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Te': 'trailers',
+ 'Upgrade-Insecure-Requests': '1'
+ }
+ self.session.headers.update(self.headers)
+
+ def get_token(self):
+ b64 = self.session.get('https://sdk.vercel.ai/openai.jpeg').text
+ data = json.loads(base64.b64decode(b64))
+
+ code = 'const globalThis = {data: `sentinel`}; function token() {return (%s)(%s)}' % (
+ data['c'], data['a'])
+
+ token_string = json.dumps(separators=(',', ':'),
+ obj={'r': execjs.compile(code).call('token'), 't': data['t']})
+
+ return base64.b64encode(token_string.encode()).decode()
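+
+    # anti-bot token: /openai.jpeg returns base64-encoded JSON holding a JS
+    # function ('c') and its argument ('a'); get_token() executes it via execjs
+    # and re-encodes the result into the browser's Custom-Encoding header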
+
+ def get_default_params(self, model_id):
+ return {key: param['value'] for key, param in vercel_models[model_id]['parameters'].items()}
+
+ def generate(self, model_id: str, prompt: str, params: dict = {}):
+ if not ':' in model_id:
+ model_id = models[model_id]
+
+ defaults = self.get_default_params(model_id)
+
+ payload = defaults | params | {
+ 'prompt': prompt,
+ 'model': model_id,
+ }
+
+ headers = self.headers | {
+ 'Accept-Encoding': 'gzip, deflate, br',
+ 'Custom-Encoding': self.get_token(),
+ 'Host': 'sdk.vercel.ai',
+ 'Origin': 'https://sdk.vercel.ai',
+ 'Referrer': 'https://sdk.vercel.ai',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ }
+
+ chunks_queue = queue.Queue()
+ error = None
+ response = None
+
+ def callback(data):
+ chunks_queue.put(data.decode())
+
+        def request_thread():
+            nonlocal response, error
+            for _ in range(3):
+                try:
+                    response = self.session.post('https://sdk.vercel.ai/api/generate',
+                        json=payload, headers=headers, content_callback=callback)
+                    response.raise_for_status()
+                    break  # success: without this, the request was re-sent on every pass
+
+                except Exception as e:
+                    if _ == 2:
+                        error = e
+
+        thread = threading.Thread(target=request_thread, daemon=True)
+        thread.start()
+
+        text = ''
+        index = 0
+        while True:
+            try:
+                chunk = chunks_queue.get(block=True, timeout=0.1)
+
+            except queue.Empty:
+                if error:
+                    raise error
+
+                elif response:
+                    break
+
+                else:
+                    continue
+
+            text += chunk
+            lines = text.split('\n')
+
+            if len(lines) - 1 > index:
+                new = lines[index:-1]
+                for word in new:
+                    yield json.loads(word)
+                index = len(lines) - 1
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    # the provider is temporarily disabled; everything below the early return
+    # is kept for when the endpoint starts working again
+    yield 'Vercel is currently not working.'
+    return
+
+ conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
+
+ for message in messages:
+ conversation += '%s: %s\n' % (message['role'], message['content'])
+
+ conversation += 'assistant: '
+
+ completion = Client().generate(model, conversation)
+
+ for token in completion:
+ yield token
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/You.py b/g4f/Provider/Providers/You.py
new file mode 100644
index 00000000..02a2774c
--- /dev/null
+++ b/g4f/Provider/Providers/You.py
@@ -0,0 +1,24 @@
+import os
+import json
+import time
+import subprocess
+
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://you.com'
+model = 'gpt-3.5-turbo'
+supports_stream = True
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+ path = os.path.dirname(os.path.realpath(__file__))
+ config = json.dumps({
+ 'messages': messages}, separators=(',', ':'))
+
+ cmd = ['python3', f'{path}/helpers/you.py', config]
+
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+ for line in iter(p.stdout.readline, b''):
+        yield line.decode('utf-8') #[:-1]
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Yqcloud.py b/g4f/Provider/Providers/Yqcloud.py
new file mode 100644
index 00000000..488951dd
--- /dev/null
+++ b/g4f/Provider/Providers/Yqcloud.py
@@ -0,0 +1,37 @@
+import os
+import time
+import requests
+
+from ...typing import sha256, Dict, get_type_hints
+url = 'https://chat9.yqcloud.top/'
+model = [
+ 'gpt-3.5-turbo',
+]
+supports_stream = True
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
+ headers = {
+ 'authority': 'api.aichatos.cloud',
+ 'origin': 'https://chat9.yqcloud.top',
+ 'referer': 'https://chat9.yqcloud.top/',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
+ }
+
+ json_data = {
+ 'prompt': 'always respond in english | %s' % messages[-1]['content'],
+ 'userId': f'#/chat/{int(time.time() * 1000)}',
+ 'network': True,
+ 'apikey': '',
+ 'system': '',
+ 'withoutContext': False,
+ }
+
+ response = requests.post('https://api.aichatos.cloud/api/generateStream', headers=headers, json=json_data, stream=True)
+ for token in response.iter_content(chunk_size=2046):
+ if not b'always respond in english' in token:
+ yield (token.decode('utf-8'))
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/helpers/theb.py b/g4f/Provider/Providers/helpers/theb.py
new file mode 100644
index 00000000..71cfd23f
--- /dev/null
+++ b/g4f/Provider/Providers/helpers/theb.py
@@ -0,0 +1,48 @@
+import json
+import sys
+from re import findall
+from curl_cffi import requests
+
+config = json.loads(sys.argv[1])
+prompt = config['messages'][-1]['content']
+
+headers = {
+ 'authority': 'chatbot.theb.ai',
+ 'accept': 'application/json, text/plain, */*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'content-type': 'application/json',
+ 'origin': 'https://chatbot.theb.ai',
+ 'referer': 'https://chatbot.theb.ai/',
+ 'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+}
+
+json_data = {
+ 'prompt': prompt,
+ 'options': {}
+}
+
+def format(chunk):
+ try:
+ completion_chunk = findall(r'content":"(.*)"},"fin', chunk.decode())[0]
+ print(completion_chunk, flush=True, end='')
+
+ except Exception as e:
+        print(f'[ERROR] an error occurred, retrying... | [[{chunk.decode()}]]', flush=True)
+ return
+
+while True:
+ try:
+ response = requests.post('https://chatbot.theb.ai/api/chat-process',
+ headers=headers, json=json_data, content_callback=format, impersonate='chrome110')
+
+ exit(0)
+
+ except Exception as e:
+        print('[ERROR] an error occurred, retrying... |', e, flush=True)
+        continue
\ No newline at end of file
diff --git a/g4f/Provider/Providers/helpers/you.py b/g4f/Provider/Providers/helpers/you.py
new file mode 100644
index 00000000..02985ed1
--- /dev/null
+++ b/g4f/Provider/Providers/helpers/you.py
@@ -0,0 +1,79 @@
+import sys
+import json
+import urllib.parse
+
+from curl_cffi import requests
+
+config = json.loads(sys.argv[1])
+messages = config['messages']
+prompt = ''
+
+
+def transform(messages: list) -> list:
+ result = []
+ i = 0
+
+ while i < len(messages):
+ if messages[i]['role'] == 'user':
+ question = messages[i]['content']
+ i += 1
+
+ if i < len(messages) and messages[i]['role'] == 'assistant':
+ answer = messages[i]['content']
+ i += 1
+ else:
+ answer = ''
+
+ result.append({'question': question, 'answer': answer})
+
+ elif messages[i]['role'] == 'assistant':
+ result.append({'question': '', 'answer': messages[i]['content']})
+ i += 1
+
+ elif messages[i]['role'] == 'system':
+ result.append({'question': messages[i]['content'], 'answer': ''})
+ i += 1
+
+ return result
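+
+# transform() folds the OpenAI-style message list into the {question, answer}
+# pairs that you.com expects in its 'chat' query parameter, e.g. (illustrative):
+#   [{'role': 'user', 'content': 'hi'}, {'role': 'assistant', 'content': 'hello'}]
+#   -> [{'question': 'hi', 'answer': 'hello'}]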
+
+headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'Accept-Language': 'en-GB,en;q=0.9',
+ 'Sec-Fetch-Mode': 'navigate',
+ 'Host': 'you.com',
+ 'Origin': 'https://you.com',
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
+ 'Referer': 'https://you.com/api/streamingSearch?q=nice&safeSearch=Moderate&onShoppingPage=false&mkt=&responseFilter=WebPages,Translations,TimeZone,Computation,RelatedSearches&domain=youchat&queryTraceId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&chat=%5B%7B%22question%22%3A%22hi%22%2C%22answer%22%3A%22Hello!%20How%20can%20I%20assist%20you%20today%3F%22%7D%5D&chatId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&__cf_chl_tk=ex2bw6vn5vbLsUm8J5rDYUC0Bjzc1XZqka6vUl6765A-1684108495-0-gaNycGzNDtA',
+ 'Connection': 'keep-alive',
+ 'Sec-Fetch-Dest': 'document',
+ 'Priority': 'u=0, i',
+}
+
+if messages[-1]['role'] == 'user':
+ prompt = messages[-1]['content']
+ messages = messages[:-1]
+
+params = urllib.parse.urlencode({
+ 'q': prompt,
+ 'domain': 'youchat',
+ 'chat': transform(messages)
+})
+
+def output(chunk):
+ if b'"youChatToken"' in chunk:
+ chunk_json = json.loads(chunk.decode().split('data: ')[1])
+
+ print(chunk_json['youChatToken'], flush=True, end = '')
+
+while True:
+ try:
+ response = requests.get(f'https://you.com/api/streamingSearch?{params}',
+ headers=headers, content_callback=output, impersonate='safari15_5')
+
+ exit(0)
+
+ except Exception as e:
+        print('an error occurred, retrying... |', e, flush=True)
+        continue
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
new file mode 100644
index 00000000..269fa17e
--- /dev/null
+++ b/g4f/Provider/__init__.py
@@ -0,0 +1,20 @@
+from . import Provider
+from .Providers import (
+ Ails,
+ You,
+ Bing,
+ Yqcloud,
+ Theb,
+ Aichat,
+ Bard,
+ Vercel,
+ Forefront,
+ Lockchat,
+ Liaobots,
+ H2o,
+ ChatgptLogin,
+ DeepAi,
+ GetGpt
+)
+
+Palm = Bard
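+# (alias kept for convenience: the Bard provider targets Google's PaLM 2 model)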