path: root/g4f/Provider/openai
author     Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-11-17 19:51:26 +0100
committer  Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-11-17 19:51:26 +0100
commit     b7a8e03220970a0400cb5e071803cc5b4585d154 (patch)
tree       ab03a1756f69d623b3b06090d58b7667458c215c /g4f/Provider/openai
parent     Add nodriver to Gemini provider, (diff)
Diffstat (limited to 'g4f/Provider/openai')
-rw-r--r--  g4f/Provider/openai/har_file.py      44
-rw-r--r--  g4f/Provider/openai/proofofwork.py   12
2 files changed, 29 insertions(+), 27 deletions(-)
diff --git a/g4f/Provider/openai/har_file.py b/g4f/Provider/openai/har_file.py
index 7644e693..b7c36f05 100644
--- a/g4f/Provider/openai/har_file.py
+++ b/g4f/Provider/openai/har_file.py
@@ -27,16 +27,18 @@ class arkReq:
self.arkCookies = arkCookies
self.userAgent = userAgent
-arkPreURL = "https://tcr9i.chat.openai.com/fc/gt2/public_key/35536E1E-65B4-4D96-9D97-6ADB7EFF8147"
-sessionUrl = "https://chatgpt.com/"
-chatArk: arkReq = None
+arkoseURL = "https://tcr9i.chat.openai.com/fc/gt2/public_key/35536E1E-65B4-4D96-9D97-6ADB7EFF8147"
+startUrl = "https://chatgpt.com/"
+conversationUrl = "https://chatgpt.com/c/"
+arkoseRequest: arkReq = None
accessToken: str = None
cookies: dict = None
headers: dict = None
-proofTokens: list = []
+proofToken: list = []
+turnstileToken: str = None
def readHAR():
- global proofTokens
+ global arkoseRequest, accessToken, proofToken, turnstileToken
harPath = []
chatArks = []
accessToken = None
@@ -58,15 +60,17 @@ def readHAR():
v_headers = get_headers(v)
try:
if "openai-sentinel-proof-token" in v_headers:
- proofTokens.append(json.loads(base64.b64decode(
+ proofToken = json.loads(base64.b64decode(
v_headers["openai-sentinel-proof-token"].split("gAAAAAB", 1)[-1].encode()
- ).decode()))
+ ).decode())
+ if "openai-sentinel-turnstile-token" in v_headers:
+ turnstileToken = v_headers["openai-sentinel-turnstile-token"]
except Exception as e:
if debug.logging:
print(f"Read proof token: {e}")
- if arkPreURL in v['request']['url']:
- chatArks.append(parseHAREntry(v))
- elif v['request']['url'] == sessionUrl:
+ if arkoseURL in v['request']['url']:
+ arkoseRequest = parseHAREntry(v)
+ elif v['request']['url'] == startUrl or v['request']['url'].startswith(conversationUrl):
try:
match = re.search(r'"accessToken":"(.*?)"', v["response"]["content"]["text"])
if match:
@@ -78,8 +82,8 @@ def readHAR():
if not accessToken:
raise NoValidHarFileError("No accessToken found in .har files")
if not chatArks:
- return None, accessToken, cookies, headers
- return chatArks.pop(), accessToken, cookies, headers
+ return cookies, headers
+ return cookies, headers
def get_headers(entry) -> dict:
return {h['name'].lower(): h['value'] for h in entry['request']['headers'] if h['name'].lower() not in ['content-length', 'cookie'] and not h['name'].startswith(':')}
@@ -110,7 +114,7 @@ def genArkReq(chatArk: arkReq) -> arkReq:
tmpArk.arkHeader['x-ark-esync-value'] = bw
return tmpArk
-async def sendRequest(tmpArk: arkReq, proxy: str = None):
+async def sendRequest(tmpArk: arkReq, proxy: str = None) -> str:
async with StreamSession(headers=tmpArk.arkHeader, cookies=tmpArk.arkCookies, proxies={"https": proxy}) as session:
async with session.post(tmpArk.arkURL, data=tmpArk.arkBody) as response:
data = await response.json()
@@ -144,10 +148,10 @@ def getN() -> str:
return base64.b64encode(timestamp.encode()).decode()
async def getArkoseAndAccessToken(proxy: str) -> tuple[str, str, dict, dict]:
- global chatArk, accessToken, cookies, headers, proofTokens
- if chatArk is None or accessToken is None:
- chatArk, accessToken, cookies, headers = readHAR()
- if chatArk is None:
- return None, accessToken, cookies, headers, proofTokens
- newReq = genArkReq(chatArk)
- return await sendRequest(newReq, proxy), accessToken, cookies, headers, proofTokens
+ global arkoseRequest, accessToken, cookies, headers, proofToken, turnstileToken
+ if arkoseRequest is None or accessToken is None:
+ cookies, headers = readHAR()
+ if arkoseRequest is None:
+ return None, accessToken, cookies, headers, proofToken, turnstileToken
+ newReq = genArkReq(arkoseRequest)
+ return await sendRequest(newReq, proxy), accessToken, cookies, headers, proofToken, turnstileToken
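The hunk above widens the return contract of getArkoseAndAccessToken from five to six values: the Arkose token (or None), accessToken, cookies, headers, plus the new proofToken and turnstileToken read from the .har files. A minimal caller sketch under only what the diff shows; the helper name fetch_sentinel_values and the proxy handling are illustrative assumptions, not code from this commit:

# Caller sketch for the new six-value return of getArkoseAndAccessToken.
# Only the import path and the return shape come from the diff; the helper
# name and the proxy default below are illustrative assumptions.
import asyncio
from g4f.Provider.openai.har_file import getArkoseAndAccessToken

async def fetch_sentinel_values(proxy: str = None):
    (arkose_token, access_token, cookies, headers,
     proof_token, turnstile_token) = await getArkoseAndAccessToken(proxy)
    # arkose_token is None when no Arkose request was captured in the .har files;
    # proof_token / turnstile_token keep their defaults unless readHAR() found
    # the matching openai-sentinel-* headers.
    return arkose_token, access_token, cookies, headers, proof_token, turnstile_token

# asyncio.run(fetch_sentinel_values())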
diff --git a/g4f/Provider/openai/proofofwork.py b/g4f/Provider/openai/proofofwork.py
index 55603892..23e5ab6e 100644
--- a/g4f/Provider/openai/proofofwork.py
+++ b/g4f/Provider/openai/proofofwork.py
@@ -4,18 +4,16 @@ import json
import base64
from datetime import datetime, timezone
-def generate_proof_token(required: bool, seed: str = "", difficulty: str = "", user_agent: str = None, proofTokens: list = None):
+def generate_proof_token(required: bool, seed: str = "", difficulty: str = "", user_agent: str = None, proofToken: str = None):
if not required:
return
- if proofTokens:
- config = proofTokens[-1]
- else:
+ if proofToken is None:
screen = random.choice([3008, 4010, 6000]) * random.choice([1, 2, 4])
# Get current UTC time
now_utc = datetime.now(timezone.utc)
parse_time = now_utc.strftime('%a, %d %b %Y %H:%M:%S GMT')
- config = [
+ proofToken = [
screen, parse_time,
None, 0, user_agent,
"https://tcr9i.chat.openai.com/v2/35536E1E-65B4-4D96-9D97-6ADB7EFF8147/api.js",
@@ -28,8 +26,8 @@ def generate_proof_token(required: bool, seed: str = "", difficulty: str = "", u
diff_len = len(difficulty)
for i in range(100000):
- config[3] = i
- json_data = json.dumps(config)
+ proofToken[3] = i
+ json_data = json.dumps(proofToken)
base = base64.b64encode(json_data.encode()).decode()
hash_value = hashlib.sha3_512((seed + base).encode()).digest()
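In proofofwork.py the former proofTokens list becomes a single proofToken config that is mutated and re-encoded on each hashing attempt. A call sketch, assuming the truncated remainder of the function (not shown in the hunk) still returns the encoded token string; the seed, difficulty, and user agent values are placeholders, not real sentinel data:

# Call sketch for the updated generate_proof_token signature.
# seed/difficulty/user_agent are placeholder values; passing proofToken=None
# makes the function build a fresh config list before searching for a hash.
from g4f.Provider.openai.proofofwork import generate_proof_token

token = generate_proof_token(
    required=True,
    seed="0.42",
    difficulty="0fffff",
    user_agent="Mozilla/5.0",
    proofToken=None,
)
print(token)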