author    | abc <98614666+xtekky@users.noreply.github.com> | 2023-11-20 19:40:55 +0100
committer | abc <98614666+xtekky@users.noreply.github.com> | 2023-11-20 19:40:55 +0100
commit    | 9140541179e1c2fe855acf1c2743e1800fd5052e (patch)
tree      | 774c460654335497ad272d14a6b5082717033651 /g4f
parent    | Merge pull request #1274 from hlohaus/webdriver (diff)
Diffstat
-rw-r--r-- | g4f/Provider/Chatgpt4Online.py         |  4
-rw-r--r-- | g4f/Provider/ChatgptAi.py              |  4
-rw-r--r-- | g4f/Provider/ChatgptDemo.py            | 11
-rw-r--r-- | g4f/Provider/ChatgptFree.py            |  3
-rw-r--r-- | g4f/Provider/ChatgptLogin.py           | 13
-rw-r--r-- | g4f/Provider/ChatgptX.py               | 16
-rw-r--r-- | g4f/Provider/GptGod.py                 |  7
-rw-r--r-- | g4f/Provider/Vercel.py                 |  3
-rw-r--r-- | g4f/Provider/deprecated/CodeLinkAva.py |  4
-rw-r--r-- | g4f/Provider/deprecated/Equing.py      |  4
-rw-r--r-- | g4f/Provider/deprecated/FastGpt.py     |  6
-rw-r--r-- | g4f/Provider/deprecated/Lockchat.py    |  5
-rw-r--r-- | g4f/Provider/deprecated/Vitalentum.py  |  4
-rw-r--r-- | g4f/Provider/unfinished/ChatAiGpt.py   |  7
-rw-r--r-- | g4f/Provider/unfinished/MikuChat.py    |  3
15 files changed, 65 insertions, 29 deletions
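
Nearly every hunk below applies the same mechanical refactor: a condition that bound a value with the walrus operator (:=) is rewritten as a plain assignment followed by a separate if check. (The few exceptions are a commented-out debug print in GptGod.py, some added blank lines, and Vercel.py being marked working = False.) A minimal sketch of the pattern, using an illustrative HTML fragment and variable names rather than any specific provider:

    import re

    # Illustrative page fragment, not a real provider response.
    html = '<input data-nonce="abc123">'

    # Before: bind and test in one expression (walrus operator, Python 3.8+).
    if result := re.search(r'data-nonce="(.*?)"', html):
        nonce = result.group(1)
    else:
        raise RuntimeError("No nonce found")

    # After: assign first, then test in a separate statement.
    result = re.search(r'data-nonce="(.*?)"', html)

    if result:
        nonce = result.group(1)
    else:
        raise RuntimeError("No nonce found")

Both forms behave the same; the rewrite only trades the 3.8+ syntax for a more explicit two-step check.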
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py
index 57ab9482..594f54c1 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -27,7 +27,9 @@ class Chatgpt4Online(AsyncProvider):
             async with session.get(f"{cls.url}/", proxy=proxy) as response:
                 response.raise_for_status()
                 response = await response.text()
-                if result := re.search(r'data-nonce="(.*?)"', response):
+                result = re.search(r'data-nonce="(.*?)"', response)
+
+                if result:
                     cls._wpnonce = result.group(1)
                 else:
                     raise RuntimeError("No nonce found")
diff --git a/g4f/Provider/ChatgptAi.py b/g4f/Provider/ChatgptAi.py
index 40ad9481..9425dfb0 100644
--- a/g4f/Provider/ChatgptAi.py
+++ b/g4f/Provider/ChatgptAi.py
@@ -45,7 +45,9 @@ class ChatgptAi(AsyncGeneratorProvider):
             async with session.get(cls.url, proxy=proxy) as response:
                 response.raise_for_status()
                 text = await response.text()
-                if result := re.search(r"data-system='(.*?)'", text):
+
+                result = re.search(r"data-system='(.*?)'", text)
+                if result :
                     cls._system = json.loads(html.unescape(result.group(1)))
                 if not cls._system:
                     raise RuntimeError("System args not found")
diff --git a/g4f/Provider/ChatgptDemo.py b/g4f/Provider/ChatgptDemo.py
index bc592ca6..666b5753 100644
--- a/g4f/Provider/ChatgptDemo.py
+++ b/g4f/Provider/ChatgptDemo.py
@@ -37,10 +37,13 @@ class ChatgptDemo(AsyncGeneratorProvider):
         async with session.get(f"{cls.url}/", proxy=proxy) as response:
             response.raise_for_status()
             response = await response.text()
-            if result := re.search(
+
+            result = re.search(
                 r'<div id="USERID" style="display: none">(.*?)<\/div>',
                 response,
-            ):
+            )
+
+            if result:
                 user_id = result.group(1)
             else:
                 raise RuntimeError("No user id found")
@@ -59,5 +62,7 @@ class ChatgptDemo(AsyncGeneratorProvider):
             async for line in response.content:
                 if line.startswith(b"data: "):
                     line = json.loads(line[6:-1])
-                    if chunk := line["choices"][0]["delta"].get("content"):
+
+                    chunk = line["choices"][0]["delta"].get("content")
+                    if chunk:
                         yield chunk
\ No newline at end of file
diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py
index 48d6c396..b9b25447 100644
--- a/g4f/Provider/ChatgptFree.py
+++ b/g4f/Provider/ChatgptFree.py
@@ -65,7 +65,8 @@ class ChatgptFree(AsyncProvider):
                     raise RuntimeError("No post id found")
                 cls._post_id = result.group(1)

-                if result := re.search(r'data-nonce="(.*?)"', response):
+                result = re.search(r'data-nonce="(.*?)"', response)
+                if result:
                     cls._nonce = result.group(1)

                 else:
diff --git a/g4f/Provider/ChatgptLogin.py b/g4f/Provider/ChatgptLogin.py
index 206e4a89..037e0a6e 100644
--- a/g4f/Provider/ChatgptLogin.py
+++ b/g4f/Provider/ChatgptLogin.py
@@ -45,10 +45,12 @@ class ChatgptLogin(AsyncGeneratorProvider):
             async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
                 response.raise_for_status()
                 response = await response.text()
-                if result := re.search(
+                result = re.search(
                     r'<div id="USERID" style="display: none">(.*?)<\/div>',
                     response,
-                ):
+                )
+
+                if result:
                     cls._user_id = result.group(1)
                 else:
                     raise RuntimeError("No user id found")
@@ -67,9 +69,10 @@ class ChatgptLogin(AsyncGeneratorProvider):
                 response.raise_for_status()
                 async for line in response.content:
                     if line.startswith(b"data: "):
-                        if content := json.loads(line[6:])["choices"][0][
-                            "delta"
-                        ].get("content"):
+
+                        content = json.loads(line[6:])["choices"][0]["delta"].get("content")
+                        if content:
                             yield content
+
             async with session.post(f"{cls.url}/chat/delete_chat", json={"chat_id": chat_id}, proxy=proxy) as response:
                 response.raise_for_status()
\ No newline at end of file
diff --git a/g4f/Provider/ChatgptX.py b/g4f/Provider/ChatgptX.py
index c4e72099..3019858b 100644
--- a/g4f/Provider/ChatgptX.py
+++ b/g4f/Provider/ChatgptX.py
@@ -35,15 +35,21 @@ class ChatgptX(AsyncGeneratorProvider):
         async with ClientSession(headers=headers) as session:
             async with session.get(f"{cls.url}/", proxy=proxy) as response:
                 response = await response.text()
-                if result := re.search(
+
+                result = re.search(
                     r'<meta name="csrf-token" content="(.*?)"', response
-                ):
+                )
+                if result:
                     csrf_token = result.group(1)
-                if result := re.search(r"openconversions\('(.*?)'\)", response):
+
+                result = re.search(r"openconversions\('(.*?)'\)", response)
+                if result:
                     chat_id = result.group(1)
-                if result := re.search(
+
+                result = re.search(
                     r'<input type="hidden" id="user_id" value="(.*?)"', response
-                ):
+                )
+                if result:
                     user_id = result.group(1)

             if not csrf_token or not chat_id or not user_id:
diff --git a/g4f/Provider/GptGod.py b/g4f/Provider/GptGod.py
index a10a391d..08d9269e 100644
--- a/g4f/Provider/GptGod.py
+++ b/g4f/Provider/GptGod.py
@@ -47,12 +47,15 @@ class GptGod(AsyncGeneratorProvider):
             response.raise_for_status()
             event = None
             async for line in response.content:
-                print(line)
+                # print(line)
                 if line.startswith(b'event: '):
                     event = line[7:-1]
+
                 elif event == b"data" and line.startswith(b"data: "):
-                    if data := json.loads(line[6:-1]):
+                    data = json.loads(line[6:-1])
+                    if data:
                         yield data
+
                 elif event == b"done":
                     break

\ No newline at end of file
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index a7bbc496..3e210925 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -6,10 +6,9 @@ from ..typing import Messages, TypedDict, CreateResult, Any
 from .base_provider import BaseProvider
 from ..debug import logging

-
 class Vercel(BaseProvider):
     url = 'https://sdk.vercel.ai'
-    working = True
+    working = False
     supports_message_history = True
     supports_gpt_35_turbo = True
     supports_stream = True
diff --git a/g4f/Provider/deprecated/CodeLinkAva.py b/g4f/Provider/deprecated/CodeLinkAva.py
index a909ab97..22f4468a 100644
--- a/g4f/Provider/deprecated/CodeLinkAva.py
+++ b/g4f/Provider/deprecated/CodeLinkAva.py
@@ -46,5 +46,7 @@ class CodeLinkAva(AsyncGeneratorProvider):
                     if line.startswith("data: [DONE]"):
                         break
                     line = json.loads(line[6:-1])
-                    if content := line["choices"][0]["delta"].get("content"):
+
+                    content = line["choices"][0]["delta"].get("content")
+                    if content:
                         yield content
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Equing.py b/g4f/Provider/deprecated/Equing.py
index 076b5ac5..9f510e50 100644
--- a/g4f/Provider/deprecated/Equing.py
+++ b/g4f/Provider/deprecated/Equing.py
@@ -65,5 +65,7 @@ class Equing(BaseProvider):
             if line:
                 if b'content' in line:
                     line_json = json.loads(line.decode('utf-8').split('data: ')[1])
-                    if token := line_json['choices'][0]['delta'].get('content'):
+
+                    token = line_json['choices'][0]['delta'].get('content')
+                    if token:
                         yield token
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/FastGpt.py b/g4f/Provider/deprecated/FastGpt.py
index ef69e892..3af8c213 100644
--- a/g4f/Provider/deprecated/FastGpt.py
+++ b/g4f/Provider/deprecated/FastGpt.py
@@ -69,9 +69,11 @@ class FastGpt(BaseProvider):
             try:
                 if b'content' in line:
                     line_json = json.loads(line.decode('utf-8').split('data: ')[1])
-                    if token := line_json['choices'][0]['delta'].get(
+                    token = line_json['choices'][0]['delta'].get(
                         'content'
-                    ):
+                    )
+
+                    if token:
                         yield token
             except:
                 continue
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Lockchat.py b/g4f/Provider/deprecated/Lockchat.py
index d93c9f8a..f885672d 100644
--- a/g4f/Provider/deprecated/Lockchat.py
+++ b/g4f/Provider/deprecated/Lockchat.py
@@ -38,6 +38,7 @@ class Lockchat(BaseProvider):
         for token in response.iter_lines():
             if b"The model: `gpt-4` does not exist" in token:
                 print("error, retrying...")
+
                 Lockchat.create_completion(
                     model = model,
                     messages = messages,
@@ -47,5 +48,7 @@ class Lockchat(BaseProvider):

             if b"content" in token:
                 token = json.loads(token.decode("utf-8").split("data: ")[1])
-                if token := token["choices"][0]["delta"].get("content"):
+                token = token["choices"][0]["delta"].get("content")
+
+                if token:
                     yield (token)
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Vitalentum.py b/g4f/Provider/deprecated/Vitalentum.py
index 13160d94..8f466a52 100644
--- a/g4f/Provider/deprecated/Vitalentum.py
+++ b/g4f/Provider/deprecated/Vitalentum.py
@@ -49,5 +49,7 @@ class Vitalentum(AsyncGeneratorProvider):
                     if line.startswith("data: [DONE]"):
                         break
                     line = json.loads(line[6:-1])
-                    if content := line["choices"][0]["delta"].get("content"):
+                    content = line["choices"][0]["delta"].get("content")
+
+                    if content:
                         yield content
\ No newline at end of file
diff --git a/g4f/Provider/unfinished/ChatAiGpt.py b/g4f/Provider/unfinished/ChatAiGpt.py
index 9d050093..bc962623 100644
--- a/g4f/Provider/unfinished/ChatAiGpt.py
+++ b/g4f/Provider/unfinished/ChatAiGpt.py
@@ -43,9 +43,12 @@ class ChatAiGpt(AsyncGeneratorProvider):
             async with session.get(f"{cls.url}/", proxy=proxy) as response:
                 response.raise_for_status()
                 response = await response.text()
-                if result := re.search(
+
+                result = re.search(
                     r'data-nonce=(.*?) data-post-id=([0-9]+)', response
-                ):
+                )
+
+                if result:
                     cls._nonce, cls._post_id = result.group(1), result.group(2)
                 else:
                     raise RuntimeError("No nonce found")
diff --git a/g4f/Provider/unfinished/MikuChat.py b/g4f/Provider/unfinished/MikuChat.py
index 970fb0bb..bf19631f 100644
--- a/g4f/Provider/unfinished/MikuChat.py
+++ b/g4f/Provider/unfinished/MikuChat.py
@@ -48,7 +48,8 @@ class MikuChat(AsyncGeneratorProvider):
             async for line in response.iter_lines():
                 if line.startswith(b"data: "):
                     line = json.loads(line[6:])
-                    if chunk := line["choices"][0]["delta"].get("content"):
+                    chunk = line["choices"][0]["delta"].get("content")
+                    if chunk:
                         yield chunk

 def k(e: str, t: int):
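
The same rewrite appears in the providers' streaming loops above, where the delta content of each data: line is now assigned to a local variable and yielded only when it is non-empty. A rough, self-contained sketch of that loop, assuming OpenAI-style server-sent events (the sample lines below are made up for illustration, not captured from any real service):

    import json

    # Illustrative SSE lines in the shape these providers parse.
    lines = [
        b'data: {"choices": [{"delta": {"content": "Hello"}}]}\n',
        b'data: {"choices": [{"delta": {}}]}\n',
        b'data: [DONE]\n',
    ]

    def iter_content(lines):
        for line in lines:
            # The terminator also starts with b"data: ", so check it first.
            if line.startswith(b"data: [DONE]"):
                break
            if line.startswith(b"data: "):
                payload = json.loads(line[6:-1])  # strip the "data: " prefix and trailing newline
                # Post-refactor pattern: assign the delta first, yield only if present.
                content = payload["choices"][0]["delta"].get("content")
                if content:
                    yield content

    print(list(iter_content(lines)))  # ['Hello']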