Diffstat (limited to 'g4f/Provider/GptChatly.py')
-rw-r--r--  g4f/Provider/GptChatly.py  46
1 file changed, 23 insertions, 23 deletions
diff --git a/g4f/Provider/GptChatly.py b/g4f/Provider/GptChatly.py
index 80fe6349..f4953b78 100644
--- a/g4f/Provider/GptChatly.py
+++ b/g4f/Provider/GptChatly.py
@@ -2,18 +2,17 @@
 from __future__ import annotations
-from aiohttp import ClientSession
-
-from ..typing import Messages
+from ..requests import StreamSession
+from ..typing import Messages
 from .base_provider import AsyncProvider
-from .helper import get_cookies
+from .helper import get_cookies
 class GptChatly(AsyncProvider):
     url = "https://gptchatly.com"
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
-    working = False
+    working = True
     @classmethod
     async def create_async(
@@ -22,9 +21,9 @@ class GptChatly(AsyncProvider):
         messages: Messages,
         proxy: str = None, cookies: dict = None, **kwargs) -> str:
+        cookies = get_cookies('gptchatly.com') if not cookies else cookies
         if not cookies:
-            cookies = get_cookies('gptchatly.com')
-
+            raise RuntimeError(f"g4f.provider.GptChatly requires cookies, [refresh https://gptchatly.com on chrome]")
         if model.startswith("gpt-4"):
             chat_url = f"{cls.url}/fetch-gpt4-response"
@@ -32,25 +31,26 @@ class GptChatly(AsyncProvider):
             chat_url = f"{cls.url}/fetch-response"
         headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
-            "Accept": "*/*",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Referer": f"{cls.url}/",
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "Connection": "keep-alive",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "Pragma": "no-cache",
-            "Cache-Control": "no-cache",
-            "TE": "trailers",
+            'authority': 'gptchatly.com',
+            'accept': '*/*',
+            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'content-type': 'application/json',
+            'origin': 'https://gptchatly.com',
+            'referer': 'https://gptchatly.com/',
+            'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
         }
-        async with ClientSession(headers=headers) as session:
+
+        async with StreamSession(headers=headers,
+                proxies={"https": proxy}, cookies=cookies, impersonate='chrome110') as session:
             data = {
                 "past_conversations": messages
             }
-            async with session.post(chat_url, json=data, proxy=proxy) as response:
+            async with session.post(chat_url, json=data) as response:
                 response.raise_for_status()
                 return (await response.json())["chatGPTResponse"]
\ No newline at end of file
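
For reference, a minimal usage sketch of the patched provider, written against the create_async() signature visible in this diff. The import path mirrors the file's location in the repository; the cookie name and the example messages are illustrative placeholders, not values taken from the commit.

import asyncio

from g4f.Provider.GptChatly import GptChatly

# Cookies must come from a real gptchatly.com browser session; the cookie
# name below is a placeholder, not something this commit defines.
cookies = {"cf_clearance": "<value copied from the browser>"}

async def main() -> None:
    # create_async() returns the complete reply as a single string
    # (this is an AsyncProvider, not a streaming generator).
    answer = await GptChatly.create_async(
        model="gpt-3.5-turbo",  # a "gpt-4*" model name routes to /fetch-gpt4-response instead
        messages=[{"role": "user", "content": "Hello, who are you?"}],
        cookies=cookies,        # omit to let get_cookies('gptchatly.com') read them from the browser
        proxy=None,
    )
    print(answer)

asyncio.run(main())

The switch from aiohttp's ClientSession to StreamSession with impersonate='chrome110' presumably exists to present a Chrome-like TLS/browser fingerprint to the site, which is also why the header set now mimics Chrome 118; the cookies and browser impersonation travel together through the session rather than per request.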