-rw-r--r--   g4f/Provider/ChatBase.py   5
-rw-r--r--   g4f/Provider/GPTalk.py     5
-rw-r--r--   g4f/api/__init__.py        4
3 files changed, 11 insertions, 3 deletions
diff --git a/g4f/Provider/ChatBase.py b/g4f/Provider/ChatBase.py
index 9f11e1ac..ccc20244 100644
--- a/g4f/Provider/ChatBase.py
+++ b/g4f/Provider/ChatBase.py
@@ -12,6 +12,8 @@ class ChatBase(AsyncGeneratorProvider):
     supports_message_history = True
     working = True
     jailbreak = True
+    list_incorrect_responses = ["support@chatbase",
+                                "about Chatbase"]
 
     @classmethod
     async def create_async_generator(
@@ -53,6 +55,9 @@ class ChatBase(AsyncGeneratorProvider):
                 response_data = ""
                 async for stream in response.content.iter_any():
                     response_data += stream.decode()
+                    for incorrect_response in cls.list_incorrect_responses:
+                        if incorrect_response in response_data:
+                            raise RuntimeError("Incorrect response")
                     yield stream.decode()
 
     @classmethod
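
Note on the ChatBase hunks above: chatbase.co sometimes answers with its own boilerplate (text mentioning its support address or describing Chatbase itself) instead of a model completion, and the new list_incorrect_responses check aborts the stream as soon as such a fragment appears in the accumulated output. A minimal standalone sketch of the same guard (filter_chunk and the sample strings are illustrative, not part of the provider):

list_incorrect_responses = ["support@chatbase", "about Chatbase"]

def filter_chunk(accumulated: str, chunk: str) -> str:
    # Grow the accumulated response and fail fast on known canned phrases.
    accumulated += chunk
    for incorrect_response in list_incorrect_responses:
        if incorrect_response in accumulated:
            raise RuntimeError("Incorrect response")
    return accumulated

Because the check runs against the accumulated text rather than a single chunk, a phrase split across two stream chunks is still caught.
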
diff --git a/g4f/Provider/GPTalk.py b/g4f/Provider/GPTalk.py
index b5881e5d..5749ff2e 100644
--- a/g4f/Provider/GPTalk.py
+++ b/g4f/Provider/GPTalk.py
@@ -13,6 +13,7 @@ class GPTalk(AsyncGeneratorProvider):
     working = True
     supports_gpt_35_turbo = True
     _auth = None
+    used_times = 0
 
     @classmethod
     async def create_async_generator(
@@ -44,7 +45,7 @@
             'x-auth-timestamp': f"{timestamp}",
         }
         async with ClientSession(headers=headers) as session:
-            if not cls._auth or cls._auth["expires_at"] < timestamp:
+            if not cls._auth or cls._auth["expires_at"] < timestamp or cls.used_times == 5:
                 data = {
                     "fingerprint": secrets.token_hex(16).zfill(32),
                     "platform": "fingerprint"
@@ -52,6 +53,7 @@
                 async with session.post(f"{cls.url}/api/chatgpt/user/login", json=data, proxy=proxy) as response:
                     response.raise_for_status()
                     cls._auth = (await response.json())["data"]
+                    cls.used_times = 0
             data = {
                 "content": format_prompt(messages),
                 "accept": "stream",
@@ -72,6 +74,7 @@
             async with session.post(f"{cls.url}/api/chatgpt/chatapi/text", json=data, headers=headers, proxy=proxy) as response:
                 response.raise_for_status()
                 token = (await response.json())["data"]["token"]
+            cls.used_times += 1
             last_message = ""
             async with session.get(f"{cls.url}/api/chatgpt/chatapi/stream", params={"token": token}, proxy=proxy) as response:
                 response.raise_for_status()
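
Note on the GPTalk hunks above: the provider caches its login payload in cls._auth and previously re-authenticated only once expires_at had passed. The new used_times counter also forces a fresh login after the token has been used five times: it is reset to 0 right after a login and incremented once per chat request. A rough, self-contained sketch of that rotation logic, assuming the network call is stubbed out (fetch_login_data is a hypothetical placeholder for the POST to /api/chatgpt/user/login):

import time

async def fetch_login_data() -> dict:
    # Hypothetical stand-in for GPTalk's /api/chatgpt/user/login response.
    return {"token": "dummy-token", "expires_at": int(time.time()) + 3600}

class TokenCache:
    _auth = None       # cached login payload, shared across calls
    used_times = 0     # requests made with the current token

    @classmethod
    async def get_token(cls) -> str:
        timestamp = int(time.time())
        # Re-login when there is no token, it has expired, or it was used 5 times.
        if not cls._auth or cls._auth["expires_at"] < timestamp or cls.used_times == 5:
            cls._auth = await fetch_login_data()
            cls.used_times = 0
        cls.used_times += 1
        return cls._auth["token"]
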
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 8eceb743..d8798ef2 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -78,7 +78,7 @@ class Api:
                 item_data['messages'] = ast.literal_eval(item_data.get('messages'))
 
             model = item_data.get('model')
-            stream = item_data.get('stream')
+            stream = True if item_data.get("stream") == "True" else False
             messages = item_data.get('messages')
 
             try:
@@ -86,7 +86,7 @@
                     model=model,
                     stream=stream,
                     messages=messages,
-                    list_ignored_providers=self.list_ignored_providers)
+                    ignored=self.list_ignored_providers)
             except Exception as e:
                 logging.exception(e)
                 return Response(content=json.dumps({"error": "An error occurred while generating the response."}, indent=4), media_type="application/json")
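
Note on the api/__init__.py hunks above: request fields can arrive as strings (for example from form data), so the old stream = item_data.get('stream') could hand the literal text "True"/"False" to ChatCompletion.create, and any non-empty string is truthy. The new line compares against "True" to produce a real boolean (the ternary is equivalent to the bare comparison item_data.get("stream") == "True"). The second hunk renames the keyword from list_ignored_providers to ignored, presumably to match the parameter name ChatCompletion.create actually accepts in this version. A tiny illustration of the string-to-bool pitfall (item_data here is made-up sample input):

item_data = {"stream": "False"}                # as delivered by a form field

naive = bool(item_data.get("stream"))          # True: non-empty string is truthy
fixed = item_data.get("stream") == "True"      # False: what the new code computes

print(naive, fixed)  # True False
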