summaryrefslogtreecommitdiffstats
path: root/g4f/Provider/Pizzagpt.py
diff options
context:
space:
mode:
authorTekky <98614666+xtekky@users.noreply.github.com>2024-08-30 22:39:18 +0200
committerGitHub <noreply@github.com>2024-08-30 22:39:18 +0200
commitc702f54e39a39c702cb2a2a8c6782c15422785aa (patch)
tree8a36ace98ab138e1eff134a5ed8891fd3c817b5b /g4f/Provider/Pizzagpt.py
parent. (diff)
parentfix for 500 Internal Server Error #2199 [Request] Blackbox provider now support Gemini and LLaMa 3.1 models #2198 with some stuff from #2196 (diff)
downloadgpt4free-c702f54e39a39c702cb2a2a8c6782c15422785aa.tar
gpt4free-c702f54e39a39c702cb2a2a8c6782c15422785aa.tar.gz
gpt4free-c702f54e39a39c702cb2a2a8c6782c15422785aa.tar.bz2
gpt4free-c702f54e39a39c702cb2a2a8c6782c15422785aa.tar.lz
gpt4free-c702f54e39a39c702cb2a2a8c6782c15422785aa.tar.xz
gpt4free-c702f54e39a39c702cb2a2a8c6782c15422785aa.tar.zst
gpt4free-c702f54e39a39c702cb2a2a8c6782c15422785aa.zip
Diffstat (limited to 'g4f/Provider/Pizzagpt.py')
-rw-r--r--g4f/Provider/Pizzagpt.py58
1 file changed, 30 insertions, 28 deletions
diff --git a/g4f/Provider/Pizzagpt.py b/g4f/Provider/Pizzagpt.py
index 47e74ee3..860aef80 100644
--- a/g4f/Provider/Pizzagpt.py
+++ b/g4f/Provider/Pizzagpt.py
@@ -1,15 +1,19 @@
+from __future__ import annotations
+
import json
from aiohttp import ClientSession
-from ..typing import Messages, AsyncResult
-from .base_provider import AsyncGeneratorProvider
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
-class Pizzagpt(AsyncGeneratorProvider):
+class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.pizzagpt.it"
api_endpoint = "/api/chatx-completion"
- supports_message_history = False
- supports_gpt_35_turbo = True
working = True
+ supports_gpt_4 = True
+ default_model = 'gpt-4o-mini'
@classmethod
async def create_async_generator(
@@ -19,30 +23,28 @@ class Pizzagpt(AsyncGeneratorProvider):
proxy: str = None,
**kwargs
) -> AsyncResult:
- payload = {
- "question": messages[-1]["content"]
- }
headers = {
- "Accept": "application/json",
- "Accept-Encoding": "gzip, deflate, br, zstd",
- "Accept-Language": "en-US,en;q=0.9",
- "Content-Type": "application/json",
- "Origin": cls.url,
- "Referer": f"{cls.url}/en",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
- "X-Secret": "Marinara"
+ "accept": "application/json",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "referer": f"{cls.url}/en",
+ "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+ "x-secret": "Marinara"
}
-
- async with ClientSession() as session:
- async with session.post(
- f"{cls.url}{cls.api_endpoint}",
- json=payload,
- proxy=proxy,
- headers=headers
- ) as response:
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "question": prompt
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
response_json = await response.json()
- yield response_json["answer"]["content"]
+ content = response_json.get("answer", {}).get("content", "")
+ yield content
\ No newline at end of file