path: root/g4f/Provider/GizAI.py
author    kqlio67 <kqlio67@users.noreply.github.com>  2024-11-04 14:37:04 +0100
committer kqlio67 <kqlio67@users.noreply.github.com>  2024-11-04 14:37:04 +0100
commit    8c7791aae38ef364182fc8676d2e7349f9341a4c (patch)
tree      226fe36a867beb214b66e54fd35a3b5ae10a7aba /g4f/Provider/GizAI.py
parent    Merge pull request #3 from rkihacker/main (diff)
Diffstat (limited to 'g4f/Provider/GizAI.py')
-rw-r--r--  g4f/Provider/GizAI.py  120
1 file changed, 20 insertions(+), 100 deletions(-)
diff --git a/g4f/Provider/GizAI.py b/g4f/Provider/GizAI.py
index 127edc9e..a5ce0ec2 100644
--- a/g4f/Provider/GizAI.py
+++ b/g4f/Provider/GizAI.py
@@ -1,62 +1,24 @@
 from __future__ import annotations
-import json
 from aiohttp import ClientSession
 from ..typing import AsyncResult, Messages
-from ..image import ImageResponse
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt
+
 class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://app.giz.ai/assistant/"
+    url = "https://app.giz.ai"
     api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
     working = True
-
+    supports_stream = False
     supports_system_message = True
     supports_message_history = True
-    # Chat models
     default_model = 'chat-gemini-flash'
-    chat_models = [
-        default_model,
-        'chat-gemini-pro',
-        'chat-gpt4m',
-        'chat-gpt4',
-        'claude-sonnet',
-        'claude-haiku',
-        'llama-3-70b',
-        'llama-3-8b',
-        'mistral-large',
-        'chat-o1-mini'
-    ]
-
-    # Image models
-    image_models = [
-        'flux1',
-        'sdxl',
-        'sd',
-        'sd35',
-    ]
-
-    models = [*chat_models, *image_models]
+    models = [default_model]
-    model_aliases = {
-        # Chat model aliases
-        "gemini-flash": "chat-gemini-flash",
-        "gemini-pro": "chat-gemini-pro",
-        "gpt-4o-mini": "chat-gpt4m",
-        "gpt-4o": "chat-gpt4",
-        "claude-3.5-sonnet": "claude-sonnet",
-        "claude-3-haiku": "claude-haiku",
-        "llama-3.1-70b": "llama-3-70b",
-        "llama-3.1-8b": "llama-3-8b",
-        "o1-mini": "chat-o1-mini",
-        # Image model aliases
-        "sd-1.5": "sd",
-        "sd-3.5": "sd35",
-        "flux-schnell": "flux1",
-    }
+    model_aliases = {"gemini-flash": "chat-gemini-flash",}
     @classmethod
     def get_model(cls, model: str) -> str:
@@ -68,10 +30,6 @@ class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
         return cls.default_model
     @classmethod
-    def is_image_model(cls, model: str) -> bool:
-        return model in cls.image_models
-
-    @classmethod
     async def create_async_generator(
         cls,
         model: str,
@@ -87,7 +45,8 @@ class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
             'Cache-Control': 'no-cache',
             'Connection': 'keep-alive',
             'Content-Type': 'application/json',
-            'Origin': 'https://app.giz.ai',
+            'DNT': '1',
+            'Origin': cls.url,
             'Pragma': 'no-cache',
             'Sec-Fetch-Dest': 'empty',
             'Sec-Fetch-Mode': 'cors',
@@ -97,55 +56,16 @@ class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
             'sec-ch-ua-mobile': '?0',
             'sec-ch-ua-platform': '"Linux"'
         }
-
-        async with ClientSession() as session:
-            if cls.is_image_model(model):
-                # Image generation
-                prompt = messages[-1]["content"]
-                data = {
-                    "model": model,
-                    "input": {
-                        "width": "1024",
-                        "height": "1024",
-                        "steps": 4,
-                        "output_format": "webp",
-                        "batch_size": 1,
-                        "mode": "plan",
-                        "prompt": prompt
-                    }
-                }
-                async with session.post(
-                    cls.api_endpoint,
-                    headers=headers,
-                    data=json.dumps(data),
-                    proxy=proxy
-                ) as response:
-                    response.raise_for_status()
-                    response_data = await response.json()
-                    if response_data.get('status') == 'completed' and response_data.get('output'):
-                        for url in response_data['output']:
-                            yield ImageResponse(images=url, alt="Generated Image")
-            else:
-                # Chat completion
-                data = {
-                    "model": model,
-                    "input": {
-                        "messages": [
-                            {
-                                "type": "human",
-                                "content": format_prompt(messages)
-                            }
-                        ],
-                        "mode": "plan"
-                    },
-                    "noStream": True
-                }
-                async with session.post(
-                    cls.api_endpoint,
-                    headers=headers,
-                    data=json.dumps(data),
-                    proxy=proxy
-                ) as response:
-                    response.raise_for_status()
-                    result = await response.json()
-                    yield result.get('output', '')
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "model": model,
+                "input": {
+                    "messages": messages,
+                    "mode": "plan"
+                },
+                "noStream": True
+            }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                result = await response.json()
+                yield result['output'].strip()
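
For context, a minimal usage sketch of the provider as it stands after this commit. It drives create_async_generator directly; the import path matches the file touched by this diff, and the model name and role/content message shape come from the new code, which forwards the messages list to the API unchanged. This is a sketch under those assumptions, not part of the commit itself.

import asyncio

from g4f.Provider.GizAI import GizAI


async def main():
    # g4f-style Messages: a list of role/content dicts, passed through
    # verbatim as the "messages" field of the request payload.
    messages = [{"role": "user", "content": "Say hello in one sentence."}]

    # The request is sent with "noStream": True, so the generator yields
    # a single chunk containing the whole 'output' field of the response.
    async for chunk in GizAI.create_async_generator("chat-gemini-flash", messages):
        print(chunk)


asyncio.run(main())

Because streaming is disabled (supports_stream = False), callers get the full completion in one yield rather than incremental tokens.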