Diffstat (limited to 'g4f/Provider/needs_auth/OpenaiChat.py')
-rw-r--r--    g4f/Provider/needs_auth/OpenaiChat.py    16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 43444699..13e15f1d 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -6,6 +6,7 @@ import uuid
 import json
 import base64
 import time
+import requests
 from aiohttp import ClientWebSocketResponse
 from copy import copy
 
@@ -62,7 +63,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     supports_system_message = True
     default_model = "auto"
     default_vision_model = "gpt-4o"
-    models = ["auto", "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-gizmo"]
+    fallback_models = ["auto", "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini"]
+    vision_models = fallback_models
 
     _api_key: str = None
     _headers: dict = None
@@ -70,6 +72,18 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     _expires: int = None
 
     @classmethod
+    def get_models(cls):
+        if not cls.models:
+            try:
+                response = requests.get(f"{cls.url}/backend-anon/models")
+                response.raise_for_status()
+                data = response.json()
+                cls.models = [model.get("slug") for model in data.get("models")]
+            except Exception:
+                cls.models = cls.fallback_models
+        return cls.models
+
+    @classmethod
     async def create(
         cls,
         prompt: str = None,
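
The new get_models() classmethod fetches the model list from the backend once, caches it on the class, and falls back to a static list when the request fails. The following is a minimal standalone sketch of that fetch-and-cache fallback pattern: the /backend-anon/models path and the {"models": [{"slug": ...}]} response shape are taken from the diff above, while the ModelRegistry class name, the shortened fallback list, and the timeout value are illustrative assumptions rather than the actual provider code.

# Standalone sketch of the fetch-with-fallback pattern used by get_models().
from __future__ import annotations

import requests


class ModelRegistry:
    url = "https://chatgpt.com"                      # base URL, as in OpenaiChat.url
    fallback_models = ["auto", "gpt-4", "gpt-4o", "gpt-4o-mini"]   # static fallback (shortened here)
    models: list[str] = []                           # cached after the first successful fetch

    @classmethod
    def get_models(cls) -> list[str]:
        # Only hit the network once; reuse the cached list on later calls.
        if not cls.models:
            try:
                response = requests.get(f"{cls.url}/backend-anon/models", timeout=10)
                response.raise_for_status()
                data = response.json()
                # Expected shape: {"models": [{"slug": "..."}, ...]}
                cls.models = [model.get("slug") for model in data.get("models", [])]
            except Exception:
                # Any network or parsing error drops us back to the static list.
                cls.models = cls.fallback_models
        return cls.models


if __name__ == "__main__":
    print(ModelRegistry.get_models())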