From 791b9f5c5a3abdbc60cb78ddbb971bfce0346816 Mon Sep 17 00:00:00 2001
From: hlohaus <983577+hlohaus@users.noreply.github.com>
Date: Thu, 27 Feb 2025 18:47:48 +0100
Subject: Add default llama 3 model

---
 g4f/Provider/PollinationsAI.py        | 5 +----
 g4f/Provider/hf/HuggingChat.py        | 4 +++-
 g4f/Provider/hf/HuggingFaceAPI.py     | 4 +++-
 g4f/Provider/hf/models.py             | 7 ++++++-
 g4f/Provider/needs_auth/OpenaiChat.py | 5 +++--
 g4f/Provider/openai/har_file.py       | 2 --
 g4f/gui/client/demo.html              | 9 +++------
 g4f/gui/client/static/css/style.css   | 2 +-
 g4f/models.py                         | 9 ++++++++-
 9 files changed, 28 insertions(+), 19 deletions(-)

(limited to 'g4f')

diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index 1df3a692..d2c651a4 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -42,7 +42,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     text_models = [default_model]
     image_models = [default_image_model]
     extra_image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3"]
-    vision_models = [default_vision_model, "gpt-4o-mini"]
+    vision_models = [default_vision_model, "gpt-4o-mini", "o1-mini"]
     extra_text_models = ["claude", "claude-email", "deepseek-reasoner", "deepseek-r1"] + vision_models
     _models_loaded = False
     model_aliases = {
@@ -53,14 +53,11 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
         "qwen-2.5-coder-32b": "qwen-coder",
         "llama-3.3-70b": "llama",
         "mistral-nemo": "mistral",
-        "gpt-4o-mini": "rtist",
         "gpt-4o": "searchgpt",
-        "gpt-4o-mini": "p1",
         "deepseek-chat": "claude-hybridspace",
         "llama-3.1-8b": "llamalight",
         "gpt-4o-vision": "gpt-4o",
         "gpt-4o-mini-vision": "gpt-4o-mini",
-        "gpt-4o-mini": "claude",
         "deepseek-chat": "claude-email",
         "deepseek-r1": "deepseek-reasoner",
         "gemini-2.0": "gemini",
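
The three dropped "gpt-4o-mini" lines were duplicate keys inside a single dict literal. Python accepts duplicate keys without complaint and the last occurrence silently wins, so at most one of those aliases was ever live. A minimal sketch of the pitfall, plain Python with nothing provider-specific:

    # Duplicate keys in a dict literal do not raise; the last one overwrites
    # the rest, so this literal collapses to a single entry.
    aliases = {
        "gpt-4o-mini": "rtist",
        "gpt-4o-mini": "p1",
        "gpt-4o-mini": "claude",
    }
    assert aliases == {"gpt-4o-mini": "claude"}

Note that the two "deepseek-chat" entries left in the hunk collide the same way: only the later "claude-email" mapping takes effect.
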
diff --git a/g4f/Provider/hf/HuggingChat.py b/g4f/Provider/hf/HuggingChat.py
index d4656996..0d86f4ee 100644
--- a/g4f/Provider/hf/HuggingChat.py
+++ b/g4f/Provider/hf/HuggingChat.py
@@ -23,7 +23,7 @@ from ...requests import get_args_from_nodriver, DEFAULT_HEADERS
 from ...requests.raise_for_status import raise_for_status
 from ...providers.response import JsonConversation, ImageResponse, Sources, TitleGeneration, Reasoning, RequestLogin
 from ...cookies import get_cookies
-from .models import default_model, fallback_models, image_models, model_aliases
+from .models import default_model, fallback_models, image_models, model_aliases, llama_models
 from ... import debug
 
 class Conversation(JsonConversation):
@@ -97,6 +97,8 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin):
     ) -> AsyncResult:
         if not has_curl_cffi:
             raise MissingRequirementsError('Install "curl_cffi" package | pip install -U curl_cffi')
+        if model == llama_models["name"]:
+            model = llama_models["text"] if images is None else llama_models["vision"]
         model = cls.get_model(model)
         session = Session(**auth_result.get_dict())
 
diff --git a/g4f/Provider/hf/HuggingFaceAPI.py b/g4f/Provider/hf/HuggingFaceAPI.py
index e03ffc37..e775a7ae 100644
--- a/g4f/Provider/hf/HuggingFaceAPI.py
+++ b/g4f/Provider/hf/HuggingFaceAPI.py
@@ -6,7 +6,7 @@ from ...requests import StreamSession, raise_for_status
 from ...errors import ModelNotSupportedError
 from ...providers.helper import get_last_user_message
 from ..template.OpenaiTemplate import OpenaiTemplate
-from .models import model_aliases, vision_models, default_vision_model
+from .models import model_aliases, vision_models, default_vision_model, llama_models
 from .HuggingChat import HuggingChat
 from ... import debug
 
@@ -63,6 +63,8 @@ class HuggingFaceAPI(OpenaiTemplate):
     ):
         if model in cls.model_aliases:
             model = cls.model_aliases[model]
+        if model == llama_models["name"]:
+            model = llama_models["text"] if images is None else llama_models["vision"]
         api_base = f"https://api-inference.huggingface.co/models/{model}/v1"
         pipeline_tag = await cls.get_pipline_tag(model, api_key)
         if pipeline_tag not in ("text-generation", "image-text-to-text"):
diff --git a/g4f/Provider/hf/models.py b/g4f/Provider/hf/models.py
index d9a461fc..53c33a21 100644
--- a/g4f/Provider/hf/models.py
+++ b/g4f/Provider/hf/models.py
@@ -46,4 +46,9 @@ extra_models = [
     "NousResearch/Hermes-3-Llama-3.1-8B",
 ]
 default_vision_model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
-vision_models = [default_vision_model, "Qwen/Qwen2-VL-7B-Instruct"]
\ No newline at end of file
+vision_models = [default_vision_model, "Qwen/Qwen2-VL-7B-Instruct"]
+llama_models = {
+    "name": "llama-3",
+    "text": "meta-llama/Llama-3.3-70B-Instruct",
+    "vision": "meta-llama/Llama-3.2-11B-Vision-Instruct",
+}
\ No newline at end of file
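
The llama_models dict added just above is what both provider hunks consult: the generic "llama-3" alias resolves to the 70B text checkpoint by default and to the 11B vision checkpoint whenever the request carries images. A self-contained sketch of that dispatch (resolve_llama and the sample image name are illustrative, not names from the patch):

    # Mirrors the alias handling added to HuggingChat.py and HuggingFaceAPI.py;
    # note the check is "images is None", not truthiness, exactly as in the patch.
    llama_models = {
        "name": "llama-3",
        "text": "meta-llama/Llama-3.3-70B-Instruct",
        "vision": "meta-llama/Llama-3.2-11B-Vision-Instruct",
    }

    def resolve_llama(model: str, images=None) -> str:
        # Map the "llama-3" alias to a concrete checkpoint; pass others through.
        if model == llama_models["name"]:
            return llama_models["text"] if images is None else llama_models["vision"]
        return model

    assert resolve_llama("llama-3") == "meta-llama/Llama-3.3-70B-Instruct"
    assert resolve_llama("llama-3", images=["cat.png"]) == "meta-llama/Llama-3.2-11B-Vision-Instruct"
    assert resolve_llama("gpt-4o") == "gpt-4o"

This is what makes the commit subject work: "llama-3" can be offered as a single default while the concrete checkpoint is picked per request.
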
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index b36c69d6..da36b7cd 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -101,7 +101,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
     image_models = image_models
     vision_models = text_models
     models = models
-    synthesize_content_type = "audio/mpeg"
+    synthesize_content_type = "audio/aac"
     request_config = RequestConfig()
 
     _expires: int = None
@@ -588,7 +588,8 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
             page = browser.main_tab
             def on_request(event: nodriver.cdp.network.RequestWillBeSent):
                 if event.request.url == start_url or event.request.url.startswith(conversation_url):
-                    cls.request_config.headers = event.request.headers
+                    for key, value in event.request.headers.items():
+                        cls.request_config.headers[key.lower()] = value
                 elif event.request.url in (backend_url, backend_anon_url):
                     if "OpenAI-Sentinel-Proof-Token" in event.request.headers:
                         cls.request_config.proof_token = json.loads(base64.b64decode(
diff --git a/g4f/Provider/openai/har_file.py b/g4f/Provider/openai/har_file.py
index 93cfded6..6e727f5e 100644
--- a/g4f/Provider/openai/har_file.py
+++ b/g4f/Provider/openai/har_file.py
@@ -30,8 +30,6 @@ class RequestConfig:
     turnstile_token: str = None
     arkose_request: arkReq = None
    arkose_token: str = None
-    headers: dict = {}
-    cookies: dict = {}
     data_build: str = "prod-db8e51e8414e068257091cf5003a62d3d4ee6ed0"
 
 class arkReq:
diff --git a/g4f/gui/client/demo.html b/g4f/gui/client/demo.html
index 3fb34040..f512c0ff 100644
--- a/g4f/gui/client/demo.html
+++ b/g4f/gui/client/demo.html
@@ -256,10 +256,8 @@
             input.setCustomValidity("Invalid Access Token.");
             localStorage.removeItem("HuggingFace-api_key");
             if (localStorage.getItem("oauth")) {
-                window.location.href = (await oauthLoginUrl({
-                    clientId: 'ed074164-4f8d-4fb2-8bec-44952707965e',
-                    scopes: ['inference-api']
-                }));
+                localStorage.removeItem("oauth");
+                window.location.replace("/");
             }
             return;
         }
@@ -292,8 +290,7 @@
                 document.getElementById("signout").onclick = async function() {
                     localStorage.removeItem("oauth");
                     localStorage.removeItem("HuggingFace-api_key");
-                    window.location.href = window.location.href.replace(/\?.*$/, '');
-                    window.location.reload();
+                    window.location.replace("/");
                 }
             } else {
                 localStorage.removeItem("oauth");
diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css
index bf2c8b21..8b608970 100644
--- a/g4f/gui/client/static/css/style.css
+++ b/g4f/gui/client/static/css/style.css
@@ -1213,7 +1213,7 @@ ul {
 }
 
 #chatPrompt {
-    padding-left: 60px;
+    padding-left: 30px;
 }
 
 .field.collapsible {
diff --git a/g4f/models.py b/g4f/models.py
index d944d530..84aba2ab 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -204,6 +204,12 @@ llama_3_1_405b = Model(
 )
 
 # llama 3.2
+llama_3 = VisionModel(
+    name = "llama-3",
+    base_provider = "Meta Llama",
+    best_provider = IterListProvider([HuggingChat, HuggingFace])
+)
+
 llama_3_2_1b = Model(
     name = "llama-3.2-1b",
     base_provider = "Meta Llama",
@@ -872,7 +878,8 @@ class ModelUtils:
 
 
 demo_models = {
-    "default": [llama_3_2_11b, [HuggingFace]],
+    "default": [llama_3, [HuggingFace]],
+    llama_3_2_11b.name: [llama_3_2_11b, [HuggingChat]],
     qwen_2_vl_7b.name: [qwen_2_vl_7b, [HuggingFaceAPI]],
     deepseek_r1.name: [deepseek_r1, [HuggingFace, PollinationsAI]],
     janus_pro_7b.name: [janus_pro_7b, [HuggingSpace, G4F]],
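
A closing note on the OpenaiChat.py hunk above: instead of storing the captured browser headers wholesale, the callback now copies them one at a time and lowercases each name. HTTP header names are case-insensitive, so normalizing on write lets later code read request_config.headers with plain lowercase keys. A minimal sketch of the idea (the sample header names are assumed, not taken from the patch):

    # Normalize header names on capture so lookups can assume lowercase keys.
    def normalize_headers(raw: dict) -> dict:
        return {name.lower(): value for name, value in raw.items()}

    captured = {"User-Agent": "Mozilla/5.0", "Accept-Language": "en-US"}
    headers = normalize_headers(captured)
    assert headers["user-agent"] == "Mozilla/5.0"

Relatedly, the har_file.py hunk removes the headers and cookies class attributes from RequestConfig; class-level mutable defaults such as "headers: dict = {}" are shared across every instance in Python, a classic source of state leaking between requests.

-- cgit v1.2.3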