From c18f10243ecc3247dbaa43039cf80fc1257c7330 Mon Sep 17 00:00:00 2001
From: hlohaus <983577+hlohaus@users.noreply.github.com>
Date: Mon, 27 Jan 2025 17:37:25 +0100
Subject: Check request limit in demo only in API Stop recognition in UI on
enter request Fix Ratelimit for Ping in GUI Use OpenaiTemplate for OIVSCode
Support Reasoning in Blackbox Add error reporting in UI Support Custom
Provider in Demo
---
g4f/Provider/Blackbox.py | 13 +++-
g4f/Provider/OIVSCode.py | 98 ++-----------------------------
g4f/Provider/needs_auth/Custom.py | 1 +
g4f/Provider/needs_auth/OpenaiTemplate.py | 54 +++++++++++------
4 files changed, 52 insertions(+), 114 deletions(-)
(limited to 'g4f/Provider')
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 7e3cf40f..d8669884 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -14,7 +14,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse, to_data_uri
from ..cookies import get_cookies_dir
from .helper import format_prompt
-from ..providers.response import FinishReason, JsonConversation
+from ..providers.response import FinishReason, JsonConversation, Reasoning
class Conversation(JsonConversation):
validated_value: str = None
@@ -310,7 +310,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
prompt = messages[-1]["content"]
yield ImageResponse(images=[image_url], alt=prompt)
else:
- if "Generated by BLACKBOX.AI" in text_to_yield:
+ if "<think>" in text_to_yield and "</think>" in chunk_text :
+ chunk_text = text_to_yield.split('<think>', 1)
+ yield chunk_text[0]
+ chunk_text = text_to_yield.split('</think>', 1)
+ yield Reasoning(chunk_text[0])
+ yield chunk_text[1]
+ full_response = text_to_yield
+ elif "Generated by BLACKBOX.AI" in text_to_yield:
conversation.validated_value = await cls.fetch_validated(force_refresh=True)
if conversation.validated_value:
data["validated"] = conversation.validated_value
@@ -337,7 +344,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
reason = "length"
else:
reason = "stop"
-
+
if return_conversation:
conversation.message_history.append({"role": "assistant", "content": full_response})
yield conversation
diff --git a/g4f/Provider/OIVSCode.py b/g4f/Provider/OIVSCode.py
index 56e6ceb8..6a918869 100644
--- a/g4f/Provider/OIVSCode.py
+++ b/g4f/Provider/OIVSCode.py
@@ -1,101 +1,15 @@
from __future__ import annotations
-import json
-from aiohttp import ClientSession
+from .needs_auth.OpenaiTemplate import OpenaiTemplate
-from ..image import to_data_uri
-from ..typing import AsyncResult, Messages, ImagesType
-from ..requests.raise_for_status import raise_for_status
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-from ..providers.response import FinishReason
-
-
-class OIVSCode(AsyncGeneratorProvider, ProviderModelMixin):
+class OIVSCode(OpenaiTemplate):
label = "OI VSCode Server"
url = "https://oi-vscode-server.onrender.com"
- api_endpoint = "https://oi-vscode-server.onrender.com/v1/chat/completions"
-
+ api_base = "https://oi-vscode-server.onrender.com/v1"
working = True
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
+ needs_auth = False
default_model = "gpt-4o-mini-2024-07-18"
default_vision_model = default_model
- vision_models = [default_model, "gpt-4o-mini"]
- models = vision_models
-
- model_aliases = {"gpt-4o-mini": "gpt-4o-mini-2024-07-18"}
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- stream: bool = False,
- images: ImagesType = None,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "content-type": "application/json",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
- }
-
- async with ClientSession(headers=headers) as session:
-
- if images is not None:
- messages[-1]['content'] = [
- {
- "type": "text",
- "text": messages[-1]['content']
- },
- *[
- {
- "type": "image_url",
- "image_url": {
- "url": to_data_uri(image)
- }
- }
- for image, _ in images
- ]
- ]
-
- data = {
- "model": model,
- "stream": stream,
- "messages": messages
- }
-
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- await raise_for_status(response)
-
- full_response = ""
-
- if stream:
- async for line in response.content:
- if line:
- line = line.decode()
- if line.startswith("data: "):
- if line.strip() == "data: [DONE]":
- break
- try:
- data = json.loads(line[6:])
- if content := data["choices"][0]["delta"].get("content"):
- yield content
- full_response += content
- except:
- continue
-
- reason = "length" if len(full_response) > 0 else "stop"
- yield FinishReason(reason)
- else:
- response_data = await response.json()
- full_response = response_data["choices"][0]["message"]["content"]
- yield full_response
-
- reason = "length" if len(full_response) > 0 else "stop"
- yield FinishReason(reason)
+ vision_models = [default_model, "gpt-4o-mini"]
+ model_aliases = {"gpt-4o-mini": "gpt-4o-mini-2024-07-18"}
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/Custom.py b/g4f/Provider/needs_auth/Custom.py
index 17a61a7b..ceb46973 100644
--- a/g4f/Provider/needs_auth/Custom.py
+++ b/g4f/Provider/needs_auth/Custom.py
@@ -5,5 +5,6 @@ from .OpenaiTemplate import OpenaiTemplate
class Custom(OpenaiTemplate):
label = "Custom Provider"
working = True
+ needs_auth = False
api_base = "http://localhost:8080/v1"
sort_models = False
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/OpenaiTemplate.py b/g4f/Provider/needs_auth/OpenaiTemplate.py
index 72481c9b..5315d99c 100644
--- a/g4f/Provider/needs_auth/OpenaiTemplate.py
+++ b/g4f/Provider/needs_auth/OpenaiTemplate.py
@@ -8,7 +8,7 @@ from ..helper import filter_none
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
from ...typing import Union, Optional, AsyncResult, Messages, ImagesType
from ...requests import StreamSession, raise_for_status
-from ...providers.response import FinishReason, ToolCalls, Usage, Reasoning
+from ...providers.response import FinishReason, ToolCalls, Usage, Reasoning, ImageResponse
from ...errors import MissingAuthError, ResponseError
from ...image import to_data_uri
from ... import debug
@@ -59,6 +59,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
top_p: float = None,
stop: Union[str, list[str]] = None,
stream: bool = False,
+ prompt: str = None,
headers: dict = None,
impersonate: str = None,
tools: Optional[list] = None,
@@ -67,32 +68,47 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
) -> AsyncResult:
if cls.needs_auth and api_key is None:
raise MissingAuthError('Add a "api_key"')
- if api_base is None:
- api_base = cls.api_base
- if images is not None and messages:
- if not model and hasattr(cls, "default_vision_model"):
- model = cls.default_vision_model
- last_message = messages[-1].copy()
- last_message["content"] = [
- *[{
- "type": "image_url",
- "image_url": {"url": to_data_uri(image)}
- } for image, _ in images],
- {
- "type": "text",
- "text": messages[-1]["content"]
- }
- ]
- messages[-1] = last_message
async with StreamSession(
proxy=proxy,
headers=cls.get_headers(stream, api_key, headers),
timeout=timeout,
impersonate=impersonate,
) as session:
+ model = cls.get_model(model, api_key=api_key, api_base=api_base)
+ if api_base is None:
+ api_base = cls.api_base
+
+ # Proxy for image generation feature
+ if model in cls.image_models:
+ data = {
+ "prompt": messages[-1]["content"] if prompt is None else prompt,
+ "model": model,
+ }
+ async with session.post(f"{api_base.rstrip('/')}/images/generations", json=data) as response:
+ data = await response.json()
+ cls.raise_error(data)
+ await raise_for_status(response)
+ yield ImageResponse([image["url"] for image in data["data"]], prompt)
+ return
+
+ if images is not None and messages:
+ if not model and hasattr(cls, "default_vision_model"):
+ model = cls.default_vision_model
+ last_message = messages[-1].copy()
+ last_message["content"] = [
+ *[{
+ "type": "image_url",
+ "image_url": {"url": to_data_uri(image)}
+ } for image, _ in images],
+ {
+ "type": "text",
+ "text": messages[-1]["content"]
+ }
+ ]
+ messages[-1] = last_message
data = filter_none(
messages=messages,
- model=cls.get_model(model, api_key=api_key, api_base=api_base),
+ model=model,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
--
cgit v1.2.3