summary | refs | log | tree | commit | diff | stats
path: root/g4f/Provider
diff options
context:
space:
mode:
Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/Blackbox.py                   12
-rw-r--r--  g4f/Provider/Copilot.py                    23
-rw-r--r--  g4f/Provider/RubiksAI.py                  124
-rw-r--r--  g4f/Provider/needs_auth/Cerebras.py        65
-rw-r--r--  g4f/Provider/needs_auth/CopilotAccount.py   7
-rw-r--r--  g4f/Provider/needs_auth/HuggingFace2.py    28
-rw-r--r--  g4f/Provider/needs_auth/OpenaiAPI.py        4
-rw-r--r--  g4f/Provider/needs_auth/__init__.py         2
8 files changed, 176 insertions, 89 deletions
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 97466c04..ba58a511 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -28,6 +28,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
image_models = [default_image_model, 'repomap']
text_models = [default_model, 'gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']
vision_models = [default_model, 'gpt-4o', 'gemini-pro', 'blackboxai-pro']
+ model_aliases = {
+ "claude-3.5-sonnet": "claude-sonnet-3.5",
+ }
agentMode = {
default_image_model: {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
}
@@ -198,6 +201,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
async with ClientSession(headers=headers) as session:
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
+ is_first = False
async for chunk in response.content.iter_any():
text_chunk = chunk.decode(errors="ignore")
if model in cls.image_models:
@@ -217,5 +221,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
for i, result in enumerate(search_results, 1):
formatted_response += f"\n{i}. {result['title']}: {result['link']}"
yield formatted_response
- else:
- yield text_chunk.strip()
+ elif text_chunk:
+ if is_first:
+ is_first = False
+ yield text_chunk.lstrip()
+ else:
+ yield text_chunk \ No newline at end of file
diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
index e40278c7..e8eea0a5 100644
--- a/g4f/Provider/Copilot.py
+++ b/g4f/Provider/Copilot.py
@@ -21,8 +21,9 @@ from .helper import format_prompt
from ..typing import CreateResult, Messages, ImageType
from ..errors import MissingRequirementsError
from ..requests.raise_for_status import raise_for_status
+from ..providers.helper import format_cookies
from ..requests import get_nodriver
-from ..image import to_bytes, is_accepted_format
+from ..image import ImageResponse, to_bytes, is_accepted_format
from .. import debug
class Conversation(BaseConversation):
@@ -70,18 +71,21 @@ class Copilot(AbstractProvider):
access_token, cookies = asyncio.run(cls.get_access_token_and_cookies(proxy))
else:
access_token = conversation.access_token
- websocket_url = f"{websocket_url}&acessToken={quote(access_token)}"
- headers = {"Authorization": f"Bearer {access_token}"}
+ debug.log(f"Copilot: Access token: {access_token[:7]}...{access_token[-5:]}")
+ debug.log(f"Copilot: Cookies: {';'.join([*cookies])}")
+ websocket_url = f"{websocket_url}&accessToken={quote(access_token)}"
+ headers = {"authorization": f"Bearer {access_token}", "cookie": format_cookies(cookies)}
with Session(
timeout=timeout,
proxy=proxy,
impersonate="chrome",
headers=headers,
- cookies=cookies
+ cookies=cookies,
) as session:
- response = session.get(f"{cls.url}/")
+ response = session.get("https://copilot.microsoft.com/c/api/user")
raise_for_status(response)
+ debug.log(f"Copilot: User: {response.json().get('firstName', 'null')}")
if conversation is None:
response = session.post(cls.conversation_url)
raise_for_status(response)
@@ -119,6 +123,7 @@ class Copilot(AbstractProvider):
is_started = False
msg = None
+ image_prompt: str = None
while True:
try:
msg = wss.recv()[0]
@@ -128,7 +133,11 @@ class Copilot(AbstractProvider):
if msg.get("event") == "appendText":
is_started = True
yield msg.get("text")
- elif msg.get("event") in ["done", "partCompleted"]:
+ elif msg.get("event") == "generatingImage":
+ image_prompt = msg.get("prompt")
+ elif msg.get("event") == "imageGenerated":
+ yield ImageResponse(msg.get("url"), image_prompt, {"preview": msg.get("thumbnailUrl")})
+ elif msg.get("event") == "done":
break
if not is_started:
raise RuntimeError(f"Last message: {msg}")
@@ -152,7 +161,7 @@ class Copilot(AbstractProvider):
})()
""")
if access_token is None:
- asyncio.sleep(1)
+ await asyncio.sleep(1)
cookies = {}
for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
cookies[c.name] = c.value
diff --git a/g4f/Provider/RubiksAI.py b/g4f/Provider/RubiksAI.py
index 7e76d558..c06e6c3d 100644
--- a/g4f/Provider/RubiksAI.py
+++ b/g4f/Provider/RubiksAI.py
@@ -1,7 +1,6 @@
+
from __future__ import annotations
-import asyncio
-import aiohttp
import random
import string
import json
@@ -11,34 +10,24 @@ from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
+from ..requests.raise_for_status import raise_for_status
class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Rubiks AI"
url = "https://rubiks.ai"
- api_endpoint = "https://rubiks.ai/search/api.php"
+ api_endpoint = "https://rubiks.ai/search/api/"
working = True
supports_stream = True
supports_system_message = True
supports_message_history = True
- default_model = 'llama-3.1-70b-versatile'
- models = [default_model, 'gpt-4o-mini']
+ default_model = 'gpt-4o-mini'
+ models = [default_model, 'gpt-4o', 'o1-mini', 'claude-3.5-sonnet', 'grok-beta', 'gemini-1.5-pro', 'nova-pro']
model_aliases = {
"llama-3.1-70b": "llama-3.1-70b-versatile",
}
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
@staticmethod
def generate_mid() -> str:
"""
@@ -70,7 +59,8 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
- websearch: bool = False,
+ web_search: bool = False,
+ temperature: float = 0.6,
**kwargs
) -> AsyncResult:
"""
@@ -80,20 +70,18 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
- model (str): The model to use in the request.
- messages (Messages): The messages to send as a prompt.
- proxy (str, optional): Proxy URL, if needed.
- - websearch (bool, optional): Indicates whether to include search sources in the response. Defaults to False.
+ - web_search (bool, optional): Indicates whether to include search sources in the response. Defaults to False.
"""
model = cls.get_model(model)
- prompt = format_prompt(messages)
- q_value = prompt
mid_value = cls.generate_mid()
- referer = cls.create_referer(q=q_value, mid=mid_value, model=model)
-
- url = cls.api_endpoint
- params = {
- 'q': q_value,
- 'model': model,
- 'id': '',
- 'mid': mid_value
+ referer = cls.create_referer(q=messages[-1]["content"], mid=mid_value, model=model)
+
+ data = {
+ "messages": messages,
+ "model": model,
+ "search": web_search,
+ "stream": True,
+ "temperature": temperature
}
headers = {
@@ -111,52 +99,34 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"'
}
-
- try:
- timeout = aiohttp.ClientTimeout(total=None)
- async with ClientSession(timeout=timeout) as session:
- async with session.get(url, headers=headers, params=params, proxy=proxy) as response:
- if response.status != 200:
- yield f"Request ended with status code {response.status}"
- return
-
- assistant_text = ''
- sources = []
-
- async for line in response.content:
- decoded_line = line.decode('utf-8').strip()
- if not decoded_line.startswith('data: '):
- continue
- data = decoded_line[6:]
- if data in ('[DONE]', '{"done": ""}'):
- break
- try:
- json_data = json.loads(data)
- except json.JSONDecodeError:
- continue
-
- if 'url' in json_data and 'title' in json_data:
- if websearch:
- sources.append({'title': json_data['title'], 'url': json_data['url']})
-
- elif 'choices' in json_data:
- for choice in json_data['choices']:
- delta = choice.get('delta', {})
- content = delta.get('content', '')
- role = delta.get('role', '')
- if role == 'assistant':
- continue
- assistant_text += content
-
- if websearch and sources:
- sources_text = '\n'.join([f"{i+1}. [{s['title']}]: {s['url']}" for i, s in enumerate(sources)])
- assistant_text += f"\n\n**Source:**\n{sources_text}"
-
- yield assistant_text
-
- except asyncio.CancelledError:
- yield "The request was cancelled."
- except aiohttp.ClientError as e:
- yield f"An error occurred during the request: {e}"
- except Exception as e:
- yield f"An unexpected error occurred: {e}"
+ async with ClientSession() as session:
+ async with session.post(cls.api_endpoint, headers=headers, json=data, proxy=proxy) as response:
+ await raise_for_status(response)
+
+ sources = []
+ async for line in response.content:
+ decoded_line = line.decode('utf-8').strip()
+ if not decoded_line.startswith('data: '):
+ continue
+ data = decoded_line[6:]
+ if data in ('[DONE]', '{"done": ""}'):
+ break
+ try:
+ json_data = json.loads(data)
+ except json.JSONDecodeError:
+ continue
+
+ if 'url' in json_data and 'title' in json_data:
+ if web_search:
+ sources.append({'title': json_data['title'], 'url': json_data['url']})
+
+ elif 'choices' in json_data:
+ for choice in json_data['choices']:
+ delta = choice.get('delta', {})
+ content = delta.get('content', '')
+ if content:
+ yield content
+
+ if web_search and sources:
+ sources_text = '\n'.join([f"{i+1}. [{s['title']}]: {s['url']}" for i, s in enumerate(sources)])
+ yield f"\n\n**Source:**\n{sources_text}" \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/Cerebras.py b/g4f/Provider/needs_auth/Cerebras.py
new file mode 100644
index 00000000..0f94c476
--- /dev/null
+++ b/g4f/Provider/needs_auth/Cerebras.py
@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+import requests
+from aiohttp import ClientSession
+
+from .OpenaiAPI import OpenaiAPI
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests.raise_for_status import raise_for_status
+from ...cookies import get_cookies
+
+class Cerebras(OpenaiAPI):
+ label = "Cerebras Inference"
+ url = "https://inference.cerebras.ai/"
+ working = True
+ default_model = "llama3.1-70b"
+ fallback_models = [
+ "llama3.1-70b",
+ "llama3.1-8b",
+ ]
+ model_aliases = {"llama-3.1-70b": "llama3.1-70b", "llama-3.1-8b": "llama3.1-8b"}
+
+ @classmethod
+ def get_models(cls, api_key: str = None):
+ if not cls.models:
+ try:
+ headers = {}
+ if api_key:
+                    headers["authorization"] = f"Bearer {api_key}"
+                response = requests.get("https://api.cerebras.ai/v1/models", headers=headers)
+ raise_for_status(response)
+ data = response.json()
+                cls.models = [model.get("id") for model in data.get("data")]
+ except Exception:
+ cls.models = cls.fallback_models
+ return cls.models
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ api_base: str = "https://api.cerebras.ai/v1",
+ api_key: str = None,
+ cookies: Cookies = None,
+ **kwargs
+ ) -> AsyncResult:
+ if api_key is None and cookies is None:
+ cookies = get_cookies(".cerebras.ai")
+ async with ClientSession(cookies=cookies) as session:
+ async with session.get("https://inference.cerebras.ai/api/auth/session") as response:
+                await raise_for_status(response)
+ data = await response.json()
+ if data:
+ api_key = data.get("user", {}).get("demoApiKey")
+ async for chunk in super().create_async_generator(
+ model, messages,
+ api_base=api_base,
+ impersonate="chrome",
+ api_key=api_key,
+ headers={
+ "User-Agent": "ex/JS 1.5.0",
+ },
+ **kwargs
+ ):
+ yield chunk
diff --git a/g4f/Provider/needs_auth/CopilotAccount.py b/g4f/Provider/needs_auth/CopilotAccount.py
index 76e51278..497aab98 100644
--- a/g4f/Provider/needs_auth/CopilotAccount.py
+++ b/g4f/Provider/needs_auth/CopilotAccount.py
@@ -1,9 +1,12 @@
from __future__ import annotations
+from ..base_provider import ProviderModelMixin
from ..Copilot import Copilot
-class CopilotAccount(Copilot):
+class CopilotAccount(Copilot, ProviderModelMixin):
needs_auth = True
parent = "Copilot"
default_model = "Copilot"
- default_vision_model = default_model \ No newline at end of file
+ default_vision_model = default_model
+ models = [default_model]
+ image_models = models \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/HuggingFace2.py b/g4f/Provider/needs_auth/HuggingFace2.py
new file mode 100644
index 00000000..847d459b
--- /dev/null
+++ b/g4f/Provider/needs_auth/HuggingFace2.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+from .OpenaiAPI import OpenaiAPI
+from ..HuggingChat import HuggingChat
+from ...typing import AsyncResult, Messages
+
+class HuggingFace2(OpenaiAPI):
+ label = "HuggingFace (Inference API)"
+ url = "https://huggingface.co"
+ working = True
+ default_model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
+ default_vision_model = default_model
+ models = [
+ *HuggingChat.models
+ ]
+
+ @classmethod
+ def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ api_base: str = "https://api-inference.huggingface.co/v1",
+ max_tokens: int = 500,
+ **kwargs
+ ) -> AsyncResult:
+ return super().create_async_generator(
+ model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs
+ ) \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/OpenaiAPI.py b/g4f/Provider/needs_auth/OpenaiAPI.py
index 116b5f6f..83268b6d 100644
--- a/g4f/Provider/needs_auth/OpenaiAPI.py
+++ b/g4f/Provider/needs_auth/OpenaiAPI.py
@@ -34,6 +34,7 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
stop: Union[str, list[str]] = None,
stream: bool = False,
headers: dict = None,
+ impersonate: str = None,
extra_data: dict = {},
**kwargs
) -> AsyncResult:
@@ -55,7 +56,8 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
async with StreamSession(
proxies={"all": proxy},
headers=cls.get_headers(stream, api_key, headers),
- timeout=timeout
+ timeout=timeout,
+ impersonate=impersonate,
) as session:
data = filter_none(
messages=messages,
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 0f430ab5..1c7fe7c5 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -1,6 +1,7 @@
from .gigachat import *
from .BingCreateImages import BingCreateImages
+from .Cerebras import Cerebras
from .CopilotAccount import CopilotAccount
from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
@@ -8,6 +9,7 @@ from .Gemini import Gemini
from .GeminiPro import GeminiPro
from .Groq import Groq
from .HuggingFace import HuggingFace
+from .HuggingFace2 import HuggingFace2
from .MetaAI import MetaAI
from .MetaAIAccount import MetaAIAccount
from .OpenaiAPI import OpenaiAPI