Diffstat (path: g4f/Provider)
-rw-r--r--  g4f/Provider/Airforce.py                                       | 184
-rw-r--r--  g4f/Provider/AmigoChat.py (renamed from g4f/Provider/not_working/AmigoChat.py) |  67
-rw-r--r--  g4f/Provider/Cloudflare.py                                     | 136
-rw-r--r--  g4f/Provider/DDG.py                                            | 108
-rw-r--r--  g4f/Provider/HuggingChat.py                                    |   9
-rw-r--r--  g4f/Provider/Liaobots.py                                       |   2
-rw-r--r--  g4f/Provider/Pi.py                                             |  80
-rw-r--r--  g4f/Provider/__init__.py                                       |   1
-rw-r--r--  g4f/Provider/bing/create_images.py                             |   2
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py                          |   2
-rw-r--r--  g4f/Provider/not_working/__init__.py                           |   1
-rw-r--r--  g4f/Provider/openai/proofofwork.py                             |   1
12 files changed, 326 insertions(+), 267 deletions(-)
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index c7ae44c0..6254e160 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -1,59 +1,171 @@
from __future__ import annotations
-from typing import Any, Dict
-import inspect
-from aiohttp import ClientSession
+import random
+import json
+import re
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
+from ..image import ImageResponse
+from ..requests import StreamSession, raise_for_status
from .airforce.AirforceChat import AirforceChat
from .airforce.AirforceImage import AirforceImage
class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://api.airforce"
api_endpoint_completions = AirforceChat.api_endpoint
- api_endpoint_imagine2 = AirforceImage.api_endpoint
+ api_endpoint_imagine = AirforceImage.api_endpoint
working = True
- supports_stream = AirforceChat.supports_stream
- supports_system_message = AirforceChat.supports_system_message
- supports_message_history = AirforceChat.supports_message_history
-
- default_model = AirforceChat.default_model
- models = [*AirforceChat.models, *AirforceImage.models]
-
+ default_model = "gpt-4o-mini"
+ supports_system_message = True
+ supports_message_history = True
+ text_models = [
+ 'gpt-4-turbo',
+ default_model,
+ 'llama-3.1-70b-turbo',
+ 'llama-3.1-8b-turbo',
+ ]
+ image_models = [
+ 'flux',
+ 'flux-realism',
+ 'flux-anime',
+ 'flux-3d',
+ 'flux-disney',
+ 'flux-pixel',
+ 'flux-4o',
+ 'any-dark',
+ ]
+ models = [
+ *text_models,
+ *image_models,
+ ]
model_aliases = {
- **AirforceChat.model_aliases,
- **AirforceImage.model_aliases
+ "gpt-4o": "chatgpt-4o-latest",
+ "llama-3.1-70b": "llama-3.1-70b-turbo",
+ "llama-3.1-8b": "llama-3.1-8b-turbo",
+ "gpt-4": "gpt-4-turbo",
}
@classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
+ def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ seed: int = None,
+ size: str = "1:1",
+ stream: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ if model in cls.image_models:
+ return cls._generate_image(model, messages, proxy, seed, size)
else:
- return cls.default_model
+ return cls._generate_text(model, messages, proxy, stream, **kwargs)
@classmethod
- async def create_async_generator(cls, model: str, messages: Messages, **kwargs) -> AsyncResult:
- model = cls.get_model(model)
-
- provider = AirforceChat if model in AirforceChat.text_models else AirforceImage
+ async def _generate_image(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ seed: int = None,
+ size: str = "1:1",
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "origin": "https://llmplayground.net",
+ "user-agent": "Mozilla/5.0"
+ }
+ if seed is None:
+ seed = random.randint(0, 100000)
+ prompt = messages[-1]['content']
- if model not in provider.models:
- raise ValueError(f"Unsupported model: {model}")
+ async with StreamSession(headers=headers, proxy=proxy) as session:
+ params = {
+ "model": model,
+ "prompt": prompt,
+ "size": size,
+ "seed": seed
+ }
+ async with session.get(f"{cls.api_endpoint_imagine}", params=params) as response:
+ await raise_for_status(response)
+ content_type = response.headers.get('Content-Type', '').lower()
- # Get the signature of the provider's create_async_generator method
- sig = inspect.signature(provider.create_async_generator)
-
- # Filter kwargs to only include parameters that the provider's method accepts
- filtered_kwargs = {k: v for k, v in kwargs.items() if k in sig.parameters}
+ if 'application/json' in content_type:
+ raise RuntimeError((await response.json()).get("error", {}).get("message"))
+ elif 'image' in content_type:
+ image_data = b""
+ async for chunk in response.iter_content():
+ if chunk:
+ image_data += chunk
+ image_url = f"{cls.api_endpoint_imagine}?model={model}&prompt={prompt}&size={size}&seed={seed}"
+ yield ImageResponse(images=image_url, alt=prompt)
- # Add model and messages to filtered_kwargs
- filtered_kwargs['model'] = model
- filtered_kwargs['messages'] = messages
+ @classmethod
+ async def _generate_text(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ stream: bool = False,
+ max_tokens: int = 4096,
+ temperature: float = 1,
+ top_p: float = 1,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "authorization": "Bearer missing api key",
+ "content-type": "application/json",
+ "user-agent": "Mozilla/5.0"
+ }
+ async with StreamSession(headers=headers, proxy=proxy) as session:
+ data = {
+ "messages": messages,
+ "model": model,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stream": stream
+ }
+ async with session.post(cls.api_endpoint_completions, json=data) as response:
+ await raise_for_status(response)
+ content_type = response.headers.get('Content-Type', '').lower()
+ if 'application/json' in content_type:
+ json_data = await response.json()
+ if json_data.get("model") == "error":
+ raise RuntimeError(json_data['choices'][0]['message'].get('content', ''))
+ if stream:
+ async for line in response.iter_lines():
+ if line:
+ line = line.decode('utf-8').strip()
+ if line.startswith("data: ") and line != "data: [DONE]":
+ json_data = json.loads(line[6:])
+ content = json_data['choices'][0]['delta'].get('content', '')
+ if content:
+ yield cls._filter_content(content)
+ else:
+ json_data = await response.json()
+ content = json_data['choices'][0]['message']['content']
+ yield cls._filter_content(content)
- async for result in provider.create_async_generator(**filtered_kwargs):
- yield result
+ @classmethod
+ def _filter_content(cls, part_response: str) -> str:
+ part_response = re.sub(
+ r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
+ '',
+ part_response
+ )
+
+ part_response = re.sub(
+ r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
+ '',
+ part_response
+ )
+ return part_response
\ No newline at end of file
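For orientation, a minimal driving sketch for the reworked provider; the import path and Messages shape follow the rest of the repo, and the model name is one of the text_models declared above:

```python
# Minimal sketch: stream a completion through the new Airforce class.
# Assumes g4f is installed from this tree; network access is required.
import asyncio
from g4f.Provider import Airforce

async def main():
    # create_async_generator() is a plain classmethod here; it returns the
    # async generator built by _generate_text() (or _generate_image() for
    # models listed in image_models, such as "flux").
    async for chunk in Airforce.create_async_generator(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Say hello"}],
        stream=True,
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())
```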
diff --git a/g4f/Provider/not_working/AmigoChat.py b/g4f/Provider/AmigoChat.py
index 274a5e14..2e66dccf 100644
--- a/g4f/Provider/not_working/AmigoChat.py
+++ b/g4f/Provider/AmigoChat.py
@@ -2,18 +2,18 @@ from __future__ import annotations
import json
import uuid
-from aiohttp import ClientSession, ClientTimeout, ClientResponseError
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-from ...image import ImageResponse
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse
+from ..requests import StreamSession, raise_for_status
+from ..errors import ResponseStatusError
class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://amigochat.io/chat/"
chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions"
image_api_endpoint = "https://api.amigochat.io/v1/images/generations"
- working = False
+ working = True
supports_stream = True
supports_system_message = True
supports_message_history = True
@@ -67,15 +67,6 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
}
@classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
def get_personaId(cls, model: str) -> str:
return cls.persona_ids[model]
@@ -86,6 +77,12 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
messages: Messages,
proxy: str = None,
stream: bool = False,
+ timeout: int = 300,
+ frequency_penalty: float = 0,
+ max_tokens: int = 4000,
+ presence_penalty: float = 0,
+ temperature: float = 0.5,
+ top_p: float = 0.95,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
@@ -113,31 +110,25 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
"x-device-language": "en-US",
"x-device-platform": "web",
"x-device-uuid": device_uuid,
- "x-device-version": "1.0.32"
+ "x-device-version": "1.0.41"
}
- async with ClientSession(headers=headers) as session:
- if model in cls.chat_models:
- # Chat completion
+ async with StreamSession(headers=headers, proxy=proxy) as session:
+ if model not in cls.image_models:
data = {
- "messages": [{"role": m["role"], "content": m["content"]} for m in messages],
+ "messages": messages,
"model": model,
"personaId": cls.get_personaId(model),
- "frequency_penalty": 0,
- "max_tokens": 4000,
- "presence_penalty": 0,
+ "frequency_penalty": frequency_penalty,
+ "max_tokens": max_tokens,
+ "presence_penalty": presence_penalty,
"stream": stream,
- "temperature": 0.5,
- "top_p": 0.95
+ "temperature": temperature,
+ "top_p": top_p
}
-
- timeout = ClientTimeout(total=300) # 5 minutes timeout
- async with session.post(cls.chat_api_endpoint, json=data, proxy=proxy, timeout=timeout) as response:
- if response.status not in (200, 201):
- error_text = await response.text()
- raise Exception(f"Error {response.status}: {error_text}")
-
- async for line in response.content:
+ async with session.post(cls.chat_api_endpoint, json=data, timeout=timeout) as response:
+ await raise_for_status(response)
+ async for line in response.iter_lines():
line = line.decode('utf-8').strip()
if line.startswith('data: '):
if line == 'data: [DONE]':
@@ -164,11 +155,9 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
"model": model,
"personaId": cls.get_personaId(model)
}
- async with session.post(cls.image_api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
-
+ async with session.post(cls.image_api_endpoint, json=data) as response:
+ await raise_for_status(response)
response_data = await response.json()
-
if "data" in response_data:
image_urls = []
for item in response_data["data"]:
@@ -179,10 +168,8 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
yield ImageResponse(image_urls, prompt)
else:
yield None
-
break
-
- except (ClientResponseError, Exception) as e:
+ except (ResponseStatusError, Exception) as e:
retry_count += 1
if retry_count >= max_retries:
raise e
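A corresponding sketch for the re-enabled AmigoChat provider; the sampling keywords match the new signature above, while the model name is an assumption (it must resolve through cls.models or model_aliases):

```python
# Sketch only: exercises the new tunable sampling parameters.
import asyncio
from g4f.Provider import AmigoChat

async def main():
    async for part in AmigoChat.create_async_generator(
        model="gpt-4o-mini",   # assumed to be in cls.models / aliases
        messages=[{"role": "user", "content": "Write a haiku"}],
        stream=True,
        temperature=0.5,       # defaults taken from the diff above
        top_p=0.95,
        max_tokens=4000,
    ):
        print(part, end="", flush=True)

asyncio.run(main())
```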
diff --git a/g4f/Provider/Cloudflare.py b/g4f/Provider/Cloudflare.py
index 8fb37bef..825c5027 100644
--- a/g4f/Provider/Cloudflare.py
+++ b/g4f/Provider/Cloudflare.py
@@ -1,72 +1,52 @@
from __future__ import annotations
-from aiohttp import ClientSession
import asyncio
import json
import uuid
-import cloudscraper
-from typing import AsyncGenerator
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
+from ..typing import AsyncResult, Messages, Cookies
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, get_running_loop
+from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
label = "Cloudflare AI"
url = "https://playground.ai.cloudflare.com"
api_endpoint = "https://playground.ai.cloudflare.com/api/inference"
+ models_url = "https://playground.ai.cloudflare.com/api/models"
working = True
supports_stream = True
supports_system_message = True
supports_message_history = True
-
- default_model = '@cf/meta/llama-3.1-8b-instruct-awq'
- models = [
- '@cf/meta/llama-2-7b-chat-fp16',
- '@cf/meta/llama-2-7b-chat-int8',
-
- '@cf/meta/llama-3-8b-instruct',
- '@cf/meta/llama-3-8b-instruct-awq',
- '@hf/meta-llama/meta-llama-3-8b-instruct',
-
- default_model,
- '@cf/meta/llama-3.1-8b-instruct-fp8',
-
- '@cf/meta/llama-3.2-1b-instruct',
-
- '@hf/mistral/mistral-7b-instruct-v0.2',
-
- '@cf/qwen/qwen1.5-7b-chat-awq',
-
- '@cf/defog/sqlcoder-7b-2',
- ]
-
+ default_model = "@cf/meta/llama-3.1-8b-instruct"
model_aliases = {
"llama-2-7b": "@cf/meta/llama-2-7b-chat-fp16",
"llama-2-7b": "@cf/meta/llama-2-7b-chat-int8",
-
"llama-3-8b": "@cf/meta/llama-3-8b-instruct",
"llama-3-8b": "@cf/meta/llama-3-8b-instruct-awq",
"llama-3-8b": "@hf/meta-llama/meta-llama-3-8b-instruct",
-
"llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-awq",
"llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-fp8",
-
"llama-3.2-1b": "@cf/meta/llama-3.2-1b-instruct",
-
"qwen-1.5-7b": "@cf/qwen/qwen1.5-7b-chat-awq",
-
- #"sqlcoder-7b": "@cf/defog/sqlcoder-7b-2",
}
+ _args: dict = None
@classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
+ def get_models(cls) -> list[str]:
+ if not cls.models:
+ if cls._args is None:
+ get_running_loop(check_nested=True)
+ args = get_args_from_nodriver(cls.url, cookies={
+ '__cf_bm': uuid.uuid4().hex,
+ })
+ cls._args = asyncio.run(args)
+ with Session(**cls._args) as session:
+ response = session.get(cls.models_url)
+ raise_for_status(response)
+ json_data = response.json()
+ cls.models = [model.get("name") for model in json_data.get("models")]
+ cls._args["cookies"] = merge_cookies(cls._args["cookies"], response)
+ return cls.models
@classmethod
async def create_async_generator(
@@ -75,76 +55,34 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
messages: Messages,
proxy: str = None,
max_tokens: int = 2048,
+ cookies: Cookies = None,
+ timeout: int = 300,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
-
- headers = {
- 'Accept': 'text/event-stream',
- 'Accept-Language': 'en-US,en;q=0.9',
- 'Cache-Control': 'no-cache',
- 'Content-Type': 'application/json',
- 'Origin': cls.url,
- 'Pragma': 'no-cache',
- 'Referer': f'{cls.url}/',
- 'Sec-Ch-Ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
- 'Sec-Ch-Ua-Mobile': '?0',
- 'Sec-Ch-Ua-Platform': '"Linux"',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-origin',
- 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
- }
-
- cookies = {
- '__cf_bm': uuid.uuid4().hex,
- }
-
- scraper = cloudscraper.create_scraper()
-
+ if cls._args is None:
+ cls._args = await get_args_from_nodriver(cls.url, proxy, timeout, cookies)
data = {
- "messages": [
- {"role": "user", "content": format_prompt(messages)}
- ],
+ "messages": messages,
"lora": None,
"model": model,
"max_tokens": max_tokens,
"stream": True
}
-
- max_retries = 3
- full_response = ""
-
- for attempt in range(max_retries):
- try:
- response = scraper.post(
- cls.api_endpoint,
- headers=headers,
- cookies=cookies,
- json=data,
- stream=True,
- proxies={'http': proxy, 'https': proxy} if proxy else None
- )
-
- if response.status_code == 403:
- await asyncio.sleep(2 ** attempt)
- continue
-
- response.raise_for_status()
-
- for line in response.iter_lines():
+ async with StreamSession(**cls._args) as session:
+ async with session.post(
+ cls.api_endpoint,
+ json=data,
+ ) as response:
+ await raise_for_status(response)
+ cls._args["cookies"] = merge_cookies(cls._args["cookies"], response)
+ async for line in response.iter_lines():
if line.startswith(b'data: '):
if line == b'data: [DONE]':
- if full_response:
- yield full_response
break
try:
- content = json.loads(line[6:].decode('utf-8'))
- if 'response' in content and content['response'] != '</s>':
+ content = json.loads(line[6:].decode())
+ if content.get("response") and content.get("response") != '</s>':
yield content['response']
except Exception:
- continue
- break
- except Exception as e:
- if attempt == max_retries - 1:
- raise
+ continue
\ No newline at end of file
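The new get_models() is lazy and cached; a sketch of the expected behavior, assuming the nodriver dependency used by get_args_from_nodriver is available locally:

```python
# First call launches a browser via get_args_from_nodriver() to collect
# headers and a __cf_bm cookie, then fetches models_url and caches the
# result on cls.models; later calls return the cached list.
from g4f.Provider import Cloudflare

models = Cloudflare.get_models()          # network + browser on first use
print(models)
print(Cloudflare.get_models() is models)  # True: cached class attribute
```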
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
index 43cc39c0..c4be0ea8 100644
--- a/g4f/Provider/DDG.py
+++ b/g4f/Provider/DDG.py
@@ -2,12 +2,31 @@ from __future__ import annotations
import json
import aiohttp
-from aiohttp import ClientSession
+from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation
from .helper import format_prompt
+from ..requests.aiohttp import get_connector
+from ..requests.raise_for_status import raise_for_status
+from .. import debug
+MODELS = [
+ {"model":"gpt-4o","modelName":"GPT-4o","modelVariant":None,"modelStyleId":"gpt-4o-mini","createdBy":"OpenAI","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"4"},
+ {"model":"gpt-4o-mini","modelName":"GPT-4o","modelVariant":"mini","modelStyleId":"gpt-4o-mini","createdBy":"OpenAI","moderationLevel":"HIGH","isAvailable":0,"inputCharLimit":16e3,"settingId":"3"},
+ {"model":"claude-3-5-sonnet-20240620","modelName":"Claude 3.5","modelVariant":"Sonnet","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"7"},
+ {"model":"claude-3-opus-20240229","modelName":"Claude 3","modelVariant":"Opus","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"2"},
+ {"model":"claude-3-haiku-20240307","modelName":"Claude 3","modelVariant":"Haiku","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":0,"inputCharLimit":16e3,"settingId":"1"},
+ {"model":"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo","modelName":"Llama 3.1","modelVariant":"70B","modelStyleId":"llama-3","createdBy":"Meta","moderationLevel":"MEDIUM","isAvailable":0,"isOpenSource":0,"inputCharLimit":16e3,"settingId":"5"},
+ {"model":"mistralai/Mixtral-8x7B-Instruct-v0.1","modelName":"Mixtral","modelVariant":"8x7B","modelStyleId":"mixtral","createdBy":"Mistral AI","moderationLevel":"LOW","isAvailable":0,"isOpenSource":0,"inputCharLimit":16e3,"settingId":"6"}
+]
+
+class Conversation(BaseConversation):
+ vqd: str = None
+ message_history: Messages = []
+
+ def __init__(self, model: str):
+ self.model = model
class DDG(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://duckduckgo.com"
@@ -18,81 +37,74 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
supports_message_history = True
default_model = "gpt-4o-mini"
- models = [
- "gpt-4o-mini",
- "claude-3-haiku-20240307",
- "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
- "mistralai/Mixtral-8x7B-Instruct-v0.1"
- ]
+ models = [model.get("model") for model in MODELS]
model_aliases = {
"claude-3-haiku": "claude-3-haiku-20240307",
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
- "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1"
+ "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ "gpt-4": "gpt-4o-mini"
}
@classmethod
- def get_model(cls, model: str) -> str:
- return cls.model_aliases.get(model, model) if model in cls.model_aliases else cls.default_model
-
- @classmethod
- async def get_vqd(cls):
+ async def get_vqd(cls, proxy: str, connector: BaseConnector = None):
status_url = "https://duckduckgo.com/duckchat/v1/status"
-
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
'Accept': 'text/event-stream',
'x-vqd-accept': '1'
}
-
- async with aiohttp.ClientSession() as session:
- try:
- async with session.get(status_url, headers=headers) as response:
- if response.status == 200:
- return response.headers.get("x-vqd-4")
- else:
- print(f"Error: Status code {response.status}")
- return None
- except Exception as e:
- print(f"Error getting VQD: {e}")
- return None
+ async with aiohttp.ClientSession(connector=get_connector(connector, proxy)) as session:
+ async with session.get(status_url, headers=headers) as response:
+ await raise_for_status(response)
+ return response.headers.get("x-vqd-4")
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
- conversation: dict = None,
+ conversation: Conversation = None,
+ return_conversation: bool = False,
proxy: str = None,
+ connector: BaseConnector = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
+ is_new_conversation = False
+ if conversation is None:
+ conversation = Conversation(model)
+ is_new_conversation = True
+ debug.last_model = model
+ if conversation.vqd is None:
+ conversation.vqd = await cls.get_vqd(proxy, connector)
+ if not conversation.vqd:
+ raise Exception("Failed to obtain VQD token")
+
headers = {
'accept': 'text/event-stream',
'content-type': 'application/json',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
+ 'x-vqd-4': conversation.vqd,
}
-
- vqd = conversation.get('vqd') if conversation else await cls.get_vqd()
- if not vqd:
- raise Exception("Failed to obtain VQD token")
-
- headers['x-vqd-4'] = vqd
-
- if conversation:
- message_history = conversation.get('messages', [])
- message_history.append({"role": "user", "content": format_prompt(messages)})
- else:
- message_history = [{"role": "user", "content": format_prompt(messages)}]
-
- async with ClientSession(headers=headers) as session:
+ async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
+ if is_new_conversation:
+ conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
+ else:
+ conversation.message_history = [
+ *conversation.message_history,
+ messages[-2],
+ messages[-1]
+ ]
+ if return_conversation:
+ yield conversation
data = {
- "model": model,
- "messages": message_history
+ "model": conversation.model,
+ "messages": conversation.message_history
}
-
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
+ async with session.post(cls.api_endpoint, json=data) as response:
+ conversation.vqd = response.headers.get("x-vqd-4")
+ await raise_for_status(response)
async for line in response.content:
if line:
decoded_line = line.decode('utf-8')
@@ -105,4 +117,4 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
if 'message' in json_data:
yield json_data['message']
except json.JSONDecodeError:
- pass
+ pass
\ No newline at end of file
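A sketch of multi-turn use with the new Conversation object; when return_conversation=True, the generator yields the Conversation (carrying the vqd token and message history) before the answer text:

```python
import asyncio
from g4f.Provider import DDG
from g4f.Provider.DDG import Conversation

async def main():
    conversation = None
    async for item in DDG.create_async_generator(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Hi there"}],
        return_conversation=True,
    ):
        if isinstance(item, Conversation):
            conversation = item          # keep for the next turn
        else:
            print(item, end="", flush=True)
    # A follow-up turn would pass conversation=conversation and append the
    # previous assistant reply plus the new user message to messages.

asyncio.run(main())
```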
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index d4a4b497..509a7f16 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -3,8 +3,13 @@ from __future__ import annotations
import json
import requests
-from curl_cffi import requests as cf_reqs
+try:
+ from curl_cffi import requests as cf_reqs
+ has_curl_cffi = True
+except ImportError:
+ has_curl_cffi = False
from ..typing import CreateResult, Messages
+from ..errors import MissingRequirementsError
from .base_provider import ProviderModelMixin, AbstractProvider
from .helper import format_prompt
@@ -55,6 +60,8 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
stream: bool,
**kwargs
) -> CreateResult:
+ if not has_curl_cffi:
+ raise MissingRequirementsError('Install "curl_cffi" package | pip install -U curl_cffi')
model = cls.get_model(model)
if model in cls.models:
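The guarded import moves the failure from import time to call time; a sketch of what a caller sees without curl_cffi installed (the empty model string is an assumption that get_model falls back to the provider default):

```python
from g4f.Provider import HuggingChat
from g4f.errors import MissingRequirementsError

try:
    # create_completion is a generator, so the check fires on iteration.
    for chunk in HuggingChat.create_completion(
        model="",   # assumed to resolve to default_model via get_model()
        messages=[{"role": "user", "content": "Hi"}],
        stream=True,
    ):
        print(chunk, end="")
except MissingRequirementsError as exc:
    print(exc)  # Install "curl_cffi" package | pip install -U curl_cffi
```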
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 7ccfa877..fc50bdee 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -179,7 +179,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
"gpt-4o": "gpt-4o-2024-08-06",
"gpt-4-turbo": "gpt-4-turbo-2024-04-09",
- "gpt-4": "gpt-4-0613",
+ "gpt-4": "gpt-4o-mini-free",
"claude-3-opus": "claude-3-opus-20240229",
"claude-3-opus": "claude-3-opus-20240229-aws",
diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py
index 68a7357f..6aabe7b1 100644
--- a/g4f/Provider/Pi.py
+++ b/g4f/Provider/Pi.py
@@ -2,20 +2,21 @@ from __future__ import annotations
import json
-from ..typing import CreateResult, Messages
-from .base_provider import AbstractProvider, format_prompt
-from ..requests import Session, get_session_from_browser, raise_for_status
+from ..typing import AsyncResult, Messages, Cookies
+from .base_provider import AsyncGeneratorProvider, format_prompt
+from ..requests import StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
-class Pi(AbstractProvider):
+class Pi(AsyncGeneratorProvider):
url = "https://pi.ai/talk"
working = True
supports_stream = True
- _session = None
default_model = "pi"
models = [default_model]
+ _headers: dict = None
+ _cookies: Cookies = {}
@classmethod
- def create_completion(
+ async def create_async_generator(
cls,
model: str,
messages: Messages,
@@ -23,49 +24,52 @@ class Pi(AbstractProvider):
proxy: str = None,
timeout: int = 180,
conversation_id: str = None,
- webdriver: WebDriver = None,
**kwargs
- ) -> CreateResult:
- if cls._session is None:
- cls._session = get_session_from_browser(url=cls.url, proxy=proxy, timeout=timeout)
- if not conversation_id:
- conversation_id = cls.start_conversation(cls._session)
- prompt = format_prompt(messages)
- else:
- prompt = messages[-1]["content"]
- answer = cls.ask(cls._session, prompt, conversation_id)
- for line in answer:
- if "text" in line:
- yield line["text"]
-
+ ) -> AsyncResult:
+ if cls._headers is None:
+ args = await get_args_from_nodriver(cls.url, proxy=proxy, timeout=timeout)
+ cls._cookies = args.get("cookies", {})
+ cls._headers = args.get("headers")
+ async with StreamSession(headers=cls._headers, cookies=cls._cookies, proxy=proxy) as session:
+ if not conversation_id:
+ conversation_id = await cls.start_conversation(session)
+ prompt = format_prompt(messages)
+ else:
+ prompt = messages[-1]["content"]
+ answer = cls.ask(session, prompt, conversation_id)
+ async for line in answer:
+ if "text" in line:
+ yield line["text"]
+
@classmethod
- def start_conversation(cls, session: Session) -> str:
- response = session.post('https://pi.ai/api/chat/start', data="{}", headers={
+ async def start_conversation(cls, session: StreamSession) -> str:
+ async with session.post('https://pi.ai/api/chat/start', data="{}", headers={
'accept': 'application/json',
'x-api-version': '3'
- })
- raise_for_status(response)
- return response.json()['conversations'][0]['sid']
+ }) as response:
+ await raise_for_status(response)
+ return (await response.json())['conversations'][0]['sid']
- def get_chat_history(session: Session, conversation_id: str):
+ async def get_chat_history(session: StreamSession, conversation_id: str):
params = {
'conversation': conversation_id,
}
- response = session.get('https://pi.ai/api/chat/history', params=params)
- raise_for_status(response)
- return response.json()
+ async with session.get('https://pi.ai/api/chat/history', params=params) as response:
+ await raise_for_status(response)
+ return await response.json()
- def ask(session: Session, prompt: str, conversation_id: str):
+ @classmethod
+ async def ask(cls, session: StreamSession, prompt: str, conversation_id: str):
json_data = {
'text': prompt,
'conversation': conversation_id,
'mode': 'BASE',
}
- response = session.post('https://pi.ai/api/chat', json=json_data, stream=True)
- raise_for_status(response)
- for line in response.iter_lines():
- if line.startswith(b'data: {"text":'):
- yield json.loads(line.split(b'data: ')[1])
- elif line.startswith(b'data: {"title":'):
- yield json.loads(line.split(b'data: ')[1])
-
+ async with session.post('https://pi.ai/api/chat', json=json_data) as response:
+ await raise_for_status(response)
+ cls._cookies = merge_cookies(cls._cookies, response)
+ async for line in response.iter_lines():
+ if line.startswith(b'data: {"text":'):
+ yield json.loads(line.split(b'data: ')[1])
+ elif line.startswith(b'data: {"title":'):
+ yield json.loads(line.split(b'data: ')[1])
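Pi is now an AsyncGeneratorProvider; a minimal sketch, assuming nodriver can bootstrap headers and cookies on first use:

```python
import asyncio
from g4f.Provider import Pi

async def main():
    # First call populates Pi._headers / Pi._cookies via nodriver, starts a
    # conversation, then streams {"text": ...} events from /api/chat.
    async for text in Pi.create_async_generator(
        model="pi",
        messages=[{"role": "user", "content": "Hello Pi"}],
    ):
        print(text, end="", flush=True)

asyncio.run(main())
```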
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index da0eacfe..8a162baf 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -13,6 +13,7 @@ from .local import *
from .AIUncensored import AIUncensored
from .Airforce import Airforce
+from .AmigoChat import AmigoChat
from .Bing import Bing
from .Blackbox import Blackbox
from .ChatGpt import ChatGpt
diff --git a/g4f/Provider/bing/create_images.py b/g4f/Provider/bing/create_images.py
index 7a08ddfe..45ba30b6 100644
--- a/g4f/Provider/bing/create_images.py
+++ b/g4f/Provider/bing/create_images.py
@@ -132,7 +132,7 @@ async def create_images(session: ClientSession, prompt: str, timeout: int = TIME
redirect_url = response.headers["Location"].replace("&nfy=1", "")
redirect_url = f"{BING_URL}{redirect_url}"
- request_id = redirect_url.split("id=")[1]
+ request_id = redirect_url.split("id=")[-1]
async with session.get(redirect_url) as response:
response.raise_for_status()
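Why [-1] over [1]: "rid=" also contains the substring "id=", so the first split segment can be the wrong parameter; a hypothetical URL illustrates (the real redirect format may differ):

```python
redirect_url = "https://www.bing.com/images/create?rid=42&id=1-abc123"
print(redirect_url.split("id=")[1])   # '42&'       -- matched inside "rid="
print(redirect_url.split("id=")[-1])  # '1-abc123'  -- the actual request id
```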
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 3a0d6b29..85e11181 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -196,7 +196,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
async with session.get(url, headers=headers) as response:
cls._update_request_args(session)
if response.status == 401:
- raise MissingAuthError('Add a "api_key" or a .har file' if cls._api_key is None else "Invalid api key")
+ raise MissingAuthError('Add a .har file for OpenaiChat' if cls._api_key is None else "Invalid api key")
await raise_for_status(response)
data = await response.json()
if "categories" in data:
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
index a6edf5f8..1bfe7ed9 100644
--- a/g4f/Provider/not_working/__init__.py
+++ b/g4f/Provider/not_working/__init__.py
@@ -2,7 +2,6 @@ from .AI365VIP import AI365VIP
from .AIChatFree import AIChatFree
from .AiChatOnline import AiChatOnline
from .AiChats import AiChats
-from .AmigoChat import AmigoChat
from .Aura import Aura
from .Chatgpt4o import Chatgpt4o
from .ChatgptFree import ChatgptFree
diff --git a/g4f/Provider/openai/proofofwork.py b/g4f/Provider/openai/proofofwork.py
index baf8a0ea..55603892 100644
--- a/g4f/Provider/openai/proofofwork.py
+++ b/g4f/Provider/openai/proofofwork.py
@@ -4,7 +4,6 @@ import json
import base64
from datetime import datetime, timezone
-
def generate_proof_token(required: bool, seed: str = "", difficulty: str = "", user_agent: str = None, proofTokens: list = None):
if not required:
return