author | Tekky <98614666+xtekky@users.noreply.github.com> | 2024-08-30 22:39:18 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-08-30 22:39:18 +0200 |
commit | c702f54e39a39c702cb2a2a8c6782c15422785aa (patch) | |
tree | 8a36ace98ab138e1eff134a5ed8891fd3c817b5b /g4f/Provider/Blackbox.py | |
parent | . (diff) | |
parent | fix for 500 Internal Server Error #2199: [Request] Blackbox provider now supports Gemini and LLaMa 3.1 models #2198, with some changes from #2196 (diff) | |
Diffstat (limited to 'g4f/Provider/Blackbox.py')
-rw-r--r-- | g4f/Provider/Blackbox.py | 22 |
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index a86471f2..fd84875e 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -14,7 +14,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.blackbox.ai"
     working = True
     default_model = 'blackbox'
-
+    models = [
+        default_model,
+        "gemini-1.5-flash",
+        "llama-3.1-8b",
+        'llama-3.1-70b',
+        'llama-3.1-405b',
+    ]
     @classmethod
     async def create_async_generator(
         cls,
@@ -28,7 +34,8 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         if image is not None:
             messages[-1]["data"] = {
                 "fileText": image_name,
-                "imageBase64": to_data_uri(image)
+                "imageBase64": to_data_uri(image),
+                "title": str(uuid.uuid4())
             }
 
         headers = {
@@ -48,7 +55,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         async with ClientSession(headers=headers) as session:
             random_id = secrets.token_hex(16)
             random_user_id = str(uuid.uuid4())
-
+            model_id_map = {
+                "blackbox": {},
+                "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
+                "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
+                'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
+                'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}
+            }
             data = {
                 "messages": messages,
                 "id": random_id,
@@ -62,12 +75,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 "webSearchMode": False,
                 "userSystemPrompt": "",
                 "githubToken": None,
+                "trendingAgentModel": model_id_map[model], # if you actually test this on the site, just ask each model "yo", weird behavior imo
                 "maxTokens": None
             }
 
             async with session.post(
                 f"{cls.url}/api/chat", json=data, proxy=proxy
-            ) as response: # type: ClientResponse
+            ) as response:
                 response.raise_for_status()
                 async for chunk in response.content.iter_any():
                     if chunk:
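To see what the change does end to end, here is a minimal usage sketch against the patched provider. It is an illustration, not part of the commit: the `(model, messages)` call signature and the assumption that the generator yields text chunks follow the g4f `AsyncGeneratorProvider` convention, since the hunks truncate the surrounding code.

```python
import asyncio

# Assumed import path; the hunks show only the class body, not the module layout.
from g4f.Provider.Blackbox import Blackbox

async def main():
    # "yo" is the smoke-test prompt suggested by the inline comment in the diff.
    messages = [{"role": "user", "content": "yo"}]
    # "llama-3.1-70b" is one of the ids added to Blackbox.models; inside the
    # provider it resolves through model_id_map and is sent to /api/chat as
    # the "trendingAgentModel" field of the JSON body.
    async for chunk in Blackbox.create_async_generator("llama-3.1-70b", messages):
        # Defensive decode: the hunk shows raw chunks from iter_any(), but not
        # whether the provider decodes them before yielding.
        print(chunk.decode() if isinstance(chunk, bytes) else chunk, end="", flush=True)

asyncio.run(main())
```

One caveat visible in the hunks: `model_id_map[model]` has no fallback, so a model name outside the new `models` list raises `KeyError` before any request is sent, unless the name is normalized upstream (e.g. by `ProviderModelMixin`).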