Diffstat
-rw-r--r--  g4f/Provider/DeepInfra.py  | 2
-rw-r--r--  g4f/client/async_client.py | 5
-rw-r--r--  g4f/client/client.py       | 7
3 files changed, 9 insertions, 5 deletions
diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/DeepInfra.py
index 763b960a..f3e31962 100644
--- a/g4f/Provider/DeepInfra.py
+++ b/g4f/Provider/DeepInfra.py
@@ -11,7 +11,7 @@ class DeepInfra(Openai):
     needs_auth = True
     supports_stream = True
     supports_message_history = True
-    default_model = "meta-llama/Meta-Llama-3-70b-instruct"
+    default_model = "meta-llama/Meta-Llama-3-70B-Instruct"
     default_vision_model = "llava-hf/llava-1.5-7b-hf"
     model_aliases = {
         'dbrx-instruct': 'databricks/dbrx-instruct',
diff --git a/g4f/client/async_client.py b/g4f/client/async_client.py
index a97b4d7a..dbfa6b70 100644
--- a/g4f/client/async_client.py
+++ b/g4f/client/async_client.py
@@ -184,9 +184,10 @@ async def iter_image_response(
         return ImagesResponse([Image(None, image, chunk.alt) for image in chunk.get_list()], int(time.time()))
 
 def create_image(provider: ProviderType, prompt: str, model: str = "", **kwargs) -> AsyncIterator:
-    prompt = f"create a image with: {prompt}"
-    if provider.__name__ == "You":
+    if isinstance(provider, type) and provider.__name__ == "You":
         kwargs["chat_mode"] = "create"
+    else:
+        prompt = f"create a image with: {prompt}"
     return provider.create_async_generator(
         model,
         [{"role": "user", "content": prompt}],
diff --git a/g4f/client/client.py b/g4f/client/client.py
index 1b090981..acf53c70 100644
--- a/g4f/client/client.py
+++ b/g4f/client/client.py
@@ -125,9 +125,12 @@ def iter_image_response(response: Iterator) -> Union[ImagesResponse, None]:
         return ImagesResponse([Image(image) for image in chunk.get_list()])
 
 def create_image(client: Client, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> Iterator:
-    prompt = f"create a image with: {prompt}"
-    if provider.__name__ == "You":
+
+
+    if isinstance(provider, type) and provider.__name__ == "You":
         kwargs["chat_mode"] = "create"
+    else:
+        prompt = f"create a image with: {prompt}"
     return provider.create_completion(
         model,
         [{"role": "user", "content": prompt}],
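
For context: the DeepInfra hunk only corrects the capitalization of the default model ID, while the two create_image hunks replace an unconditional prompt rewrite with a dispatch on the provider: a class-based provider named "You" gets chat_mode="create" and the untouched prompt, every other provider keeps the prefixed prompt. Below is a minimal, self-contained sketch of that dispatch pattern; the You and OtherProvider classes here are illustrative stubs, not the real g4f providers.

# Minimal sketch of the dispatch introduced above, with stand-in classes
# ('You' and 'OtherProvider' are illustrative stubs, not the real g4f providers).

class You:
    """Stand-in for the class-based provider whose __name__ is 'You'."""
    @staticmethod
    def create_async_generator(model, messages, **kwargs):
        return ("You", model, messages, kwargs)

class OtherProvider:
    """Stand-in for any other provider."""
    @staticmethod
    def create_async_generator(model, messages, **kwargs):
        return ("OtherProvider", model, messages, kwargs)

def create_image(provider, prompt, model="", **kwargs):
    # __name__ only exists on classes, so the isinstance(provider, type) guard
    # keeps provider *instances* from raising AttributeError here.
    if isinstance(provider, type) and provider.__name__ == "You":
        kwargs["chat_mode"] = "create"
    else:
        prompt = f"create a image with: {prompt}"
    return provider.create_async_generator(
        model,
        [{"role": "user", "content": prompt}],
        **kwargs,
    )

print(create_image(You, "a red fox"))            # prompt untouched, chat_mode="create" added
print(create_image(OtherProvider, "a red fox"))  # prompt becomes "create a image with: a red fox"

The synchronous version in client.py applies the same guard; only the final call differs (create_completion instead of create_async_generator).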