From 96e1efee0f31fad48dafa417551b31f636609227 Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Fri, 25 Oct 2024 20:29:03 +0300
Subject: docs(docs/client.md): update G4F Client API guide

---
 docs/client.md | 30 ++++++++++++++++++++++++++++--
 1 file changed, 28 insertions(+), 2 deletions(-)
(limited to 'docs/client.md')

diff --git a/docs/client.md b/docs/client.md
index 08445402..9621e3c2 100644
--- a/docs/client.md
+++ b/docs/client.md
@@ -7,6 +7,7 @@
   - [Getting Started](#getting-started)
   - [Switching to G4F Client](#switching-to-g4f-client)
   - [Initializing the Client](#initializing-the-client)
+  - [Creating Chat Completions](#creating-chat-completions)
   - [Configuration](#configuration)
   - [Usage Examples](#usage-examples)
   - [Text Completions](#text-completions)
@@ -22,7 +23,7 @@
 ## Introduction
-Welcome to the G4F Client API, a cutting-edge tool for seamlessly integrating advanced AI capabilities into your Python applications. This guide is designed to facilitate your transition from using the OpenAI client to the G4F Client, offering enhanced features while maintaining compatibility with the existing OpenAI API.
+Welcome to the G4F Client API, a cutting-edge tool for seamlessly integrating advanced AI capabilities into your Python applications. This guide is designed to facilitate your transition from using the OpenAI or Anthropic client to the G4F Client, offering enhanced features while maintaining compatibility with the existing OpenAI and Anthropic API.
 ## Getting Started
 ### Switching to G4F Client
@@ -42,7 +43,7 @@ from g4f.client import Client as OpenAI
-The G4F Client preserves the same familiar API interface as OpenAI, ensuring a smooth transition process.
+The G4F Client preserves the same familiar API interface as OpenAI or Anthropic, ensuring a smooth transition process.
 ## Initializing the Client
 To utilize the G4F Client, create a new instance.
 **Below is an example showcasing custom providers:**
@@ -56,6 +57,30 @@ client = Client(
     # Add any other necessary parameters
 )
 ```
+
+## Creating Chat Completions
+**Here’s an example of creating a chat completion:**
+```python
+response = client.chat.completions.create(
+    system="You are a helpful assistant.",
+    model="gpt-3.5-turbo",
+    messages=[
+        {
+            "role": "user",
+            "content": "Say this is a test"
+        }
+    ]
+    # Add any other necessary parameters
+)
+```
+
+**This example:**
+   - Sets a system message to define the assistant's role
+   - Sends the user message `Say this is a test`
+   - Leaves a placeholder comment for optional parameters such as `temperature` and `max_tokens`
+   - Returns a complete, non-streamed response, since streaming is not enabled
+
+You can adjust these parameters based on your specific needs.
 ## Configuration
@@ -271,6 +296,7 @@ while True:
     try:
         # Get GPT's response
         response = client.chat.completions.create(
+            system="You are a helpful assistant.",
             messages=messages,
             model=g4f.models.default,
         )
-- cgit v1.2.3

From 72e8152853386bc40842c8150187b9b0a38426af Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Mon, 28 Oct 2024 10:36:45 +0200
Subject: feat(docs/client.md): add base64 response format for image generation

---
 docs/client.md | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)
(limited to 'docs/client.md')

diff --git a/docs/client.md b/docs/client.md
index 9621e3c2..b4f351d3 100644
--- a/docs/client.md
+++ b/docs/client.md
@@ -154,7 +154,7 @@ from g4f.client import Client
 client = Client()
 response = client.images.generate(
-    model="dall-e-3",
+    model="flux",
     prompt="a white siamese cat"
     # Add any other necessary parameters
 )
@@ -164,6 +164,23 @@
 image_url = response.data[0].url
 print(f"Generated image URL: {image_url}")
 ```
+
+#### Base64 Response Format
+```python
+from g4f.client import Client
+
+client = Client()
+
+response = client.images.generate(
+    model="flux",
+    prompt="a white siamese cat",
+    response_format="b64_json"
+)
+
+base64_text = response.data[0].b64_json
+print(base64_text)
+```
+
 ### Creating Image Variations
-- cgit v1.2.3

From e79c8b01f58d21502c962f38c804bf81196f89fb Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Tue, 29 Oct 2024 22:03:05 +0200
Subject: Update (docs/async_client.md docs/client.md docs/interference-api.md g4f/client/client.py)

---
 docs/client.md | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
(limited to 'docs/client.md')

diff --git a/docs/client.md b/docs/client.md
index b4f351d3..388b2e4b 100644
--- a/docs/client.md
+++ b/docs/client.md
@@ -23,7 +23,7 @@
 ## Introduction
-Welcome to the G4F Client API, a cutting-edge tool for seamlessly integrating advanced AI capabilities into your Python applications. This guide is designed to facilitate your transition from using the OpenAI or Anthropic client to the G4F Client, offering enhanced features while maintaining compatibility with the existing OpenAI and Anthropic API.
+Welcome to the G4F Client API, a cutting-edge tool for seamlessly integrating advanced AI capabilities into your Python applications. This guide is designed to facilitate your transition from using the OpenAI client to the G4F Client, offering enhanced features while maintaining compatibility with the existing OpenAI API.
 ## Getting Started
 ### Switching to G4F Client
@@ -43,7 +43,7 @@ from g4f.client import Client as OpenAI
-The G4F Client preserves the same familiar API interface as OpenAI or Anthropic, ensuring a smooth transition process.
+The G4F Client preserves the same familiar API interface as OpenAI, ensuring a smooth transition process.
 ## Initializing the Client
 To utilize the G4F Client, create a new instance.
 **Below is an example showcasing custom providers:**
@@ -62,7 +62,6 @@ client = Client(
 **Here’s an example of creating a chat completion:**
 ```python
 response = client.chat.completions.create(
-    system="You are a helpful assistant.",
     model="gpt-3.5-turbo",
     messages=[
         {
             "role": "user",
             "content": "Say this is a test"
         }
     ]
     # Add any other necessary parameters
 )
 ```
 **This example:**
-   - Sets a system message to define the assistant's role
    - Sends the user message `Say this is a test`
    - Leaves a placeholder comment for optional parameters such as `temperature` and `max_tokens`
    - Returns a complete, non-streamed response, since streaming is not enabled
@@ -313,7 +311,6 @@ while True:
     try:
         # Get GPT's response
         response = client.chat.completions.create(
-            system="You are a helpful assistant.",
             messages=messages,
             model=g4f.models.default,
         )
-- cgit v1.2.3
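After the third patch above, `docs/client.md` settles on the plain OpenAI-style call: the `system=` keyword is gone, so a system prompt belongs in the `messages` list itself. The sketch below illustrates that final form; it assumes the G4F client keeps the OpenAI-compatible response schema (`response.choices[0].message.content`), as the rest of the guide does:

```python
from g4f.client import Client

# Initialize the client; without arguments it falls back to the default providers
client = Client()

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        # The system prompt is passed as a regular message, not a separate keyword
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say this is a test"}
    ]
)

# OpenAI-style access to the generated text
print(response.choices[0].message.content)
```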
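The same change applies to the conversation-loop example touched by the first and third patches: once the `system=` keyword is removed, the system prompt can simply seed the `messages` history. Below is a minimal sketch of such a loop, again assuming the OpenAI-style response schema; the two hard-coded user inputs are only for illustration:

```python
import g4f
from g4f.client import Client

client = Client()

# Seed the history with a system message instead of the removed `system=` keyword
messages = [{"role": "system", "content": "You are a helpful assistant."}]

for user_input in ["Hello!", "What did I just say?"]:
    messages.append({"role": "user", "content": user_input})

    response = client.chat.completions.create(
        messages=messages,
        model=g4f.models.default,
    )

    reply = response.choices[0].message.content
    # Keep the assistant's reply in the history so later turns have context
    messages.append({"role": "assistant", "content": reply})
    print(f"Assistant: {reply}")
```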
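The second patch prints the `b64_json` string as-is. To actually use the generated image you normally decode it back into bytes; the sketch below does so with the standard-library `base64` module. It assumes the field contains plain base64 data (no `data:` URI prefix) and that the provider returns a PNG — the output filename is arbitrary:

```python
import base64

from g4f.client import Client

client = Client()

response = client.images.generate(
    model="flux",
    prompt="a white siamese cat",
    response_format="b64_json"
)

# Decode the base64 payload into raw image bytes and write them to disk
image_bytes = base64.b64decode(response.data[0].b64_json)
with open("siamese_cat.png", "wb") as f:
    f.write(image_bytes)

print("Image saved to siamese_cat.png")
```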