 README.md                |  2 +-
 docs/async_client.md     |  9 +++++----
 docs/client.md           |  4 ++--
 docs/docker.md           |  2 +-
 docs/git.md              |  2 +-
 docs/interference-api.md |  6 +++---
 g4f/client/client.py     | 28 +++++++++++++++++++++-------
 7 files changed, 34 insertions(+), 19 deletions(-)
diff --git a/README.md b/README.md
index 53f759f4..a47791ee 100644
--- a/README.md
+++ b/README.md
@@ -174,7 +174,7 @@ from g4f.client import Client
client = Client()
response = client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[{"role": "user", "content": "Hello"}],
# Add any other necessary parameters
)
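For reference, the README snippet this hunk touches reads as follows once the change is applied; a minimal runnable sketch, assuming the g4f package is installed and a default provider is reachable:

```python
from g4f.client import Client

client = Client()

# "gpt-4o-mini" replaces the retired "gpt-3.5-turbo" model name
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Hello"}],
    # Add any other necessary parameters
)

# g4f mirrors the OpenAI response shape, so the reply text lives here
print(response.choices[0].message.content)
```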
diff --git a/docs/async_client.md b/docs/async_client.md
index 0719a463..7194c792 100644
--- a/docs/async_client.md
+++ b/docs/async_client.md
@@ -57,7 +57,7 @@ client = Client(
**Here’s an improved example of creating chat completions:**
```python
response = await async_client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
@@ -99,7 +99,7 @@ async def main():
client = Client()
response = await client.chat.completions.async_create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
@@ -230,7 +230,7 @@ async def main():
client = Client()
task1 = client.chat.completions.async_create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
@@ -262,6 +262,7 @@ The G4F AsyncClient supports a wide range of AI models and providers, allowing y
### Models
- GPT-3.5-Turbo
+ - GPT-4o-Mini
- GPT-4
- DALL-E 3
- Gemini
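Each async_client.md hunk above makes the same gpt-3.5-turbo to gpt-4o-mini swap; folding in the try/except wrapper from the error-handling hunk just below, a representative call would look roughly like this (a sketch assembled from the hunks, not a verbatim doc excerpt):

```python
import asyncio
from g4f.client import Client

async def main():
    client = Client()
    try:
        # async_create is the awaitable counterpart of create
        response = await client.chat.completions.async_create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "Hello"}],
        )
        print(response.choices[0].message.content)
    except Exception as e:
        # The docs' error-handling example wraps the call like this
        print(f"Request failed: {e}")

asyncio.run(main())
```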
@@ -306,7 +307,7 @@ Implementing proper error handling and following best practices is crucial when
```python
try:
response = await client.chat.completions.async_create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
diff --git a/docs/client.md b/docs/client.md
index 388b2e4b..da45d7fd 100644
--- a/docs/client.md
+++ b/docs/client.md
@@ -62,7 +62,7 @@ client = Client(
**Here’s an improved example of creating chat completions:**
```python
response = client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
@@ -104,7 +104,7 @@ from g4f.client import Client
client = Client()
response = client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
diff --git a/docs/docker.md b/docs/docker.md
index e1caaf3d..8017715c 100644
--- a/docs/docker.md
+++ b/docs/docker.md
@@ -71,7 +71,7 @@ import requests
url = "http://localhost:1337/v1/chat/completions"
body = {
- "model": "gpt-3.5-turbo",
+ "model": "gpt-4o-mini",
"stream": False,
"messages": [
{"role": "assistant", "content": "What can you do?"}
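The docker.md hunk cuts off inside the request body; the natural completion, posting the body to the containerized API with requests, would be along these lines (the lines past the hunk boundary are an assumption, as is the OpenAI-style response shape):

```python
import requests

url = "http://localhost:1337/v1/chat/completions"
body = {
    "model": "gpt-4o-mini",
    "stream": False,
    "messages": [
        {"role": "assistant", "content": "What can you do?"}
    ],
}

# POST the chat completion request to the dockerized g4f API
response = requests.post(url, json=body)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])
```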
diff --git a/docs/git.md b/docs/git.md
index 33a0ff42..ff6c8091 100644
--- a/docs/git.md
+++ b/docs/git.md
@@ -95,7 +95,7 @@ from g4f.client import Client
client = Client()
response = client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
diff --git a/docs/interference-api.md b/docs/interference-api.md
index 2e18e7b5..324334c4 100644
--- a/docs/interference-api.md
+++ b/docs/interference-api.md
@@ -64,7 +64,7 @@ curl -X POST "http://localhost:1337/v1/chat/completions" \
"content": "Hello"
}
],
- "model": "gpt-3.5-turbo"
+ "model": "gpt-4o-mini"
}'
```
@@ -104,7 +104,7 @@ client = OpenAI(
)
response = client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[{"role": "user", "content": "Write a poem about a tree"}],
stream=True,
)
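This hunk only changes the model name, but since the call sets stream=True on the OpenAI-compatible client, it may help to show how the stream is consumed (a sketch; the api_key and base_url arguments are assumptions based on the localhost:1337 endpoint used elsewhere in this doc):

```python
from openai import OpenAI

# Point the official OpenAI client at the local interference API
client = OpenAI(
    api_key="secret",  # assumed placeholder; the local API does not validate it
    base_url="http://localhost:1337/v1",
)

stream = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Write a poem about a tree"}],
    stream=True,
)

# Each chunk carries an incremental delta of the completion text
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
```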
@@ -131,7 +131,7 @@ import requests
url = "http://localhost:1337/v1/chat/completions"
body = {
- "model": "gpt-3.5-turbo",
+ "model": "gpt-4o-mini",
"stream": False,
"messages": [
{"role": "assistant", "content": "What can you do?"}
diff --git a/g4f/client/client.py b/g4f/client/client.py
index 8e195213..63358302 100644
--- a/g4f/client/client.py
+++ b/g4f/client/client.py
@@ -154,14 +154,29 @@ class AsyncClient(Client):
stacklevel=2
)
super().__init__(*args, **kwargs)
+ self.chat = Chat(self)
+ self._images = Images(self)
+ self.completions = Completions(self)
- async def chat_complete(self, *args, **kwargs):
- """Legacy method that redirects to async_create"""
- return await self.chat.completions.async_create(*args, **kwargs)
+ @property
+ def images(self) -> 'Images':
+ return self._images
+
+ async def async_create(self, *args, **kwargs) -> Union['ChatCompletion', AsyncIterator['ChatCompletionChunk']]:
+ response = await super().async_create(*args, **kwargs)
+ async for result in response:
+ return result
- async def create_image(self, *args, **kwargs):
- """Legacy method that redirects to async_generate"""
- return await self.images.async_generate(*args, **kwargs)
+ async def async_generate(self, *args, **kwargs) -> 'ImagesResponse':
+ return await super().async_generate(*args, **kwargs)
+
+ async def _fetch_image(self, url: str) -> bytes:
+ async with ClientSession() as session:
+ async with session.get(url) as resp:
+ if resp.status == 200:
+ return await resp.read()
+ else:
+ raise Exception(f"Failed to fetch image from {url}, status code {resp.status}")
class Completions:
def __init__(self, client: Client, provider: ProviderType = None):
@@ -531,4 +546,3 @@ class Images:
async def create_variation(self, image: Union[str, bytes], model: str = None, response_format: str = "url", **kwargs):
# Existing implementation, adjust if you want to support b64_json here as well
pass
-
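The g4f/client/client.py hunk replaces the legacy chat_complete/create_image redirects with eagerly wired chat, completions, and images attributes, an images property, async_create/async_generate overrides, and a _fetch_image helper that raises on any non-200 status. Note that the new async_create returns the first item it pulls from the underlying async iterator, so callers receive a single ChatCompletion rather than a chunk stream. A usage sketch under that reading (names come from the diff; everything around them is assumed):

```python
import asyncio
from g4f.client import AsyncClient

async def main():
    client = AsyncClient()

    # chat.completions is attached in __init__ by this commit
    response = await client.chat.completions.async_create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```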