author     kqlio67 <kqlio67@users.noreply.github.com>  2024-11-12 07:44:48 +0100
committer  kqlio67 <kqlio67@users.noreply.github.com>  2024-11-12 07:44:48 +0100
commit     21a26f68826778afa7ab932ef4cd488b422fdc68 (patch)
tree       baac655d7744f9c872a7968ac94583297fa4bc10 /docs
parent     Update (g4f/Provider/GizAI.py) (diff)
Diffstat (limited to 'docs')
-rw-r--r--  docs/async_client.md      9
-rw-r--r--  docs/client.md            4
-rw-r--r--  docs/docker.md            2
-rw-r--r--  docs/git.md               2
-rw-r--r--  docs/interference-api.md  6
5 files changed, 12 insertions, 11 deletions
diff --git a/docs/async_client.md b/docs/async_client.md
index 0719a463..7194c792 100644
--- a/docs/async_client.md
+++ b/docs/async_client.md
@@ -57,7 +57,7 @@ client = Client(
**Here’s an improved example of creating chat completions:**
```python
response = await async_client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
@@ -99,7 +99,7 @@ async def main():
client = Client()
response = await client.chat.completions.async_create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
@@ -230,7 +230,7 @@ async def main():
client = Client()
task1 = client.chat.completions.async_create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
@@ -262,6 +262,7 @@ The G4F AsyncClient supports a wide range of AI models and providers, allowing y
### Models
- GPT-3.5-Turbo
+ - GPT-4o-Mini
- GPT-4
- DALL-E 3
- Gemini
@@ -306,7 +307,7 @@ Implementing proper error handling and following best practices is crucial when
```python
try:
response = await client.chat.completions.async_create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
diff --git a/docs/client.md b/docs/client.md
index 388b2e4b..da45d7fd 100644
--- a/docs/client.md
+++ b/docs/client.md
@@ -62,7 +62,7 @@ client = Client(
**Here’s an improved example of creating chat completions:**
```python
response = client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
@@ -104,7 +104,7 @@ from g4f.client import Client
client = Client()
response = client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
diff --git a/docs/docker.md b/docs/docker.md
index e1caaf3d..8017715c 100644
--- a/docs/docker.md
+++ b/docs/docker.md
@@ -71,7 +71,7 @@ import requests
url = "http://localhost:1337/v1/chat/completions"
body = {
- "model": "gpt-3.5-turbo",
+ "model": "gpt-4o-mini",
"stream": False,
"messages": [
{"role": "assistant", "content": "What can you do?"}
diff --git a/docs/git.md b/docs/git.md
index 33a0ff42..ff6c8091 100644
--- a/docs/git.md
+++ b/docs/git.md
@@ -95,7 +95,7 @@ from g4f.client import Client
client = Client()
response = client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[
{
"role": "user",
diff --git a/docs/interference-api.md b/docs/interference-api.md
index 2e18e7b5..324334c4 100644
--- a/docs/interference-api.md
+++ b/docs/interference-api.md
@@ -64,7 +64,7 @@ curl -X POST "http://localhost:1337/v1/chat/completions" \
"content": "Hello"
}
],
- "model": "gpt-3.5-turbo"
+ "model": "gpt-4o-mini"
}'
```
@@ -104,7 +104,7 @@ client = OpenAI(
)
response = client.chat.completions.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
messages=[{"role": "user", "content": "Write a poem about a tree"}],
stream=True,
)
@@ -131,7 +131,7 @@ import requests
url = "http://localhost:1337/v1/chat/completions"
body = {
- "model": "gpt-3.5-turbo",
+ "model": "gpt-4o-mini",
"stream": False,
"messages": [
{"role": "assistant", "content": "What can you do?"}