-rw-r--r--  README.md                              | 16
-rw-r--r--  etc/tool/translate_readme.py           | 14
-rw-r--r--  g4f/Provider/DeepInfra.py              |  2
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py  |  4
4 files changed, 19 insertions, 17 deletions
diff --git a/README.md b/README.md
index 59ad89f3..03df1326 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@ Written by [@xtekky](https://github.com/hlohaus) & maintained by [@hlohaus](http
> By using this repository or any code related to it, you agree to the [legal notice](LEGAL_NOTICE.md). The author is **not responsible for the usage of this repository nor endorses it**, nor is the author responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses.
> [!Warning]
-*"gpt4free"* serves as a **PoC** (proof of concept), demonstrating the development of a an api package with multi-provider requests, with features like timeouts, load balance and flow control.
+*"gpt4free"* serves as a **PoC** (proof of concept), demonstrating the development of an API package with multi-provider requests, with features like timeouts, load balance and flow control.
> [!Note]
<sup><strong>Lastet version:</strong></sup> [![PyPI version](https://img.shields.io/pypi/v/g4f?color=blue)](https://pypi.org/project/g4f) [![Docker version](https://img.shields.io/docker/v/hlohaus789/g4f?label=docker&color=blue)](https://hub.docker.com/r/hlohaus789/g4f)
@@ -28,21 +28,21 @@ docker pull hlohaus789/g4f
- Join our Discord Group: [discord.gg/XfybzPXPH5](https://discord.gg/XfybzPXPH5)
## đŸ”ģ Site Takedown
-Is your site on this repository and you want to take it down ? email takedown@g4f.ai with proof it is yours and it will be removed as fast as possible. - to prevent reproduction please secure your api ; )
+Is your site on this repository and you want to take it down? Send an email to takedown@g4f.ai with proof it is yours and it will be removed as fast as possible. To prevent reproduction please secure your API ;)
## 🚀 Feedback and Todo
You can always leave some feedback here: https://forms.gle/FeWV9RLEedfdkmFN6
As per the survey, here is a list of improvements to come
-- [x] update the repository to include the new openai library syntax (ex: `Openai()` class) | completed, use `g4f.client.Client`
-- [ ] golang implementation
-- [ ] 🚧 Improve Documentation (in /docs & Guides, Howtos, & Do video tutorials
+- [x] Update the repository to include the new openai library syntax (ex: `Openai()` class) | completed, use `g4f.client.Client`
+- [ ] Golang implementation
+- [ ] 🚧 Improve Documentation (in /docs & Guides, Howtos, & Do video tutorials)
- [x] Improve the provider status list & updates
- [ ] Tutorials on how to reverse sites to write your own wrapper (PoC only ofc)
- [ ] Improve the Bing wrapper. (might write a new wrapper in golang as it is very fast)
- [ ] Write a standard provider performance test to improve the stability
- [ ] Potential support and development of local models
-- [ ] 🚧 improve compatibility and error handling
+- [ ] 🚧 Improve compatibility and error handling
## 📚 Table of Contents
@@ -90,7 +90,7 @@ docker pull hlohaus789/g4f
docker run -p 8080:8080 -p 1337:1337 -p 7900:7900 --shm-size="2g" hlohaus789/g4f:latest
```
3. Open the included client on: [http://localhost:8080/chat/](http://localhost:8080/chat/)
-or set the api base in your client to: [http://localhost:1337/v1](http://localhost:1337/v1)
+or set the API base in your client to: [http://localhost:1337/v1](http://localhost:1337/v1)
4. (Optional) If you need to log in to a provider, you can view the desktop from the container here: http://localhost:7900/?autoconnect=1&resize=scale&password=secret.
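Once the container is running, any OpenAI-compatible client can talk to the API base from step 3. A minimal sketch, assuming the `openai` Python package is installed; the placeholder API key and model name are illustrative assumptions, not part of this change:
```python
from openai import OpenAI

# Point an OpenAI-compatible client at the local interference API.
client = OpenAI(base_url="http://localhost:1337/v1", api_key="not-needed")

response = client.chat.completions.create(
    model="gpt-3.5-turbo",  # placeholder model name
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```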
##### Use your smartphone:
@@ -191,7 +191,7 @@ See: [/docs/interference](/docs/interference.md)
##### Cookies / Access Token
-For generating images with Bing and for the OpenAi Chat you need cookies or a token from your browser session. From Bing you need the "_U" cookie and from OpenAI you need the "access_token". You can pass the cookies / the access token in the create function or you use the `set_cookies` setter before you run G4F:
+For generating images with Bing and for the OpenAI Chat you need cookies or a token from your browser session. From Bing you need the "_U" cookie and from OpenAI you need the "access_token". You can pass the cookies / the access token in the create function or you use the `set_cookies` setter before you run G4F:
```python
from g4f.cookies import set_cookies
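# Illustrative continuation (a sketch, not part of this change): the domain
# strings below are assumptions; the cookie and token names follow the text above.
set_cookies(".bing.com", {"_U": "<your _U cookie>"})
set_cookies("chat.openai.com", {"access_token": "<your access token>"})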
diff --git a/etc/tool/translate_readme.py b/etc/tool/translate_readme.py
index 43bfdcde..e0a9b1f1 100644
--- a/etc/tool/translate_readme.py
+++ b/etc/tool/translate_readme.py
@@ -18,12 +18,12 @@ Don't translate or change inline code examples.
```md
"""
keep_note = "Keep this: [!Note] as [!Note].\n"
-blacklist = [
+blocklist = [
'## Šī¸ Copyright',
'## 🚀 Providers and Models',
'## 🔗 Related GPT4Free Projects'
]
-whitelist = [
+allowlist = [
"### Other",
"### Models"
]
@@ -52,15 +52,15 @@ async def translate(text):
return result
async def translate_part(part, i):
- blacklisted = False
- for headline in blacklist:
+ blocklisted = False
+ for headline in blocklist:
if headline in part:
- blacklisted = True
- if blacklisted:
+ blocklisted = True
+ if blocklisted:
lines = part.split('\n')
lines[0] = await translate(lines[0])
part = '\n'.join(lines)
- for trans in whitelist:
+ for trans in allowlist:
if trans in part:
part = part.replace(trans, await translate(trans))
else:
diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/DeepInfra.py
index 183f00ea..f44679ff 100644
--- a/g4f/Provider/DeepInfra.py
+++ b/g4f/Provider/DeepInfra.py
@@ -61,6 +61,8 @@ class DeepInfra(AsyncGeneratorProvider, ProviderModelMixin):
'model' : cls.get_model(model),
'messages': messages,
'temperature': kwargs.get("temperature", 0.7),
+ 'max_tokens': kwargs.get("max_tokens", 512),
+ 'stop': kwargs.get("stop", []),
'stream' : True
}
async with session.post('https://api.deepinfra.com/v1/openai/chat/completions',
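The two added fields are read from `kwargs` with defaults of 512 tokens and an empty stop list, so callers can override them per request. A hedged sketch of how they might be passed through the `g4f.client.Client` interface mentioned in the README above; the model name and the assumption that these keyword arguments reach the provider unchanged are illustrative, not guaranteed by this change:
```python
from g4f.client import Client
from g4f.Provider import DeepInfra

client = Client(provider=DeepInfra)
# max_tokens and stop are picked up from kwargs by the provider,
# falling back to 512 and [] when omitted.
response = client.chat.completions.create(
    model="meta-llama/Llama-2-70b-chat-hf",  # placeholder model name
    messages=[{"role": "user", "content": "Write one short sentence."}],
    max_tokens=64,
    stop=["\n\n"],
)
print(response.choices[0].message.content)
```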
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 0fa433a4..e507404b 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -288,7 +288,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
json={"is_visible": False},
headers=headers
) as response:
- response.raise_for_status()
+ ...
@classmethod
async def create_async_generator(
@@ -434,7 +434,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
action = "continue"
await asyncio.sleep(5)
if history_disabled and auto_continue:
- await cls.delete_conversation(session, cls._headers, conversation_id)
+ await cls.delete_conversation(session, cls._headers, fields.conversation_id)
@staticmethod
async def iter_messages_ws(ws: ClientWebSocketResponse) -> AsyncIterator: