-rw-r--r--  README.md                        20
-rw-r--r--  g4f/Provider/Bing.py              1
-rw-r--r--  g4f/Provider/ChatBase.py          1
-rw-r--r--  g4f/Provider/ChatForAi.py         3
-rw-r--r--  g4f/Provider/ChatgptX.py          3
-rw-r--r--  g4f/Provider/FakeGpt.py           3
-rw-r--r--  g4f/Provider/FreeGpt.py           1
-rw-r--r--  g4f/Provider/GPTalk.py            3
-rw-r--r--  g4f/Provider/GptForLove.py        1
-rw-r--r--  g4f/Provider/You.py               3
-rw-r--r--  g4f/Provider/Yqcloud.py           1
-rw-r--r--  g4f/Provider/base_provider.py     3
12 files changed, 29 insertions(+), 14 deletions(-)
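The substance of this change: `BaseProvider` gains a `supports_message_history` flag (default `False`), and the providers listed above switch it on. A minimal sketch of how client code might use the new flag, assuming these classes are importable from `g4f.Provider` as the paths above suggest:

```py
# Minimal sketch (not part of the diff): keep only providers that advertise
# message-history support via the new class attribute.
from g4f.Provider import Bing, ChatBase, FreeGpt, You

candidates = (Bing, ChatBase, FreeGpt, You)

# getattr(..., False) mirrors the BaseProvider default added in this commit.
history_capable = [p for p in candidates if getattr(p, "supports_message_history", False)]
print([p.__name__ for p in history_capable])
```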
diff --git a/README.md b/README.md
index f313156f..5a86b786 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,11 @@
![248433934-7886223b-c1d1-4260-82aa-da5741f303bb](https://github.com/xtekky/gpt4free/assets/98614666/ea012c87-76e0-496a-8ac4-e2de090cc6c9)
-By using this repository or any code related to it, you agree to the [legal notice](./LEGAL_NOTICE.md). The author is not responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses.
-- latest pypi version: [`0.1.7.7`](https://pypi.org/project/g4f/0.1.7.7)
+> **Note**
+> By using this repository or any code related to it, you agree to the [legal notice](./LEGAL_NOTICE.md). The author is not responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses.
+
+> **Note**
+> Latest pypi version: [`0.1.7.7`](https://pypi.org/project/g4f/0.1.7.7)
```sh
pip install -U g4f
```
@@ -124,7 +127,7 @@ docker compose build
docker compose up
```
-You server will now be running at `http://localhost:1337`. You can interact with the API or run your tests as you would normally.
+Your server will now be running at `http://localhost:1337`. You can interact with the API or run your tests as you would normally.
To stop the Docker containers, simply run:
@@ -132,7 +135,8 @@ To stop the Docker containers, simply run:
docker compose down
```
-**Note:** When using Docker, any changes you make to your local files will be reflected in the Docker container thanks to the volume mapping in the `docker-compose.yml` file. If you add or remove dependencies, however, you'll need to rebuild the Docker image using `docker compose build`.
+> **Note**
+> When using Docker, any changes you make to your local files will be reflected in the Docker container thanks to the volume mapping in the `docker-compose.yml` file. If you add or remove dependencies, however, you'll need to rebuild the Docker image using `docker compose build`.
## Usage
@@ -327,7 +331,7 @@ python -m g4f.api
```py
import openai
-openai.api_key = "Empty if you don't use embeddings, otherwise your hugginface token"
+openai.api_key = " Leave Empty if you don't use embeddings, otherwise your Hugging Face token"
openai.api_base = "http://localhost:1337/v1"
@@ -546,8 +550,8 @@ python etc/tool/create_provider.py
#### Create Provider
1. Check out the current [list of potential providers](https://github.com/zukixa/cool-ai-stuff#ai-chat-websites), or find your own provider source!
-2. Create a new file in [g4f/provider](./g4f/provider) with the name of the Provider
-3. Implement a class that extends [BaseProvider](./g4f/provider/base_provider.py).
+2. Create a new file in [g4f/Provider](./g4f/Provider) with the name of the Provider
+3. Implement a class that extends [BaseProvider](./g4f/Provider/base_provider.py).
```py
from __future__ import annotations
@@ -573,7 +577,7 @@ class HogeService(AsyncGeneratorProvider):
4. Here, you can adjust the settings, for example, if the website does support streaming, set `supports_stream` to `True`...
5. Write code to request the provider in `create_async_generator` and `yield` the response, _even if_ it's a one-time response, do not hesitate to look at other providers for inspiration
-6. Add the Provider Name in [g4f/provider/**init**.py](./g4f/provider/__init__.py)
+6. Add the Provider Name in [g4f/Provider/**init**.py](./g4f/Provider/__init__.py)
```py
from .HogeService import HogeService
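
# --- Hedged illustration (not part of this commit): a fuller skeleton of the
# --- provider referenced above, assuming the AsyncGeneratorProvider interface
# --- shown in g4f/Provider/base_provider.py below. Names other than the base
# --- class and the capability flags are placeholders.
from __future__ import annotations

from ..typing import AsyncResult, Messages  # assumed to match base_provider.py
from .base_provider import AsyncGeneratorProvider


class HogeService(AsyncGeneratorProvider):
    url = "https://example.com"            # placeholder endpoint
    working = True
    supports_stream = True
    supports_message_history = True        # flag introduced in this commit

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        **kwargs
    ) -> AsyncResult:
        # Request the upstream service here and yield chunks as they arrive.
        yield "Hello from HogeService"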
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index c54a3ae5..ca14510c 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -32,6 +32,7 @@ default_cookies = {
class Bing(AsyncGeneratorProvider):
url = "https://bing.com/chat"
working = True
+ supports_message_history = True
supports_gpt_4 = True
@staticmethod
diff --git a/g4f/Provider/ChatBase.py b/g4f/Provider/ChatBase.py
index 7a2e2ff8..ada51fed 100644
--- a/g4f/Provider/ChatBase.py
+++ b/g4f/Provider/ChatBase.py
@@ -9,6 +9,7 @@ from .base_provider import AsyncGeneratorProvider
class ChatBase(AsyncGeneratorProvider):
url = "https://www.chatbase.co"
supports_gpt_35_turbo = True
+ supports_message_history = True
working = True
list_incorrect_responses = ["support@chatbase",
"about Chatbase"]
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py
index 718affeb..7a4e9264 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/ChatForAi.py
@@ -11,6 +11,7 @@ from .base_provider import AsyncGeneratorProvider
class ChatForAi(AsyncGeneratorProvider):
url = "https://chatforai.store"
working = True
+ supports_message_history = True
supports_gpt_35_turbo = True
@classmethod
@@ -69,4 +70,4 @@ class ChatForAi(AsyncGeneratorProvider):
def generate_signature(timestamp: int, message: str, id: str):
buffer = f"{timestamp}:{id}:{message}:7YN8z6d6"
- return hashlib.sha256(buffer.encode()).hexdigest()
\ No newline at end of file
+ return hashlib.sha256(buffer.encode()).hexdigest()
diff --git a/g4f/Provider/ChatgptX.py b/g4f/Provider/ChatgptX.py
index 9a8711b9..75ff0da5 100644
--- a/g4f/Provider/ChatgptX.py
+++ b/g4f/Provider/ChatgptX.py
@@ -12,6 +12,7 @@ from .helper import format_prompt
class ChatgptX(AsyncGeneratorProvider):
url = "https://chatgptx.de"
supports_gpt_35_turbo = True
+ supports_message_history = True
working = True
@classmethod
@@ -96,4 +97,4 @@ class ChatgptX(AsyncGeneratorProvider):
except:
raise RuntimeError(f"Broken line: {line.decode()}")
if content:
- yield content
\ No newline at end of file
+ yield content
diff --git a/g4f/Provider/FakeGpt.py b/g4f/Provider/FakeGpt.py
index 5bce1280..a89425d3 100644
--- a/g4f/Provider/FakeGpt.py
+++ b/g4f/Provider/FakeGpt.py
@@ -10,6 +10,7 @@ from .helper import format_prompt
class FakeGpt(AsyncGeneratorProvider):
url = "https://chat-shared2.zhile.io"
+ supports_message_history = True
supports_gpt_35_turbo = True
working = True
_access_token = None
@@ -91,4 +92,4 @@ class FakeGpt(AsyncGeneratorProvider):
raise RuntimeError("No valid response")
def random_string(length: int = 10):
- return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))
\ No newline at end of file
+ return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 00d9137d..758e411b 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -12,6 +12,7 @@ domains = [
class FreeGpt(AsyncGeneratorProvider):
url = "https://freegpts1.aifree.site/"
+ supports_message_history = True
supports_gpt_35_turbo = True
working = True
diff --git a/g4f/Provider/GPTalk.py b/g4f/Provider/GPTalk.py
index c6b57d91..a5644fc4 100644
--- a/g4f/Provider/GPTalk.py
+++ b/g4f/Provider/GPTalk.py
@@ -11,6 +11,7 @@ from .helper import format_prompt
class GPTalk(AsyncGeneratorProvider):
url = "https://gptalk.net"
supports_gpt_35_turbo = True
+ supports_message_history = True
working = True
_auth = None
@@ -81,4 +82,4 @@ class GPTalk(AsyncGeneratorProvider):
break
message = json.loads(line[6:-1])["content"]
yield message[len(last_message):]
- last_message = message
\ No newline at end of file
+ last_message = message
diff --git a/g4f/Provider/GptForLove.py b/g4f/Provider/GptForLove.py
index 28939592..4b31809c 100644
--- a/g4f/Provider/GptForLove.py
+++ b/g4f/Provider/GptForLove.py
@@ -9,6 +9,7 @@ from .helper import format_prompt
class GptForLove(AsyncGeneratorProvider):
url = "https://ai18.gptforlove.com"
+ supports_message_history = True
supports_gpt_35_turbo = True
working = True
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 1afd18be..34972586 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -10,6 +10,7 @@ from .base_provider import AsyncGeneratorProvider, format_prompt
class You(AsyncGeneratorProvider):
url = "https://you.com"
working = True
+ supports_message_history = True
supports_gpt_35_turbo = True
@@ -37,4 +38,4 @@ class You(AsyncGeneratorProvider):
start = b'data: {"youChatToken": '
async for line in response.iter_lines():
if line.startswith(start):
- yield json.loads(line[len(start):-1])
\ No newline at end of file
+ yield json.loads(line[len(start):-1])
diff --git a/g4f/Provider/Yqcloud.py b/g4f/Provider/Yqcloud.py
index 2829c5bf..12eb7bbb 100644
--- a/g4f/Provider/Yqcloud.py
+++ b/g4f/Provider/Yqcloud.py
@@ -10,6 +10,7 @@ from .base_provider import AsyncGeneratorProvider, format_prompt
class Yqcloud(AsyncGeneratorProvider):
url = "https://chat9.yqcloud.top/"
working = True
+ supports_message_history = True
supports_gpt_35_turbo = True
@staticmethod
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index c54b98e5..47ea6ff8 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -15,6 +15,7 @@ class BaseProvider(ABC):
supports_stream: bool = False
supports_gpt_35_turbo: bool = False
supports_gpt_4: bool = False
+ supports_message_history: bool = False
@staticmethod
@abstractmethod
@@ -135,4 +136,4 @@ class AsyncGeneratorProvider(AsyncProvider):
messages: Messages,
**kwargs
) -> AsyncResult:
- raise NotImplementedError()
\ No newline at end of file
+ raise NotImplementedError()
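A hedged usage sketch from the caller's side: passing a multi-turn conversation to a provider that sets the new `supports_message_history` flag. The `g4f.ChatCompletion.create(..., provider=...)` call is assumed from the project README and is not part of this diff.

```py
# Sketch only: send prior turns to a provider that declares support for them.
import g4f
from g4f.Provider import Bing

messages = [
    {"role": "user", "content": "Remember the number 42."},
    {"role": "assistant", "content": "Noted."},
    {"role": "user", "content": "Which number did I ask you to remember?"},
]

if Bing.supports_message_history:
    response = g4f.ChatCompletion.create(
        model="gpt-4",          # Bing also sets supports_gpt_4 in this diff
        messages=messages,
        provider=Bing,
    )
    print(response)
```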