summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHeiner Lohaus <heiner@lohaus.eu>2023-10-10 09:49:29 +0200
committerHeiner Lohaus <heiner@lohaus.eu>2023-10-10 09:49:29 +0200
commit53192b86b129380660f7454170fa987faf2da3c5 (patch)
tree5797a34479840692f2ae5db9e63deacac9732bd7
parent ~ | g4f `v-0.1.5.7` (diff)
downloadgpt4free-53192b86b129380660f7454170fa987faf2da3c5.tar
gpt4free-53192b86b129380660f7454170fa987faf2da3c5.tar.gz
gpt4free-53192b86b129380660f7454170fa987faf2da3c5.tar.bz2
gpt4free-53192b86b129380660f7454170fa987faf2da3c5.tar.lz
gpt4free-53192b86b129380660f7454170fa987faf2da3c5.tar.xz
gpt4free-53192b86b129380660f7454170fa987faf2da3c5.tar.zst
gpt4free-53192b86b129380660f7454170fa987faf2da3c5.zip
-rw-r--r--README.md40
-rw-r--r--etc/tool/create_provider.py5
-rw-r--r--g4f/Provider/Acytoo.py4
-rw-r--r--g4f/Provider/GptGo.py6
-rw-r--r--g4f/Provider/H2o.py6
-rw-r--r--g4f/Provider/Myshell.py6
-rw-r--r--g4f/Provider/Phind.py6
-rw-r--r--g4f/Provider/base_provider.py18
-rw-r--r--g4f/Provider/deprecated/AiService.py4
-rw-r--r--g4f/Provider/helper.py4
-rw-r--r--g4f/Provider/retry_provider.py13
11 files changed, 55 insertions, 57 deletions
diff --git a/README.md b/README.md
index 691c5857..8b56a1f6 100644
--- a/README.md
+++ b/README.md
@@ -158,7 +158,6 @@ docker compose down
```py
import g4f
-
print(g4f.Provider.Ails.params) # supported args
# Automatic selection of provider
@@ -166,7 +165,7 @@ print(g4f.Provider.Ails.params) # supported args
# streamed completion
response = g4f.ChatCompletion.create(
model="gpt-3.5-turbo",
- messages=[{"role": "user", "content": "Hello world"}],
+ messages=[{"role": "user", "content": "Hello"}],
stream=True,
)
@@ -176,22 +175,10 @@ for message in response:
# normal response
response = g4f.ChatCompletion.create(
model=g4f.models.gpt_4,
- messages=[{"role": "user", "content": "hi"}],
+ messages=[{"role": "user", "content": "Hello"}],
) # alternative model setting
print(response)
-
-
-# Set with provider
-response = g4f.ChatCompletion.create(
- model="gpt-3.5-turbo",
- provider=g4f.Provider.DeepAi,
- messages=[{"role": "user", "content": "Hello world"}],
- stream=True,
-)
-
-for message in response:
- print(message)
```
##### Completion
```py
@@ -215,6 +202,7 @@ print(response)
##### Providers:
```py
+import g4f
from g4f.Provider import (
AItianhu,
Acytoo,
@@ -237,8 +225,17 @@ from g4f.Provider import (
You,
Yqcloud,
)
-# Usage:
-response = g4f.ChatCompletion.create(..., provider=ProviderName)
+
+# Set with provider
+response = g4f.ChatCompletion.create(
+ model="gpt-3.5-turbo",
+ provider=g4f.Provider.Aichat,
+ messages=[{"role": "user", "content": "Hello"}],
+ stream=True,
+)
+
+for message in response:
+ print(message)
```
##### Cookies Required:
@@ -250,6 +247,7 @@ When running the g4f package locally, the package automatically retrieves cookie
```py
import g4f
+
from g4f.Provider import (
Bard,
Bing,
@@ -257,6 +255,7 @@ from g4f.Provider import (
OpenAssistant,
OpenaiChat,
)
+
# Usage:
response = g4f.ChatCompletion.create(
model=g4f.models.default,
@@ -319,6 +318,7 @@ response = await g4f.ChatCompletion.create(
proxy="http://host:port",
# or socks5://user:pass@host:port
)
+
print(f"Result:", response)
```
@@ -532,18 +532,18 @@ if __name__ == "__main__":
## Contribute
-####Create Provider with AI Tool
+#### Create Provider with AI Tool
Call in your terminal the "create_provider" script:
```bash
-$ python etc/tool/create_provider.py
+python etc/tool/create_provider.py
```
1. Enter your name for the new provider.
2. Copy&Paste a cURL command from your browser developer tools.
3. Let the AI create the provider for you.
4. Customize the provider according to your needs.
-####Create Provider
+#### Create Provider
0. Check out the current [list of potential providers](https://github.com/zukixa/cool-ai-stuff#ai-chat-websites), or find your own provider source!
1. Create a new file in [g4f/provider](./g4f/provider) with the name of the Provider
diff --git a/etc/tool/create_provider.py b/etc/tool/create_provider.py
index 62cfd605..ca20099a 100644
--- a/etc/tool/create_provider.py
+++ b/etc/tool/create_provider.py
@@ -86,15 +86,16 @@ Replace "hello" with `format_prompt(messages)`.
And replace "gpt-3.5-turbo" with `model`.
"""
+ print("Create code...")
response = []
for chunk in g4f.ChatCompletion.create(
model=g4f.models.gpt_35_long,
messages=[{"role": "user", "content": prompt}],
timeout=300,
- stream=True
+ stream=True,
):
- response.append(chunk)
print(chunk, end="", flush=True)
+ response.append(chunk)
print()
response = "".join(response)
diff --git a/g4f/Provider/Acytoo.py b/g4f/Provider/Acytoo.py
index 0ac3425c..cefdd1ac 100644
--- a/g4f/Provider/Acytoo.py
+++ b/g4f/Provider/Acytoo.py
@@ -23,7 +23,7 @@ class Acytoo(AsyncGeneratorProvider):
headers=_create_header()
) as session:
async with session.post(
- cls.url + '/api/completions',
+ f'{cls.url}/api/completions',
proxy=proxy,
json=_create_payload(messages, **kwargs)
) as response:
@@ -40,7 +40,7 @@ def _create_header():
}
-def _create_payload(messages: list[dict[str, str]], temperature: float = 0.5, **kwargs):
+def _create_payload(messages: Messages, temperature: float = 0.5, **kwargs):
return {
'key' : '',
'model' : 'gpt-3.5-turbo',
diff --git a/g4f/Provider/GptGo.py b/g4f/Provider/GptGo.py
index 5f6cc362..f9b94a5c 100644
--- a/g4f/Provider/GptGo.py
+++ b/g4f/Provider/GptGo.py
@@ -3,7 +3,7 @@ from __future__ import annotations
from aiohttp import ClientSession
import json
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -16,10 +16,10 @@ class GptGo(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "*/*",
diff --git a/g4f/Provider/H2o.py b/g4f/Provider/H2o.py
index d92bd6d1..65429a28 100644
--- a/g4f/Provider/H2o.py
+++ b/g4f/Provider/H2o.py
@@ -5,7 +5,7 @@ import uuid
from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -18,10 +18,10 @@ class H2o(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
model = model if model else cls.model
headers = {"Referer": cls.url + "/"}
diff --git a/g4f/Provider/Myshell.py b/g4f/Provider/Myshell.py
index 6ed4fd7a..847bac2f 100644
--- a/g4f/Provider/Myshell.py
+++ b/g4f/Provider/Myshell.py
@@ -6,7 +6,7 @@ from aiohttp import ClientSession
from aiohttp.http import WSMsgType
import asyncio
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -27,11 +27,11 @@ class Myshell(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
timeout: int = 90,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
if not model:
bot_id = models["samantha"]
elif model in models:
diff --git a/g4f/Provider/Phind.py b/g4f/Provider/Phind.py
index ae4de686..d7c6f7c7 100644
--- a/g4f/Provider/Phind.py
+++ b/g4f/Provider/Phind.py
@@ -3,7 +3,7 @@ from __future__ import annotations
import random
from datetime import datetime
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -17,11 +17,11 @@ class Phind(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
timeout: int = 120,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
user_id = ''.join(random.choice(chars) for _ in range(24))
data = {
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index 35764081..c54b98e5 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -5,7 +5,7 @@ from concurrent.futures import ThreadPoolExecutor
from abc import ABC, abstractmethod
from .helper import get_event_loop, get_cookies, format_prompt
-from ..typing import AsyncGenerator, CreateResult
+from ..typing import CreateResult, AsyncResult, Messages
class BaseProvider(ABC):
@@ -20,7 +20,7 @@ class BaseProvider(ABC):
@abstractmethod
def create_completion(
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
stream: bool,
**kwargs
) -> CreateResult:
@@ -30,7 +30,7 @@ class BaseProvider(ABC):
async def create_async(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
*,
loop: AbstractEventLoop = None,
executor: ThreadPoolExecutor = None,
@@ -69,7 +69,7 @@ class AsyncProvider(BaseProvider):
def create_completion(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
stream: bool = False,
**kwargs
) -> CreateResult:
@@ -81,7 +81,7 @@ class AsyncProvider(BaseProvider):
@abstractmethod
async def create_async(
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
**kwargs
) -> str:
raise NotImplementedError()
@@ -94,7 +94,7 @@ class AsyncGeneratorProvider(AsyncProvider):
def create_completion(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
stream: bool = True,
**kwargs
) -> CreateResult:
@@ -116,7 +116,7 @@ class AsyncGeneratorProvider(AsyncProvider):
async def create_async(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
**kwargs
) -> str:
return "".join([
@@ -132,7 +132,7 @@ class AsyncGeneratorProvider(AsyncProvider):
@abstractmethod
def create_async_generator(
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
raise NotImplementedError() \ No newline at end of file
diff --git a/g4f/Provider/deprecated/AiService.py b/g4f/Provider/deprecated/AiService.py
index 9b41e3c8..d1d15859 100644
--- a/g4f/Provider/deprecated/AiService.py
+++ b/g4f/Provider/deprecated/AiService.py
@@ -2,7 +2,7 @@ from __future__ import annotations
import requests
-from ...typing import Any, CreateResult
+from ...typing import Any, CreateResult, Messages
from ..base_provider import BaseProvider
@@ -14,7 +14,7 @@ class AiService(BaseProvider):
@staticmethod
def create_completion(
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
stream: bool,
**kwargs: Any,
) -> CreateResult:
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index 5a9a9329..db19adc1 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -4,7 +4,7 @@ import asyncio
import sys
from asyncio import AbstractEventLoop
from os import path
-from typing import Dict, List
+from ..typing import Dict, List, Messages
import browser_cookie3
# Change event loop policy on windows
@@ -53,7 +53,7 @@ def get_cookies(cookie_domain: str) -> Dict[str, str]:
return _cookies[cookie_domain]
-def format_prompt(messages: List[Dict[str, str]], add_special_tokens=False) -> str:
+def format_prompt(messages: Messages, add_special_tokens=False) -> str:
if add_special_tokens or len(messages) > 1:
formatted = "\n".join(
[
diff --git a/g4f/Provider/retry_provider.py b/g4f/Provider/retry_provider.py
index b49020b2..94b9b90a 100644
--- a/g4f/Provider/retry_provider.py
+++ b/g4f/Provider/retry_provider.py
@@ -2,7 +2,7 @@ from __future__ import annotations
import random
from typing import List, Type, Dict
-from ..typing import CreateResult
+from ..typing import CreateResult, Messages
from .base_provider import BaseProvider, AsyncProvider
from ..debug import logging
@@ -10,10 +10,7 @@ from ..debug import logging
class RetryProvider(AsyncProvider):
__name__: str = "RetryProvider"
working: bool = True
- needs_auth: bool = False
supports_stream: bool = True
- supports_gpt_35_turbo: bool = False
- supports_gpt_4: bool = False
def __init__(
self,
@@ -27,7 +24,7 @@ class RetryProvider(AsyncProvider):
def create_completion(
self,
model: str,
- messages: List[Dict[str, str]],
+ messages: Messages,
stream: bool = False,
**kwargs
) -> CreateResult:
@@ -54,17 +51,17 @@ class RetryProvider(AsyncProvider):
if logging:
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
if started:
- break
+ raise e
self.raise_exceptions()
async def create_async(
self,
model: str,
- messages: List[Dict[str, str]],
+ messages: Messages,
**kwargs
) -> str:
- providers = [provider for provider in self.providers]
+ providers = self.providers
if self.shuffle:
random.shuffle(providers)