author     Heiner Lohaus <heiner@lohaus.eu>  2023-09-20 06:12:34 +0200
committer  Heiner Lohaus <heiner@lohaus.eu>  2023-09-20 06:12:34 +0200
commit     82bd6f91808a383781807262c4ae1f3de9740531 (patch)
tree       9a12306a3dda5e883bc21e13b2f50294892a2fc9
parent     ~ | Merge pull request #914 from hlohaus/lesh (diff)
 README.md                    | 65
 g4f/Provider/Aivvm.py        |  2
 g4f/Provider/Bard.py         | 17
 g4f/Provider/ChatgptLogin.py |  9
 g4f/Provider/CodeLinkAva.py  |  7
 g4f/Provider/H2o.py          | 16
 g4f/Provider/HuggingChat.py  | 12
 g4f/Provider/Vitalentum.py   |  4
 g4f/models.py                |  2
 testing/test_providers.py    | 22
 10 files changed, 81 insertions(+), 75 deletions(-)
diff --git a/README.md b/README.md
index bf15f3a6..dc148e7d 100644
--- a/README.md
+++ b/README.md
@@ -238,43 +238,42 @@ response = g4f.ChatCompletion.create(
##### Async Support:
-To enhance speed and overall performance, execute providers asynchronously. The total execution time will be determined by the duration of the slowest provider's execution.
+To improve speed and overall performance, run providers asynchronously.
+The total execution time is then determined by the slowest provider's response.
```py
import g4f, asyncio
-async def run_async():
- _providers = [
- g4f.Provider.AItianhu,
- g4f.Provider.Acytoo,
- g4f.Provider.Aichat,
- g4f.Provider.Ails,
- g4f.Provider.Aivvm,
- g4f.Provider.ChatBase,
- g4f.Provider.ChatgptAi,
- g4f.Provider.ChatgptLogin,
- g4f.Provider.CodeLinkAva,
- g4f.Provider.DeepAi,
- g4f.Provider.Opchatgpts,
- g4f.Provider.Vercel,
- g4f.Provider.Vitalentum,
- g4f.Provider.Wewordle,
- g4f.Provider.Ylokh,
- g4f.Provider.You,
- g4f.Provider.Yqcloud,
- ]
- responses = [
- provider.create_async(
- model=g4f.models.default,
- messages=[{"role": "user", "content": "Hello"}],
- )
- for provider in _providers
- ]
- responses = await asyncio.gather(*responses)
- for idx, provider in enumerate(_providers):
- print(f"{provider.__name__}:", responses[idx])
-
-asyncio.run(run_async())
+_providers = [
+ g4f.Provider.Aichat,
+ g4f.Provider.Aivvm,
+ g4f.Provider.ChatBase,
+ g4f.Provider.Bing,
+ g4f.Provider.CodeLinkAva,
+ g4f.Provider.DeepAi,
+ g4f.Provider.GptGo,
+ g4f.Provider.Wewordle,
+ g4f.Provider.You,
+ g4f.Provider.Yqcloud,
+]
+
+async def run_provider(provider: type[g4f.Provider.AsyncProvider]):
+ try:
+ response = await provider.create_async(
+ model=g4f.models.default.name,
+ messages=[{"role": "user", "content": "Hello"}],
+ )
+ print(f"{provider.__name__}:", response)
+ except Exception as e:
+ print(f"{provider.__name__}:", e)
+
+async def run_all():
+ calls = [
+ run_provider(provider) for provider in _providers
+ ]
+ await asyncio.gather(*calls)
+
+asyncio.run(run_all())
```
### interference openai-proxy api (use with openai python package)
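
The rewritten README example wraps each provider call in its own `try`/`except`, so one failing provider cannot abort the whole batch. A more compact variant (a sketch, not what the README ships) gets the same resilience from `asyncio.gather(..., return_exceptions=True)`:

```py
import asyncio
import g4f

_providers = [g4f.Provider.You, g4f.Provider.Yqcloud]  # any subset works

async def run_all():
    calls = [
        provider.create_async(
            model=g4f.models.default.name,
            messages=[{"role": "user", "content": "Hello"}],
        )
        for provider in _providers
    ]
    # return_exceptions=True hands each failure back as a result
    # instead of cancelling the remaining calls.
    results = await asyncio.gather(*calls, return_exceptions=True)
    for provider, result in zip(_providers, results):
        print(f"{provider.__name__}:", result)

asyncio.run(run_all())
```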
diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
index dbfc588d..b2d7c139 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/Aivvm.py
@@ -41,7 +41,7 @@ class Aivvm(AsyncGeneratorProvider):
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "*/*",
- "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "Accept-Language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin" : cls.url,
"Referer" : cls.url + "/",
"Sec-Fetch-Dest" : "empty",
diff --git a/g4f/Provider/Bard.py b/g4f/Provider/Bard.py
index 2137d820..4e076378 100644
--- a/g4f/Provider/Bard.py
+++ b/g4f/Provider/Bard.py
@@ -13,6 +13,7 @@ class Bard(AsyncProvider):
url = "https://bard.google.com"
needs_auth = True
working = True
+ _snlm0e = None
@classmethod
async def create_async(
@@ -31,7 +32,6 @@ class Bard(AsyncProvider):
headers = {
'authority': 'bard.google.com',
- 'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
'origin': 'https://bard.google.com',
'referer': 'https://bard.google.com/',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
@@ -42,13 +42,14 @@ class Bard(AsyncProvider):
cookies=cookies,
headers=headers
) as session:
- async with session.get(cls.url, proxy=proxy) as response:
- text = await response.text()
+ if not cls._snlm0e:
+ async with session.get(cls.url, proxy=proxy) as response:
+ text = await response.text()
- match = re.search(r'SNlM0e\":\"(.*?)\"', text)
- if not match:
- raise RuntimeError("No snlm0e value.")
- snlm0e = match.group(1)
+ match = re.search(r'SNlM0e\":\"(.*?)\"', text)
+ if not match:
+ raise RuntimeError("No snlm0e value.")
+ cls._snlm0e = match.group(1)
params = {
'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
@@ -57,7 +58,7 @@ class Bard(AsyncProvider):
}
data = {
- 'at': snlm0e,
+ 'at': cls._snlm0e,
'f.req': json.dumps([None, json.dumps([[prompt]])])
}
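
The Bard change above caches the scraped `SNlM0e` token in the class attribute `_snlm0e`, so the landing page is fetched only once per process instead of once per request. The pattern as a standalone sketch (names here are illustrative, not the provider's):

```py
import re
from aiohttp import ClientSession

class TokenCache:
    _token = None  # class attribute: shared by every call in this process

    @classmethod
    async def get_token(cls, session: ClientSession, url: str) -> str:
        # The first call scrapes the page; later calls reuse the cached value.
        if not cls._token:
            async with session.get(url) as response:
                text = await response.text()
            match = re.search(r'SNlM0e\":\"(.*?)\"', text)
            if not match:
                raise RuntimeError("No snlm0e value.")
            cls._token = match.group(1)
        return cls._token
```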
diff --git a/g4f/Provider/ChatgptLogin.py b/g4f/Provider/ChatgptLogin.py
index 8b868f8e..3eb55a64 100644
--- a/g4f/Provider/ChatgptLogin.py
+++ b/g4f/Provider/ChatgptLogin.py
@@ -52,7 +52,14 @@ class ChatgptLogin(AsyncProvider):
}
async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response:
response.raise_for_status()
- return (await response.json())["data"]
+ data = await response.json()
+ if "data" in data:
+ return data["data"]
+ elif "msg" in data:
+ raise RuntimeError(data["msg"])
+ else:
+ raise RuntimeError(f"Response: {data}")
+
@classmethod
@property
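
The new response handling above surfaces the server's own error message (`msg`) instead of failing with a bare `KeyError` on a missing `data` field. The same check as a small standalone helper (a sketch, not part of the provider):

```py
def extract_data(payload: dict) -> str:
    """Return payload["data"], preferring the server's error text on failure."""
    if "data" in payload:
        return payload["data"]
    if "msg" in payload:
        raise RuntimeError(payload["msg"])
    raise RuntimeError(f"Response: {payload}")
```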
diff --git a/g4f/Provider/CodeLinkAva.py b/g4f/Provider/CodeLinkAva.py
index 3ab4e264..e3b3eb3e 100644
--- a/g4f/Provider/CodeLinkAva.py
+++ b/g4f/Provider/CodeLinkAva.py
@@ -40,11 +40,12 @@ class CodeLinkAva(AsyncGeneratorProvider):
}
async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
response.raise_for_status()
- start = "data: "
async for line in response.content:
line = line.decode()
- if line.startswith("data: ") and not line.startswith("data: [DONE]"):
- line = json.loads(line[len(start):-1])
+ if line.startswith("data: "):
+ if line.startswith("data: [DONE]"):
+ break
+ line = json.loads(line[6:-1])
content = line["choices"][0]["delta"].get("content")
if content:
yield content
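
CodeLinkAva (and Vitalentum below) stream OpenAI-style server-sent events: every payload line starts with `data: `, and `data: [DONE]` marks the end of the stream, which is why the loop now breaks there instead of merely skipping the line. A minimal decoder for that wire format, under those assumptions:

```py
import json

DONE = object()  # sentinel marking the end of the stream

def parse_sse_line(raw: bytes):
    """Decode one streamed line; return a content delta, DONE, or None."""
    line = raw.decode().strip()
    if not line.startswith("data: "):
        return None  # blank keep-alive lines carry no content
    if line == "data: [DONE]":
        return DONE
    payload = json.loads(line[len("data: "):])
    return payload["choices"][0]["delta"].get("content")
```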
diff --git a/g4f/Provider/H2o.py b/g4f/Provider/H2o.py
index 30090a58..d92bd6d1 100644
--- a/g4f/Provider/H2o.py
+++ b/g4f/Provider/H2o.py
@@ -23,7 +23,7 @@ class H2o(AsyncGeneratorProvider):
**kwargs
) -> AsyncGenerator:
model = model if model else cls.model
- headers = {"Referer": "https://gpt-gm.h2o.ai/"}
+ headers = {"Referer": cls.url + "/"}
async with ClientSession(
headers=headers
@@ -36,14 +36,14 @@ class H2o(AsyncGeneratorProvider):
"searchEnabled": "true",
}
async with session.post(
- "https://gpt-gm.h2o.ai/settings",
+ f"{cls.url}/settings",
proxy=proxy,
data=data
) as response:
response.raise_for_status()
async with session.post(
- "https://gpt-gm.h2o.ai/conversation",
+ f"{cls.url}/conversation",
proxy=proxy,
json={"model": model},
) as response:
@@ -71,7 +71,7 @@ class H2o(AsyncGeneratorProvider):
},
}
async with session.post(
- f"https://gpt-gm.h2o.ai/conversation/{conversationId}",
+ f"{cls.url}/conversation/{conversationId}",
proxy=proxy,
json=data
) as response:
@@ -83,6 +83,14 @@ class H2o(AsyncGeneratorProvider):
if not line["token"]["special"]:
yield line["token"]["text"]
+ async with session.delete(
+ f"{cls.url}/conversation/{conversationId}",
+ proxy=proxy,
+ json=data
+ ) as response:
+ response.raise_for_status()
+
+
@classmethod
@property
def params(cls):
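
The DELETE request added above runs only when the token loop finishes normally. A `try`/`finally` variant (an alternative shape, not what this commit ships) would clean up the conversation even if streaming raises:

```py
from aiohttp import ClientSession

async def stream_then_delete(session: ClientSession, base_url: str,
                             conversation_id: str, payload: dict):
    """Yield streamed lines, deleting the conversation even on failure."""
    try:
        async with session.post(
            f"{base_url}/conversation/{conversation_id}", json=payload
        ) as response:
            response.raise_for_status()
            async for line in response.content:
                yield line.decode()
    finally:
        async with session.delete(
            f"{base_url}/conversation/{conversation_id}"
        ) as response:
            response.raise_for_status()
```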
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 85f879f3..7702c9dd 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -25,10 +25,10 @@ class HuggingChat(AsyncGeneratorProvider):
**kwargs
) -> AsyncGenerator:
model = model if model else cls.model
- if not cookies:
- cookies = get_cookies(".huggingface.co")
if proxy and "://" not in proxy:
proxy = f"http://{proxy}"
+ if not cookies:
+ cookies = get_cookies(".huggingface.co")
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
@@ -37,7 +37,7 @@ class HuggingChat(AsyncGeneratorProvider):
cookies=cookies,
headers=headers
) as session:
- async with session.post("https://huggingface.co/chat/conversation", proxy=proxy, json={"model": model}) as response:
+ async with session.post(f"{cls.url}/conversation", proxy=proxy, json={"model": model}) as response:
conversation_id = (await response.json())["conversationId"]
send = {
@@ -62,7 +62,7 @@ class HuggingChat(AsyncGeneratorProvider):
"web_search_id": ""
}
}
- async with session.post(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy, json=send) as response:
+ async with session.post(f"{cls.url}/conversation/{conversation_id}", proxy=proxy, json=send) as response:
if not stream:
data = await response.json()
if "error" in data:
@@ -76,8 +76,6 @@ class HuggingChat(AsyncGeneratorProvider):
first = True
async for line in response.content:
line = line.decode("utf-8")
- if not line:
- continue
if line.startswith(start):
line = json.loads(line[len(start):-1])
if "token" not in line:
@@ -89,7 +87,7 @@ class HuggingChat(AsyncGeneratorProvider):
else:
yield line["token"]["text"]
- async with session.delete(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy) as response:
+ async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
response.raise_for_status()
diff --git a/g4f/Provider/Vitalentum.py b/g4f/Provider/Vitalentum.py
index 31ad8b80..d5265428 100644
--- a/g4f/Provider/Vitalentum.py
+++ b/g4f/Provider/Vitalentum.py
@@ -46,7 +46,9 @@ class Vitalentum(AsyncGeneratorProvider):
response.raise_for_status()
async for line in response.content:
line = line.decode()
- if line.startswith("data: ") and not line.startswith("data: [DONE]"):
+ if line.startswith("data: "):
+ if line.startswith("data: [DONE]"):
+ break
line = json.loads(line[6:-1])
content = line["choices"][0]["delta"].get("content")
if content:
diff --git a/g4f/models.py b/g4f/models.py
index 9b01fa3c..1066e1aa 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -14,7 +14,7 @@ from .Provider import (
H2o
)
-@dataclass
+@dataclass(unsafe_hash=True)
class Model:
name: str
base_provider: str
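
`unsafe_hash=True` makes the dataclass generate `__hash__` from its fields, so `Model` instances can be used as dict keys or set members. A trimmed illustration (only the two fields shown in the hunk):

```py
from dataclasses import dataclass

@dataclass(unsafe_hash=True)
class Model:
    name: str
    base_provider: str

a = Model("gpt-3.5-turbo", "openai")
b = Model("gpt-3.5-turbo", "openai")
assert a == b and hash(a) == hash(b)
assert len({a, b}) == 1  # equal models deduplicate in a set
```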
diff --git a/testing/test_providers.py b/testing/test_providers.py
index be04e7a3..5240119b 100644
--- a/testing/test_providers.py
+++ b/testing/test_providers.py
@@ -1,6 +1,6 @@
import sys
from pathlib import Path
-from colorama import Fore
+from colorama import Fore, Style
sys.path.append(str(Path(__file__).parent.parent))
@@ -8,10 +8,6 @@ from g4f import BaseProvider, models, Provider
logging = False
-class Styles:
- ENDC = "\033[0m"
- BOLD = "\033[1m"
- UNDERLINE = "\033[4m"
def main():
providers = get_providers()
@@ -29,11 +25,11 @@ def main():
print()
if failed_providers:
- print(f"{Fore.RED + Styles.BOLD}Failed providers:{Styles.ENDC}")
+ print(f"{Fore.RED + Style.BRIGHT}Failed providers:{Style.RESET_ALL}")
for _provider in failed_providers:
print(f"{Fore.RED}{_provider.__name__}")
else:
- print(f"{Fore.GREEN + Styles.BOLD}All providers are working")
+ print(f"{Fore.GREEN + Style.BRIGHT}All providers are working")
def get_providers() -> list[type[BaseProvider]]:
@@ -45,21 +41,15 @@ def get_providers() -> list[type[BaseProvider]]:
"AsyncProvider",
"AsyncGeneratorProvider"
]
- provider_names = [
- provider_name
+ return [
+ getattr(Provider, provider_name)
for provider_name in provider_names
if not provider_name.startswith("__") and provider_name not in ignore_names
]
- return [getattr(Provider, provider_name) for provider_name in provider_names]
def create_response(_provider: type[BaseProvider]) -> str:
- if _provider.supports_gpt_35_turbo:
- model = models.gpt_35_turbo.name
- elif _provider.supports_gpt_4:
- model = models.gpt_4.name
- else:
- model = models.default.name
+ model = models.gpt_35_turbo.name if _provider.supports_gpt_35_turbo else models.default.name
response = _provider.create_completion(
model=model,
messages=[{"role": "user", "content": "Hello, who are you? Answer in as much detail as possible."}],