Diffstat (limited to 'g4f/Provider')
 -rw-r--r--  g4f/Provider/base_provider.py  | 20
 -rw-r--r--  g4f/Provider/retry_provider.py | 32
 2 files changed, 27 insertions, 25 deletions
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index a21dc871..35764081 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -10,11 +10,11 @@ from ..typing import AsyncGenerator, CreateResult
 class BaseProvider(ABC):
     url: str
-    working = False
-    needs_auth = False
-    supports_stream = False
-    supports_gpt_35_turbo = False
-    supports_gpt_4 = False
+    working: bool = False
+    needs_auth: bool = False
+    supports_stream: bool = False
+    supports_gpt_35_turbo: bool = False
+    supports_gpt_4: bool = False
     @staticmethod
     @abstractmethod
@@ -38,13 +38,15 @@ class BaseProvider(ABC):
     ) -> str:
         if not loop:
             loop = get_event_loop()
-        def create_func():
+
+        def create_func() -> str:
             return "".join(cls.create_completion(
                 model,
                 messages,
                 False,
                 **kwargs
             ))
+
         return await loop.run_in_executor(
             executor,
             create_func
@@ -52,7 +54,7 @@ class BaseProvider(ABC):
     @classmethod
     @property
-    def params(cls):
+    def params(cls) -> str:
         params = [
             ("model", "str"),
             ("messages", "list[dict[str, str]]"),
@@ -103,7 +105,7 @@ class AsyncGeneratorProvider(AsyncProvider):
             stream=stream,
             **kwargs
         )
-        gen = generator.__aiter__()
+        gen = generator.__aiter__()
         while True:
             try:
                 yield loop.run_until_complete(gen.__anext__())
@@ -125,7 +127,7 @@ class AsyncGeneratorProvider(AsyncProvider):
                 **kwargs
             )
         ])
-
+
     @staticmethod
     @abstractmethod
     def create_async_generator(
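
For orientation, the annotated flags and the abstract create_completion hook shown above can be exercised with a minimal provider stub. The sketch below is illustrative only: ExampleProvider, its placeholder URL, and its echo behaviour are not part of the repository, and it assumes create_completion is the only abstract method a subclass must supply, which is what the hunks above suggest.

# Hypothetical provider stub; ExampleProvider is not part of g4f.
from g4f.Provider.base_provider import BaseProvider
from g4f.typing import CreateResult


class ExampleProvider(BaseProvider):
    url: str = "https://example.invalid"  # placeholder endpoint
    working: bool = True
    supports_stream: bool = True
    supports_gpt_35_turbo: bool = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list,
        stream: bool,
        **kwargs
    ) -> CreateResult:
        # A real provider would stream chunks from its backend here;
        # this stub just echoes the last user message.
        yield f"echo: {messages[-1]['content']}"

With such a subclass, BaseProvider.create_async (second hunk above) runs the blocking generator in an executor and returns the joined text, roughly await ExampleProvider.create_async("gpt-3.5-turbo", [{"role": "user", "content": "hi"}]).
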
diff --git a/g4f/Provider/retry_provider.py b/g4f/Provider/retry_provider.py
index c1672aba..b49020b2 100644
--- a/g4f/Provider/retry_provider.py
+++ b/g4f/Provider/retry_provider.py
@@ -1,33 +1,33 @@
 from __future__ import annotations
 import random
-
+from typing import List, Type, Dict
 from ..typing import CreateResult
 from .base_provider import BaseProvider, AsyncProvider
 from ..debug import logging
 class RetryProvider(AsyncProvider):
-    __name__ = "RetryProvider"
-    working = True
-    needs_auth = False
-    supports_stream = True
-    supports_gpt_35_turbo = False
-    supports_gpt_4 = False
+    __name__: str = "RetryProvider"
+    working: bool = True
+    needs_auth: bool = False
+    supports_stream: bool = True
+    supports_gpt_35_turbo: bool = False
+    supports_gpt_4: bool = False
     def __init__(
         self,
-        providers: list[type[BaseProvider]],
+        providers: List[Type[BaseProvider]],
         shuffle: bool = True
     ) -> None:
-        self.providers = providers
-        self.shuffle = shuffle
+        self.providers: List[Type[BaseProvider]] = providers
+        self.shuffle: bool = shuffle
     def create_completion(
         self,
         model: str,
-        messages: list[dict[str, str]],
+        messages: List[Dict[str, str]],
         stream: bool = False,
         **kwargs
     ) -> CreateResult:
@@ -38,8 +38,8 @@ class RetryProvider(AsyncProvider):
         if self.shuffle:
             random.shuffle(providers)
-        self.exceptions = {}
-        started = False
+        self.exceptions: Dict[str, Exception] = {}
+        started: bool = False
         for provider in providers:
             try:
                 if logging:
@@ -61,14 +61,14 @@ class RetryProvider(AsyncProvider):
     async def create_async(
         self,
         model: str,
-        messages: list[dict[str, str]],
+        messages: List[Dict[str, str]],
         **kwargs
     ) -> str:
         providers = [provider for provider in self.providers]
         if self.shuffle:
             random.shuffle(providers)
-        self.exceptions = {}
+        self.exceptions: Dict[str, Exception] = {}
         for provider in providers:
             try:
                 return await provider.create_async(model, messages, **kwargs)
@@ -79,7 +79,7 @@ class RetryProvider(AsyncProvider):
         self.raise_exceptions()
-    def raise_exceptions(self):
+    def raise_exceptions(self) -> None:
         if self.exceptions:
             raise RuntimeError("\n".join(["All providers failed:"] + [
                 f"{p}: {self.exceptions[p].__class__.__name__}: {self.exceptions[p]}" for p in self.exceptions