summaryrefslogtreecommitdiffstats
path: root/g4f/Provider
diff options
context:
space:
mode:
authorH Lohaus <hlohaus@users.noreply.github.com>2024-02-23 17:27:19 +0100
committerGitHub <noreply@github.com>2024-02-23 17:27:19 +0100
commit0687d3c0af7179848ce0b91b8a4f0af2a432edaa (patch)
treee88b25bcc756e02741ea1875fcf62ff9ba76b975 /g4f/Provider
parentFix issue with stop in client (diff)
parentImprove readme, add smartphone guide (diff)
downloadgpt4free-0687d3c0af7179848ce0b91b8a4f0af2a432edaa.tar
gpt4free-0687d3c0af7179848ce0b91b8a4f0af2a432edaa.tar.gz
gpt4free-0687d3c0af7179848ce0b91b8a4f0af2a432edaa.tar.bz2
gpt4free-0687d3c0af7179848ce0b91b8a4f0af2a432edaa.tar.lz
gpt4free-0687d3c0af7179848ce0b91b8a4f0af2a432edaa.tar.xz
gpt4free-0687d3c0af7179848ce0b91b8a4f0af2a432edaa.tar.zst
gpt4free-0687d3c0af7179848ce0b91b8a4f0af2a432edaa.zip
Diffstat (limited to 'g4f/Provider')
-rw-r--r--g4f/Provider/FreeChatgpt.py39
-rw-r--r--g4f/Provider/GeminiProChat.py1
2 files changed, 11 insertions, 29 deletions
diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py
index 0f993690..8981ef79 100644
--- a/g4f/Provider/FreeChatgpt.py
+++ b/g4f/Provider/FreeChatgpt.py
@@ -4,24 +4,13 @@ import json, random
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-models = {
- "claude-v2": "claude-2.0",
- "claude-v2.1":"claude-2.1",
- "gemini-pro": "google-gemini-pro"
-}
-urls = [
- "https://free.chatgpt.org.uk",
- "https://ai.chatgpt.org.uk"
-]
-
-class FreeChatgpt(AsyncGeneratorProvider):
+class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://free.chatgpt.org.uk"
working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
supports_message_history = True
+ default_model = "google-gemini-pro"
@classmethod
async def create_async_generator(
@@ -31,11 +20,6 @@ class FreeChatgpt(AsyncGeneratorProvider):
proxy: str = None,
**kwargs
) -> AsyncResult:
- if model in models:
- model = models[model]
- elif not model:
- model = "gpt-3.5-turbo"
- url = random.choice(urls)
headers = {
"Accept": "application/json, text/event-stream",
"Content-Type":"application/json",
@@ -51,16 +35,15 @@ class FreeChatgpt(AsyncGeneratorProvider):
}
async with ClientSession(headers=headers) as session:
data = {
- "messages":messages,
- "stream":True,
- "model":model,
- "temperature":0.5,
- "presence_penalty":0,
- "frequency_penalty":0,
- "top_p":1,
- **kwargs
+ "messages": messages,
+ "stream": True,
+ "model": cls.get_model(""),
+ "temperature": kwargs.get("temperature", 0.5),
+ "presence_penalty": kwargs.get("presence_penalty", 0),
+ "frequency_penalty": kwargs.get("frequency_penalty", 0),
+ "top_p": kwargs.get("top_p", 1)
}
- async with session.post(f'{url}/api/openai/v1/chat/completions', json=data, proxy=proxy) as response:
+ async with session.post(f'{cls.url}/api/openai/v1/chat/completions', json=data, proxy=proxy) as response:
response.raise_for_status()
started = False
async for line in response.content:
diff --git a/g4f/Provider/GeminiProChat.py b/g4f/Provider/GeminiProChat.py
index 488f5f0e..8b8fc5dc 100644
--- a/g4f/Provider/GeminiProChat.py
+++ b/g4f/Provider/GeminiProChat.py
@@ -11,7 +11,6 @@ from .base_provider import AsyncGeneratorProvider
class GeminiProChat(AsyncGeneratorProvider):
url = "https://gemini-chatbot-sigma.vercel.app"
working = True
- supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(