path: root/g4f/Provider
author     Tekky <98614666+xtekky@users.noreply.github.com>  2024-10-30 09:57:55 +0100
committer  Tekky <98614666+xtekky@users.noreply.github.com>  2024-10-30 09:57:55 +0100
commit     1443c60cc86f7f02cc6c7a4b2a31d6b1dad66a26 (patch)
tree       435d831df4ad5c18839cfaa647f23e5be035cdd6 /g4f/Provider
parent     implement direct import of `Client` without using `g4f.client` (diff)
parent     Merge pull request #2304 from kqlio67/main (diff)
Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/AI365VIP.py               |   2
-rw-r--r--  g4f/Provider/AiMathGPT.py              |   4
-rw-r--r--  g4f/Provider/Blackbox.py               |  10
-rw-r--r--  g4f/Provider/GizAI.py                  | 151
-rw-r--r--  g4f/Provider/__init__.py               |   1
-rw-r--r--  g4f/Provider/nexra/NexraChatGPT.py     | 270
-rw-r--r--  g4f/Provider/nexra/NexraChatGPT4o.py   |  86
-rw-r--r--  g4f/Provider/nexra/NexraChatGptV2.py   |  92
-rw-r--r--  g4f/Provider/nexra/NexraChatGptWeb.py  |  64
-rw-r--r--  g4f/Provider/nexra/__init__.py         |   3
10 files changed, 390 insertions(+), 293 deletions(-)
diff --git a/g4f/Provider/AI365VIP.py b/g4f/Provider/AI365VIP.py
index c7ebf6b5..511ad568 100644
--- a/g4f/Provider/AI365VIP.py
+++ b/g4f/Provider/AI365VIP.py
@@ -10,7 +10,7 @@ from .helper import format_prompt
class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chat.ai365vip.com"
api_endpoint = "/api/chat"
- working = True
+ working = False
default_model = 'gpt-3.5-turbo'
models = [
'gpt-3.5-turbo',
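
Note: flipping `working` to False removes AI365VIP from automatic provider
selection. A minimal sketch, assuming the classic `g4f.ChatCompletion` API,
of pinning the provider explicitly anyway (expect failures while the flag
is False; the model name is illustrative):

    import g4f
    from g4f.Provider import AI365VIP

    # Explicit provider selection bypasses the automatic rotation that
    # honours `working`; calls may fail while the backend is down.
    response = g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        provider=AI365VIP,
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response)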
diff --git a/g4f/Provider/AiMathGPT.py b/g4f/Provider/AiMathGPT.py
index 4399320a..90931691 100644
--- a/g4f/Provider/AiMathGPT.py
+++ b/g4f/Provider/AiMathGPT.py
@@ -60,10 +60,6 @@ class AiMathGPT(AsyncGeneratorProvider, ProviderModelMixin):
data = {
"messages": [
{
- "role": "system",
- "content": ""
- },
- {
"role": "user",
"content": format_prompt(messages)
}
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 5cd43eed..4052893a 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -51,7 +51,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'ReactAgent',
'XcodeAgent',
'AngularJSAgent',
- 'RepoMap',
]
agentMode = {
@@ -78,7 +77,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'ReactAgent': {'mode': True, 'id': "React Agent"},
'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
- 'RepoMap': {'mode': True, 'id': "repomap"},
}
userSelectedModel = {
@@ -174,7 +172,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
proxy: Optional[str] = None,
image: ImageType = None,
image_name: str = None,
- websearch: bool = False,
+ web_search: bool = False,
**kwargs
) -> AsyncGenerator[Union[str, ImageResponse], None]:
"""
@@ -186,7 +184,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
proxy (Optional[str]): Proxy URL, if needed.
image (ImageType): Image data to be processed, if any.
image_name (str): Name of the image file, if an image is provided.
- websearch (bool): Enables or disables web search mode.
+ web_search (bool): Enables or disables web search mode.
**kwargs: Additional keyword arguments.
Yields:
@@ -276,7 +274,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"clickedForceWebSearch": False,
"visitFromDelta": False,
"mobileClient": False,
- "webSearchMode": websearch,
+ "webSearchMode": web_search,
"userSelectedModel": cls.userSelectedModel.get(model, model)
}
@@ -313,7 +311,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
else:
yield cleaned_response
else:
- if websearch:
+ if web_search:
match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
if match:
source_part = match.group(1).strip()
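
The Blackbox hunks above rename the `websearch` keyword to `web_search`
(and drop the RepoMap agent). A minimal usage sketch under that assumption;
note that a caller still passing the old `websearch=` spelling would now
be swallowed silently by `**kwargs`:

    import asyncio
    from g4f.Provider import Blackbox

    async def main():
        messages = [{"role": "user", "content": "Latest Python release?"}]
        # `web_search=True` is the post-rename spelling; the model
        # name here is illustrative.
        async for chunk in Blackbox.create_async_generator(
            "blackboxai", messages, web_search=True
        ):
            print(chunk, end="")

    asyncio.run(main())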
diff --git a/g4f/Provider/GizAI.py b/g4f/Provider/GizAI.py
new file mode 100644
index 00000000..127edc9e
--- /dev/null
+++ b/g4f/Provider/GizAI.py
@@ -0,0 +1,151 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from ..image import ImageResponse
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://app.giz.ai/assistant/"
+ api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
+ working = True
+
+ supports_system_message = True
+ supports_message_history = True
+
+ # Chat models
+ default_model = 'chat-gemini-flash'
+ chat_models = [
+ default_model,
+ 'chat-gemini-pro',
+ 'chat-gpt4m',
+ 'chat-gpt4',
+ 'claude-sonnet',
+ 'claude-haiku',
+ 'llama-3-70b',
+ 'llama-3-8b',
+ 'mistral-large',
+ 'chat-o1-mini'
+ ]
+
+ # Image models
+ image_models = [
+ 'flux1',
+ 'sdxl',
+ 'sd',
+ 'sd35',
+ ]
+
+ models = [*chat_models, *image_models]
+
+ model_aliases = {
+ # Chat model aliases
+ "gemini-flash": "chat-gemini-flash",
+ "gemini-pro": "chat-gemini-pro",
+ "gpt-4o-mini": "chat-gpt4m",
+ "gpt-4o": "chat-gpt4",
+ "claude-3.5-sonnet": "claude-sonnet",
+ "claude-3-haiku": "claude-haiku",
+ "llama-3.1-70b": "llama-3-70b",
+ "llama-3.1-8b": "llama-3-8b",
+ "o1-mini": "chat-o1-mini",
+ # Image model aliases
+ "sd-1.5": "sd",
+ "sd-3.5": "sd35",
+ "flux-schnell": "flux1",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ def is_image_model(cls, model: str) -> bool:
+ return model in cls.image_models
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ 'Accept': 'application/json, text/plain, */*',
+ 'Accept-Language': 'en-US,en;q=0.9',
+ 'Cache-Control': 'no-cache',
+ 'Connection': 'keep-alive',
+ 'Content-Type': 'application/json',
+ 'Origin': 'https://app.giz.ai',
+ 'Pragma': 'no-cache',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"'
+ }
+
+ async with ClientSession() as session:
+ if cls.is_image_model(model):
+ # Image generation
+ prompt = messages[-1]["content"]
+ data = {
+ "model": model,
+ "input": {
+ "width": "1024",
+ "height": "1024",
+ "steps": 4,
+ "output_format": "webp",
+ "batch_size": 1,
+ "mode": "plan",
+ "prompt": prompt
+ }
+ }
+ async with session.post(
+ cls.api_endpoint,
+ headers=headers,
+ data=json.dumps(data),
+ proxy=proxy
+ ) as response:
+ response.raise_for_status()
+ response_data = await response.json()
+ if response_data.get('status') == 'completed' and response_data.get('output'):
+ for url in response_data['output']:
+ yield ImageResponse(images=url, alt="Generated Image")
+ else:
+ # Chat completion
+ data = {
+ "model": model,
+ "input": {
+ "messages": [
+ {
+ "type": "human",
+ "content": format_prompt(messages)
+ }
+ ],
+ "mode": "plan"
+ },
+ "noStream": True
+ }
+ async with session.post(
+ cls.api_endpoint,
+ headers=headers,
+ data=json.dumps(data),
+ proxy=proxy
+ ) as response:
+ response.raise_for_status()
+ result = await response.json()
+ yield result.get('output', '')
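
The new GizAI provider resolves public aliases through `get_model` and
branches on `is_image_model`, so one entry point covers both chat and image
generation. A minimal sketch of driving it directly (alias and prompt are
illustrative):

    import asyncio
    from g4f.Provider import GizAI

    async def main():
        messages = [{"role": "user", "content": "One-line summary of asyncio."}]
        # "gemini-flash" resolves to "chat-gemini-flash" via model_aliases.
        async for chunk in GizAI.create_async_generator("gemini-flash", messages):
            print(chunk)

    asyncio.run(main())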
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 8f36606b..1caf8aaf 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -47,6 +47,7 @@ from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
from .FreeNetfly import FreeNetfly
from .GeminiPro import GeminiPro
+from .GizAI import GizAI
from .GPROChat import GPROChat
from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
diff --git a/g4f/Provider/nexra/NexraChatGPT.py b/g4f/Provider/nexra/NexraChatGPT.py
index fc5051ee..074a0363 100644
--- a/g4f/Provider/nexra/NexraChatGPT.py
+++ b/g4f/Provider/nexra/NexraChatGPT.py
@@ -1,45 +1,52 @@
from __future__ import annotations
+import asyncio
import json
import requests
+from typing import Any, Dict
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
-class NexraChatGPT(AbstractProvider, ProviderModelMixin):
+
+class NexraChatGPT(AsyncGeneratorProvider, ProviderModelMixin):
label = "Nexra ChatGPT"
url = "https://nexra.aryahcr.cc/documentation/chatgpt/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt"
+ api_endpoint_nexra_chatgpt = "https://nexra.aryahcr.cc/api/chat/gpt"
+ api_endpoint_nexra_chatgpt4o = "https://nexra.aryahcr.cc/api/chat/complements"
+ api_endpoint_nexra_chatgpt_v2 = "https://nexra.aryahcr.cc/api/chat/complements"
+ api_endpoint_nexra_gptweb = "https://nexra.aryahcr.cc/api/chat/gptweb"
working = True
+ supports_system_message = True
+ supports_message_history = True
+ supports_stream = True
default_model = 'gpt-3.5-turbo'
- models = ['gpt-4', 'gpt-4-0613', 'gpt-4-0314', 'gpt-4-32k-0314', default_model, 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002', 'gpt-3', 'text-curie-001', 'text-babbage-001', 'text-ada-001', 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002']
+ nexra_chatgpt = [
+ 'gpt-4', 'gpt-4-0613', 'gpt-4-0314', 'gpt-4-32k-0314',
+ default_model, 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
+ 'text-davinci-003', 'text-davinci-002', 'code-davinci-002', 'gpt-3', 'text-curie-001', 'text-babbage-001', 'text-ada-001', 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002'
+ ]
+ nexra_chatgpt4o = ['gpt-4o']
+ nexra_chatgptv2 = ['chatgpt']
+ nexra_gptweb = ['gptweb']
+ models = nexra_chatgpt + nexra_chatgpt4o + nexra_chatgptv2 + nexra_gptweb
model_aliases = {
"gpt-4": "gpt-4-0613",
- "gpt-4": "gpt-4-32k",
- "gpt-4": "gpt-4-0314",
- "gpt-4": "gpt-4-32k-0314",
-
+ "gpt-4-32k": "gpt-4-32k-0314",
"gpt-3.5-turbo": "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
- "gpt-3.5-turbo": "gpt-3.5-turbo-0301",
-
+ "gpt-3.5-turbo-0613": "gpt-3.5-turbo-16k-0613",
"gpt-3": "text-davinci-003",
- "gpt-3": "text-davinci-002",
- "gpt-3": "code-davinci-002",
- "gpt-3": "text-curie-001",
- "gpt-3": "text-babbage-001",
- "gpt-3": "text-ada-001",
- "gpt-3": "text-ada-001",
- "gpt-3": "davinci",
- "gpt-3": "curie",
- "gpt-3": "babbage",
- "gpt-3": "ada",
- "gpt-3": "babbage-002",
- "gpt-3": "davinci-002",
+ "text-davinci-002": "code-davinci-002",
+ "text-curie-001": "text-babbage-001",
+ "text-ada-001": "davinci",
+ "curie": "babbage",
+ "ada": "babbage-002",
+ "davinci-002": "davinci-002",
+ "chatgpt": "chatgpt",
+ "gptweb": "gptweb"
}
@classmethod
@@ -50,40 +57,229 @@ class NexraChatGPT(AbstractProvider, ProviderModelMixin):
return cls.model_aliases[model]
else:
return cls.default_model
-
+
@classmethod
- def create_completion(
+ async def create_async_generator(
cls,
model: str,
messages: Messages,
+ stream: bool = False,
proxy: str = None,
markdown: bool = False,
**kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
+ ) -> AsyncResult:
+ if model in cls.nexra_chatgpt:
+ async for chunk in cls._create_async_generator_nexra_chatgpt(model, messages, proxy, **kwargs):
+ yield chunk
+ elif model in cls.nexra_chatgpt4o:
+ async for chunk in cls._create_async_generator_nexra_chatgpt4o(model, messages, stream, proxy, markdown, **kwargs):
+ yield chunk
+ elif model in cls.nexra_chatgptv2:
+ async for chunk in cls._create_async_generator_nexra_chatgpt_v2(model, messages, stream, proxy, markdown, **kwargs):
+ yield chunk
+ elif model in cls.nexra_gptweb:
+ async for chunk in cls._create_async_generator_nexra_gptweb(model, messages, proxy, **kwargs):
+ yield chunk
+ @classmethod
+ async def _create_async_generator_nexra_chatgpt(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ markdown: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
headers = {
- 'Content-Type': 'application/json'
+ "Content-Type": "application/json"
}
+ prompt = format_prompt(messages)
data = {
- "messages": [],
- "prompt": format_prompt(messages),
+ "messages": messages,
+ "prompt": prompt,
"model": model,
"markdown": markdown
}
+
+ loop = asyncio.get_event_loop()
+ try:
+ response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt, data, headers, proxy)
+ filtered_response = cls._filter_response(response)
+
+ for chunk in filtered_response:
+ yield chunk
+ except Exception as e:
+ print(f"Error during API request (nexra_chatgpt): {e}")
+
+ @classmethod
+ async def _create_async_generator_nexra_chatgpt4o(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ proxy: str = None,
+ markdown: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
- response = requests.post(cls.api_endpoint, headers=headers, json=data)
+ headers = {
+ "Content-Type": "application/json"
+ }
+
+ prompt = format_prompt(messages)
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": prompt
+ }
+ ],
+ "stream": stream,
+ "markdown": markdown,
+ "model": model
+ }
- return cls.process_response(response)
+ loop = asyncio.get_event_loop()
+ try:
+ response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt4o, data, headers, proxy, stream)
+
+ if stream:
+ async for chunk in cls._process_streaming_response(response):
+ yield chunk
+ else:
+ for chunk in cls._process_non_streaming_response(response):
+ yield chunk
+ except Exception as e:
+ print(f"Error during API request (nexra_chatgpt4o): {e}")
@classmethod
- def process_response(cls, response):
+ async def _create_async_generator_nexra_chatgpt_v2(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ proxy: str = None,
+ markdown: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+
+ prompt = format_prompt(messages)
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": prompt
+ }
+ ],
+ "stream": stream,
+ "markdown": markdown,
+ "model": model
+ }
+
+ loop = asyncio.get_event_loop()
+ try:
+ response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt_v2, data, headers, proxy, stream)
+
+ if stream:
+ async for chunk in cls._process_streaming_response(response):
+ yield chunk
+ else:
+ for chunk in cls._process_non_streaming_response(response):
+ yield chunk
+ except Exception as e:
+ print(f"Error during API request (nexra_chatgpt_v2): {e}")
+
+ @classmethod
+ async def _create_async_generator_nexra_gptweb(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ markdown: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+
+ prompt = format_prompt(messages)
+ data = {
+ "prompt": prompt,
+ "markdown": markdown,
+ }
+
+ loop = asyncio.get_event_loop()
+ try:
+ response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_gptweb, data, headers, proxy)
+
+ for chunk in response.iter_content(1024):
+ if chunk:
+ decoded_chunk = chunk.decode().lstrip('_')
+ try:
+ response_json = json.loads(decoded_chunk)
+ if response_json.get("status"):
+ yield response_json.get("gpt", "")
+ except json.JSONDecodeError:
+ continue
+ except Exception as e:
+ print(f"Error during API request (nexra_gptweb): {e}")
+
+ @staticmethod
+ def _sync_post_request(url: str, data: Dict[str, Any], headers: Dict[str, str], proxy: str = None, stream: bool = False) -> requests.Response:
+ proxies = {
+ "http": proxy,
+ "https": proxy,
+ } if proxy else None
+
+ try:
+ response = requests.post(url, json=data, headers=headers, proxies=proxies, stream=stream)
+ response.raise_for_status()
+ return response
+ except requests.RequestException as e:
+ print(f"Request failed: {e}")
+ raise
+
+ @staticmethod
+ def _process_non_streaming_response(response: requests.Response) -> str:
if response.status_code == 200:
try:
- data = response.json()
- return data.get('gpt', '')
+ content = response.text.lstrip('')
+ data = json.loads(content)
+ return data.get('message', '')
except json.JSONDecodeError:
return "Error: Unable to decode JSON response"
else:
return f"Error: {response.status_code}"
+
+ @staticmethod
+ async def _process_streaming_response(response: requests.Response):
+ full_message = ""
+ for line in response.iter_lines(decode_unicode=True):
+ if line:
+ try:
+ line = line.lstrip('')
+ data = json.loads(line)
+ if data.get('finish'):
+ break
+ message = data.get('message', '')
+ if message:
+ yield message[len(full_message):]
+ full_message = message
+ except json.JSONDecodeError:
+ pass
+
+ @staticmethod
+ def _filter_response(response: requests.Response) -> str:
+ response_json = response.json()
+ return response_json.get("gpt", "")
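
After this rewrite, NexraChatGPT dispatches on the raw model name: the
gpt-3.5/gpt-4/davinci family goes to /api/chat/gpt, 'gpt-4o' and 'chatgpt'
to /api/chat/complements (optionally streamed), and 'gptweb' to
/api/chat/gptweb. A minimal sketch exercising two of those paths (prompts
illustrative):

    import asyncio
    from g4f.Provider.nexra import NexraChatGPT

    async def main():
        messages = [{"role": "user", "content": "Hi"}]
        # Routed to the complements endpoint with streaming enabled.
        async for chunk in NexraChatGPT.create_async_generator(
            "gpt-4o", messages, stream=True
        ):
            print(chunk, end="")
        # Routed to the gptweb endpoint (non-streaming).
        async for chunk in NexraChatGPT.create_async_generator("gptweb", messages):
            print(chunk)

    asyncio.run(main())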
diff --git a/g4f/Provider/nexra/NexraChatGPT4o.py b/g4f/Provider/nexra/NexraChatGPT4o.py
deleted file mode 100644
index 126d32b8..00000000
--- a/g4f/Provider/nexra/NexraChatGPT4o.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ..helper import format_prompt
-
-class NexraChatGPT4o(AbstractProvider, ProviderModelMixin):
- label = "Nexra ChatGPT4o"
- url = "https://nexra.aryahcr.cc/documentation/chatgpt/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- working = True
- supports_stream = True
-
- default_model = "gpt-4o"
- models = [default_model]
-
- @classmethod
- def get_model(cls, model: str) -> str:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool,
- proxy: str = None,
- markdown: bool = False,
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "messages": [
- {
- "role": "user",
- "content": format_prompt(messages)
- }
- ],
- "stream": stream,
- "markdown": markdown,
- "model": model
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=stream)
-
- if stream:
- return cls.process_streaming_response(response)
- else:
- return cls.process_non_streaming_response(response)
-
- @classmethod
- def process_non_streaming_response(cls, response):
- if response.status_code == 200:
- try:
- content = response.text.lstrip('')
- data = json.loads(content)
- return data.get('message', '')
- except json.JSONDecodeError:
- return "Error: Unable to decode JSON response"
- else:
- return f"Error: {response.status_code}"
-
- @classmethod
- def process_streaming_response(cls, response):
- full_message = ""
- for line in response.iter_lines(decode_unicode=True):
- if line:
- try:
- line = line.lstrip('')
- data = json.loads(line)
- if data.get('finish'):
- break
- message = data.get('message', '')
- if message and message != full_message:
- yield message[len(full_message):]
- full_message = message
- except json.JSONDecodeError:
- pass
diff --git a/g4f/Provider/nexra/NexraChatGptV2.py b/g4f/Provider/nexra/NexraChatGptV2.py
deleted file mode 100644
index 1ff42705..00000000
--- a/g4f/Provider/nexra/NexraChatGptV2.py
+++ /dev/null
@@ -1,92 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ..helper import format_prompt
-
-class NexraChatGptV2(AbstractProvider, ProviderModelMixin):
- label = "Nexra ChatGPT v2"
- url = "https://nexra.aryahcr.cc/documentation/chatgpt/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- working = True
- supports_stream = True
-
- default_model = 'chatgpt'
- models = [default_model]
- model_aliases = {"gpt-4": "chatgpt"}
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool,
- proxy: str = None,
- markdown: bool = False,
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "messages": [
- {
- "role": "user",
- "content": format_prompt(messages)
- }
- ],
- "stream": stream,
- "markdown": markdown,
- "model": model
- }
-
- response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=stream)
-
- if stream:
- return cls.process_streaming_response(response)
- else:
- return cls.process_non_streaming_response(response)
-
- @classmethod
- def process_non_streaming_response(cls, response):
- if response.status_code == 200:
- try:
- content = response.text.lstrip('')
- data = json.loads(content)
- return data.get('message', '')
- except json.JSONDecodeError:
- return "Error: Unable to decode JSON response"
- else:
- return f"Error: {response.status_code}"
-
- @classmethod
- def process_streaming_response(cls, response):
- full_message = ""
- for line in response.iter_lines(decode_unicode=True):
- if line:
- try:
- line = line.lstrip('')
- data = json.loads(line)
- if data.get('finish'):
- break
- message = data.get('message', '')
- if message:
- yield message[len(full_message):]
- full_message = message
- except json.JSONDecodeError:
- pass
diff --git a/g4f/Provider/nexra/NexraChatGptWeb.py b/g4f/Provider/nexra/NexraChatGptWeb.py
deleted file mode 100644
index f82694d4..00000000
--- a/g4f/Provider/nexra/NexraChatGptWeb.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ..helper import format_prompt
-
-class NexraChatGptWeb(AbstractProvider, ProviderModelMixin):
- label = "Nexra ChatGPT Web"
- url = "https://nexra.aryahcr.cc/documentation/chatgpt/en"
- working = True
-
- default_model = "gptweb"
- models = [default_model]
- model_aliases = {"gpt-4": "gptweb"}
- api_endpoints = {"gptweb": "https://nexra.aryahcr.cc/api/chat/gptweb"}
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- markdown: bool = False,
- **kwargs
- ) -> CreateResult:
- model = cls.get_model(model)
- api_endpoint = cls.api_endpoints.get(model, cls.api_endpoints[cls.default_model])
-
- headers = {
- 'Content-Type': 'application/json'
- }
-
- data = {
- "prompt": format_prompt(messages),
- "markdown": markdown
- }
-
- response = requests.post(api_endpoint, headers=headers, json=data)
-
- return cls.process_response(response)
-
- @classmethod
- def process_response(cls, response):
- if response.status_code == 200:
- try:
- content = response.text.lstrip('_')
- json_response = json.loads(content)
- return json_response.get('gpt', '')
- except json.JSONDecodeError:
- return "Error: Unable to decode JSON response"
- else:
- return f"Error: {response.status_code}"
diff --git a/g4f/Provider/nexra/__init__.py b/g4f/Provider/nexra/__init__.py
index 6121fdc0..bebc1fb6 100644
--- a/g4f/Provider/nexra/__init__.py
+++ b/g4f/Provider/nexra/__init__.py
@@ -1,9 +1,6 @@
from .NexraBing import NexraBing
from .NexraBlackbox import NexraBlackbox
from .NexraChatGPT import NexraChatGPT
-from .NexraChatGPT4o import NexraChatGPT4o
-from .NexraChatGptV2 import NexraChatGptV2
-from .NexraChatGptWeb import NexraChatGptWeb
from .NexraDallE import NexraDallE
from .NexraDallE2 import NexraDallE2
from .NexraEmi import NexraEmi
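
Migration note: the three deleted classes (NexraChatGPT4o, NexraChatGptV2,
NexraChatGptWeb) are gone from the package namespace, but their model names
survive in the consolidated provider. A quick sanity check:

    # Before this commit: from g4f.Provider.nexra import NexraChatGptWeb
    # After: the same backends are reached through NexraChatGPT.
    from g4f.Provider.nexra import NexraChatGPT

    assert "gpt-4o" in NexraChatGPT.models
    assert "chatgpt" in NexraChatGPT.models
    assert "gptweb" in NexraChatGPT.models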