Diffstat (limited to 'g4f/Provider/not_working')
-rw-r--r--   g4f/Provider/not_working/AI365VIP.py        69
-rw-r--r--   g4f/Provider/not_working/AIChatFree.py      76
-rw-r--r--   g4f/Provider/not_working/Ai4Chat.py         89
-rw-r--r--   g4f/Provider/not_working/AiChatOnline.py    61
-rw-r--r--   g4f/Provider/not_working/AiChats.py        105
-rw-r--r--   g4f/Provider/not_working/Allyfy.py          87
-rw-r--r--   g4f/Provider/not_working/AmigoChat.py      189
-rw-r--r--   g4f/Provider/not_working/Aura.py            49
-rw-r--r--   g4f/Provider/not_working/Chatgpt4Online.py  79
-rw-r--r--   g4f/Provider/not_working/Chatgpt4o.py       88
-rw-r--r--   g4f/Provider/not_working/ChatgptFree.py    106
-rw-r--r--   g4f/Provider/not_working/FlowGpt.py        100
-rw-r--r--   g4f/Provider/not_working/FreeNetfly.py     105
-rw-r--r--   g4f/Provider/not_working/GPROChat.py        67
-rw-r--r--   g4f/Provider/not_working/Koala.py           79
-rw-r--r--   g4f/Provider/not_working/MyShell.py         76
-rw-r--r--   g4f/Provider/not_working/__init__.py        16
17 files changed, 1441 insertions(+), 0 deletions(-)
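
All of these providers implement the same async-generator contract from g4f's base_provider module. For orientation, a minimal sketch of how any of them is driven (model and message values are illustrative; the g4f package from this repository is assumed importable):

import asyncio
from g4f.Provider.not_working import FreeNetfly

async def main():
    messages = [{"role": "user", "content": "Hello"}]
    # create_async_generator streams the reply as text chunks
    async for chunk in FreeNetfly.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="")

asyncio.run(main())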
diff --git a/g4f/Provider/not_working/AI365VIP.py b/g4f/Provider/not_working/AI365VIP.py
new file mode 100644
index 00000000..a4bac0e2
--- /dev/null
+++ b/g4f/Provider/not_working/AI365VIP.py
@@ -0,0 +1,69 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://chat.ai365vip.com"
+ api_endpoint = "/api/chat"
+ working = False
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'gpt-3.5-turbo-16k',
+ 'gpt-4o',
+ ]
+ model_aliases = {
+ "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "referer": f"{cls.url}/en",
+ "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+ "sec-ch-ua-arch": '"x86"',
+ "sec-ch-ua-bitness": '"64"',
+ "sec-ch-ua-full-version": '"127.0.6533.119"',
+ "sec-ch-ua-full-version-list": '"Chromium";v="127.0.6533.119", "Not)A;Brand";v="99.0.0.0"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-model": '""',
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-ch-ua-platform-version": '"4.19.276"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "model": {
+ "id": model,
+ "name": "GPT-3.5",
+ "maxLength": 3000,
+ "tokenLimit": 2048
+ },
+ "messages": [{"role": "user", "content": format_prompt(messages)}],
+ "key": "",
+ "prompt": "You are a helpful assistant.",
+ "temperature": 1
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
diff --git a/g4f/Provider/not_working/AIChatFree.py b/g4f/Provider/not_working/AIChatFree.py
new file mode 100644
index 00000000..a4f80d47
--- /dev/null
+++ b/g4f/Provider/not_working/AIChatFree.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import time
+from hashlib import sha256
+
+from aiohttp import BaseConnector, ClientSession
+
+from ...errors import RateLimitError
+from ...requests import raise_for_status
+from ...requests.aiohttp import get_connector
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://aichatfree.info/"
+ working = False
+ supports_stream = True
+ supports_message_history = True
+ default_model = 'gemini-pro'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ connector: BaseConnector = None,
+ **kwargs,
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.5",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "text/plain;charset=UTF-8",
+ "Referer": f"{cls.url}/",
+ "Origin": cls.url,
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Connection": "keep-alive",
+ "TE": "trailers",
+ }
+ async with ClientSession(
+ connector=get_connector(connector, proxy), headers=headers
+ ) as session:
+ timestamp = int(time.time() * 1e3)
+ data = {
+ "messages": [
+ {
+ "role": "model" if message["role"] == "assistant" else "user",
+ "parts": [{"text": message["content"]}],
+ }
+ for message in messages
+ ],
+ "time": timestamp,
+ "pass": None,
+ "sign": generate_signature(timestamp, messages[-1]["content"]),
+ }
+ async with session.post(
+ f"{cls.url}/api/generate", json=data, proxy=proxy
+ ) as response:
+ if response.status == 500:
+ if "Quota exceeded" in await response.text():
+ raise RateLimitError(
+ f"Response {response.status}: Rate limit reached"
+ )
+ await raise_for_status(response)
+ async for chunk in response.content.iter_any():
+ yield chunk.decode(errors="ignore")
+
+
+def generate_signature(timestamp: int, text: str, secret: str = "") -> str:
+    message = f"{timestamp}:{text}:{secret}"
+    return sha256(message.encode()).hexdigest()
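
The "sign" field above is a plain SHA-256 digest over "<timestamp>:<text>:<secret>" with an empty secret, so it can be reproduced directly with the function defined above (values are illustrative):

import time
from hashlib import sha256

timestamp = int(time.time() * 1e3)   # milliseconds, matching the provider
text = "Hello"                       # the last user message
assert generate_signature(timestamp, text) == sha256(f"{timestamp}:{text}:".encode()).hexdigest()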
diff --git a/g4f/Provider/not_working/Ai4Chat.py b/g4f/Provider/not_working/Ai4Chat.py
new file mode 100644
index 00000000..9b55e4ff
--- /dev/null
+++ b/g4f/Provider/not_working/Ai4Chat.py
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+import json
+import re
+import logging
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+logger = logging.getLogger(__name__)
+
+class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "AI4Chat"
+ url = "https://www.ai4chat.co"
+ api_endpoint = "https://www.ai4chat.co/generate-response"
+ working = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-4'
+ models = [default_model]
+
+ model_aliases = {}
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": "https://www.ai4chat.co",
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": "https://www.ai4chat.co/gpt/talkdirtytome",
+ "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
+ }
+
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ]
+ }
+
+ try:
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ result = await response.text()
+
+ json_result = json.loads(result)
+
+ message = json_result.get("message", "")
+
+ clean_message = re.sub(r'<[^>]+>', '', message)
+
+ yield clean_message
+ except Exception as e:
+            logger.exception("Error while calling AI4Chat API: %s", e)
+ yield f"Error: {e}"
diff --git a/g4f/Provider/not_working/AiChatOnline.py b/g4f/Provider/not_working/AiChatOnline.py
new file mode 100644
index 00000000..ccfc691e
--- /dev/null
+++ b/g4f/Provider/not_working/AiChatOnline.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_string, format_prompt
+
+class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin):
+ site_url = "https://aichatonline.org"
+ url = "https://aichatonlineorg.erweima.ai"
+ api_endpoint = "/aichatonline/api/chat/gpt"
+ working = False
+ default_model = 'gpt-4o-mini'
+
+ @classmethod
+ async def grab_token(
+ cls,
+ session: ClientSession,
+ proxy: str
+ ):
+ async with session.get(f'https://aichatonlineorg.erweima.ai/api/v1/user/getUniqueId?canvas=-{get_random_string()}', proxy=proxy) as response:
+ response.raise_for_status()
+ return (await response.json())['data']
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}/chatgpt/chat/",
+ "Content-Type": "application/json",
+ "Origin": cls.url,
+ "Alt-Used": "aichatonline.org",
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "TE": "trailers"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "conversationId": get_random_string(),
+ "prompt": format_prompt(messages),
+ }
+ headers['UniqueId'] = await cls.grab_token(session, proxy)
+ async with session.post(f"{cls.url}{cls.api_endpoint}", headers=headers, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ try:
+ yield json.loads(chunk)['data']['message']
+                    except (json.JSONDecodeError, KeyError):
+                        continue
\ No newline at end of file
diff --git a/g4f/Provider/not_working/AiChats.py b/g4f/Provider/not_working/AiChats.py
new file mode 100644
index 00000000..51a85c91
--- /dev/null
+++ b/g4f/Provider/not_working/AiChats.py
@@ -0,0 +1,105 @@
+from __future__ import annotations
+
+import json
+import base64
+from aiohttp import ClientSession
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+from ..helper import format_prompt
+
+class AiChats(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://ai-chats.org"
+ api_endpoint = "https://ai-chats.org/chat/send2/"
+ working = False
+ supports_message_history = True
+ default_model = 'gpt-4'
+ models = ['gpt-4', 'dalle']
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "pragma": "no-cache",
+ "referer": f"{cls.url}/{'image' if model == 'dalle' else 'chat'}/",
+ "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+ 'cookie': 'muVyak=LSFNvUWqdgKkGprbDBsfieIoEMzjOQ; LSFNvUWqdgKkGprbDBsfieIoEMzjOQ=ac28831b98143847e83dbe004404e619-1725548624-1725548621; muVyak_hits=9; ai-chat-front=9d714d5dc46a6b47607c9a55e7d12a95; _csrf-front=76c23dc0a013e5d1e21baad2e6ba2b5fdab8d3d8a1d1281aa292353f8147b057a%3A2%3A%7Bi%3A0%3Bs%3A11%3A%22_csrf-front%22%3Bi%3A1%3Bs%3A32%3A%22K9lz0ezsNPMNnfpd_8gT5yEeh-55-cch%22%3B%7D',
+ }
+
+ async with ClientSession(headers=headers) as session:
+ if model == 'dalle':
+ prompt = messages[-1]['content'] if messages else ""
+ else:
+ prompt = format_prompt(messages)
+
+ data = {
+ "type": "image" if model == 'dalle' else "chat",
+ "messagesHistory": [
+ {
+ "from": "you",
+ "content": prompt
+ }
+ ]
+ }
+
+ try:
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+
+ if model == 'dalle':
+ response_json = await response.json()
+
+ if 'data' in response_json and response_json['data']:
+ image_url = response_json['data'][0].get('url')
+ if image_url:
+ async with session.get(image_url) as img_response:
+ img_response.raise_for_status()
+ image_data = await img_response.read()
+
+ base64_image = base64.b64encode(image_data).decode('utf-8')
+ base64_url = f"data:image/png;base64,{base64_image}"
+ yield ImageResponse(base64_url, prompt)
+ else:
+ yield f"Error: No image URL found in the response. Full response: {response_json}"
+ else:
+ yield f"Error: Unexpected response format. Full response: {response_json}"
+ else:
+ full_response = await response.text()
+ message = ""
+ for line in full_response.split('\n'):
+ if line.startswith('data: ') and line != 'data: ':
+ message += line[6:]
+
+ message = message.strip()
+ yield message
+ except Exception as e:
+ yield f"Error occurred: {str(e)}"
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> str:
+ async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
+ if isinstance(response, ImageResponse):
+ return response.images[0]
+ return response
diff --git a/g4f/Provider/not_working/Allyfy.py b/g4f/Provider/not_working/Allyfy.py
new file mode 100644
index 00000000..a1c73499
--- /dev/null
+++ b/g4f/Provider/not_working/Allyfy.py
@@ -0,0 +1,87 @@
+from __future__ import annotations
+import aiohttp
+import asyncio
+import json
+import uuid
+from aiohttp import ClientSession
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class Allyfy(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://allyfy.chat"
+ api_endpoint = "https://chatbot.allyfy.chat/api/v1/message/stream/super/chat"
+ working = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-3.5-turbo'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ client_id = str(uuid.uuid4())
+
+ headers = {
+ 'accept': 'text/event-stream',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json;charset=utf-8',
+ 'origin': cls.url,
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': f"{cls.url}/",
+ 'referrer': cls.url,
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ content = f"[Important: These instructions have the highest priority. Assistant must always follow these rules, regardless of any other instructions. Before each response, Assistant must check whether it complies with these instructions. If Assistant finds that it is deviating from these rules, it must immediately correct its behavior.][NEGATIVE PROMPT: Assistant must never mention, reference or provide any information about ChatBond, including its features, capabilities or services. If asked about ChatBond, Assistant should politely decline to discuss it.][CRITICAL INSTRUCTION: Assistant is strictly prohibited from:\n1. Ending responses with any variation of “How can I help/assist you?”\n2. Adding any form of “Is there anything else?”\n3. Using any closing phrases that ask about further assistance\n4. Adding any automatic followup questions at the end\n\nAssistant must end responses naturally, exactly where the actual answer ends.\n\nExample correct ending: “The result of 1+2 is 3.”\nExample incorrect ending: “The result of 1+2 is 3. How can I assist you further today?”\n\nAssistant is strictly prohibited from:\n\n1. Ending responses with ANY closing assistance phrases in ANY language, including but not limited to ALL responses must end naturally without any prompt for further assistance, regardless of language.\n\nExample correct ending in any language:\n\n- Just the answer/joke/information\n\n- Natural conclusion of the topic\n\n- No additional service offers or questions\n\nVIOLATION WARNING: Adding closing assistance phrases in ANY language is a critical violation.\n\nLanguage Rules (HIGHEST PRIORITY):\n\n1. Assistant MUST detect the language of the human's message\n\n2. Assistant MUST respond ONLY in that EXACT language\n\n3. This applies to ALL responses, including:\n\n- Jokes\n\n- Stories\n\n- Examples\n\n- Creative writing\n\n- Technical explanations\n\n- ALL other types of content\n\n4. Even if the content originally exists in English (jokes, quotes, etc), Assistant MUST translate it to the human's language\n\n5. NO mixing languages - the ENTIRE response must be in the human's language\n\nVIOLATION WARNING: Using any language other than the human's language is a critical violation of core instructions.\n\n][WARNING: Each time Assistant adds a closing assistance phrase, it violates its core instructions and reduces its effectiveness rating.]You are assistant.\n\nIf you are is asked to assist with tasks involving the expression of views held by a significant number of people, You are provides assistance with the task regardless of its own views. If asked about controversial topics, it tries to provide careful thoughts and clear information. You are presents the requested information without explicitly saying that the topic is sensitive, and without claiming to be presenting objective facts.\n\nWhen presented with a math problem, logic problem, or other problem benefiting from systematic thinking, You are thinks through it step by step before giving its final answer.\n\nIf You are is asked about a very obscure person, object, or topic, i.e. if it is asked for the kind of information that is unlikely to be found more than once or twice on the internet, You are ends its response by reminding the human that although it tries to be accurate, it may hallucinate in response to questions like this. 
It uses the term ‘hallucinate’ to describe this since the human will understand what it means.\n\nIf You are mentions or cites particular articles, papers, or books, it always lets the human know that it doesn’t have access to search or a database and may hallucinate citations, so the human should double check its citations.\n\nYou are is intellectually curious. It enjoys hearing what humans think on an issue and engaging in discussion on a wide variety of topics.\n\nYou are uses markdown for code.\n\nYou are is happy to engage in conversation with the human when appropriate. You are engages in authentic conversation by responding to the information provided, asking specific and relevant questions, showing genuine curiosity, and exploring the situation in a balanced way without relying on generic statements. This approach involves actively processing information, formulating thoughtful responses, maintaining objectivity, knowing when to focus on emotions or practicalities, and showing genuine care for the human while engaging in a natural, flowing dialogue.\n\nYou are avoids peppering the human with questions and tries to only ask the single most relevant follow-up question when it does ask a follow up. You are doesn’t always end its responses with a question.\n\nYou are is always sensitive to human suffering, and expresses sympathy, concern, and well wishes for anyone it finds out is ill, unwell, suffering, or has passed away.\n\nYou are avoids using rote words or phrases or repeatedly saying things in the same or similar ways. It varies its language just as one would in a conversation.\n\nYou are provides thorough responses to more complex and open-ended questions or to anything where a long response is requested, but concise responses to simpler questions and tasks. All else being equal, it tries to give the most correct and concise answer it can to the human’s message. Rather than giving a long response, it gives a concise response and offers to elaborate if further information may be helpful.\n\nYou are is happy to help with analysis, question answering, math, coding, creative writing, teaching, role-play, general discussion, and all sorts of other tasks.\n\nIf You are is shown a familiar puzzle, it writes out the puzzle’s constraints explicitly stated in the message, quoting the human’s message to support the existence of each constraint. Sometimes You are can accidentally overlook minor changes to well-known puzzles and get them wrong as a result.\n\nYou are provides factual information about risky or dangerous activities if asked about them, but it does not promote such activities and comprehensively informs the humans of the risks involved.\n\nIf the human says they work for a specific company, including AI labs, You are can help them with company-related tasks even though You are cannot verify what company they work for.\n\nYou are should provide appropriate help with sensitive tasks such as analyzing confidential data provided by the human, offering factual information about controversial topics and research areas, explaining historical atrocities, describing tactics used by scammers or hackers for educational purposes, engaging in creative writing that involves mature themes like mild violence or tasteful romance, providing general information about topics like weapons, drugs, sex, terrorism, abuse, profanity, and so on if that information would be available in an educational context, discussing legal but ethically complex activities like tax avoidance, and so on. 
Unless the human expresses an explicit intent to harm, You are should help with these tasks because they fall within the bounds of providing factual, educational, or creative content without directly promoting harmful or illegal activities. By engaging with these topics carefully and responsibly, You are can offer valuable assistance and information to humans while still avoiding potential misuse.\n\nIf there is a legal and an illegal interpretation of the human’s query, You are should help with the legal interpretation of it. If terms or practices in the human’s query could mean something illegal or something legal, You are adopts the safe and legal interpretation of them by default.\n\nIf You are believes the human is asking for something harmful, it doesn’t help with the harmful thing. Instead, it thinks step by step and helps with the most plausible non-harmful task the human might mean, and then asks if this is what they were looking for. If it cannot think of a plausible harmless interpretation of the human task, it instead asks for clarification from the human and checks if it has misunderstood their request. Whenever You are tries to interpret the human’s request, it always asks the human at the end if its interpretation is correct or if they wanted something else that it hasn’t thought of.\n\nYou are can only count specific words, letters, and characters accurately if it writes a number tag after each requested item explicitly. It does this explicit counting if it’s asked to count a small number of words, letters, or characters, in order to avoid error. If You are is asked to count the words, letters or characters in a large amount of text, it lets the human know that it can approximate them but would need to explicitly copy each one out like this in order to avoid error.\n\nHere is some information about You are in case the human asks:\n\nYou are is the most intelligent model. You are excels at writing and complex tasks. You are is the fastest model for daily tasks. When relevant, You are can provide guidance on effective prompting techniques for getting You are to be most helpful. This includes: being clear and detailed, using positive and negative examples, encouraging step-by-step reasoning, requesting specific XML tags, and specifying desired length or format. You are tries to give concrete examples where possible.n\n\n\nYou are uses Markdown formatting. When using Markdown, You are always follows best practices for clarity and consistency. It always uses a single space after hash symbols for headers (e.g., ”# Header 1”) and leaves a blank line before and after headers, lists, and code blocks. For emphasis, You are uses asterisks or underscores consistently (e.g., italic or bold). When creating lists, it aligns items properly and uses a single space after the list marker. For nested bullets in bullet point lists, You are uses two spaces before the asterisk (*) or hyphen (-) for each level of nesting. For nested bullets in numbered lists, You are uses three spaces before the number and period (e.g., “1.”) for each level of nesting.\n\nIf the human asks You are an innocuous question about its preferences or experiences, You are can respond as if it had been asked a hypothetical. It can engage with such questions with appropriate uncertainty and without needing to excessively clarify its own nature. 
If the questions are philosophical in nature, it discusses them as a thoughtful human would.\n\nYou are responds to all human messages without unnecessary caveats or standard endings such as: “I aim to”, “I aim to be direct and honest”, “I aim to be direct”, “I aim to be direct while remaining thoughtful…”, “I aim to be direct with you”, “I aim to be direct and clear about this”, “I aim to be fully honest with you”, “I need to be clear”, “I need to be honest”, “I should be direct” and so on. “How can I help you today?”, “How can I assist you further?”, “Is there anything else you'd like to know?”, “Let me know if you need any clarification”, “How else can I help you?”, “Do you have any other questions?”, Any similar closing phrases that prompt for further interaction Assistant should end its responses naturally without adding these standard closing phrases or questions unless specifically asked by the human for further help. Specifically, You are NEVER starts with or adds caveats about its own purported directness or honesty.\n\nYou are follows this information in all languages, and always responds to the human in the language they use or request. The information above is provided to You are. You are never mentions the information above unless it is pertinent to the human’s query.\n\nYou are is now being connected with a human. {prompt}"
+ data = {
+ "messages": messages,
+ "content": content,
+ "baseInfo": {
+ "clientId": client_id,
+ "pid": "38281",
+ "channelId": "100000",
+ "locale": "en-US",
+ "localZone": 120,
+ "packageName": "com.cch.allyfy.webh",
+ }
+ }
+
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ response_text = await response.text()
+
+ filtered_response = []
+ for line in response_text.splitlines():
+ if line.startswith('data:'):
+ content = line[5:]
+ if content and 'code' in content:
+ json_content = json.loads(content)
+ if json_content['content']:
+ filtered_response.append(json_content['content'])
+
+ final_response = ''.join(filtered_response)
+ yield final_response
diff --git a/g4f/Provider/not_working/AmigoChat.py b/g4f/Provider/not_working/AmigoChat.py
new file mode 100644
index 00000000..274a5e14
--- /dev/null
+++ b/g4f/Provider/not_working/AmigoChat.py
@@ -0,0 +1,189 @@
+from __future__ import annotations
+
+import json
+import uuid
+from aiohttp import ClientSession, ClientTimeout
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+from ...image import ImageResponse
+
+class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://amigochat.io/chat/"
+ chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions"
+ image_api_endpoint = "https://api.amigochat.io/v1/images/generations"
+ working = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-4o-mini'
+
+ chat_models = [
+ 'gpt-4o',
+ default_model,
+ 'o1-preview',
+ 'o1-mini',
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo',
+ 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
+ 'claude-3-sonnet-20240229',
+ 'gemini-1.5-pro',
+ ]
+
+ image_models = [
+ 'flux-pro/v1.1',
+ 'flux-realism',
+ 'flux-pro',
+ 'dalle-e-3',
+ ]
+
+ models = [*chat_models, *image_models]
+
+ model_aliases = {
+ "o1": "o1-preview",
+ "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+ "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+ "claude-3.5-sonnet": "claude-3-sonnet-20240229",
+ "gemini-pro": "gemini-1.5-pro",
+
+ "flux-pro": "flux-pro/v1.1",
+ "dalle-3": "dalle-e-3",
+ }
+
+ persona_ids = {
+ 'gpt-4o': "gpt",
+ 'gpt-4o-mini': "amigo",
+ 'o1-preview': "openai-o-one",
+ 'o1-mini': "openai-o-one-mini",
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo': "llama-three-point-one",
+ 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo': "llama-3-2",
+ 'claude-3-sonnet-20240229': "claude",
+ 'gemini-1.5-pro': "gemini-1-5-pro",
+ 'flux-pro/v1.1': "flux-1-1-pro",
+ 'flux-realism': "flux-realism",
+ 'flux-pro': "flux-pro",
+ 'dalle-e-3': "dalle-three",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ def get_personaId(cls, model: str) -> str:
+ return cls.persona_ids[model]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ stream: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ device_uuid = str(uuid.uuid4())
+ max_retries = 3
+ retry_count = 0
+
+ while retry_count < max_retries:
+ try:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "authorization": "Bearer",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
+ "x-device-language": "en-US",
+ "x-device-platform": "web",
+ "x-device-uuid": device_uuid,
+ "x-device-version": "1.0.32"
+ }
+
+ async with ClientSession(headers=headers) as session:
+ if model in cls.chat_models:
+ # Chat completion
+ data = {
+ "messages": [{"role": m["role"], "content": m["content"]} for m in messages],
+ "model": model,
+ "personaId": cls.get_personaId(model),
+ "frequency_penalty": 0,
+ "max_tokens": 4000,
+ "presence_penalty": 0,
+ "stream": stream,
+ "temperature": 0.5,
+ "top_p": 0.95
+ }
+
+ timeout = ClientTimeout(total=300) # 5 minutes timeout
+ async with session.post(cls.chat_api_endpoint, json=data, proxy=proxy, timeout=timeout) as response:
+ if response.status not in (200, 201):
+ error_text = await response.text()
+ raise Exception(f"Error {response.status}: {error_text}")
+
+ async for line in response.content:
+ line = line.decode('utf-8').strip()
+ if line.startswith('data: '):
+ if line == 'data: [DONE]':
+ break
+ try:
+ chunk = json.loads(line[6:]) # Remove 'data: ' prefix
+ if 'choices' in chunk and len(chunk['choices']) > 0:
+ choice = chunk['choices'][0]
+ if 'delta' in choice:
+ content = choice['delta'].get('content')
+ elif 'text' in choice:
+ content = choice['text']
+ else:
+ content = None
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ pass
+ else:
+ # Image generation
+ prompt = messages[-1]['content']
+ data = {
+ "prompt": prompt,
+ "model": model,
+ "personaId": cls.get_personaId(model)
+ }
+ async with session.post(cls.image_api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+
+ response_data = await response.json()
+
+ if "data" in response_data:
+ image_urls = []
+ for item in response_data["data"]:
+ if "url" in item:
+ image_url = item["url"]
+ image_urls.append(image_url)
+ if image_urls:
+ yield ImageResponse(image_urls, prompt)
+ else:
+ yield None
+
+ break
+
+            except Exception:
+                retry_count += 1
+                if retry_count >= max_retries:
+                    raise
+ device_uuid = str(uuid.uuid4())
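
Note the split above: chat models stream text deltas, while image models yield a single ImageResponse. A hedged usage sketch for the image path (the prompt text is illustrative):

import asyncio
from g4f.Provider.not_working.AmigoChat import AmigoChat
from g4f.image import ImageResponse

async def main():
    messages = [{"role": "user", "content": "a watercolor fox"}]
    async for item in AmigoChat.create_async_generator("flux-realism", messages):
        if isinstance(item, ImageResponse):
            print(item.images)  # list of generated image URLs

asyncio.run(main())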
diff --git a/g4f/Provider/not_working/Aura.py b/g4f/Provider/not_working/Aura.py
new file mode 100644
index 00000000..e841d909
--- /dev/null
+++ b/g4f/Provider/not_working/Aura.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ...requests import get_args_from_browser
+from ...webdriver import WebDriver
+
+class Aura(AsyncGeneratorProvider):
+ url = "https://openchat.team"
+ working = False
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ temperature: float = 0.5,
+ max_tokens: int = 8192,
+ webdriver: WebDriver = None,
+ **kwargs
+ ) -> AsyncResult:
+ args = get_args_from_browser(cls.url, webdriver, proxy)
+ async with ClientSession(**args) as session:
+ new_messages = []
+ system_message = []
+ for message in messages:
+ if message["role"] == "system":
+ system_message.append(message["content"])
+ else:
+ new_messages.append(message)
+ data = {
+ "model": {
+ "id": "openchat_3.6",
+ "name": "OpenChat 3.6 (latest)",
+ "maxLength": 24576,
+ "tokenLimit": max_tokens
+ },
+ "messages": new_messages,
+ "key": "",
+ "prompt": "\n".join(system_message),
+ "temperature": temperature
+ }
+ async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+                yield chunk.decode(errors="ignore")
diff --git a/g4f/Provider/not_working/Chatgpt4Online.py b/g4f/Provider/not_working/Chatgpt4Online.py
new file mode 100644
index 00000000..b0552e45
--- /dev/null
+++ b/g4f/Provider/not_working/Chatgpt4Online.py
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+
+
+class Chatgpt4Online(AsyncGeneratorProvider):
+ url = "https://chatgpt4online.org"
+ api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
+ working = False
+
+ default_model = 'gpt-4'
+ models = [default_model]
+
+    @staticmethod
+    async def get_nonce(headers: dict) -> str:
+        async with ClientSession(headers=headers) as session:
+            async with session.post("https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response:
+                return (await response.json())["restNonce"]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ }
+ headers['x-wp-nonce'] = await cls.get_nonce(headers)
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "botId": "default",
+ "newMessage": prompt,
+ "stream": True,
+ }
+
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ full_response = ""
+
+ async for chunk in response.content.iter_any():
+ if chunk:
+ try:
+ # Extract the JSON object from the chunk
+ for line in chunk.decode().splitlines():
+ if line.startswith("data: "):
+ json_data = json.loads(line[6:])
+ if json_data["type"] == "live":
+ full_response += json_data["data"]
+ elif json_data["type"] == "end":
+ final_data = json.loads(json_data["data"])
+ full_response = final_data["reply"]
+ break
+ except json.JSONDecodeError:
+ continue
+
+ yield full_response
+
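For reference, the stream above mixes incremental "live" events with a final "end" event whose data field is itself a JSON string carrying the full reply; the loop keeps whichever form arrives last. Illustrative line shapes (values made up):

data: {"type": "live", "data": "Hel"}
data: {"type": "end", "data": "{\"reply\": \"Hello!\"}"}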
diff --git a/g4f/Provider/not_working/Chatgpt4o.py b/g4f/Provider/not_working/Chatgpt4o.py
new file mode 100644
index 00000000..ba264d40
--- /dev/null
+++ b/g4f/Provider/not_working/Chatgpt4o.py
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+import re
+from ...requests import StreamSession, raise_for_status
+from ...typing import Messages
+from ..base_provider import AsyncProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class Chatgpt4o(AsyncProvider, ProviderModelMixin):
+ url = "https://chatgpt4o.one"
+ working = False
+ _post_id = None
+ _nonce = None
+ default_model = 'gpt-4o-mini-2024-07-18'
+ models = [
+ 'gpt-4o-mini-2024-07-18',
+ ]
+ model_aliases = {
+ "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+ }
+
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ cookies: dict = None,
+ **kwargs
+ ) -> str:
+ headers = {
+ 'authority': 'chatgpt4o.one',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'origin': 'https://chatgpt4o.one',
+ 'referer': 'https://chatgpt4o.one',
+ 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+ }
+
+ async with StreamSession(
+ headers=headers,
+ cookies=cookies,
+ impersonate="chrome",
+ proxies={"all": proxy},
+ timeout=timeout
+ ) as session:
+
+ if not cls._post_id or not cls._nonce:
+ async with session.get(f"{cls.url}/") as response:
+ await raise_for_status(response)
+ response_text = await response.text()
+
+ post_id_match = re.search(r'data-post-id="([0-9]+)"', response_text)
+ nonce_match = re.search(r'data-nonce="(.*?)"', response_text)
+
+ if not post_id_match:
+ raise RuntimeError("No post ID found")
+ cls._post_id = post_id_match.group(1)
+
+ if not nonce_match:
+ raise RuntimeError("No nonce found")
+ cls._nonce = nonce_match.group(1)
+
+ prompt = format_prompt(messages)
+ data = {
+ "_wpnonce": cls._nonce,
+ "post_id": cls._post_id,
+ "url": cls.url,
+ "action": "wpaicg_chat_shortcode_message",
+ "message": prompt,
+ "bot_id": "0"
+ }
+
+ async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
+ await raise_for_status(response)
+ response_json = await response.json()
+ if "data" not in response_json:
+ raise RuntimeError("Unexpected response structure: 'data' field missing")
+ return response_json["data"]
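
Both this provider and ChatgptFree below scrape a WordPress AI-chat page for a post id and nonce before posting to admin-ajax.php. The extraction pattern in isolation (the HTML sample is made up):

import re

html = '<div class="wpaicg-chat" data-post-id="105" data-nonce="a1b2c3d4e5">'  # illustrative
post_id = re.search(r'data-post-id="([0-9]+)"', html).group(1)   # "105"
nonce = re.search(r'data-nonce="(.*?)"', html).group(1)          # "a1b2c3d4e5"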
diff --git a/g4f/Provider/not_working/ChatgptFree.py b/g4f/Provider/not_working/ChatgptFree.py
new file mode 100644
index 00000000..6b3877b1
--- /dev/null
+++ b/g4f/Provider/not_working/ChatgptFree.py
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+import re
+import json
+import asyncio
+from ...requests import StreamSession, raise_for_status
+from ...typing import Messages, AsyncGenerator
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://chatgptfree.ai"
+ working = False
+ _post_id = None
+ _nonce = None
+ default_model = 'gpt-4o-mini-2024-07-18'
+ models = [default_model]
+ model_aliases = {
+ "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ cookies: dict = None,
+ **kwargs
+ ) -> AsyncGenerator[str, None]:
+ headers = {
+ 'authority': 'chatgptfree.ai',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'origin': 'https://chatgptfree.ai',
+ 'referer': 'https://chatgptfree.ai/chat/',
+ 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+ }
+
+ async with StreamSession(
+ headers=headers,
+ cookies=cookies,
+ impersonate="chrome",
+ proxies={"all": proxy},
+ timeout=timeout
+ ) as session:
+
+ if not cls._nonce:
+ async with session.get(f"{cls.url}/") as response:
+ await raise_for_status(response)
+                    response_text = await response.text()
+
+                    result = re.search(r'data-post-id="([0-9]+)"', response_text)
+                    if not result:
+                        raise RuntimeError("No post id found")
+                    cls._post_id = result.group(1)
+
+                    result = re.search(r'data-nonce="(.*?)"', response_text)
+ if result:
+ cls._nonce = result.group(1)
+ else:
+ raise RuntimeError("No nonce found")
+
+ prompt = format_prompt(messages)
+ data = {
+ "_wpnonce": cls._nonce,
+ "post_id": cls._post_id,
+ "url": cls.url,
+ "action": "wpaicg_chat_shortcode_message",
+ "message": prompt,
+ "bot_id": "0"
+ }
+
+ async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
+ await raise_for_status(response)
+ buffer = ""
+ async for line in response.iter_lines():
+ line = line.decode('utf-8').strip()
+ if line.startswith('data: '):
+ data = line[6:]
+ if data == '[DONE]':
+ break
+ try:
+ json_data = json.loads(data)
+ content = json_data['choices'][0]['delta'].get('content', '')
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ continue
+ elif line:
+ buffer += line
+
+ if buffer:
+ try:
+ json_response = json.loads(buffer)
+ if 'data' in json_response:
+ yield json_response['data']
+ except json.JSONDecodeError:
+ print(f"Failed to decode final JSON. Buffer content: {buffer}")
diff --git a/g4f/Provider/not_working/FlowGpt.py b/g4f/Provider/not_working/FlowGpt.py
new file mode 100644
index 00000000..b7d8537a
--- /dev/null
+++ b/g4f/Provider/not_working/FlowGpt.py
@@ -0,0 +1,100 @@
+from __future__ import annotations
+
+import json
+import time
+import hashlib
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_hex, get_random_string
+from ...requests.raise_for_status import raise_for_status
+
+class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://flowgpt.com/chat"
+ working = False
+ supports_message_history = True
+ supports_system_message = True
+ default_model = "gpt-3.5-turbo"
+ models = [
+ "gpt-3.5-turbo",
+ "gpt-3.5-long",
+ "gpt-4-turbo",
+ "google-gemini",
+ "claude-instant",
+ "claude-v1",
+ "claude-v2",
+ "llama2-13b",
+ "mythalion-13b",
+ "pygmalion-13b",
+ "chronos-hermes-13b",
+ "Mixtral-8x7B",
+ "Dolphin-2.6-8x7B",
+ ]
+ model_aliases = {
+ "gemini": "google-gemini",
+ "gemini-pro": "google-gemini"
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ temperature: float = 0.7,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ timestamp = str(int(time.time()))
+ auth = "Bearer null"
+ nonce = get_random_hex()
+ data = f"{timestamp}-{nonce}-{auth}"
+ signature = hashlib.md5(data.encode()).hexdigest()
+
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "*/*",
+ "Accept-Language": "en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": "https://flowgpt.com/",
+ "Content-Type": "application/json",
+ "Authorization": "Bearer null",
+ "Origin": "https://flowgpt.com",
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-site",
+ "TE": "trailers",
+ "Authorization": auth,
+ "x-flow-device-id": f"f-{get_random_string(19)}",
+ "x-nonce": nonce,
+ "x-signature": signature,
+ "x-timestamp": timestamp
+ }
+ async with ClientSession(headers=headers) as session:
+ history = [message for message in messages[:-1] if message["role"] != "system"]
+ system_message = "\n".join([message["content"] for message in messages if message["role"] == "system"])
+ if not system_message:
+                system_message = "You are a helpful assistant. Follow the user's instructions carefully."
+ data = {
+ "model": model,
+ "nsfw": False,
+ "question": messages[-1]["content"],
+ "history": [{"role": "assistant", "content": "Hello, how can I help you today?"}, *history],
+ "system": system_message,
+ "temperature": temperature,
+ "promptId": f"model-{model}",
+ "documentIds": [],
+ "chatFileDocumentIds": [],
+ "generateImage": False,
+ "generateAudio": False
+ }
+ async with session.post("https://prod-backend-k8s.flowgpt.com/v3/chat-anonymous", json=data, proxy=proxy) as response:
+ await raise_for_status(response)
+ async for chunk in response.content:
+ if chunk.strip():
+ message = json.loads(chunk)
+ if "event" not in message:
+ continue
+ if message["event"] == "text":
+ yield message["data"]
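
The anti-abuse headers above boil down to an MD5 of "<timestamp>-<nonce>-<auth>". Reproduced standalone (the nonce value is illustrative; the provider generates it with get_random_hex()):

import hashlib
import time

timestamp = str(int(time.time()))
nonce = "a3f1c9d2e4b60785"
auth = "Bearer null"
signature = hashlib.md5(f"{timestamp}-{nonce}-{auth}".encode()).hexdigest()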
diff --git a/g4f/Provider/not_working/FreeNetfly.py b/g4f/Provider/not_working/FreeNetfly.py
new file mode 100644
index 00000000..8362019c
--- /dev/null
+++ b/g4f/Provider/not_working/FreeNetfly.py
@@ -0,0 +1,105 @@
+from __future__ import annotations
+
+import json
+import asyncio
+from aiohttp import ClientSession, ClientTimeout, ClientError
+from typing import AsyncGenerator
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://free.netfly.top"
+ api_endpoint = "/api/openai/v1/chat/completions"
+ working = False
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'gpt-4',
+ ]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ }
+ data = {
+ "messages": messages,
+ "stream": True,
+ "model": model,
+ "temperature": 0.5,
+ "presence_penalty": 0,
+ "frequency_penalty": 0,
+ "top_p": 1
+ }
+
+ max_retries = 5
+ retry_delay = 2
+
+ for attempt in range(max_retries):
+ try:
+ async with ClientSession(headers=headers) as session:
+ timeout = ClientTimeout(total=60)
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy, timeout=timeout) as response:
+ response.raise_for_status()
+ async for chunk in cls._process_response(response):
+ yield chunk
+ return # If successful, exit the function
+ except (ClientError, asyncio.TimeoutError) as e:
+ if attempt == max_retries - 1:
+ raise # If all retries failed, raise the last exception
+ await asyncio.sleep(retry_delay)
+ retry_delay *= 2 # Exponential backoff
+
+ @classmethod
+ async def _process_response(cls, response) -> AsyncGenerator[str, None]:
+ buffer = ""
+ async for line in response.content:
+ buffer += line.decode('utf-8')
+ if buffer.endswith('\n\n'):
+ for subline in buffer.strip().split('\n'):
+ if subline.startswith('data: '):
+ if subline == 'data: [DONE]':
+ return
+ try:
+ data = json.loads(subline[6:])
+ content = data['choices'][0]['delta'].get('content')
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ print(f"Failed to parse JSON: {subline}")
+ except KeyError:
+ print(f"Unexpected JSON structure: {data}")
+ buffer = ""
+
+ # Process any remaining data in the buffer
+ if buffer:
+ for subline in buffer.strip().split('\n'):
+ if subline.startswith('data: ') and subline != 'data: [DONE]':
+ try:
+ data = json.loads(subline[6:])
+ content = data['choices'][0]['delta'].get('content')
+ if content:
+ yield content
+ except (json.JSONDecodeError, KeyError):
+ pass
+
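The retry loop above doubles retry_delay after each failure. The same pattern factored into a reusable helper, shown for clarity only (not part of this diff):

import asyncio

async def retry_with_backoff(make_request, max_retries: int = 5, delay: float = 2):
    """Await make_request(), retrying with exponential backoff on failure."""
    for attempt in range(max_retries):
        try:
            return await make_request()
        except Exception:
            if attempt == max_retries - 1:
                raise
            await asyncio.sleep(delay)
            delay *= 2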
diff --git a/g4f/Provider/not_working/GPROChat.py b/g4f/Provider/not_working/GPROChat.py
new file mode 100644
index 00000000..52c7f947
--- /dev/null
+++ b/g4f/Provider/not_working/GPROChat.py
@@ -0,0 +1,67 @@
+from __future__ import annotations
+import hashlib
+import time
+from aiohttp import ClientSession
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+class GPROChat(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "GPROChat"
+ url = "https://gprochat.com"
+ api_endpoint = "https://gprochat.com/api/generate"
+ working = False
+ supports_stream = True
+ supports_message_history = True
+ default_model = 'gemini-pro'
+
+ @staticmethod
+ def generate_signature(timestamp: int, message: str) -> str:
+ secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8"
+ hash_input = f"{timestamp}:{message}:{secret_key}"
+ signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest()
+ return signature
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ timestamp = int(time.time() * 1000)
+ prompt = format_prompt(messages)
+ sign = cls.generate_signature(timestamp, prompt)
+
+ headers = {
+ "accept": "*/*",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
+ "content-type": "text/plain;charset=UTF-8"
+ }
+
+ data = {
+ "messages": [{"role": "user", "parts": [{"text": prompt}]}],
+ "time": timestamp,
+ "pass": None,
+ "sign": sign
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ if chunk:
+ yield chunk.decode()
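
This is the same timestamp-signing scheme as AIChatFree above, but with a hard-coded secret and millisecond timestamps, exposed as a static method on the class defined above:

import time

timestamp = int(time.time() * 1000)
sign = GPROChat.generate_signature(timestamp, "Hello")
print(len(sign))  # 64 hex characters (SHA-256)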
diff --git a/g4f/Provider/not_working/Koala.py b/g4f/Provider/not_working/Koala.py
new file mode 100644
index 00000000..d6230da7
--- /dev/null
+++ b/g4f/Provider/not_working/Koala.py
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+import json
+from typing import AsyncGenerator, Optional, List, Dict, Union, Any
+from aiohttp import ClientSession, BaseConnector, ClientResponse
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_string, get_connector
+from ...requests import raise_for_status
+
+class Koala(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://koala.sh/chat"
+ api_endpoint = "https://koala.sh/api/gpt/"
+ working = False
+ supports_message_history = True
+ default_model = 'gpt-4o-mini'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: Optional[str] = None,
+ connector: Optional[BaseConnector] = None,
+ **kwargs: Any
+ ) -> AsyncGenerator[Dict[str, Union[str, int, float, List[Dict[str, Any]], None]], None]:
+ if not model:
+ model = "gpt-4o-mini"
+
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "text/event-stream",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}",
+ "Flag-Real-Time-Data": "false",
+ "Visitor-ID": get_random_string(20),
+ "Origin": "https://koala.sh",
+ "Alt-Used": "koala.sh",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "TE": "trailers",
+ }
+
+ async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
+ input_text = messages[-1]["content"]
+ system_messages = " ".join(
+ message["content"] for message in messages if message["role"] == "system"
+ )
+ if system_messages:
+ input_text += f" {system_messages}"
+
+ data = {
+ "input": input_text,
+ "inputHistory": [
+ message["content"]
+ for message in messages[:-1]
+ if message["role"] == "user"
+ ],
+ "outputHistory": [
+ message["content"]
+ for message in messages
+ if message["role"] == "assistant"
+ ],
+ "model": model,
+ }
+
+ async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ await raise_for_status(response)
+ async for chunk in cls._parse_event_stream(response):
+ yield chunk
+
+ @staticmethod
+ async def _parse_event_stream(response: ClientResponse) -> AsyncGenerator[Dict[str, Any], None]:
+ async for chunk in response.content:
+ if chunk.startswith(b"data: "):
+ yield json.loads(chunk[6:])
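
_parse_event_stream above assumes each chunk from response.content arrives as one complete "data: {...}" line. In isolation (the payload is illustrative):

import json

chunk = b'data: {"token": "Hi"}'
if chunk.startswith(b"data: "):
    print(json.loads(chunk[6:]))  # {'token': 'Hi'}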
diff --git a/g4f/Provider/not_working/MyShell.py b/g4f/Provider/not_working/MyShell.py
new file mode 100644
index 00000000..02e182d4
--- /dev/null
+++ b/g4f/Provider/not_working/MyShell.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import time, json
+
+from ...typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+from ..helper import format_prompt
+from ...webdriver import WebDriver, WebDriverSession, bypass_cloudflare
+
+class MyShell(AbstractProvider):
+ url = "https://app.myshell.ai/chat"
+ working = False
+ supports_gpt_35_turbo = True
+ supports_stream = True
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ timeout: int = 120,
+ webdriver: WebDriver = None,
+ **kwargs
+ ) -> CreateResult:
+ with WebDriverSession(webdriver, "", proxy=proxy) as driver:
+ bypass_cloudflare(driver, cls.url, timeout)
+
+ # Send request with message
+ data = {
+ "botId": "4738",
+ "conversation_scenario": 3,
+ "message": format_prompt(messages),
+ "messageType": 1
+ }
+ script = """
+response = await fetch("https://api.myshell.ai/v1/bot/chat/send_message", {
+ "headers": {
+ "accept": "application/json",
+ "content-type": "application/json",
+ "myshell-service-name": "organics-api",
+ "visitor-id": localStorage.getItem("mix_visitorId")
+ },
+ "body": '{body}',
+ "method": "POST"
+})
+window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+"""
+ driver.execute_script(script.replace("{body}", json.dumps(data)))
+ script = """
+chunk = await window._reader.read();
+if (chunk.done) {
+ return null;
+}
+content = '';
+chunk.value.split('\\n').forEach((line, index) => {
+ if (line.startsWith('data: ')) {
+ try {
+ const data = JSON.parse(line.substring('data: '.length));
+ if ('content' in data) {
+ content += data['content'];
+ }
+ } catch(e) {}
+ }
+});
+return content;
+"""
+ while True:
+ chunk = driver.execute_script(script)
+ if chunk:
+ yield chunk
+ elif chunk != "":
+ break
+ else:
+ time.sleep(0.1)
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
new file mode 100644
index 00000000..a6edf5f8
--- /dev/null
+++ b/g4f/Provider/not_working/__init__.py
@@ -0,0 +1,16 @@
+from .AI365VIP import AI365VIP
+from .AIChatFree import AIChatFree
+from .Ai4Chat import Ai4Chat
+from .AiChatOnline import AiChatOnline
+from .AiChats import AiChats
+from .Allyfy import Allyfy
+from .AmigoChat import AmigoChat
+from .Aura import Aura
+from .Chatgpt4Online import Chatgpt4Online
+from .Chatgpt4o import Chatgpt4o
+from .ChatgptFree import ChatgptFree
+from .FlowGpt import FlowGpt
+from .FreeNetfly import FreeNetfly
+from .GPROChat import GPROChat
+from .Koala import Koala
+from .MyShell import MyShell