-rw-r--r--  .vscode/settings.json                  6
-rw-r--r--  README.md                              7
-rw-r--r--  g4f/Provider/Providers/H2o.py        198
-rw-r--r--  testing/binghuan/BingHuan.py          49
-rw-r--r--  testing/binghuan/README.md             7
-rw-r--r--  testing/binghuan/helpers/binghuan.py 221
-rw-r--r--  testing/binghuan/testing.py           31
-rw-r--r--  testing/wewordle/README.md             1
-rw-r--r--  testing/wewordle/Wewordle.py          97
-rw-r--r--  testing/wewordle/testing.py           30
10 files changed, 542 insertions, 105 deletions
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 00000000..9ee86e71
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,6 @@
+{
+ "[python]": {
+ "editor.defaultFormatter": "ms-python.autopep8"
+ },
+ "python.formatting.provider": "none"
+}
\ No newline at end of file
diff --git a/README.md b/README.md
index ffc13bff..2db1f023 100644
--- a/README.md
+++ b/README.md
@@ -241,6 +241,13 @@ for token in chat_completion:
<td><a href="https://github.com/mishalhossin/Discord-Chatbot-Gpt4Free/issues"><img alt="Issues" src="https://img.shields.io/github/issues/mishalhossin/Discord-Chatbot-Gpt4Free?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/mishalhossin/Coding-Chatbot-Gpt4Free/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/mishalhossin/Discord-Chatbot-Gpt4Free?style=flat-square&labelColor=343b41"/></a></td>
</tr>
+ <tr>
+ <td><a href="https://github.com/MIDORIBIN/langchain-gpt4free"><b>LangChain gpt4free</b></a></td>
+ <td><a href="https://github.com/MIDORIBIN/langchain-gpt4free/stargazers"><img alt="Stars" src="https://img.shields.io/github/stars/MIDORIBIN/langchain-gpt4free?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/MIDORIBIN/langchain-gpt4free/network/members"><img alt="Forks" src="https://img.shields.io/github/forks/MIDORIBIN/langchain-gpt4free?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/MIDORIBIN/langchain-gpt4free/issues"><img alt="Issues" src="https://img.shields.io/github/issues/MIDORIBIN/langchain-gpt4free?style=flat-square&labelColor=343b41"/></a></td>
+ <td><a href="https://github.com/MIDORIBIN/langchain-gpt4free/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/MIDORIBIN/langchain-gpt4free?style=flat-square&labelColor=343b41"/></a></td>
+ </tr>
</tbody>
</table>
diff --git a/g4f/Provider/Providers/H2o.py b/g4f/Provider/Providers/H2o.py
index eabf94e2..4400f3a9 100644
--- a/g4f/Provider/Providers/H2o.py
+++ b/g4f/Provider/Providers/H2o.py
@@ -1,106 +1,94 @@
-from requests import Session
-from uuid import uuid4
-from json import loads
-import os
-import json
-import requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://gpt-gm.h2o.ai'
-model = ['falcon-40b', 'falcon-7b', 'llama-13b']
-supports_stream = True
-needs_auth = False
-
-models = {
- 'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
- 'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
- 'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b'
-}
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- conversation = 'instruction: this is a conversation beween, a user and an AI assistant, respond to the latest message, referring to the conversation if needed\n'
- for message in messages:
- conversation += '%s: %s\n' % (message['role'], message['content'])
- conversation += 'assistant:'
-
- client = Session()
- client.headers = {
- 'authority': 'gpt-gm.h2o.ai',
- 'origin': 'https://gpt-gm.h2o.ai',
- 'referer': 'https://gpt-gm.h2o.ai/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'document',
- 'sec-fetch-mode': 'navigate',
- 'sec-fetch-site': 'same-origin',
- 'sec-fetch-user': '?1',
- 'upgrade-insecure-requests': '1',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- }
-
- client.get('https://gpt-gm.h2o.ai/')
- response = client.post('https://gpt-gm.h2o.ai/settings', data={
- 'ethicsModalAccepted': 'true',
- 'shareConversationsWithModelAuthors': 'true',
- 'ethicsModalAcceptedAt': '',
- 'activeModel': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
- 'searchEnabled': 'true',
- })
-
- headers = {
- 'authority': 'gpt-gm.h2o.ai',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'origin': 'https://gpt-gm.h2o.ai',
- 'referer': 'https://gpt-gm.h2o.ai/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- }
-
- json_data = {
- 'model': models[model]
- }
-
- response = client.post('https://gpt-gm.h2o.ai/conversation',
- headers=headers, json=json_data)
- conversationId = response.json()['conversationId']
-
-
- completion = client.post(f'https://gpt-gm.h2o.ai/conversation/{conversationId}', stream=True, json = {
- 'inputs': conversation,
- 'parameters': {
- 'temperature': kwargs.get('temperature', 0.4),
- 'truncate': kwargs.get('truncate', 2048),
- 'max_new_tokens': kwargs.get('max_new_tokens', 1024),
- 'do_sample': kwargs.get('do_sample', True),
- 'repetition_penalty': kwargs.get('repetition_penalty', 1.2),
- 'return_full_text': kwargs.get('return_full_text', False)
- },
- 'stream': True,
- 'options': {
- 'id': kwargs.get('id', str(uuid4())),
- 'response_id': kwargs.get('response_id', str(uuid4())),
- 'is_retry': False,
- 'use_cache': False,
- 'web_search_id': ''
- }
- })
-
- for line in completion.iter_lines():
- if b'data' in line:
- line = loads(line.decode('utf-8').replace('data:', ''))
- token = line['token']['text']
-
- if token == '<|endoftext|>':
- break
- else:
- yield (token)
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+from uuid import uuid4
+import os
+import json
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://gpt-gm.h2o.ai'
+model = ['falcon-40b', 'falcon-7b', 'llama-13b']
+supports_stream = True
+needs_auth = False
+
+models = {
+ 'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
+ 'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
+ 'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b'
+}
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ conversation = ''
+ for message in messages:
+ conversation += '%s: %s\n' % (message['role'], message['content'])
+
+ conversation += 'assistant: '
+ session = requests.Session()
+
+    session.get("https://gpt-gm.h2o.ai/")  # initial request to pick up session cookies
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
+ "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
+ "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Upgrade-Insecure-Requests": "1",
+ "Sec-Fetch-Dest": "document",
+ "Sec-Fetch-Mode": "navigate",
+ "Sec-Fetch-Site": "same-origin",
+ "Sec-Fetch-User": "?1",
+ "Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"
+ }
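+    # Accept the ethics modal and select the active model, mirroring what the
+    # web UI's settings form submits.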
+ data = {
+ "ethicsModalAccepted": "true",
+ "shareConversationsWithModelAuthors": "true",
+ "ethicsModalAcceptedAt": "",
+ "activeModel": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
+ "searchEnabled": "true"
+ }
+ response = session.post("https://gpt-gm.h2o.ai/settings", headers=headers, data=data)
+
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
+ "Accept": "*/*",
+ "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
+ "Content-Type": "application/json",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Referer": "https://gpt-gm.h2o.ai/"
+ }
+ data = {
+ "model": models[model]
+ }
+
+    conversation_response = session.post("https://gpt-gm.h2o.ai/conversation", headers=headers, json=data)
+    conversation_id = conversation_response.json()['conversationId']
+ data = {
+ "inputs": conversation,
+ "parameters": {
+ "temperature": kwargs.get('temperature', 0.4),
+ "truncate": kwargs.get('truncate', 2048),
+ "max_new_tokens": kwargs.get('max_new_tokens', 1024),
+ "do_sample": kwargs.get('do_sample', True),
+ "repetition_penalty": kwargs.get('repetition_penalty', 1.2),
+ "return_full_text": kwargs.get('return_full_text', False)
+ },
+ "stream": True,
+ "options": {
+ "id": kwargs.get('id', str(uuid4())),
+ "response_id": kwargs.get('response_id', str(uuid4())),
+ "is_retry": False,
+ "use_cache": False,
+ "web_search_id": ""
+ }
+ }
+
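+    # The conversation endpoint replies as a server-sent-event stream; the last
+    # "data:" chunk carries the final JSON payload, whose "generated_text" field
+    # holds the whole completion, so this version returns it in one piece rather
+    # than yielding tokens.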
+    response = session.post(f"https://gpt-gm.h2o.ai/conversation/{conversation_id}", headers=headers, json=data)
+ generated_text = response.text.replace("\n", "").split("data:")
+ generated_text = json.loads(generated_text[-1])
+
+ return generated_text["generated_text"]
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/testing/binghuan/BingHuan.py b/testing/binghuan/BingHuan.py
new file mode 100644
index 00000000..8c859c08
--- /dev/null
+++ b/testing/binghuan/BingHuan.py
@@ -0,0 +1,49 @@
+import os, sys
+import json
+import subprocess
+# from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://b.ai-huan.xyz'
+model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = True
+needs_auth = False
+
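+# Shell out to the async helper (it needs asyncio + websockets) and relay its
+# stdout back through a plain generator interface.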
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ path = os.path.dirname(os.path.realpath(__file__))
+ config = json.dumps({
+ 'messages': messages,
+ 'model': model}, separators=(',', ':'))
+ cmd = ['python', f'{path}/helpers/binghuan.py', config]
+
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
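+    # Stream the helper's stdout line by line as tokens arrive. Decoding as
+    # cp1252 avoids crashes on Windows consoles but can mangle emoji (see README).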
+ for line in iter(p.stdout.readline, b''):
+ yield line.decode('cp1252')
+
+
+
+# params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+# '(%s)' % ', '.join(
+# [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
+
+
+# Temporary stand-in for the ChatCompletion class
+class ChatCompletion:
+ @staticmethod
+    def create(model: str, messages: list, provider: str = None, stream: bool = False, auth: str = None, **kwargs):
+ kwargs['auth'] = auth
+
+ if provider and needs_auth and not auth:
+ print(
+ f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
+ sys.exit(1)
+
+ try:
+ return (_create_completion(model, messages, stream, **kwargs)
+ if stream else ''.join(_create_completion(model, messages, stream, **kwargs)))
+ except TypeError as e:
+ print(e)
+ arg: str = str(e).split("'")[1]
+ print(
+ f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr)
+            sys.exit(1)
\ No newline at end of file
diff --git a/testing/binghuan/README.md b/testing/binghuan/README.md
new file mode 100644
index 00000000..642f1fee
--- /dev/null
+++ b/testing/binghuan/README.md
@@ -0,0 +1,7 @@
+https://github.com/xtekky/gpt4free/issues/40#issuecomment-1630946450
+The chat flow works much like the real Bing (create a conversation, listen on a websocket, and so on),
+so I just took the Bing provider code from the https://gitler.moe/g4f/gpt4free/ version, replaced the API endpoint and some conversation styles, and it works fine.
+
+However, Bing doesn't really support multi-message/continued conversation (using the prompt template from the original provider: def convert(messages), https://github.com/xtekky/gpt4free/blob/e594500c4e7a8443e9b3f4af755c72f42dae83f0/g4f/Provider/Providers/Bing.py#L322).
+
+I also have a problem with emoji encoding; I don't know how to fix that yet.
\ No newline at end of file
diff --git a/testing/binghuan/helpers/binghuan.py b/testing/binghuan/helpers/binghuan.py
new file mode 100644
index 00000000..203bbe45
--- /dev/null
+++ b/testing/binghuan/helpers/binghuan.py
@@ -0,0 +1,221 @@
+# Original Code From : https://gitler.moe/g4f/gpt4free
+# https://gitler.moe/g4f/gpt4free/src/branch/main/g4f/Provider/Providers/helpers/bing.py
+import sys
+import ssl
+import uuid
+import json
+import time
+import random
+import asyncio
+import certifi
+# import requests
+from curl_cffi import requests
+import websockets
+import browser_cookie3
+
+config = json.loads(sys.argv[1])
+
+ssl_context = ssl.create_default_context()
+ssl_context.load_verify_locations(certifi.where())
+
+
+
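+# optionsSets flag bundles sent per "model": h3precise corresponds to Bing's
+# Precise tone and h3imaginative to Creative.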
+conversationstyles = {
+ 'gpt-4': [ #'precise'
+ "nlu_direct_response_filter",
+ "deepleo",
+ "disable_emoji_spoken_text",
+ "responsible_ai_policy_235",
+ "enablemm",
+ "h3precise",
+ "rcsprtsalwlst",
+ "dv3sugg",
+ "autosave",
+ "clgalileo",
+ "gencontentv3"
+ ],
+ 'balanced': [
+ "nlu_direct_response_filter",
+ "deepleo",
+ "disable_emoji_spoken_text",
+ "responsible_ai_policy_235",
+ "enablemm",
+ "harmonyv3",
+ "rcsprtsalwlst",
+ "dv3sugg",
+ "autosave"
+ ],
+    'gpt-3.5-turbo': [ #'creative' (h3imaginative)
+ "nlu_direct_response_filter",
+ "deepleo",
+ "disable_emoji_spoken_text",
+ "responsible_ai_policy_235",
+ "enablemm",
+ "h3imaginative",
+ "rcsprtsalwlst",
+ "dv3sugg",
+ "autosave",
+ "gencontentv3"
+ ]
+}
+
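+# ChatHub messages are JSON records terminated by the ASCII record-separator
+# character (0x1e), as in the SignalR JSON hub protocol.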
+def format(msg: dict) -> str:
+ return json.dumps(msg) + '\x1e'
+
+def get_token():
+    # Auth is no longer required for this endpoint, so skip the Edge-cookie
+    # lookup; the original code below is kept for reference but never runs.
+    return None
+
+    try:
+        cookies = {c.name: c.value for c in browser_cookie3.edge(domain_name='bing.com')}
+        return cookies['_U']
+    except Exception:
+        print('Error: could not find bing _U cookie in edge browser.')
+        exit(1)
+
+class AsyncCompletion:
+ async def create(
+        prompt: str = None,
+        optionSets: list = None,
+        token: str = None):  # no auth required anymore
+
+ create = None
+ for _ in range(5):
+ try:
+ create = requests.get('https://b.ai-huan.xyz/turing/conversation/create',
+ headers = {
+ 'host': 'b.ai-huan.xyz',
+ 'accept-encoding': 'gzip, deflate, br',
+ 'connection': 'keep-alive',
+ 'authority': 'b.ai-huan.xyz',
+ 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'max-age=0',
+ 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+ 'sec-ch-ua-arch': '"x86"',
+ 'sec-ch-ua-bitness': '"64"',
+ 'sec-ch-ua-full-version': '"110.0.1587.69"',
+ 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-model': '""',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-ch-ua-platform-version': '"15.0.0"',
+ 'sec-fetch-dest': 'document',
+ 'sec-fetch-mode': 'navigate',
+ 'sec-fetch-site': 'none',
+ 'sec-fetch-user': '?1',
+ 'upgrade-insecure-requests': '1',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
+ 'x-edge-shopping-flag': '1',
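+                    # randomized x-forwarded-for, presumably to present a
+                    # Microsoft-range (13.104-107.x.x) client IP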
+ 'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
+ }
+ )
+
+                conversationId = create.json()['conversationId']
+                clientId = create.json()['clientId']
+                conversationSignature = create.json()['conversationSignature']
+                break  # success, stop retrying
+
+            except Exception as e:
+                time.sleep(0.5)
+                continue
+
+        if create is None: raise Exception('Failed to create conversation.')
+
+        wss = await websockets.connect('wss://sydney.vcanbb.chat/sydney/ChatHub', max_size = None, ssl = ssl_context,
+ extra_headers = {
+ 'accept': 'application/json',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'content-type': 'application/json',
+                'sec-ch-ua': '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
+ 'sec-ch-ua-arch': '"x86"',
+ 'sec-ch-ua-bitness': '"64"',
+ 'sec-ch-ua-full-version': '"109.0.1518.78"',
+ 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-model': "",
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-ch-ua-platform-version': '"15.0.0"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'x-ms-client-request-id': str(uuid.uuid4()),
+ 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
+ 'Referer': 'https://b.ai-huan.xyz/search?q=Bing+AI&showconv=1&FORM=hpcodx',
+ 'Referrer-Policy': 'origin-when-cross-origin',
+ 'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
+ }
+ )
+
+ await wss.send(format({'protocol': 'json', 'version': 1}))
+ await wss.recv()
+
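+        # First hub invocation (type 4, target 'chat'): carries the prompt and
+        # the optionsSets that select the conversation style.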
+ struct = {
+ 'arguments': [
+ {
+ 'source': 'cib',
+ 'optionsSets': optionSets,
+ 'isStartOfSession': True,
+ 'message': {
+ 'author': 'user',
+ 'inputMethod': 'Keyboard',
+ 'text': prompt,
+ 'messageType': 'Chat'
+ },
+ 'conversationSignature': conversationSignature,
+ 'participant': {
+ 'id': clientId
+ },
+ 'conversationId': conversationId
+ }
+ ],
+ 'invocationId': '0',
+ 'target': 'chat',
+ 'type': 4
+ }
+
+ await wss.send(format(struct))
+
+ base_string = ''
+
+ final = False
+ while not final:
+ objects = str(await wss.recv()).split('\x1e')
+ for obj in objects:
+ if obj is None or obj == '':
+ continue
+
+ response = json.loads(obj)
+ #print(response, flush=True, end='')
+            if response.get('type') == 1 and response['arguments'][0].get('messages'):
+ response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')
+
+ yield (response_text.replace(base_string, ''))
+ base_string = response_text
+
+ elif response.get('type') == 2:
+ final = True
+
+ await wss.close()
+
+# Bing doesn't really seem to understand multi-message conversations, so they
+# are flattened into a single prompt (based on the original provider's template).
+def convert(messages):
+ context = ""
+ for message in messages:
+ context += "[%s](#message)\n%s\n\n" % (message['role'],
+ message['content'])
+ return context
+
+async def run(optionSets, messages):
+ prompt = messages[-1]['content']
+    if len(messages) > 1:
+ prompt = convert(messages)
+ async for value in AsyncCompletion.create(prompt=prompt, optionSets=optionSets):
+ try:
+ print(value, flush=True, end='')
+        except UnicodeEncodeError:
+            # console can't encode the emoji; fall back to printing raw utf-8 bytes
+            print(value.encode('utf-8'), flush=True, end='')
+
+optionSet = conversationstyles[config['model']]
+asyncio.run(run(optionSet, config['messages']))
\ No newline at end of file
diff --git a/testing/binghuan/testing.py b/testing/binghuan/testing.py
new file mode 100644
index 00000000..2db0b427
--- /dev/null
+++ b/testing/binghuan/testing.py
@@ -0,0 +1,31 @@
+from BingHuan import ChatCompletion
+
+# Test 1
+response = ChatCompletion.create(model="gpt-3.5-turbo",
+ provider="BingHuan",
+ stream=False,
+ messages=[{'role': 'user', 'content': 'who are you?'}])
+
+print(response)
+
+# Test 2
+# this prompt tends to return an emoji at the end of the response
+response = ChatCompletion.create(model="gpt-3.5-turbo",
+ provider="BingHuan",
+ stream=False,
+                                messages=[{'role': 'user', 'content': 'what can you do?'}])
+
+print(response)
+
+
+# Test 3
+response = ChatCompletion.create(model="gpt-4",
+ provider="BingHuan",
+ stream=False,
+ messages=[
+ {'role': 'user', 'content': 'now your name is Bob'},
+                                    {'role': 'assistant', 'content': "Hello, I'm Bob, your assistant"},
+                                    {'role': 'user', 'content': "what's your name again?"},
+ ])
+
+print(response)
\ No newline at end of file
diff --git a/testing/wewordle/README.md b/testing/wewordle/README.md
new file mode 100644
index 00000000..ec2289c2
--- /dev/null
+++ b/testing/wewordle/README.md
@@ -0,0 +1 @@
+Originally from the website https://chat-gpt.com/chat (see https://github.com/xtekky/gpt4free/issues/40#issuecomment-1629152431). I found the API https://wewordle.org/gptapi/v1/web/turbo, but it is rate-limited, so I reverse-engineered their Android app and got the API https://wewordle.org/gptapi/v1/android/turbo; randomizing the user id is enough to bypass the limit.
\ No newline at end of file
diff --git a/testing/wewordle/Wewordle.py b/testing/wewordle/Wewordle.py
new file mode 100644
index 00000000..0d79c5c7
--- /dev/null
+++ b/testing/wewordle/Wewordle.py
@@ -0,0 +1,97 @@
+import os, sys
+import requests
+import json
+import random
+import time
+import string
+# from ...typing import sha256, Dict, get_type_hints
+
+url = "https://wewordle.org/gptapi/v1/android/turbo"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+ base = ''
+ for message in messages:
+ base += '%s: %s\n' % (message['role'], message['content'])
+ base += 'assistant:'
+ # randomize user id and app id
+ _user_id = ''.join(random.choices(f'{string.ascii_lowercase}{string.digits}', k=16))
+ _app_id = ''.join(random.choices(f'{string.ascii_lowercase}{string.digits}', k=31))
+    # current date/time in UTC, ISO-8601 format
+ _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
+ headers = {
+ 'accept': '*/*',
+ 'pragma': 'no-cache',
+ 'Content-Type': 'application/json',
+ 'Connection':'keep-alive'
+ # user agent android client
+ # 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 10; SM-G975F Build/QP1A.190711.020)',
+
+ }
+ data = {
+ "user": _user_id,
+ "messages": [
+ {"role": "user", "content": base}
+ ],
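+        # Stub of what appears to be a RevenueCat SDK subscriber payload from the
+        # Android app; "$RCAnonymousID:" matches RevenueCat's anonymous user ids.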
+ "subscriber": {
+ "originalPurchaseDate": None,
+ "originalApplicationVersion": None,
+ "allPurchaseDatesMillis": {},
+ "entitlements": {
+ "active": {},
+ "all": {}
+ },
+ "allPurchaseDates": {},
+ "allExpirationDatesMillis": {},
+ "allExpirationDates": {},
+ "originalAppUserId": f"$RCAnonymousID:{_app_id}",
+ "latestExpirationDate": None,
+ "requestDate": _request_date,
+ "latestExpirationDateMillis": None,
+ "nonSubscriptionTransactions": [],
+ "originalPurchaseDateMillis": None,
+ "managementURL": None,
+ "allPurchasedProductIdentifiers": [],
+ "firstSeen": _request_date,
+ "activeSubscriptions": []
+ }
+ }
+ response = requests.post(url, headers=headers, data=json.dumps(data))
+ if response.status_code == 200:
+ _json = response.json()
+ if 'message' in _json:
+ yield _json['message']['content']
+ else:
+        print(f"Error occurred: {response.status_code}", file=sys.stderr)
+        return None
+
+# params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+# '(%s)' % ', '.join(
+# [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
+
+
+# Temporary stand-in for the ChatCompletion class
+class ChatCompletion:
+ @staticmethod
+    def create(model: str, messages: list, provider: str = None, stream: bool = False, auth: str = None, **kwargs):
+ kwargs['auth'] = auth
+
+ if provider and needs_auth and not auth:
+ print(
+ f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
+ sys.exit(1)
+
+        try:
+ return (_create_completion(model, messages, stream, **kwargs)
+ if stream else ''.join(_create_completion(model, messages, stream, **kwargs)))
+ except TypeError as e:
+ print(e)
+ arg: str = str(e).split("'")[1]
+ print(
+ f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr)
+ sys.exit(1)
diff --git a/testing/wewordle/testing.py b/testing/wewordle/testing.py
new file mode 100644
index 00000000..cebcaeed
--- /dev/null
+++ b/testing/wewordle/testing.py
@@ -0,0 +1,30 @@
+from Wewordle import ChatCompletion
+
+# Test 1
+response = ChatCompletion.create(model="gpt-3.5-turbo",
+ provider="Wewordle",
+ stream=False,
+ messages=[{'role': 'user', 'content': 'who are you?'}])
+
+print(response)
+
+# Test 2
+response = ChatCompletion.create(model="gpt-3.5-turbo",
+ provider="Wewordle",
+ stream=False,
+                              messages=[{'role': 'user', 'content': 'what can you do?'}])
+
+print(response)
+
+
+# Test 3
+response = ChatCompletion.create(model="gpt-3.5-turbo",
+ provider="Wewordle",
+ stream=False,
+ messages=[
+ {'role': 'user', 'content': 'now your name is Bob'},
+                                  {'role': 'assistant', 'content': "Hello, I'm Bob, your assistant"},
+                                  {'role': 'user', 'content': "what's your name again?"},
+ ])
+
+print(response)