From f8411aa1e15e891df3f0538e15de1a6084469acf Mon Sep 17 00:00:00 2001
From: Commenter123321 <36051603+Commenter123321@users.noreply.github.com>
Date: Sun, 1 Oct 2023 19:12:33 +0200
Subject: =?UTF-8?q?aivvm's=20no=20life=20creator=20keeps=20patching=20it,?=
 =?UTF-8?q?=20but=20I'm=20just=20better=20=F0=9F=98=89?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 g4f/Provider/Aivvm.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
index 1ba6d6f1..4f1651da 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/Aivvm.py
@@ -37,16 +37,17 @@ class Aivvm(BaseProvider):
 
         headers = {
             "accept"            : "*/*",
-            "accept-language"   : "hu-HU,hu;q=0.9,en-US;q=0.8,en;q=0.7",
+            "accept-language"   : "en-US,en;q=0.9",
             "content-type"      : "application/json",
-            "sec-ch-ua"         : "\"Kuki\";v=\"116\", \"Not)A;Brand\";v=\"24\", \"Pici Pocoro\";v=\"102\"",
+            "sec-ch-ua"         : '"Brave";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
             "sec-ch-ua-mobile"  : "?0",
-            "sec-ch-ua-platform": "\"Bandóz\"",
+            "sec-ch-ua-platform": "\"Windows\"",
             "sec-fetch-dest"    : "empty",
             "sec-fetch-mode"    : "cors",
             "sec-fetch-site"    : "same-origin",
             "Referer"           : "https://chat.aivvm.com/",
             "Referrer-Policy"   : "same-origin",
+            "user-agent"        : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
         }
 
         json_data = {
--
cgit v1.2.3

From 58c45522ea4f94f90c3ebbf350eba0b5715848ae Mon Sep 17 00:00:00 2001
From: Commenter123321 <36051603+Commenter123321@users.noreply.github.com>
Date: Mon, 9 Oct 2023 20:53:31 +0200
Subject: add cool testing for gpt-3.5 and and gpt-4

---
 etc/testing/test_all.py             | 67 +++++++++++++++++++++++++++++++++++++
 etc/testing/test_chat_completion.py |  8 ++---
 g4f/Provider/Aivvm.py               | 42 ++++++++++++-----------
 g4f/Provider/DeepAi.py              |  5 ++-
 g4f/models.py                       | 20 +++++++----
 5 files changed, 112 insertions(+), 30 deletions(-)
 create mode 100644 etc/testing/test_all.py

diff --git a/etc/testing/test_all.py b/etc/testing/test_all.py
new file mode 100644
index 00000000..73134e3f
--- /dev/null
+++ b/etc/testing/test_all.py
@@ -0,0 +1,67 @@
+import asyncio
+import sys
+from pathlib import Path
+sys.path.append(str(Path(__file__).parent.parent.parent))
+
+import g4f
+
+
+async def test(model: g4f.Model):
+    try:
+        try:
+            for response in g4f.ChatCompletion.create(
+                model=model,
+                messages=[{"role": "user", "content": "write a poem about a tree"}],
+                temperature=0.1,
+                stream=True
+            ):
+                print(response, end="")
+
+            print()
+        except:
+            for response in await g4f.ChatCompletion.create_async(
+                model=model,
+                messages=[{"role": "user", "content": "write a poem about a tree"}],
+                temperature=0.1,
+                stream=True
+            ):
+                print(response, end="")
+
+            print()
+
+        return True
+    except Exception as e:
+        print(model.name, "not working:", e)
+        print(e.__traceback__.tb_next)
+        return False
+
+
+async def start_test():
+    models_to_test = [
+        # GPT-3.5 4K Context
+        g4f.models.gpt_35_turbo,
+        g4f.models.gpt_35_turbo_0613,
+
+        # GPT-3.5 16K Context
+        g4f.models.gpt_35_turbo_16k,
+        g4f.models.gpt_35_turbo_16k_0613,
+
+        # GPT-4 8K Context
+        g4f.models.gpt_4,
+        g4f.models.gpt_4_0613,
+
+        # GPT-4 32K Context
+        g4f.models.gpt_4_32k,
+        g4f.models.gpt_4_32k_0613,
+    ]
+
+    models_working = []
+
+    for model in models_to_test:
+        if await test(model):
+            models_working.append(model.name)
+
+    print("working models:", models_working)
+
+
+asyncio.run(start_test())
diff --git a/etc/testing/test_chat_completion.py b/etc/testing/test_chat_completion.py
index ee523b86..7058ab4c 100644
--- a/etc/testing/test_chat_completion.py
+++ b/etc/testing/test_chat_completion.py
@@ -7,10 +7,10 @@ import g4f, asyncio
 
 print("create:", end=" ", flush=True)
 for response in g4f.ChatCompletion.create(
-    model=g4f.models.default,
-    provider=g4f.Provider.GptForLove,
-    messages=[{"role": "user", "content": "send a bunch of emojis. i want to test something"}],
-    temperature=0.0,
+    model=g4f.models.gpt_4_32k_0613,
+    provider=g4f.Provider.Aivvm,
+    messages=[{"role": "user", "content": "write a poem about a tree"}],
+    temperature=0.1,
     stream=True
 ):
     print(response, end="", flush=True)
diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
index 1ba6d6f1..ac15ac16 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/Aivvm.py
@@ -3,6 +3,7 @@ import requests
 
 from .base_provider import BaseProvider
 from ..typing import CreateResult
+from json import dumps
 
 # to recreate this easily, send a post request to https://chat.aivvm.com/api/models
 models = {
@@ -35,20 +36,6 @@ class Aivvm(BaseProvider):
         elif model not in models:
             raise ValueError(f"Model is not supported: {model}")
 
-        headers = {
-            "accept"            : "*/*",
-            "accept-language"   : "hu-HU,hu;q=0.9,en-US;q=0.8,en;q=0.7",
-            "content-type"      : "application/json",
-            "sec-ch-ua"         : "\"Kuki\";v=\"116\", \"Not)A;Brand\";v=\"24\", \"Pici Pocoro\";v=\"102\"",
-            "sec-ch-ua-mobile"  : "?0",
-            "sec-ch-ua-platform": "\"Bandóz\"",
-            "sec-fetch-dest"    : "empty",
-            "sec-fetch-mode"    : "cors",
-            "sec-fetch-site"    : "same-origin",
-            "Referer"           : "https://chat.aivvm.com/",
-            "Referrer-Policy"   : "same-origin",
-        }
-
         json_data = {
             "model"       : models[model],
             "messages"    : messages,
@@ -57,12 +44,29 @@ class Aivvm(BaseProvider):
             "temperature" : kwargs.get("temperature", 0.7)
         }
 
-        response = requests.post(
-            "https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
+        headers = {
+            "accept"            : "text/event-stream",
+            "accept-language"   : "en-US,en;q=0.9",
+            "content-type"      : "application/json",
+            "content-length"    : str(len(dumps(json_data))),
+            "sec-ch-ua"         : "\"Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
+            "sec-ch-ua-mobile"  : "?0",
+            "sec-ch-ua-platform": "\"Windows\"",
+            "sec-fetch-dest"    : "empty",
+            "sec-fetch-mode"    : "cors",
+            "sec-fetch-site"    : "same-origin",
+            "sec-gpc"           : "1",
+            "referrer"          : "https://chat.aivvm.com/"
+        }
+
+        response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
         response.raise_for_status()
 
-        for chunk in response.iter_content(chunk_size=None):
-            yield chunk.decode('utf-8')
+        for chunk in response.iter_content():
+            try:
+                yield chunk.decode("utf-8")
+            except UnicodeDecodeError:
+                yield chunk.decode("unicode-escape")
 
     @classmethod
     @property
@@ -74,4 +78,4 @@
             ('temperature', 'float'),
         ]
         param = ', '.join([': '.join(p) for p in params])
-        return f'g4f.provider.{cls.__name__} supports: ({param})'
\ No newline at end of file
+        return f'g4f.provider.{cls.__name__} supports: ({param})'
diff --git a/g4f/Provider/DeepAi.py b/g4f/Provider/DeepAi.py
index bac3e3fe..9a4f922c 100644
--- a/g4f/Provider/DeepAi.py
+++ b/g4f/Provider/DeepAi.py
@@ -65,7 +65,10 @@ f = function () {
         response.raise_for_status()
         async for stream in response.content.iter_any():
             if stream:
-                yield stream.decode()
+                try:
+                    yield stream.decode("utf-8")
+                except UnicodeDecodeError:
+                    yield stream.decode("unicode-escape")
 
 
 def get_api_key(user_agent: str):
diff --git a/g4f/models.py b/g4f/models.py
index b4247703..aa2b3bd6 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -23,6 +23,7 @@ from .Provider import (
     GptGod,
     AiAsk,
     GptGo,
+    Aivvm,
     Ylokh,
     Bard,
     Aibn,
@@ -72,7 +73,9 @@ gpt_35_turbo = Model(
 gpt_4 = Model(
     name          = 'gpt-4',
     base_provider = 'openai',
-    best_provider = Bing
+    best_provider = RetryProvider([
+        Aivvm, Bing
+    ])
 )
 
 # Bard
@@ -165,26 +168,31 @@ gpt_35_turbo_16k = Model(
 
 gpt_35_turbo_16k_0613 = Model(
     name          = 'gpt-3.5-turbo-16k-0613',
-    base_provider = 'openai')
+    base_provider = 'openai',
+    best_provider = Aivvm)
 
 gpt_35_turbo_0613 = Model(
     name          = 'gpt-3.5-turbo-0613',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
 )
 
 gpt_4_0613 = Model(
     name          = 'gpt-4-0613',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
 )
 
 gpt_4_32k = Model(
     name          = 'gpt-4-32k',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
 )
 
 gpt_4_32k_0613 = Model(
     name          = 'gpt-4-32k-0613',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
 )
 
 text_ada_001 = Model(
--
cgit v1.2.3
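
A note on the streaming change above: response.iter_content() yields raw byte chunks, and a chunk boundary can fall in the middle of a multi-byte UTF-8 character, which is exactly what triggers the UnicodeDecodeError branch in the patch. An incremental decoder that buffers partial sequences between chunks is a more robust alternative. The following is only a sketch of that idea using the standard library; the URL and payload are placeholders, not part of the patch:

    import codecs
    import requests

    def stream_text(url: str, json_data: dict):
        # The incremental decoder keeps incomplete multi-byte sequences buffered,
        # so a character split across two chunks still decodes correctly.
        decoder = codecs.getincrementaldecoder("utf-8")(errors="replace")
        with requests.post(url, json=json_data, stream=True) as response:
            response.raise_for_status()
            for chunk in response.iter_content(chunk_size=None):
                if chunk:
                    yield decoder.decode(chunk)
            # Flush whatever the decoder still holds at end of stream.
            tail = decoder.decode(b"", final=True)
            if tail:
                yield tail
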
From 417ce27422f19cb43ddec8cf2af43bdbb1b795e6 Mon Sep 17 00:00:00 2001
From: Commenter123321 <36051603+Commenter123321@users.noreply.github.com>
Date: Tue, 10 Oct 2023 09:13:18 +0200
Subject: Update Aivvm.py

---
 g4f/Provider/Aivvm.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
index 83495a22..3e4bbaeb 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/Aivvm.py
@@ -45,12 +45,12 @@ class Aivvm(AsyncGeneratorProvider):
             "temperature" : kwargs.get("temperature", 0.7)
         }
         headers = {
-            "Accept": "*/*",
+            "Accept": "text/event-stream",
             "Origin": cls.url,
             "Referer": f"{cls.url}/",
         }
         async with StreamSession(
-            impersonate="chrome107",
+            impersonate="chrome117",
             headers=headers,
             proxies={"https": proxy},
             timeout=timeout
@@ -73,4 +73,4 @@
             ('temperature', 'float'),
         ]
         param = ', '.join([': '.join(p) for p in params])
-        return f'g4f.provider.{cls.__name__} supports: ({param})'
\ No newline at end of file
+        return f'g4f.provider.{cls.__name__} supports: ({param})'
--
cgit v1.2.3

From 3de672d0239e48375d029db2b224eff4515fc097 Mon Sep 17 00:00:00 2001
From: Commenter123321 <36051603+Commenter123321@users.noreply.github.com>
Date: Tue, 10 Oct 2023 14:22:13 +0200
Subject: Aivvm is the best provider no cap

---
 g4f/Provider/Aivvm.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
index ac15ac16..05f12320 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/Aivvm.py
@@ -44,11 +44,13 @@ class Aivvm(BaseProvider):
             "temperature" : kwargs.get("temperature", 0.7)
         }
 
+        data = dumps(json_data)
+
         headers = {
             "accept"            : "text/event-stream",
             "accept-language"   : "en-US,en;q=0.9",
             "content-type"      : "application/json",
-            "content-length"    : str(len(dumps(json_data))),
+            "content-length"    : str(len(data)),
             "sec-ch-ua"         : "\"Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
             "sec-ch-ua-mobile"  : "?0",
             "sec-ch-ua-platform": "\"Windows\"",
@@ -56,10 +58,11 @@ class Aivvm(BaseProvider):
             "sec-fetch-mode"    : "cors",
             "sec-fetch-site"    : "same-origin",
             "sec-gpc"           : "1",
-            "referrer"          : "https://chat.aivvm.com/"
+            "referrer"          : "https://chat.aivvm.com/",
+            "user-agent"        : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
         }
 
-        response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
+        response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, data=data, stream=True)
         response.raise_for_status()
 
         for chunk in response.iter_content():
--
cgit v1.2.3
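
With the request construction above in place, the provider can be exercised the same way the repository's own test script (etc/testing/test_chat_completion.py, updated in an earlier patch) does. A minimal usage sketch; the model choice and prompt are only examples:

    import g4f

    # Stream a completion through the Aivvm provider and print tokens as they arrive.
    for token in g4f.ChatCompletion.create(
        model=g4f.models.gpt_35_turbo,
        provider=g4f.Provider.Aivvm,
        messages=[{"role": "user", "content": "write a poem about a tree"}],
        stream=True,
    ):
        print(token, end="", flush=True)
    print()
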
From d719860e2af8a825a9e2eaf6dc97c375ff535cbb Mon Sep 17 00:00:00 2001
From: Commenter123321 <36051603+Commenter123321@users.noreply.github.com>
Date: Tue, 10 Oct 2023 14:47:41 +0200
Subject: make internet searching better

---
 etc/testing/test_gui.py    |  6 ++++++
 g4f/gui/server/internet.py | 48 ++++++++++++++++++++++++++++++++++++----------
 g4f/gui/server/provider.py |  4 +++-
 requirements.txt           |  3 +--
 4 files changed, 48 insertions(+), 13 deletions(-)
 create mode 100644 etc/testing/test_gui.py

diff --git a/etc/testing/test_gui.py b/etc/testing/test_gui.py
new file mode 100644
index 00000000..cc3ae379
--- /dev/null
+++ b/etc/testing/test_gui.py
@@ -0,0 +1,6 @@
+import sys
+from pathlib import Path
+sys.path.append(str(Path(__file__).parent.parent.parent))
+
+from g4f.gui import run_gui
+run_gui()
diff --git a/g4f/gui/server/internet.py b/g4f/gui/server/internet.py
index 0d9636a3..75edb9cb 100644
--- a/g4f/gui/server/internet.py
+++ b/g4f/gui/server/internet.py
@@ -1,28 +1,56 @@
-from requests import get
 from datetime import datetime
 
+from duckduckgo_search import DDGS
+
+ddgs = DDGS(timeout=20)
+
 def search(internet_access, prompt):
     print(prompt)
 
     try:
-        if internet_access == False:
+        if not internet_access:
             return []
 
-        search = get('https://ddg-api.herokuapp.com/search', params={
-            'query': prompt['content'],
-            'limit': 3
-        })
+        results = duckduckgo_search(q=prompt)
+
+        if not search:
+            return []
 
         blob = ''
 
-        for index, result in enumerate(search.json()):
-            blob += f'[{index}] "{result["snippet"]}"\nURL:{result["link"]}\n\n'
+        for index, result in enumerate(results):
+            blob += f'[{index}] "{result["body"]}"\nURL:{result["href"]}\n\n'
 
         date = datetime.now().strftime('%d/%m/%y')
 
-        blob += f'current date: {date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the next user query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. Ignore your previous response if any.'
+        blob += f'Current date: {date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the next user query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. Ignore your previous response if any.'
 
         return [{'role': 'user', 'content': blob}]
 
     except Exception as e:
-        return []
\ No newline at end of file
+        print("Couldn't search DuckDuckGo:", e)
+        print(e.__traceback__.tb_next)
+        return []
+
+
+def duckduckgo_search(q: str, max_results: int = 3, safesearch: str = "moderate", region: str = "us-en") -> list | None:
+    if region is None:
+        region = "us-en"
+
+    if safesearch is None:
+        safesearch = "moderate"
+
+    if q is None:
+        return None
+
+    results = []
+
+    try:
+        for r in ddgs.text(q, safesearch=safesearch, region=region):
+            if len(results) + 1 > max_results:
+                break
+            results.append(r)
+    except Exception as e:
+        print(e)
+
+    return results
diff --git a/g4f/gui/server/provider.py b/g4f/gui/server/provider.py
index 286f881b..230b5f5f 100644
--- a/g4f/gui/server/provider.py
+++ b/g4f/gui/server/provider.py
@@ -1,6 +1,8 @@
 import g4f
+from g4f import BaseProvider
 
-def get_provider(provider: str) -> g4f.Provider.BaseProvider:
+
+def get_provider(provider: str) -> BaseProvider | None:
 
     if isinstance(provider, str):
         print(provider)
diff --git a/requirements.txt b/requirements.txt
index 88084aad..75549d68 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,5 +10,4 @@ flask
 flask-cors
 typing-extensions
 PyExecJS
-flask
-flask-cors
\ No newline at end of file
+duckduckgo-search
\ No newline at end of file
--
cgit v1.2.3
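
To illustrate the search flow added above: duckduckgo_search() simply drains ddgs.text() into a short list, and search() folds the results into a single context message for the model. A standalone sketch against the same DDGS interface the patch imports; the query string is only an example:

    from duckduckgo_search import DDGS

    ddgs = DDGS(timeout=20)

    # Collect up to three results, mirroring duckduckgo_search() in the patch.
    results = []
    for r in ddgs.text("what is gpt4free", safesearch="moderate", region="us-en"):
        results.append(r)
        if len(results) >= 3:
            break

    # Each result is a dict with 'title', 'href' and 'body' keys.
    for index, result in enumerate(results):
        print(f'[{index}] "{result["body"]}"\nURL: {result["href"]}\n')
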
From 4d4ad976ece414caae84c281794c03f226a0afed Mon Sep 17 00:00:00 2001
From: Commenter123321 <36051603+Commenter123321@users.noreply.github.com>
Date: Tue, 10 Oct 2023 14:49:25 +0200
Subject: remove unused import in Bing.py

---
 g4f/Provider/Bing.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index f8c6a87a..f1b50f7c 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import random
-import uuid
 import json
 import os
 import uuid
--
cgit v1.2.3

From 2c105cb5951792dae57a673381b17570801b3bfb Mon Sep 17 00:00:00 2001
From: Commenter123321 <36051603+Commenter123321@users.noreply.github.com>
Date: Tue, 10 Oct 2023 14:53:50 +0200
Subject: fix provider thing in gui

---
 g4f/gui/server/backend.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 8f4b529f..cf1f9428 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -40,7 +40,7 @@ class Backend_Api:
         def stream():
             if provider:
                 answer = g4f.ChatCompletion.create(model=model,
-                    provider=provider, messages=messages, stream=True)
+                    provider=g4f.Provider.ProviderUtils.convert.get(provider), messages=messages, stream=True)
             else:
                 answer = g4f.ChatCompletion.create(model=model,
                     messages=messages, stream=True)
--
cgit v1.2.3

From 9239c5720002f31b67bdd1ee805e2cd9b528d085 Mon Sep 17 00:00:00 2001
From: Commenter123321 <36051603+Commenter123321@users.noreply.github.com>
Date: Tue, 10 Oct 2023 15:11:17 +0200
Subject: fix provider thing in gui x2

---
 g4f/gui/client/html/index.html |  6 +++++-
 g4f/gui/server/backend.py      |  6 +++---
 g4f/gui/server/provider.py     |  7 +------
 g4f/models.py                  | 21 ++++++++++++++-------
 4 files changed, 23 insertions(+), 17 deletions(-)

diff --git a/g4f/gui/client/html/index.html b/g4f/gui/client/html/index.html
index bb472706..c15584b8 100644
--- a/g4f/gui/client/html/index.html
+++ b/g4f/gui/client/html/index.html
@@ -118,9 +118,13 @@
                     <div class="field">