From c1adfbee8e15406dbdce75f87de47dc1c0dd17df Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Sun, 15 Oct 2023 19:10:25 +0200
Subject: Add Llama2 and NoowAi Provider

---
 README.md                           |   4 +-
 g4f/Provider/ChatForAi.py           |   2 +-
 g4f/Provider/H2o.py                 | 109 ------------------------------------
 g4f/Provider/Llama2.py              |  76 +++++++++++++++++++++++++
 g4f/Provider/NoowAi.py              |  66 ++++++++++++++++++++++
 g4f/Provider/__init__.py            |   7 ++-
 g4f/Provider/deprecated/H2o.py      | 107 +++++++++++++++++++++++++++++++++++
 g4f/Provider/deprecated/__init__.py |   3 +-
 g4f/models.py                       |   4 +-
 9 files changed, 263 insertions(+), 115 deletions(-)
 delete mode 100644 g4f/Provider/H2o.py
 create mode 100644 g4f/Provider/Llama2.py
 create mode 100644 g4f/Provider/NoowAi.py
 create mode 100644 g4f/Provider/deprecated/H2o.py

diff --git a/README.md b/README.md
index 9d0b150f..ac52abea 100644
--- a/README.md
+++ b/README.md
@@ -325,12 +325,12 @@ asyncio.run(run_all())
 
 ##### Proxy Support:
 
-All providers support specifying a proxy in the create function.
+All providers support specifying a proxy in the create functions.
 
 ```py
 import g4f
 
-response = await g4f.ChatCompletion.create(
+response = g4f.ChatCompletion.create(
     model=g4f.models.default,
     messages=[{"role": "user", "content": "Hello"}],
     proxy="http://host:port",
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py
index c93e76ee..718affeb 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/ChatForAi.py
@@ -44,7 +44,7 @@ class ChatForAi(AsyncGeneratorProvider):
                 **kwargs
             },
             "botSettings": {},
-            "prompt": prompt, 
+            "prompt": prompt,
             "messages": messages,
             "timestamp": timestamp,
             "sign": generate_signature(timestamp, prompt, conversation_id)
diff --git a/g4f/Provider/H2o.py b/g4f/Provider/H2o.py
deleted file mode 100644
index 9fac92a4..00000000
--- a/g4f/Provider/H2o.py
+++ /dev/null
@@ -1,109 +0,0 @@
-from __future__ import annotations
-
-import json
-import uuid
-
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, format_prompt
-
-
-class H2o(AsyncGeneratorProvider):
-    url = "https://gpt-gm.h2o.ai"
-    working = False
-    model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        model = model if model else cls.model
-        headers = {"Referer": cls.url + "/"}
-
-        async with ClientSession(
-            headers=headers
-        ) as session:
-            data = {
-                "ethicsModalAccepted": "true",
-                "shareConversationsWithModelAuthors": "true",
-                "ethicsModalAcceptedAt": "",
-                "activeModel": model,
-                "searchEnabled": "true",
-            }
-            async with session.post(
-                f"{cls.url}/settings",
-                proxy=proxy,
-                data=data
-            ) as response:
-                response.raise_for_status()
-
-            async with session.post(
-                f"{cls.url}/conversation",
-                proxy=proxy,
-                json={"model": model},
-            ) as response:
-                response.raise_for_status()
-                conversationId = (await response.json())["conversationId"]
-
-            data = {
-                "inputs": format_prompt(messages),
-                "parameters": {
-                    "temperature": 0.4,
-                    "truncate": 2048,
-                    "max_new_tokens": 1024,
-                    "do_sample": True,
-                    "repetition_penalty": 1.2,
-                    "return_full_text": False,
-                    **kwargs
-                },
-                "stream": True,
-                "options": {
-                    "id": str(uuid.uuid4()),
-                    "response_id": str(uuid.uuid4()),
-                    "is_retry": False,
-                    "use_cache": False,
-                    "web_search_id": "",
-                },
-            }
-            async with session.post(
-                f"{cls.url}/conversation/{conversationId}",
-                proxy=proxy,
-                json=data
-            ) as response:
-                start = "data:"
-                async for line in response.content:
-                    line = line.decode("utf-8")
-                    if line and line.startswith(start):
-                        line = json.loads(line[len(start):-1])
-                        if not line["token"]["special"]:
-                            yield line["token"]["text"]
-
-            async with session.delete(
-                f"{cls.url}/conversation/{conversationId}",
-                proxy=proxy,
-                json=data
-            ) as response:
-                response.raise_for_status()
-
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-            ("truncate", "int"),
-            ("max_new_tokens", "int"),
-            ("do_sample", "bool"),
-            ("repetition_penalty", "float"),
-            ("return_full_text", "bool"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Llama2.py b/g4f/Provider/Llama2.py
new file mode 100644
index 00000000..b59fde12
--- /dev/null
+++ b/g4f/Provider/Llama2.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
+models = {
+    "7B": {"name": "Llama 2 7B", "version": "d24902e3fa9b698cc208b5e63136c4e26e828659a9f09827ca6ec5bb83014381", "shortened":"7B"},
+    "13B": {"name": "Llama 2 13B", "version": "9dff94b1bed5af738655d4a7cbcdcde2bd503aa85c94334fe1f42af7f3dd5ee3", "shortened":"13B"},
+    "70B": {"name": "Llama 2 70B", "version": "2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf", "shortened":"70B"},
+    "Llava": {"name": "Llava 13B", "version": "6bc1c7bb0d2a34e413301fee8f7cc728d2d4e75bfab186aa995f63292bda92fc", "shortened":"Llava"}
+}
+
+class Llama2(AsyncGeneratorProvider):
+    url = "https://www.llama2.ai"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        if not model:
+            model = "70B"
+        if model not in models:
+            raise ValueError(f"Model is not supported: {model}")
+        version = models[model]["version"]
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": f"{cls.url}/",
+            "Content-Type": "text/plain;charset=UTF-8",
+            "Origin": cls.url,
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Pragma": "no-cache",
+            "Cache-Control": "no-cache",
+            "TE": "trailers"
+        }
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
+            data = {
+                "prompt": prompt,
+                "version": version,
+                "systemPrompt": kwargs.get("system_message", "You are a helpful assistant."),
+                "temperature": kwargs.get("temperature", 0.75),
+                "topP": kwargs.get("top_p", 0.9),
+                "maxTokens": kwargs.get("max_tokens", 1024),
+                "image": None
+            }
+            started = False
+            async with session.post(f"{cls.url}/api", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content.iter_any():
+                    if not started:
+                        chunk = chunk.lstrip()
+                        started = True
+                    yield chunk.decode()
+
+def format_prompt(messages: Messages):
+    messages = [
+        f"[INST]{message['content']}[/INST]"
+        if message["role"] == "user"
+        else message["content"]
+        for message in messages
+    ]
+    return "\n".join(messages)
\ No newline at end of file
diff --git a/g4f/Provider/NoowAi.py b/g4f/Provider/NoowAi.py
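The following usage sketch is not part of the patch; it shows how the new Llama2 provider above can be driven directly through its `create_async_generator` API. The model key `"70B"` and the message shape come from the diff itself; the event-loop setup and printing are illustrative.

```py
import asyncio
from g4f.Provider import Llama2

async def main():
    # "70B" is one of the keys in the provider's `models` dict above;
    # an unknown key raises ValueError before any request is made.
    async for chunk in Llama2.create_async_generator(
        model="70B",
        messages=[{"role": "user", "content": "Hello"}],
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())
```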
new file mode 100644
index 00000000..93748258
--- /dev/null
+++ b/g4f/Provider/NoowAi.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import random, string, json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
+
+class NoowAi(AsyncGeneratorProvider):
+    url = "https://noowai.com"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": f"{cls.url}/",
+            "Content-Type": "application/json",
+            "Origin": cls.url,
+            "Alt-Used": "noowai.com",
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Pragma": "no-cache",
+            "Cache-Control": "no-cache",
+            "TE": "trailers"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "botId": "default",
+                "customId": "d49bc3670c3d858458576d75c8ea0f5d",
+                "session": "N/A",
+                "chatId": random_string(),
+                "contextId": 25,
+                "messages": messages,
+                "newMessage": messages[-1]["content"],
+                "stream": True
+            }
+            async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for line in response.content:
+                    if line.startswith(b"data: "):
+                        try:
+                            line = json.loads(line[6:])
+                            assert "type" in line
+                        except Exception:
+                            raise RuntimeError(f"Broken line: {line.decode()}")
+                        if line["type"] == "live":
+                            yield line["data"]
+                        elif line["type"] == "end":
+                            break
+
+def random_string(length: int = 10):
+    return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 26f523c7..ae6ca996 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -23,9 +23,10 @@ from .GptChatly import GptChatly
 from .GptForLove import GptForLove
 from .GptGo import GptGo
 from .GptGod import GptGod
-from .H2o import H2o
 from .Liaobots import Liaobots
+from .Llama2 import Llama2
 from .Myshell import Myshell
+from .NoowAi import NoowAi
 from .Opchatgpts import Opchatgpts
 from .Phind import Phind
 from .Vercel import Vercel
@@ -82,9 +83,11 @@ class ProviderUtils:
         'HuggingChat': HuggingChat,
         'Komo': Komo,
         'Liaobots': Liaobots,
+        'Llama2': Llama2,
         'Lockchat': Lockchat,
         'MikuChat': MikuChat,
         'Myshell': Myshell,
+        'NoowAi': NoowAi,
         'Opchatgpts': Opchatgpts,
         'OpenAssistant': OpenAssistant,
         'OpenaiChat': OpenaiChat,
@@ -148,8 +151,10 @@ __all__ = [
     'H2o',
     'HuggingChat',
     'Liaobots',
+    'Llama2',
     'Lockchat',
     'Myshell',
+    'NoowAi',
     'Opchatgpts',
     'Raycast',
     'OpenaiChat',
diff --git a/g4f/Provider/deprecated/H2o.py b/g4f/Provider/deprecated/H2o.py
new file mode 100644
index 00000000..47290a3e
--- /dev/null
+++ b/g4f/Provider/deprecated/H2o.py
@@ -0,0 +1,107 @@
+from __future__ import annotations
+
+import json
+import uuid
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, format_prompt
+
+
+class H2o(AsyncGeneratorProvider):
+    url = "https://gpt-gm.h2o.ai"
+    model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = model if model else cls.model
+        headers = {"Referer": cls.url + "/"}
+
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            data = {
+                "ethicsModalAccepted": "true",
+                "shareConversationsWithModelAuthors": "true",
+                "ethicsModalAcceptedAt": "",
+                "activeModel": model,
+                "searchEnabled": "true",
+            }
+            async with session.post(
+                f"{cls.url}/settings",
+                proxy=proxy,
+                data=data
+            ) as response:
+                response.raise_for_status()
+
+            async with session.post(
+                f"{cls.url}/conversation",
+                proxy=proxy,
+                json={"model": model},
+            ) as response:
+                response.raise_for_status()
+                conversationId = (await response.json())["conversationId"]
+
+            data = {
+                "inputs": format_prompt(messages),
+                "parameters": {
+                    "temperature": 0.4,
+                    "truncate": 2048,
+                    "max_new_tokens": 1024,
+                    "do_sample": True,
+                    "repetition_penalty": 1.2,
+                    "return_full_text": False,
+                    **kwargs
+                },
+                "stream": True,
+                "options": {
+                    "id": str(uuid.uuid4()),
+                    "response_id": str(uuid.uuid4()),
+                    "is_retry": False,
+                    "use_cache": False,
+                    "web_search_id": "",
+                },
+            }
+            async with session.post(
+                f"{cls.url}/conversation/{conversationId}",
+                proxy=proxy,
+                json=data
+            ) as response:
+                start = "data:"
+                async for line in response.content:
+                    line = line.decode("utf-8")
+                    if line and line.startswith(start):
+                        line = json.loads(line[len(start):-1])
+                        if not line["token"]["special"]:
+                            yield line["token"]["text"]
+
+            async with session.delete(
+                f"{cls.url}/conversation/{conversationId}",
+                proxy=proxy,
+            ) as response:
+                response.raise_for_status()
+
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+            ("truncate", "int"),
+            ("max_new_tokens", "int"),
+            ("do_sample", "bool"),
+            ("repetition_penalty", "float"),
+            ("return_full_text", "bool"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index b37b7edd..db48c3fb 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -11,4 +11,5 @@ from .Wuguokai import Wuguokai
 from .V50 import V50
 from .FastGpt import FastGpt
 from .Aivvm import Aivvm
-from .Vitalentum import Vitalentum
\ No newline at end of file
+from .Vitalentum import Vitalentum
+from .H2o import H2o
\ No newline at end of file
diff --git a/g4f/models.py b/g4f/models.py
index c2d9b89b..a0b35ff6 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -16,6 +16,7 @@ from .Provider import (
     Yqcloud,
     Myshell,
     FreeGpt,
+    NoowAi,
     Vercel,
     Aichat,
     GPTalk,
@@ -51,8 +52,9 @@ gpt_35_long = Model(
     name          = 'gpt-3.5-turbo',
     base_provider = 'openai',
     best_provider = RetryProvider([
-        AiAsk, Aichat, ChatgptDemo, FreeGpt, GptGo, Liaobots, You,
+        AiAsk, Aichat, ChatgptDemo, FreeGpt, Liaobots, You,
         GPTalk, ChatgptLogin, GptChatly, GptForLove, Opchatgpts,
+        NoowAi,
     ])
 )
 
-- 
cgit v1.2.3
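As a closing sketch (not part of the patch): the models.py hunk above adds NoowAi to the gpt-3.5-turbo RetryProvider chain, so it can be reached either implicitly through that chain or explicitly via the `provider` keyword that the README documents for all providers. The exact call below is an assumption for this revision of the API.

```py
import g4f
from g4f.Provider import NoowAi

# Pin the new provider instead of letting the RetryProvider chain choose.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=NoowAi,
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
```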