From 82bd6f91808a383781807262c4ae1f3de9740531 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Wed, 20 Sep 2023 06:12:34 +0200
Subject: Cache "snlm0e" in Bard
 Improve error handling in ChatgptLogin
 Fix async example in readme

---
 g4f/Provider/H2o.py | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

(limited to 'g4f/Provider/H2o.py')

diff --git a/g4f/Provider/H2o.py b/g4f/Provider/H2o.py
index 30090a58..d92bd6d1 100644
--- a/g4f/Provider/H2o.py
+++ b/g4f/Provider/H2o.py
@@ -23,7 +23,7 @@ class H2o(AsyncGeneratorProvider):
         **kwargs
     ) -> AsyncGenerator:
         model = model if model else cls.model
-        headers = {"Referer": "https://gpt-gm.h2o.ai/"}
+        headers = {"Referer": cls.url + "/"}
 
         async with ClientSession(
             headers=headers
@@ -36,14 +36,14 @@ class H2o(AsyncGeneratorProvider):
                 "searchEnabled": "true",
             }
             async with session.post(
-                "https://gpt-gm.h2o.ai/settings",
+                f"{cls.url}/settings",
                 proxy=proxy,
                 data=data
             ) as response:
                 response.raise_for_status()
 
             async with session.post(
-                "https://gpt-gm.h2o.ai/conversation",
+                f"{cls.url}/conversation",
                 proxy=proxy,
                 json={"model": model},
             ) as response:
@@ -71,7 +71,7 @@ class H2o(AsyncGeneratorProvider):
                 },
             }
             async with session.post(
-                f"https://gpt-gm.h2o.ai/conversation/{conversationId}",
+                f"{cls.url}/conversation/{conversationId}",
                 proxy=proxy,
                 json=data
             ) as response:
@@ -83,6 +83,14 @@ class H2o(AsyncGeneratorProvider):
                         if not line["token"]["special"]:
                             yield line["token"]["text"]
 
+            async with session.delete(
+                f"{cls.url}/conversation/{conversationId}",
+                proxy=proxy,
+                json=data
+            ) as response:
+                response.raise_for_status()
+
+
     @classmethod
     @property
     def params(cls):
-- 
cgit v1.2.3