author     kqlio67 <kqlio67@users.noreply.github.com>  2024-11-12 08:04:33 +0100
committer  kqlio67 <kqlio67@users.noreply.github.com>  2024-11-12 08:04:33 +0100
commit     fde29c53e8c8c53cd289414db873c134273f7c68 (patch)
tree       11b165378d0d161fe48fc7b6bd6b05fa6bf9d2f3
parent     Update (docs/ README.md g4f/client/client.py) (diff)
Diffstat
-rw-r--r--  g4f/Provider/HuggingChat.py  46
-rw-r--r--  g4f/models.py                10
2 files changed, 49 insertions(+), 7 deletions(-)
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index a3f0157e..d4a4b497 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -19,6 +19,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
         'CohereForAI/c4ai-command-r-plus-08-2024',
         'Qwen/Qwen2.5-72B-Instruct',
         'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
+        'Qwen/Qwen2.5-Coder-32B-Instruct',
         'meta-llama/Llama-3.2-11B-Vision-Instruct',
         'NousResearch/Hermes-3-Llama-3.1-8B',
         'mistralai/Mistral-Nemo-Instruct-2407',
@@ -30,6 +31,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
"command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
"qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
"nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+ "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct",
"llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct",
"hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B",
"mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407",
@@ -83,12 +85,33 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
             raise RuntimeError(f"Request failed with status code: {response.status_code}, response: {response.text}")
 
         conversationId = response.json().get('conversationId')
-        response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=01')
-        data: list = response.json()["nodes"][1]["data"]
-        keys: list[int] = data[data[0]["messages"]]
-        message_keys: dict = data[keys[0]]
-        messageId: str = data[message_keys["id"]]
+
+        # Get the data response and parse it properly
+        response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11')
+
+        # Split the response content by newlines and parse each line as JSON
+        try:
+            json_data = None
+            for line in response.text.split('\n'):
+                if line.strip():
+                    try:
+                        parsed = json.loads(line)
+                        if isinstance(parsed, dict) and "nodes" in parsed:
+                            json_data = parsed
+                            break
+                    except json.JSONDecodeError:
+                        continue
+
+            if not json_data:
+                raise RuntimeError("Failed to parse response data")
+
+            data: list = json_data["nodes"][1]["data"]
+            keys: list[int] = data[data[0]["messages"]]
+            message_keys: dict = data[keys[0]]
+            messageId: str = data[message_keys["id"]]
+        except (KeyError, IndexError, TypeError) as e:
+            raise RuntimeError(f"Failed to extract message ID: {str(e)}")
 
         settings = {
             "inputs": format_prompt(messages),
@@ -120,7 +143,8 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
             'data': (None, json.dumps(settings, separators=(',', ':'))),
         }
 
-        response = requests.post(f'https://huggingface.co/chat/conversation/{conversationId}',
+        response = requests.post(
+            f'https://huggingface.co/chat/conversation/{conversationId}',
             cookies=session.cookies,
             headers=headers,
             files=files,
@@ -142,10 +166,18 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
             elif line["type"] == "stream":
                 token = line["token"].replace('\u0000', '')
                 full_response += token
+                if stream:
+                    yield token
             elif line["type"] == "finalAnswer":
                 break
 
         full_response = full_response.replace('<|im_end|', '').replace('\u0000', '').strip()
-        yield full_response
+        if not stream:
+            yield full_response
+
+    @classmethod
+    def supports_model(cls, model: str) -> bool:
+        """Check if the model is supported by the provider."""
+        return model in cls.models or model in cls.model_aliases
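Note on the parsing change above: the single response.json() call is replaced by a line-by-line scan because the SvelteKit __data.json endpoint can answer with newline-delimited JSON rather than one JSON document. A minimal standalone sketch of the same scan, using an invented two-line payload in place of the real HuggingChat response:

    import json

    # Invented sample payload: the first NDJSON line is framing, the second
    # carries the "nodes" structure the provider actually reads.
    raw = '{"type":"chunk"}\n{"nodes":[null,{"data":["ok"]}]}\n'

    json_data = None
    for line in raw.split('\n'):
        if not line.strip():
            continue
        try:
            parsed = json.loads(line)
        except json.JSONDecodeError:
            continue  # skip lines that are not valid JSON
        if isinstance(parsed, dict) and "nodes" in parsed:
            json_data = parsed  # first line exposing "nodes" wins
            break

    print(json_data["nodes"][1]["data"])  # -> ['ok']

The break after the first match means any later "nodes" lines are ignored, which matches the commit's assumption that the message metadata sits in the first such line.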
diff --git a/g4f/models.py b/g4f/models.py
index dd87d8de..a0cee01d 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -361,6 +361,13 @@ qwen_2_72b = Model(
     best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
 )
 
+# qwen 2.5
+qwen_2_5_coder_32b = Model(
+    name = 'qwen-2.5-coder-32b',
+    base_provider = 'Qwen',
+    best_provider = IterListProvider([HuggingChat, HuggingFace])
+)
+
 ### Upstage ###
 solar_mini = Model(
     name = 'solar-mini',
@@ -703,6 +710,9 @@ class ModelUtils:
         # qwen 2
         'qwen-2-72b': qwen_2_72b,
+
+        # qwen 2.5
+        'qwen-2.5-coder-32b': qwen_2_5_coder_32b,
 
         ### Upstage ###
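For context, a rough end-to-end sketch of how the newly registered key could be exercised. This assumes the g4f client API touched in the parent commit (g4f/client/client.py) and a session HuggingChat will accept (the provider needs valid huggingface.co cookies); the prompt is invented:

    from g4f.client import Client
    from g4f.Provider import HuggingChat

    # The new classmethod accepts both full model ids and aliases.
    assert HuggingChat.supports_model("qwen-2.5-coder-32b")

    client = Client(provider=HuggingChat)

    # stream=True exercises the per-token yield path added in this commit;
    # without it, the provider yields one final string instead.
    for chunk in client.chat.completions.create(
        model="qwen-2.5-coder-32b",
        messages=[{"role": "user", "content": "Write a quicksort in Python."}],
        stream=True,
    ):
        print(chunk.choices[0].delta.content or "", end="")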