author     kqlio67 <kqlio67@users.noreply.github.com>  2024-11-11 18:16:02 +0100
committer  kqlio67 <kqlio67@users.noreply.github.com>  2024-11-11 18:16:02 +0100
commit     8e8410c8989a21b6aad1c60f01600ce1d9dac2e7
tree       2b3a4600be457dcf11de338ce1925814cae8a896
parent     Update (g4f/Provider/Cloudflare.py)
Diffstat
-rw-r--r--  g4f/Provider/Cloudflare.py | 47
-rw-r--r--  g4f/models.py              | 31
2 files changed, 15 insertions, 63 deletions
diff --git a/g4f/Provider/Cloudflare.py b/g4f/Provider/Cloudflare.py
index 34d7c585..8fb37bef 100644
--- a/g4f/Provider/Cloudflare.py
+++ b/g4f/Provider/Cloudflare.py
@@ -1,5 +1,6 @@
 from __future__ import annotations

+from aiohttp import ClientSession
 import asyncio
 import json
 import uuid
@@ -10,7 +11,6 @@
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt
-
 class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Cloudflare AI"
     url = "https://playground.ai.cloudflare.com"
@@ -22,8 +22,6 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
     default_model = '@cf/meta/llama-3.1-8b-instruct-awq'
     models = [
-        '@hf/google/gemma-7b-it',
-
         '@cf/meta/llama-2-7b-chat-fp16',
         '@cf/meta/llama-2-7b-chat-int8',
@@ -38,21 +36,12 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
         '@hf/mistral/mistral-7b-instruct-v0.2',

-        '@cf/microsoft/phi-2',
-
-        '@cf/qwen/qwen1.5-0.5b-chat',
-        '@cf/qwen/qwen1.5-1.8b-chat',
-        '@cf/qwen/qwen1.5-14b-chat-awq',
         '@cf/qwen/qwen1.5-7b-chat-awq',

         '@cf/defog/sqlcoder-7b-2',
     ]

     model_aliases = {
-        #"falcon-7b": "@cf/tiiuae/falcon-7b-instruct",
-
-        "gemma-7b": "@hf/google/gemma-7b-it",
-        "llama-2-7b": "@cf/meta/llama-2-7b-chat-fp16",
         "llama-2-7b": "@cf/meta/llama-2-7b-chat-int8",
@@ -65,11 +54,6 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
         "llama-3.2-1b": "@cf/meta/llama-3.2-1b-instruct",

-        "phi-2": "@cf/microsoft/phi-2",
-
-        "qwen-1.5-0-5b": "@cf/qwen/qwen1.5-0.5b-chat",
-        "qwen-1.5-1-8b": "@cf/qwen/qwen1.5-1.8b-chat",
-        "qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
         "qwen-1.5-7b": "@cf/qwen/qwen1.5-7b-chat-awq",

         #"sqlcoder-7b": "@cf/defog/sqlcoder-7b-2",
@@ -90,6 +74,7 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
         model: str,
         messages: Messages,
         proxy: str = None,
+        max_tokens: int = 2048,
         **kwargs
     ) -> AsyncResult:
         model = cls.get_model(model)
@@ -117,20 +102,19 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
         scraper = cloudscraper.create_scraper()
-
-        prompt = messages[-1]['content']
-
         data = {
             "messages": [
-                {"role": "user", "content": prompt}
+                {"role": "user", "content": format_prompt(messages)}
             ],
             "lora": None,
             "model": model,
-            "max_tokens": 2048,
+            "max_tokens": max_tokens,
             "stream": True
         }

-        max_retries = 5
+        max_retries = 3
+        full_response = ""
+
         for attempt in range(max_retries):
             try:
                 response = scraper.post(
@@ -138,31 +122,28 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
                     headers=headers,
                     cookies=cookies,
                     json=data,
-                    stream=True
+                    stream=True,
+                    proxies={'http': proxy, 'https': proxy} if proxy else None
                 )

                 if response.status_code == 403:
                     await asyncio.sleep(2 ** attempt)
                     continue
-
+
                 response.raise_for_status()

-                skip_tokens = ["</s>", "<s>", "</s>", "[DONE]", "<|endoftext|>", "<|end|>"]
-                filtered_response = ""
-
                 for line in response.iter_lines():
                     if line.startswith(b'data: '):
                         if line == b'data: [DONE]':
+                            if full_response:
+                                yield full_response
                             break
                         try:
                             content = json.loads(line[6:].decode('utf-8'))
-                            response_text = content['response']
-                            if not any(token in response_text for token in skip_tokens):
-                                filtered_response += response_text
+                            if 'response' in content and content['response'] != '</s>':
+                                yield content['response']
                         except Exception:
                             continue
-
-                yield filtered_response.strip()
                 break

             except Exception as e:
                 if attempt == max_retries - 1:
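The substance of the Cloudflare.py change is the rewritten request loop: it retries with exponential backoff whenever the playground answers HTTP 403, forwards the proxy to the scraper, and yields each SSE 'response' chunk as it arrives instead of buffering and filtering the whole reply. (Note that full_response is initialized but never appended to, so the extra "yield full_response" at [DONE] never fires.) Below is a minimal standalone sketch of the same backoff-then-stream pattern; fake_post, its return shape, and the sample stream are fabricated stand-ins for the real scraper.post(..., stream=True) call, not the provider's actual traffic.

import asyncio
import json

async def stream_with_backoff(post, max_retries: int = 3):
    # Retry on HTTP 403, sleeping 2**attempt seconds (1s, 2s, 4s, ...)
    # between attempts, then parse 'data: ...' SSE lines and yield each
    # response chunk; the same scheme the patched provider uses.
    for attempt in range(max_retries):
        status, lines = post()
        if status == 403:
            await asyncio.sleep(2 ** attempt)
            continue
        for line in lines:
            if not line.startswith(b'data: '):
                continue
            if line == b'data: [DONE]':
                return
            try:
                content = json.loads(line[6:].decode('utf-8'))
            except (json.JSONDecodeError, UnicodeDecodeError):
                continue
            # Skip the end-of-sequence sentinel, as the patch does.
            if 'response' in content and content['response'] != '</s>':
                yield content['response']
        return

# Stubbed transport for demonstration only.
def fake_post():
    return 200, [b'data: {"response": "Hello"}',
                 b'data: {"response": ", world"}',
                 b'data: [DONE]']

async def main():
    async for chunk in stream_with_backoff(fake_post):
        print(chunk, end='')
    print()

asyncio.run(main())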
diff --git a/g4f/models.py b/g4f/models.py
index ec0ebd32..3b82270e 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -250,7 +250,7 @@ hermes_3 = Model(
 phi_2 = Model(
     name = "phi-2",
     base_provider = "Microsoft",
-    best_provider = IterListProvider([Cloudflare, Airforce])
+    best_provider = IterListProvider([Airforce])
 )

 phi_3_5_mini = Model(
@@ -286,12 +286,6 @@ gemma_2b = Model(
     best_provider = IterListProvider([ReplicateHome])
 )

-gemma_7b = Model(
-    name = 'gemma-7b',
-    base_provider = 'Google',
-    best_provider = Cloudflare
-)
-

 ### Anthropic ###
 claude_2_1 = Model(
@@ -358,30 +352,12 @@ command_r_plus = Model(

 ### Qwen ###
 # qwen 1_5
-qwen_1_5_5b = Model(
-    name = 'qwen-1.5-5b',
-    base_provider = 'Qwen',
-    best_provider = Cloudflare
-)
-
 qwen_1_5_7b = Model(
     name = 'qwen-1.5-7b',
     base_provider = 'Qwen',
     best_provider = Cloudflare
 )

-qwen_1_5_8b = Model(
-    name = 'qwen-1.5-8b',
-    base_provider = 'Qwen',
-    best_provider = Cloudflare
-)
-
-qwen_1_5_14b = Model(
-    name = 'qwen-1.5-14b',
-    base_provider = 'Qwen',
-    best_provider = IterListProvider([Cloudflare])
-)
-
 # qwen 2
 qwen_2_72b = Model(
     name = 'qwen-2-72b',
@@ -690,7 +666,6 @@ class ModelUtils:

         ### Microsoft ###
-        'phi-2': phi_2,
         'phi-3.5-mini': phi_3_5_mini,
@@ -702,7 +677,6 @@ class ModelUtils:

         # gemma
         'gemma-2b': gemma_2b,
-        'gemma-7b': gemma_7b,

         ### Anthropic ###
@@ -737,10 +711,7 @@ class ModelUtils:

         ### Qwen ###
         # qwen 1.5
-        'qwen-1.5-5b': qwen_1_5_5b,
         'qwen-1.5-7b': qwen_1_5_7b,
-        'qwen-1.5-8b': qwen_1_5_8b,
-        'qwen-1.5-14b': qwen_1_5_14b,

         # qwen 2
         'qwen-2-72b': qwen_2_72b,
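The models.py side is registry pruning to match: gemma_7b and the qwen-1.5 5b/8b/14b entries are deleted outright, phi-2 loses its Cloudflare route, and the corresponding keys vanish from the ModelUtils lookup table, so those aliases no longer resolve. A small illustration of that lookup pattern follows; the Model stub and get_model helper are hypothetical re-creations for this sketch, not g4f's actual classes.

from dataclasses import dataclass

@dataclass
class Model:
    name: str
    base_provider: str

# After this commit only the 7b qwen-1.5 variant stays registered.
qwen_1_5_7b = Model(name='qwen-1.5-7b', base_provider='Qwen')

convert = {
    'qwen-1.5-7b': qwen_1_5_7b,
}

def get_model(alias: str) -> Model:
    # Pruned aliases ('qwen-1.5-14b', 'gemma-7b', ...) no longer resolve.
    try:
        return convert[alias]
    except KeyError:
        raise ValueError(f"unknown or removed model alias: {alias}") from None

print(get_model('qwen-1.5-7b').base_provider)  # Qwen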