author     H Lohaus <hlohaus@users.noreply.github.com>  2023-12-22 13:54:52 +0100
committer  GitHub <noreply@github.com>                  2023-12-22 13:54:52 +0100
commit     558d8ea51e059a16bda139c731284530dc535f1f (patch)
tree       fb7c04a0f0e74edea56aa42f8fc4c2f8cb20ea06
parent     Merge pull request #1359 from yifeikong/patch-1 (diff)
parent     Update models.py (diff)
-rw-r--r--  g4f/Provider/Bing.py | 16
-rw-r--r--  g4f/models.py        |  7
2 files changed, 19 insertions, 4 deletions
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index f1255553..dccfc5b1 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -60,7 +60,10 @@ class Bing(AsyncGeneratorProvider):
         for key, value in default_cookies.items():
             if key not in cookies:
                 cookies[key] = value
-        return stream_generate(prompt, tone, image, context, proxy, cookies, web_search)
+
+        gpt4_turbo = True if model.startswith("gpt-4-turbo") else False
+
+        return stream_generate(prompt, tone, image, context, proxy, cookies, web_search, gpt4_turbo)
 
 def create_context(messages: Messages):
     return "".join(
@@ -377,7 +380,7 @@ def compress_image_to_base64(img, compression_rate) -> str:
     except Exception as e:
         raise e
 
-def create_message(conversation: Conversation, prompt: str, tone: str, context: str = None, web_search: bool = False) -> str:
+def create_message(conversation: Conversation, prompt: str, tone: str, context: str = None, web_search: bool = False, gpt4_turbo: bool = False) -> str:
     options_sets = Defaults.optionsSets
     if tone == Tones.creative:
         options_sets.append("h3imaginative")
@@ -387,8 +390,12 @@ def create_message(conversation: Conversation, prompt: str, tone: str, context:
options_sets.append("galileo")
else:
options_sets.append("harmonyv3")
+
if not web_search:
options_sets.append("nosearchall")
+
+ if gpt4_turbo:
+ options_sets.append("dlgpt4t")
request_id = str(uuid.uuid4())
struct = {
@@ -444,7 +451,8 @@ async def stream_generate(
         context: str = None,
         proxy: str = None,
         cookies: dict = None,
-        web_search: bool = False
+        web_search: bool = False,
+        gpt4_turbo: bool = False
     ):
     async with ClientSession(
         timeout=ClientTimeout(total=900),
@@ -456,7 +464,7 @@ async def stream_generate(
             await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
             await wss.receive(timeout=900)
-            await wss.send_str(create_message(conversation, prompt, tone, context, web_search))
+            await wss.send_str(create_message(conversation, prompt, tone, context, web_search, gpt4_turbo))
 
             response_txt = ''
             returned_text = ''
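
Read together, the Bing.py changes thread a single boolean from the requested model name into the chat request payload: create_async_generator derives gpt4_turbo from model, stream_generate forwards it, and create_message turns it into the extra "dlgpt4t" option set. A minimal sketch of that option-set logic, with hypothetical placeholder defaults standing in for Defaults.optionsSets (which this diff does not show):

    # Sketch only: mirrors the flag flow introduced above; the default option
    # sets below are placeholders, not the real Defaults.optionsSets values.
    def build_options_sets(model: str, web_search: bool = False) -> list:
        options_sets = ["nlu_direct_response_filter", "deepleo"]  # hypothetical defaults
        gpt4_turbo = model.startswith("gpt-4-turbo")  # same check as create_async_generator
        if not web_search:
            options_sets.append("nosearchall")
        if gpt4_turbo:
            options_sets.append("dlgpt4t")  # option set that requests the GPT-4 Turbo backend
        return options_sets

    print(build_options_sets("gpt-4-turbo"))
    # ['nlu_direct_response_filter', 'deepleo', 'nosearchall', 'dlgpt4t']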
diff --git a/g4f/models.py b/g4f/models.py
index 26cd0fb0..b1e85a5b 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -88,6 +88,12 @@ gpt_4 = Model(
     ])
 )
 
+gpt_4_turbo = Model(
+    name = 'gpt-4-turbo',
+    base_provider = 'openai',
+    best_provider = Bing
+)
+
 llama2_7b = Model(
     name = "meta-llama/Llama-2-7b-chat-hf",
     base_provider = 'huggingface',
@@ -293,6 +299,7 @@ class ModelUtils:
         'gpt-4-0613' : gpt_4_0613,
         'gpt-4-32k' : gpt_4_32k,
         'gpt-4-32k-0613' : gpt_4_32k_0613,
+        'gpt-4-turbo' : gpt_4_turbo,
 
         # Llama 2
         'llama2-7b' : llama2_7b,
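
With the models.py registration in place, the new model is selectable by name through the library's normal entry point. A minimal usage sketch, assuming the g4f.ChatCompletion.create API of this release (the prompt text is illustrative):

    import g4f

    # 'gpt-4-turbo' resolves through the ModelUtils mapping shown above to the
    # gpt_4_turbo entry, whose best_provider is Bing, so "dlgpt4t" is sent.
    response = g4f.ChatCompletion.create(
        model="gpt-4-turbo",
        messages=[{"role": "user", "content": "Say hello"}],
    )
    print(response)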