author | Heiner Lohaus <hlohaus@users.noreply.github.com> | 2024-04-07 10:36:13 +0200
---|---|---
committer | Heiner Lohaus <hlohaus@users.noreply.github.com> | 2024-04-07 10:36:13 +0200
commit | b35dfcd1b01c575b65e0299ef71d285dc8f41459 (patch) |
tree | cfe5f4a390af62fafefd1d27ca2c82a23cdcab49 /g4f/local/_engine.py |
parent | Update Gemini.py (diff) |
Diffstat (limited to 'g4f/local/_engine.py')
-rw-r--r-- | g4f/local/_engine.py | 42 |
1 file changed, 0 insertions, 42 deletions
diff --git a/g4f/local/_engine.py b/g4f/local/_engine.py
deleted file mode 100644
index 917de16c..00000000
--- a/g4f/local/_engine.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-
-from gpt4all import GPT4All
-from ._models import models
-
-class LocalProvider:
-    @staticmethod
-    def create_completion(model, messages, stream, **kwargs):
-        if model not in models:
-            raise ValueError(f"Model '{model}' not found / not yet implemented")
-
-        model = models[model]
-        model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models/')
-        full_model_path = os.path.join(model_dir, model['path'])
-
-        if not os.path.isfile(full_model_path):
-            print(f"Model file '{full_model_path}' not found.")
-            download = input(f'Do you want to download {model["path"]} ? [y/n]')
-
-            if download in ['y', 'Y']:
-                GPT4All.download_model(model['path'], model_dir)
-            else:
-                raise ValueError(f"Model '{model['path']}' not found.")
-
-        model = GPT4All(model_name=model['path'],
-                        #n_threads=8,
-                        verbose=False,
-                        allow_download=False,
-                        model_path=model_dir)
-
-        system_template = next((message['content'] for message in messages if message['role'] == 'system'),
-                               'A chat between a curious user and an artificial intelligence assistant.')
-
-        prompt_template = 'USER: {0}\nASSISTANT: '
-        conversation = '\n'.join(f"{msg['role'].upper()}: {msg['content']}" for msg in messages) + "\nASSISTANT: "
-
-        with model.chat_session(system_template, prompt_template):
-            if stream:
-                for token in model.generate(conversation, streaming=True):
-                    yield token
-            else:
-                yield model.generate(conversation)
\ No newline at end of file
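For reference, the deleted `LocalProvider.create_completion` was a generator: with `stream=True` it yielded tokens one at a time, otherwise it yielded the full reply in a single chunk. A minimal usage sketch is below; the model key `"mistral-7b"` and the message contents are illustrative assumptions, since the valid keys lived in `g4f/local/_models.py`, which this diff does not show.

```python
from g4f.local._engine import LocalProvider

# Hypothetical model key -- real keys were defined in g4f/local/_models.py.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# create_completion is a generator: stream=True yields tokens as they are
# produced; stream=False yields the complete response once.
for token in LocalProvider.create_completion("mistral-7b", messages, stream=True):
    print(token, end="", flush=True)
```

Note that the provider would prompt interactively (via `input()`) to download a missing model file into `g4f/local/models/` before constructing the `GPT4All` instance.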