-rw-r--r-- | README.md     | 11
-rw-r--r-- | g4f/models.py | 30
2 files changed, 36 insertions, 5 deletions
@@ -18,6 +18,17 @@ pip install -U g4f
 ```sh
 docker pull hlohaus789/g4f
 ```
+# To do
+As per the survey, here is a list of improvements to come
+- [ ] Improve Documentation (on g4f.mintlify.app) & Do video tutorials
+- [ ] Improve the provider status list & updates
+- [ ] Tutorials on how to reverse sites to write your own wrapper (PoC only ofc)
+- [ ] Improve the Bing wrapper. (might write a new wrapper in golang as it is very fast)
+- [ ] Write a standard provider performance test to improve the stability
+- [ ] update the repository to include the new openai library syntax (ex: `Openai()` class)
+- [ ] Potential support and development of local models
+- [ ] improve compatibility and error handling
+
 ## 🆕 What's New
 
 - <a href="./README-DE.md"><img src="https://img.shields.io/badge/öffnen in-🇩🇪 deutsch-bleu.svg" alt="Öffnen en DE"></a>
diff --git a/g4f/models.py b/g4f/models.py
index e0d6121d..e58ccef2 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -120,10 +120,10 @@ llama2_70b = Model(
 codellama_34b_instruct = Model(
     name = "codellama/CodeLlama-34b-Instruct-hf",
     base_provider = "huggingface",
-    best_provider = RetryProvider([HuggingChat, PerplexityLabs])
+    best_provider = RetryProvider([HuggingChat, PerplexityLabs, DeepInfra])
 )
 
-# Mistal
+# Mistral
 mixtral_8x7b = Model(
     name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
     base_provider = "huggingface",
@@ -136,14 +136,31 @@ mistral_7b = Model(
     best_provider = RetryProvider([DeepInfra, HuggingChat, PerplexityLabs])
 )
 
-# Dolphin
+# Misc models
 dolphin_mixtral_8x7b = Model(
     name = "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
     base_provider = "huggingface",
     best_provider = DeepInfra
 )
 
-# OpenChat
+lzlv_70b = Model(
+    name = "lizpreciatior/lzlv_70b_fp16_hf",
+    base_provider = "huggingface",
+    best_provider = DeepInfra
+)
+
+airoboros_70b = Model(
+    name = "deepinfra/airoboros-70b",
+    base_provider = "huggingface",
+    best_provider = DeepInfra
+)
+
+airoboros_l2_70b = Model(
+    name = "jondurbin/airoboros-l2-70b-gpt4-1.4.1",
+    base_provider = "huggingface",
+    best_provider = DeepInfra
+)
+
 openchat_35 = Model(
     name = "openchat/openchat_3.5",
     base_provider = "huggingface",
@@ -243,6 +260,9 @@ class ModelUtils:
         'mixtral-8x7b': mixtral_8x7b,
         'mistral-7b': mistral_7b,
         'dolphin-mixtral-8x7b': dolphin_mixtral_8x7b,
+        'lzlv-70b': lzlv_70b,
+        'airoboros-70b': airoboros_70b,
+        'airoboros-l2-70b': airoboros_l2_70b,
         'openchat_3.5': openchat_35,
         'gemini-pro': gemini_pro,
         'bard': bard,
@@ -250,4 +270,4 @@ class ModelUtils:
         'pi': pi
     }
 
-_all_models = list(ModelUtils.convert.keys())
+_all_models = list(ModelUtils.convert.keys())
\ No newline at end of file
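
For context, here is a minimal sketch of how the keys this commit registers in `ModelUtils.convert` are consumed through g4f's documented `ChatCompletion.create` interface. The model key comes from the diff above; the prompt text is illustrative only.

```python
import g4f

# "airoboros-70b" is one of the keys added to ModelUtils.convert in this commit;
# g4f resolves it to the matching Model entry and routes the request to that
# model's best_provider (DeepInfra for the three new models here).
response = g4f.ChatCompletion.create(
    model="airoboros-70b",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response)
```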
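The to-do list in the README hunk mentions adopting the new openai library syntax. For reference, that is the client-object style introduced in openai>=1.0 (the class is spelled `OpenAI`), sketched below with a placeholder prompt; nothing in this snippet is part of the commit itself.

```python
from openai import OpenAI

# openai>=1.0 replaces module-level calls such as openai.ChatCompletion.create
# with a client object; the API key is read from the OPENAI_API_KEY env var.
client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(completion.choices[0].message.content)
```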