from __future__ import annotations

import requests
import os

from ..needs_auth.Openai import Openai
from ...typing import AsyncResult, Messages

class Ollama(Openai):
    label = "Ollama"
    url = "https://ollama.com"
    needs_auth = False
    working = True

    @classmethod
    def get_models(cls):
        # Query the local Ollama server for its installed models on first call,
        # then cache the list on the class. /api/tags returns a JSON payload of
        # the form {"models": [{"name": ...}, ...]}.
        if not cls.models:
            host = os.getenv("OLLAMA_HOST", "127.0.0.1")
            port = os.getenv("OLLAMA_PORT", "11434")
            url = f"http://{host}:{port}/api/tags"
            models = requests.get(url).json()["models"]
            cls.models = [model["name"] for model in models]
            cls.default_model = cls.models[0]
        return cls.models

    @classmethod
    def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        api_base: str = None,
        **kwargs
    ) -> AsyncResult:
        # Unless an explicit api_base is given, default to the local Ollama
        # server's OpenAI-compatible endpoint (same host/port variables as above).
        if not api_base:
            host = os.getenv("OLLAMA_HOST", "127.0.0.1")
            port = os.getenv("OLLAMA_PORT", "11434")
            api_base = f"http://{host}:{port}/v1"
        return super().create_async_generator(
            model, messages, api_base=api_base, **kwargs
        )
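

# Usage sketch (an illustration, not part of the provider): this assumes a local
# Ollama server is reachable on the default host/port and that the provider is
# driven through g4f's ChatCompletion API. Model names depend on what has been
# installed locally (e.g. via `ollama pull`).
#
#     import g4f
#     from g4f.Provider.local.Ollama import Ollama
#
#     models = Ollama.get_models()          # e.g. ["llama3:latest", ...]
#     response = g4f.ChatCompletion.create(
#         model=models[0],
#         provider=Ollama,
#         messages=[{"role": "user", "content": "Hello from Ollama"}],
#     )
#     print(response)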