author    Tekky <98614666+xtekky@users.noreply.github.com>    2024-10-15 11:52:43 +0200
committer GitHub <noreply@github.com>    2024-10-15 11:52:43 +0200
commit    2dcdce5422cd01cd058490d4daef5f69300cca89 (patch)
tree      c966c5f7151a25ab6719a9231cad90b411467291
parent    Merge pull request #2272 from kqlio67/main (diff)
parent    Merge branch 'xtekky:main' into setollamahost (diff)
-rw-r--r--  docker-compose.yml       4
-rw-r--r--  g4f/Provider/Ollama.py  13
2 files changed, 13 insertions, 4 deletions
diff --git a/docker-compose.yml b/docker-compose.yml
index 1b99ba97..3f8bc4ea 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -12,4 +12,6 @@ services:
     ports:
       - '8080:8080'
       - '1337:1337'
-      - '7900:7900'
\ No newline at end of file
+      - '7900:7900'
+    environment:
+      - OLLAMA_HOST=host.docker.internal
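With OLLAMA_HOST set in the compose file, the container can reach an Ollama server running on the Docker host via the special name host.docker.internal (available out of the box on Docker Desktop; on Linux it may need an extra_hosts entry). A minimal sketch to check the variable is picked up inside the container, assuming Ollama is listening on its default port:

    import os
    import requests

    # Resolve the endpoint the same way the provider change below does.
    # host.docker.internal only resolves inside a container.
    host = os.getenv("OLLAMA_HOST", "127.0.0.1")
    port = os.getenv("OLLAMA_PORT", "11434")
    resp = requests.get(f"http://{host}:{port}/api/tags", timeout=5)
    print([m["name"] for m in resp.json()["models"]])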
diff --git a/g4f/Provider/Ollama.py b/g4f/Provider/Ollama.py
index a44aaacd..f9116541 100644
--- a/g4f/Provider/Ollama.py
+++ b/g4f/Provider/Ollama.py
@@ -1,6 +1,7 @@
 from __future__ import annotations

 import requests
+import os

 from .needs_auth.Openai import Openai
 from ..typing import AsyncResult, Messages
@@ -14,9 +15,11 @@ class Ollama(Openai):

     @classmethod
     def get_models(cls):
         if not cls.models:
-            url = 'http://127.0.0.1:11434/api/tags'
+            host = os.getenv("OLLAMA_HOST", "127.0.0.1")
+            port = os.getenv("OLLAMA_PORT", "11434")
+            url = f"http://{host}:{port}/api/tags"
             models = requests.get(url).json()["models"]
-            cls.models = [model['name'] for model in models]
+            cls.models = [model["name"] for model in models]
             cls.default_model = cls.models[0]
         return cls.models
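With this hunk, get_models builds the tags URL from the OLLAMA_HOST and OLLAMA_PORT environment variables instead of a hard-coded 127.0.0.1. A short sketch of pointing the provider at a remote instance (the address is a placeholder; an Ollama server must actually be reachable there):

    import os

    # Hypothetical LAN address running Ollama; set before the first
    # get_models() call, since the result is cached on cls.models.
    os.environ["OLLAMA_HOST"] = "192.168.1.50"
    os.environ["OLLAMA_PORT"] = "11434"

    from g4f.Provider import Ollama

    print(Ollama.get_models())  # e.g. ['llama3:latest', ...]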
@@ -25,9 +28,13 @@ class Ollama(Openai):
         cls,
         model: str,
         messages: Messages,
-        api_base: str = "http://localhost:11434/v1",
+        api_base: str = None,
         **kwargs
     ) -> AsyncResult:
+        if not api_base:
+            host = os.getenv("OLLAMA_HOST", "localhost")
+            port = os.getenv("OLLAMA_PORT", "11434")
+            api_base: str = f"http://{host}:{port}/v1"
         return super().create_async_generator(
             model, messages, api_base=api_base, **kwargs
         )
\ No newline at end of file
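create_async_generator resolves api_base the same way when no explicit value is passed, so the whole provider follows the two environment variables; an explicit api_base keyword still wins, since the lookup only runs when api_base is falsy. A minimal end-to-end sketch, assuming a model named llama3 is already pulled on the target server:

    import asyncio
    from g4f.Provider import Ollama

    async def main():
        # api_base is omitted, so OLLAMA_HOST/OLLAMA_PORT (or the
        # localhost defaults) decide which server answers.
        async for chunk in Ollama.create_async_generator(
            model="llama3",
            messages=[{"role": "user", "content": "Say hello."}],
        ):
            print(chunk, end="")

    asyncio.run(main())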