path: root/Omni/Dev/Beryllium/Ollama.nix
author    Ben Sima <ben@bsima.me>    2025-02-04 21:17:29 -0500
committer Ben Sima <ben@bsima.me>    2025-02-04 21:17:29 -0500
commit    d97ddbe2d844e5fae8bfa4fd4f1a047df81e36b5 (patch)
tree      13611b5ee72ec08cfe3f4a58ec5d40e53be043f2 /Omni/Dev/Beryllium/Ollama.nix
parent    9fd4b5da05b7ff5c248e3e3f96f13e7c98ec72f6 (diff)
Deploy open-webui
This is a very nice web UI frontend, similar to ChatGPT, that can use both OpenAI and Ollama as backends at the same time. Currently I'm just using it locally, but eventually I think I'll expose it over the internet and use it as my go-to LLM interface.
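For context, a minimal NixOS deployment of open-webui pointed at both backends might look like the sketch below. This is an illustration rather than the module from this repo: the services.open-webui options follow the upstream nixpkgs module, the OLLAMA_BASE_URL and OPENAI_API_BASE_URL environment variables are open-webui's own backend settings, and the host/port values are hypothetical.

{...}: {
  services.open-webui = {
    enable = true;
    host = "127.0.0.1"; # local-only for now, per the note above
    port = 3000; # hypothetical; this repo would more likely pull it from Cloud/Ports.nix
    environment = {
      # Point the UI at both backends at once.
      OLLAMA_BASE_URL = "http://127.0.0.1:11434";
      OPENAI_API_BASE_URL = "https://api.openai.com/v1";
    };
  };
}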
Diffstat (limited to 'Omni/Dev/Beryllium/Ollama.nix')
-rw-r--r--  Omni/Dev/Beryllium/Ollama.nix | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/Omni/Dev/Beryllium/Ollama.nix b/Omni/Dev/Beryllium/Ollama.nix
index 0018f49..3f2398e 100644
--- a/Omni/Dev/Beryllium/Ollama.nix
+++ b/Omni/Dev/Beryllium/Ollama.nix
@@ -10,6 +10,7 @@ If you want to spend time on it, spend time over there.
 */
 let
   pkg = pkgs.unstable.ollama;
+  ports = import ../../Cloud/Ports.nix;
 in {
   systemd.services.ollama = {
     description = "ollama";
@@ -17,7 +18,7 @@ in {
   wantedBy = ["multi-user.target"];
   environment = {
-    OLLAMA_HOST = "localhost:11434";
+    OLLAMA_HOST = "0.0.0.0:${toString ports.ollama}";
     # Where to store LLM model files.
     HOME = "%S/ollama";
     OLLAMA_MODELS = "%S/ollama/models";
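The effect of the change: Ollama now binds to all interfaces rather than loopback, on a port read from the repo's shared port registry, which is what lets open-webui (and other hosts on the network) reach it. Omni/Cloud/Ports.nix itself is not part of this diff; for the import to work as used here it only needs to be an attribute set mapping service names to integer ports, along the lines of this hypothetical sketch:

# Hypothetical sketch of Omni/Cloud/Ports.nix; the real file is not shown in this diff.
{
  ollama = 11434; # illustrative value only (Ollama's conventional default)
  open-webui = 8080; # hypothetical entry for the web UI
}

Note the toString: ports.ollama is an integer, and Nix string interpolation only accepts strings, so the value must be converted before it can be spliced into OLLAMA_HOST.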