summaryrefslogtreecommitdiff
path: root/Omni/Dev/Beryllium
diff options
context:
space:
mode:
Diffstat (limited to 'Omni/Dev/Beryllium')
-rw-r--r--Omni/Dev/Beryllium/Configuration.nix1
-rw-r--r--Omni/Dev/Beryllium/Ollama.nix3
-rw-r--r--Omni/Dev/Beryllium/OpenWebui.nix14
3 files changed, 17 insertions(+), 1 deletion(-)
diff --git a/Omni/Dev/Beryllium/Configuration.nix b/Omni/Dev/Beryllium/Configuration.nix
index a371649..4a792ef 100644
--- a/Omni/Dev/Beryllium/Configuration.nix
+++ b/Omni/Dev/Beryllium/Configuration.nix
@@ -83,6 +83,7 @@ in {
hardware.nvidia.powerManagement.finegrained = false;
hardware.nvidia.open = true;
hardware.nvidia.nvidiaSettings = true;
+ hardware.nvidia-container-toolkit.enable = true;
hardware.keyboard.zsa.enable = true;
diff --git a/Omni/Dev/Beryllium/Ollama.nix b/Omni/Dev/Beryllium/Ollama.nix
index 0018f49..3f2398e 100644
--- a/Omni/Dev/Beryllium/Ollama.nix
+++ b/Omni/Dev/Beryllium/Ollama.nix
@@ -10,6 +10,7 @@ If you want to spend time on it, spend time over there.
*/
let
pkg = pkgs.unstable.ollama;
+ ports = import ../../Cloud/Ports.nix;
in {
systemd.services.ollama = {
description = "ollama";
@@ -17,7 +18,7 @@ in {
wantedBy = ["multi-user.target"];
environment = {
- OLLAMA_HOST = "localhost:11434";
+ OLLAMA_HOST = "0.0.0.0:${toString ports.ollama}";
# Where to store LLM model files.
HOME = "%S/ollama";
OLLAMA_MODELS = "%S/ollama/models";
diff --git a/Omni/Dev/Beryllium/OpenWebui.nix b/Omni/Dev/Beryllium/OpenWebui.nix
new file mode 100644
index 0000000..7b95331
--- /dev/null
+++ b/Omni/Dev/Beryllium/OpenWebui.nix
@@ -0,0 +1,14 @@
+{config, ...}: let
+ ports = import ../../Cloud/Ports.nix;
+in {
+ config.virtualisation.oci-containers.backend = "docker";
+ config.virtualisation.oci-containers.containers.open-webui = {
+ image = "ghcr.io/open-webui/open-webui:main";
+ volumes = ["/var/lib/open-webui:/app/backend/data"];
+ environment = {
+ OLLAMA_BASE_URL = "http://127.0.0.1:${toString ports.ollama}";
+ PORT = toString ports.open-webui;
+ };
+ extraOptions = ["--network=host"];
+ };
+}