nixos/ollama: split cuda and rocm from service test
CUDA and ROCm are platform-specific and only work on Linux. Also, ofborg won't run the service test at all, because CUDA is unfreely licensed.
parent fdd0aa0aee
commit aa8e02582d
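Since ofborg skips the CUDA-dependent test for licensing reasons, exercising it means building it locally. A minimal sketch of doing that from a nixpkgs checkout (the attribute name follows the diff below; importing the checkout as the current directory and treating allowUnfree as sufficient are assumptions):

# Run with nix-build on this file from the root of a nixpkgs checkout.
let
  # Allow unfree packages so the CUDA dependencies can be evaluated.
  pkgs = import ./. { config.allowUnfree = true; };
in
pkgs.nixosTests.ollama-cuda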
nixos/tests/all-tests.nix
@@ -685,7 +685,9 @@ in {
   ocis = handleTest ./ocis.nix {};
   oddjobd = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./oddjobd.nix {};
   oh-my-zsh = handleTest ./oh-my-zsh.nix {};
-  ollama = handleTest ./ollama.nix {};
+  ollama = runTest ./ollama.nix;
+  ollama-cuda = runTestOn ["x86_64-linux" "aarch64-linux"] ./ollama-cuda.nix;
+  ollama-rocm = runTestOn ["x86_64-linux" "aarch64-linux"] ./ollama-rocm.nix;
   ombi = handleTest ./ombi.nix {};
   openarena = handleTest ./openarena.nix {};
   openldap = handleTest ./openldap.nix {};
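The switch from handleTest to runTest and runTestOn is what restricts the new tests to Linux: the *On variants only evaluate a test when the current system is in the given list. The snippet below is only a rough sketch of that gating idea under assumed semantics, not the actual helper defined in all-tests.nix:

# Rough sketch only (assumed semantics): run the test when the evaluating
# system is in the list, otherwise return an empty placeholder so evaluation
# still succeeds on other platforms.
{ lib, stdenv, runTest }:
systems: test:
if lib.elem stdenv.hostPlatform.system systems then runTest test else { }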
nixos/tests/ollama-cuda.nix (new file, 17 lines)
@@ -0,0 +1,17 @@
+{ lib, ... }:
+{
+  name = "ollama-cuda";
+  meta.maintainers = with lib.maintainers; [ abysssol ];
+
+  nodes.cuda =
+    { ... }:
+    {
+      services.ollama.enable = true;
+      services.ollama.acceleration = "cuda";
+    };
+
+  testScript = ''
+    cuda.wait_for_unit("multi-user.target")
+    cuda.wait_for_open_port(11434)
+  '';
+}
nixos/tests/ollama-rocm.nix (new file, 17 lines)
@@ -0,0 +1,17 @@
+{ lib, ... }:
+{
+  name = "ollama-rocm";
+  meta.maintainers = with lib.maintainers; [ abysssol ];
+
+  nodes.rocm =
+    { ... }:
+    {
+      services.ollama.enable = true;
+      services.ollama.acceleration = "rocm";
+    };
+
+  testScript = ''
+    rocm.wait_for_unit("multi-user.target")
+    rocm.wait_for_open_port(11434)
+  '';
+}
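Both new tests only check that the service unit starts and the API port opens, presumably because the test VMs have no GPU to accelerate on. Assuming a nixpkgs checkout, the ROCm variant builds like any other NixOS test, and unlike the CUDA one it should not need unfree packages allowed (an assumption based on the commit message):

# Run with nix-build on this file from the root of a nixpkgs checkout.
let
  pkgs = import ./. { };
in
pkgs.nixosTests.ollama-rocm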
nixos/tests/ollama.nix
@@ -1,56 +1,53 @@
-import ./make-test-python.nix ({ pkgs, lib, ... }:
+{ lib, ... }:
 let
   mainPort = 11434;
   altPort = 11435;
-
-  curlRequest = port: request:
-    "curl http://127.0.0.1:${toString port}/api/generate -d '${builtins.toJSON request}'";
-
-  prompt = {
-    model = "tinydolphin";
-    prompt = "lorem ipsum";
-    options = {
-      seed = 69;
-      temperature = 0;
-    };
-  };
 in
 {
   name = "ollama";
-  meta = with lib.maintainers; {
-    maintainers = [ abysssol ];
-  };
+  meta.maintainers = with lib.maintainers; [ abysssol ];
 
   nodes = {
-    cpu = { ... }: {
-      services.ollama.enable = true;
-    };
+    cpu =
+      { ... }:
+      {
+        services.ollama.enable = true;
+      };
 
-    rocm = { ... }: {
-      services.ollama.enable = true;
-      services.ollama.acceleration = "rocm";
-    };
-
-    cuda = { ... }: {
-      services.ollama.enable = true;
-      services.ollama.acceleration = "cuda";
-    };
-
-    altAddress = { ... }: {
-      services.ollama.enable = true;
-      services.ollama.port = altPort;
-    };
+    altAddress =
+      { ... }:
+      {
+        services.ollama.enable = true;
+        services.ollama.port = altPort;
+      };
   };
 
   testScript = ''
-    vms = [ cpu, rocm, cuda, altAddress ];
+    import json
+
+    def curl_request_ollama(prompt, port):
+        json_prompt = json.dumps(prompt)
+        return f"""curl http://127.0.0.1:{port}/api/generate -d '{json_prompt}'"""
+
+    prompt = {
+        "model": "tinydolphin",
+        "prompt": "lorem ipsum",
+        "options": {
+            "seed": 69,
+            "temperature": 0,
+        },
+    }
+
+
+    vms = [
+      (cpu, ${toString mainPort}),
+      (altAddress, ${toString altPort}),
+    ]
 
     start_all()
-    for vm in vms:
-      vm.wait_for_unit("multi-user.target")
-
-    stdout = cpu.succeed("""${curlRequest mainPort prompt}""", timeout=100)
-
-    stdout = altAddress.succeed("""${curlRequest altPort prompt}""", timeout=100)
+    for (vm, port) in vms:
+        vm.wait_for_unit("multi-user.target")
+        vm.wait_for_open_port(port)
+        stdout = vm.succeed(curl_request_ollama(prompt, port), timeout = 100)
   '';
-})
+}
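The rewrite moves request construction out of Nix string interpolation (builtins.toJSON over an attrset) and into the Python test script (json.dumps); both serialize the same request. For comparison, a small expression that evaluates to the payload the removed curlRequest helper embedded in its curl command:

# Evaluates to the JSON string the old curlRequest helper interpolated into
# its curl command; field values are taken from the prompt attrset above.
builtins.toJSON {
  model = "tinydolphin";
  prompt = "lorem ipsum";
  options = {
    seed = 69;
    temperature = 0;
  };
}

Evaluating it with nix-instantiate --eval gives the same body json.dumps produces in the new script, up to key ordering.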
@@ -212,6 +212,8 @@ goBuild ((lib.optionalAttrs enableRocm {
     };
   } // lib.optionalAttrs stdenv.isLinux {
     inherit ollama-rocm ollama-cuda;
+    service-cuda = nixosTests.ollama-cuda;
+    service-rocm = nixosTests.ollama-rocm;
   };
 
   meta = {
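The passthru hunk also makes the split service tests reachable through the ollama package itself on Linux. A minimal sketch of selecting them that way, again from a nixpkgs checkout and with the assumption that the CUDA variant needs unfree packages allowed:

# Picks the service tests out of ollama's passthru.tests, as wired up above.
let
  pkgs = import ./. { config.allowUnfree = true; };
in
{
  cuda = pkgs.ollama.tests.service-cuda;
  rocm = pkgs.ollama.tests.service-rocm;
}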