# To run the test on the unfree ELK use the following command:
#   cd path/to/nixpkgs
#   NIXPKGS_ALLOW_UNFREE=1 nix-build -A nixosTests.elk.unfree.ELK-6
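#
# The free (OSS) variants defined at the bottom of this file should build
# the same way without that variable, e.g.:
#   nix-build -A nixosTests.elk.ELK-6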

{ system ? builtins.currentSystem,
  config ? {},
  pkgs ? import ../.. { inherit system config; },
}:

let
  inherit (pkgs) lib;

  esUrl = "http://localhost:9200";

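  # mkElkTest builds a NixOS VM test for the given set of ELK packages.
  # The `elk` attrset provides elasticsearch, logstash, kibana and metricbeat,
  # plus either journalbeat (the ELK 6 variants below) or filebeat (ELK 7).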
  mkElkTest = name : elk :
    import ./make-test-python.nix ({
      inherit name;
      meta = with pkgs.lib.maintainers; {
        maintainers = [ eelco offline basvandijk ];
      };
      nodes = {
        one =
          { pkgs, lib, ... }: {
            # Not giving the machine at least 2060MB results in elasticsearch
            # failing with the following error:
            #
            #   OpenJDK 64-Bit Server VM warning:
            #     INFO: os::commit_memory(0x0000000085330000, 2060255232, 0)
            #     failed; error='Cannot allocate memory' (errno=12)
            #
            #   There is insufficient memory for the Java Runtime Environment to continue.
            #   Native memory allocation (mmap) failed to map 2060255232 bytes for committing reserved memory.
            #
            # When setting this to 2500 I got "Kernel panic - not syncing: Out of
            # memory: compulsory panic_on_oom is enabled", so let's give it even a
            # bit more room:
            virtualisation.memorySize = 3000;

            # For querying JSON objects returned from elasticsearch and kibana.
            environment.systemPackages = [ pkgs.jq ];

            services = {

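              # journalbeat ships journald entries to elasticsearch. It is only
              # enabled when the given package set contains it (the ELK 6
              # variants below); the ELK 7 variants use filebeat instead.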
              journalbeat = {
                enable = elk ? journalbeat;
                package = elk.journalbeat;
                extraConfig = pkgs.lib.mkOptionDefault (''
                  logging:
                    to_syslog: true
                    level: warning
                    metrics.enabled: false
                  output.elasticsearch:
                    hosts: [ "127.0.0.1:9200" ]
                  journalbeat.inputs:
                    - paths: []
                      seek: cursor
                '');
              };

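              # filebeat takes over the role of journalbeat on ELK 7: the
              # journald input ingests the systemd journal, and the log input
              # tails the plain file that the test script appends to below.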
              filebeat = {
                enable = elk ? filebeat;
                package = elk.filebeat;
                inputs.journald.id = "everything";

                inputs.log = {
                  enabled = true;
                  paths = [
                    "/var/lib/filebeat/test"
                  ];
                };

                settings = {
                  logging.level = "info";
                };
              };

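              # metricbeat periodically collects system metrics and ships them
              # to elasticsearch; the test script later checks that documents
              # with event.dataset "system.cpu" arrive.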
              metricbeat = {
                enable = true;
                package = elk.metricbeat;
                modules.system = {
                  metricsets = ["cpu" "load" "memory" "network" "process" "process_summary" "uptime" "socket_summary"];
                  enabled = true;
                  period = "5s";
                  processes = [".*"];
                  cpu.metrics = ["percentages" "normalized_percentages"];
                  core.metrics = ["percentages"];
                };
                settings = {
                  output.elasticsearch = {
                    hosts = ["127.0.0.1:9200"];
                  };
                };
              };

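              # A small logstash pipeline: two exec inputs emit "flowers" and
              # "dragons" once per second, the filter drops the "dragons"
              # events, and the rest is written both to a local file and to
              # elasticsearch. The test script asserts that "flowers" shows up
              # and "dragons" does not.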
              logstash = {
                enable = true;
                package = elk.logstash;
                inputConfig = ''
                  exec { command => "echo -n flowers" interval => 1 type => "test" }
                  exec { command => "echo -n dragons" interval => 1 type => "test" }
                '';
                filterConfig = ''
                  if [message] =~ /dragons/ {
                    drop {}
                  }
                '';
                outputConfig = ''
                  file {
                    path => "/tmp/logstash.out"
                    codec => line { format => "%{message}" }
                  }
                  elasticsearch {
                    hosts => [ "${esUrl}" ]
                  }
                '';
              };

              elasticsearch = {
                enable = true;
                package = elk.elasticsearch;
              };

              kibana = {
                enable = true;
                package = elk.kibana;
              };

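              # elasticsearch-curator prunes old logstash-* indices; the action
              # below deletes indices older than one second so the curator
              # subtest can observe them disappearing.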
              elasticsearch-curator = {
                enable = true;
                actionYAML = ''
                  ---
                  actions:
                    1:
                      action: delete_indices
                      description: >-
                        Delete indices older than 1 second (based on index name), for logstash-
                        prefixed indices. Ignore the error if the filter does not result in an
                        actionable list of indices (ignore_empty_list) and exit cleanly.
                      options:
                        allow_ilm_indices: true
                        ignore_empty_list: True
                        disable_action: False
                      filters:
                      - filtertype: pattern
                        kind: prefix
                        value: logstash-
                      - filtertype: age
                        source: name
                        direction: older
                        timestring: '%Y.%m.%d'
                        unit: seconds
                        unit_count: 1
                '';
              };
            };
          };
      };

      passthru.elkPackages = elk;
      testScript =
        let
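          # Elasticsearch >= 7 reports hits.total as an object
          # ({ value, relation }) instead of a plain number, so the jq
          # queries below have to compare .hits.total.value on those versions.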
          valueObject = lib.optionalString (lib.versionAtLeast elk.elasticsearch.version "7") ".value";
        in ''
          import json


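          # Each helper below returns a shell command string: the search
          # response is piped through `jq -es`, which exits non-zero unless
          # the final expression evaluates to true, so wait_until_succeeds()
          # keeps retrying until the expected documents are present.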
          def expect_hits(message):
              dictionary = {"query": {"match": {"message": message}}}
              return (
                  "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
                  + "-H 'Content-Type: application/json' "
                  + "-d '{}' ".format(json.dumps(dictionary))
                  + " | tee /dev/console"
                  + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} > 0 end'"
              )


          def expect_no_hits(message):
              dictionary = {"query": {"match": {"message": message}}}
              return (
                  "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
                  + "-H 'Content-Type: application/json' "
                  + "-d '{}' ".format(json.dumps(dictionary))
                  + " | tee /dev/console"
                  + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} == 0 end'"
              )


          def has_metricbeat():
              dictionary = {"query": {"match": {"event.dataset": {"query": "system.cpu"}}}}
              return (
                  "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
                  + "-H 'Content-Type: application/json' "
                  + "-d '{}' ".format(json.dumps(dictionary))
                  + " | tee /dev/console"
                  + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} > 0 end'"
              )


          start_all()

          one.wait_for_unit("elasticsearch.service")
          one.wait_for_open_port(9200)

          # Continue as long as the status is not "red". The status is probably
          # "yellow" instead of "green" because we are using a single
          # elasticsearch node, which elasticsearch considers risky.
          #
          # TODO: extend this test with multiple elasticsearch nodes
          #       and see if the status turns "green".
          one.wait_until_succeeds(
              "curl --silent --show-error --fail-with-body '${esUrl}/_cluster/health'"
              + " | jq -es 'if . == [] then null else .[] | .status != \"red\" end'"
          )

with subtest("Perform some simple logstash tests"):
|
|
one.wait_for_unit("logstash.service")
|
|
one.wait_until_succeeds("cat /tmp/logstash.out | grep flowers")
|
|
one.wait_until_succeeds("cat /tmp/logstash.out | grep -v dragons")
|
|
|
|
with subtest("Kibana is healthy"):
|
|
one.wait_for_unit("kibana.service")
|
|
one.wait_until_succeeds(
|
|
"curl --silent --show-error --fail-with-body 'http://localhost:5601/api/status'"
|
|
+ " | jq -es 'if . == [] then null else .[] | .status.overall.state == \"green\" end'"
|
|
)
|
|
|
|
with subtest("Metricbeat is running"):
|
|
one.wait_for_unit("metricbeat.service")
|
|
|
|
with subtest("Metricbeat metrics arrive in elasticsearch"):
|
|
one.wait_until_succeeds(has_metricbeat())
|
|
|
|
with subtest("Logstash messages arive in elasticsearch"):
|
|
one.wait_until_succeeds(expect_hits("flowers"))
|
|
one.wait_until_succeeds(expect_no_hits("dragons"))
|
|
|
|
        '' + lib.optionalString (elk ? journalbeat) ''
          with subtest(
              "A message logged to the journal is ingested by elasticsearch via journalbeat"
          ):
              one.wait_for_unit("journalbeat.service")
              one.execute("echo 'Supercalifragilisticexpialidocious' | systemd-cat")
              one.wait_until_succeeds(
                  expect_hits("Supercalifragilisticexpialidocious")
              )
        '' + lib.optionalString (elk ? filebeat) ''
          with subtest(
              "A message logged to the journal is ingested by elasticsearch via filebeat"
          ):
              one.wait_for_unit("filebeat.service")
              one.execute("echo 'Superdupercalifragilisticexpialidocious' | systemd-cat")
              one.wait_until_succeeds(
                  expect_hits("Superdupercalifragilisticexpialidocious")
              )
              one.execute(
                  "echo 'SuperdupercalifragilisticexpialidociousIndeed' >> /var/lib/filebeat/test"
              )
              one.wait_until_succeeds(
                  expect_hits("SuperdupercalifragilisticexpialidociousIndeed")
              )
        '' + ''
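          # Stop logstash first so it does not keep recreating logstash-*
          # indices, then check that curator eventually deletes them all.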
with subtest("Elasticsearch-curator works"):
|
|
one.systemctl("stop logstash")
|
|
one.systemctl("start elasticsearch-curator")
|
|
one.wait_until_succeeds(
|
|
'! curl --silent --show-error --fail-with-body "${esUrl}/_cat/indices" | grep logstash | grep ^'
|
|
)
|
|
'';
|
|
}) { inherit pkgs system; };
|
|
in {
  ELK-6 = mkElkTest "elk-6-oss" {
    name = "elk-6-oss";
    elasticsearch = pkgs.elasticsearch6-oss;
    logstash = pkgs.logstash6-oss;
    kibana = pkgs.kibana6-oss;
    journalbeat = pkgs.journalbeat6;
    metricbeat = pkgs.metricbeat6;
  };
  # We currently only package upstream binaries.
  # Feel free to package an SSPL licensed source-based package!
  # ELK-7 = mkElkTest "elk-7-oss" {
  #   name = "elk-7";
  #   elasticsearch = pkgs.elasticsearch7-oss;
  #   logstash = pkgs.logstash7-oss;
  #   kibana = pkgs.kibana7-oss;
  #   filebeat = pkgs.filebeat7;
  #   metricbeat = pkgs.metricbeat7;
  # };
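  # These variants use the unfree ELK packages and therefore require
  # NIXPKGS_ALLOW_UNFREE=1, as shown at the top of this file.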
  unfree = lib.dontRecurseIntoAttrs {
    ELK-6 = mkElkTest "elk-6" {
      elasticsearch = pkgs.elasticsearch6;
      logstash = pkgs.logstash6;
      kibana = pkgs.kibana6;
      journalbeat = pkgs.journalbeat6;
      metricbeat = pkgs.metricbeat6;
    };
    ELK-7 = mkElkTest "elk-7" {
      elasticsearch = pkgs.elasticsearch7;
      logstash = pkgs.logstash7;
      kibana = pkgs.kibana7;
      filebeat = pkgs.filebeat7;
      metricbeat = pkgs.metricbeat7;
    };
  };
}