diff --git a/README.md b/README.md index 09483c9d7c43ae1b48053fe5956ec10fc5e132a9..ac47e7c3b99b863136f2b7f148d1ff16d6b41326 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[PACKAGE]: ../../raw/master/mkp/nvdct-0.9.7-20241230.mkp "nvdct-0.9.7-20241230.mkp" +[PACKAGE]: ../../raw/master/mkp/nvdct-0.9.8-20250205.mkp "nvdct-0.9.8-20250205.mkp" # Network Visualization Data Creation Tool (NVDCT) This script creates the topology data file needed for the [Checkmk Exchange Network visualization](https://exchange.checkmk.com/p/network-visualization) plugin.\ diff --git a/mkp/nvdct-0.9.8-20250205.mkp b/mkp/nvdct-0.9.8-20250205.mkp new file mode 100644 index 0000000000000000000000000000000000000000..823b375ef1b71252b7cb8541fee51db3b1a2e5e8 Binary files /dev/null and b/mkp/nvdct-0.9.8-20250205.mkp differ diff --git a/source/bin/nvdct/conf/nvdct.toml b/source/bin/nvdct/conf/nvdct.toml old mode 100755 new mode 100644 index d31f64ef5324aaa0a7f4f30d3febc13f7684f8d7..524e395d38864e29bd7ef959f39cd45f7400feff --- a/source/bin/nvdct/conf/nvdct.toml +++ b/source/bin/nvdct/conf/nvdct.toml @@ -16,8 +16,8 @@ # [0-9-a-zA-Z\.\_\-]{1,253} -> host L2_SEED_DEVICES = [ # "CORE01", - # "LOCATION01", - # "LOCATION02", + # "CORE2", + # "router01", ] # drop CDP/LLDP neighbours names @@ -44,11 +44,11 @@ L3_IGNORE_IP = [ # ignore IPs by wildcard # if comparing an ip address: -# each 0 bit in the wildcad has to be exacly as in the pattern -# each 1 bit in the wildacrd will be ignored +# each 0 bit in the wildcard has to be exactly as in the pattern +# each 1 bit in the wildcard will be ignored L3V4_IGNORE_WILDCARD = [ # [ pattern , wildcard ] - # ["172.17.0.1", "0.0.255.0"], # ignore all IPs ending with 1 from 172.17.128.0/16 + # ["172.17.0.1", "0.0.255.0"], # ignore all IPs ending with 1 from 172.17.0.0/16 # ["172.17.128.0", "0.0.127.3"], # ignore all IPs ending with 0-3 from 172.17.128.0/17 # ["172.17.128.3", "0.0.127.0"], # ignore all IPs ending with 3 from 172.17.128.0/17 ] @@ -62,7 +62,7 @@ L3_SUMMARIZE = [ # "fd00::/8" ] -# topologies will not be deleted by "--keep" +# topologies will not be deleted by "--keep_max_topologies" PROTECTED_TOPOLOGIES = [ # "2023-10-17T14:08:05.10", # "your_important_topology" @@ -82,17 +82,17 @@ STATIC_CONNECTIONS = [ # connection: "left_host"<->"right_host" ] -# list customers to include/excluse, use with option --filter-costumers INCLUDE/EXCLUDE +# list customers to include/exclude, use with option --filter-costumers INCLUDE/EXCLUDE # [0-9-a-zA-Z\.\_\-]{1,16} -> customer -CUSTOMERS = [ +FILTER_BY_CUSTOMER = [ # "customer1", # "customer2", # "customer3", ] -# list site to include/excluse, use with option --filter-sites INCLUDE/EXCLUDE +# list site to include/exclude, use with option --filter-sites INCLUDE/EXCLUDE # [0-9-a-zA-Z\.\_\-]{1,16} -> site -SITES = [ +FILTER_BY_SITE = [ # "site1", # "site2", # "site3", @@ -100,7 +100,7 @@ SITES = [ # map inventory CDP/LLDP neighbour name to Checkmk host name # [0-9-a-zA-Z\.\_\-]{1,253} -> host -[L2_HOST_MAP] +[L2_NEIGHBOUR_TO_HOST_MAP] # "inventory_neighbour1" = "cmk_host1" # "inventory_neighbour2" = "cmk_host2" # "inventory_neighbour3" = "cmk_host3" @@ -114,7 +114,7 @@ SITES = [ # replace _network objects_ in L§ topologies (takes place after summarize) # [0-9-a-zA-Z\.\_\-]{1,253} -> host -[L3_REPLACE] +[L3_REPLACE_NETWORKS] # "10.193.172.0/24" = "MPLS" # "10.194.8.0/23" = "MPLS" # "10.194.12.0/24" = "MPLS" @@ -123,7 +123,7 @@ SITES = [ [EMBLEMS] # can use misc icons from CMK or upload your own in the misc category -# for built-in icons use 
"icon_" as prefix to the name from CMK +# for built-in icons use "icon_" as l2_prefix to the name from CMK # max size 80x80px # emblems will only be used for non CMK objects # "host_node" = "icon_alert_unreach" @@ -145,31 +145,46 @@ SITES = [ "1e9" = 3 # 1 gbit "1e10" = 5 # 10 gbit +[FILTER_BY_FOLDER] +# "/folder1/subfolder1" = "INCLUDE" | "EXCLUDE" +# "/folder2/subfolder2" = "INCLUDE" | "EXCLUDE" + +[FILTER_BY_HOST_LABEL] +# "hostlabel1:value" = "INCLUDE" | "EXCLUDE" +# "hostlabel2:value" = "INCLUDE" | "EXCLUDE" + +[FILTER_BY_HOST_TAG] +# "host_tag1:value" = "INCLUDE" | "EXCLUDE" +# "host_tag2:value" = "INCLUDE" | "EXCLUDE" + [SETTINGS] # api_port = 5001 # backend = "MULTISITE" | "RESTAPI" | "LIVESTATUS" -# case = "LOWER" | "UPPER" -# default = false -# display_l2_neighbours = false -# dont_compare = false +# default = false | true +# dont_compare = false | true # filter_customers = "INCLUDE" |"EXCLUDE" # filter_sites = "INCLUDE" | "EXCLUDE" -# include_l3_hosts = false -# include_l3_loopback = false # most likely dropped from inventory (SNMP) before -keep = 10 -# layers = ["LLDP", "CDP", "L3v4", "STATIC", "CUSTOM"] +# keep_max_topologies = 10 +# l2_case = "OFF" | "LOWER" | "UPPER" | "IGNORE" | "AUTO" +# l2_display_neighbours = false | true +# l2_display_ports = false | true +# l2_prefix = "" +# l2_remove_domain = "OFF" | "ON" | "AUTO" +# l2_skip_external = false | true +# l3_display_devices = false | true +# l3_include_hosts = false | true +# l3_include_loopback = false | true # most likely dropped from inventory (SNMP) before +# l3_skip_cidr_0 = false | true +# l3_skip_cidr_32_128 = false | true +# l3_skip_if = false | true +# l3_skip_ip = false | true +# l3_skip_public = false | true +# layers = ["LLDP", "CDP", "L3v4", "STATIC"] # log_file = "~/var/log/nvdct.log" -# log_level = "WARNING" -# log_to_stdout = false -min_age = 1 -output_directory = 'nvdct' # remove to get date formated directory -# pre_fetch = false -# prefix = "" -# quiet = true -# remove_domain = false -# skip_l3_cidr_0 = false -# skip_l3_cidr_32_128 = false -# skip_l3_if = false -# skip_l3_ip = false -# skip_l3_public = false +# log_level = "WARNING" | "DEBUG" | "INFO" | "EROR" | "FATAL" | "CRITICAL" | "OFF" +# log_to_stdout = false | true +# min_topology_age = 1 +output_directory = "nvdct" # remove to get date formated directory +# pre_fetch = false | true +# quiet = false | true # time_format = "%Y-%m-%dT%H:%M:%S.%m" diff --git a/source/bin/nvdct/lib/args.py b/source/bin/nvdct/lib/args.py index 1d1be87c70b9d3d706106a3ca7d1d9dbb93349a8..3f9a81f629ee74ce192dc5669815b72bf3d4efb6 100755 --- a/source/bin/nvdct/lib/args.py +++ b/source/bin/nvdct/lib/args.py @@ -10,32 +10,39 @@ # # options used # -b --backend +# -c --config # -d --default # -l --layers # -o --output-directory -# -p --prefix -# -u --user-data-file # -v --version # --api-port (deprecated ?) 
-# --case -# --check-user-data-only -# --display-l2-neighbours +# --check-config-only # --dont-compare # --filter-customers # --filter-sites -# --fix-toml -# --include-l3-hosts -# --keep +# --keep-max-topologies +# --l2-case +# --l2-display-ports +# --l2-display-neighbours +# --l2-prefix +# --l2-remove-domain +# --l2-skip-external +# --l3-display-devices +# --l3-include-hosts +# --l3-include-loopback +# --l3-skip-cidr-0 +# --l3-skip-cidr-32-128 +# --l3-skip-if +# --l3-skip-ip +# --l3-skip-public # --log-file # --log-level # --log-to-stdout -# --min-age +# --min-topology-age # --pre-fetch # --quiet -# --remove-domain -# --skip-l3-if -# --skip-l3-ip # --time-format +# --update-config from argparse import ( @@ -47,19 +54,21 @@ from pathlib import Path from lib.constants import ( Backends, + CONFIG_FILE, Case, CliLong, + CliShort, ExitCodes, IncludeExclude, Layers, LogLevels, MinVersions, NVDCT_VERSION, + RemoveDomain, SCRIPT, TIME_FORMAT_ARGPARSER, TomlSections, URLs, - USER_DATA_FILE, ) @@ -86,36 +95,29 @@ def parse_arguments() -> arg_Namespace: f' {ExitCodes.AUTOMATION_SECRET_NOT_FOUND} - Automation secret not found\n' f' {ExitCodes.NO_LAYER_CONFIGURED} - No layer to work on\n' '\nUsage:\n' - f'{SCRIPT} -u ~/local/bin/nvdct/conf/my_{USER_DATA_FILE} \n\n' + f'{SCRIPT} -u ~/local/bin/nvdct/conf/my_{CONFIG_FILE} \n\n' ) - parser.add_argument( - '-b', CliLong.BACKEND, + CliShort.BACKEND, CliLong.BACKEND, choices=[Backends.LIVESTATUS, Backends.MULTISITE, Backends.RESTAPI], # default='MULTISITE', help='Backend used to retrieve the topology data\n' f' - {Backends.LIVESTATUS} : fetches data via local Livestatus (local site only)\n' f' - {Backends.MULTISITE} : like LIVESTATUS but for distributed environments (default)\n' - f' - {Backends.RESTAPI} : uses the CMK REST API.', + f' - {Backends.RESTAPI} : uses the Checkmk REST API.', ) parser.add_argument( - '-d', CliLong.DEFAULT, action='store_const', const=True, # default=False, - help='Set the created topology data as default. Will be created automatically\n' - 'if it doesnt exists.', + CliShort.CONFIG, CliLong.CONFIG, type=str, + help='Set the name of the config file.\n' + f'Default is ~/local/bin/nvdct/conf/{CONFIG_FILE}', ) parser.add_argument( - '-o', CliLong.OUTPUT_DIRECTORY, type=str, - help='Directory name where to save the topology data.\n' - 'I.e.: my_topology. Default is the actual date/time\n' - f'in "{CliLong.TIME_FORMAT}" format.\n' - 'NOTE: the directory is a sub directory under "~/var/check_mk/topology/data/"\n', - ) - parser.add_argument( - '-p', CliLong.PREFIX, type=str, - help='Prepends each host with the prefix. (Needs more testing)\n' + CliShort.DEFAULT, CliLong.DEFAULT, action='store_const', const=True, # default=False, + help='Set the created topology data as default. 
Will be created automatically\n' + 'if it doesnt exists.', ) parser.add_argument( - '-l', CliLong.LAYERS, + CliShort.LAYERS, CliLong.LAYERS, nargs='+', choices=[ Layers.CDP, @@ -125,69 +127,34 @@ def parse_arguments() -> arg_Namespace: ], # default=['CDP'], help=( - f' - {Layers.CDP} : needs inv_cdp_cache package at least in version {MinVersions.CDP}\n' - f' - {Layers.LLDP} : needs inv_lldp_cache package at least in version {MinVersions.LLDP}\n' - f' - {Layers.L3V4} : needs inv_ip_address package at least in version {MinVersions.SNMP_IP_ADDRESSES} for SNMP based hosts\n' - f' for Linux based hosts inv_lnx_ip_if in version {MinVersions.LINUX_IP_ADDRESSES}\n' - f' for Windows based hosts inv_win_ip_if in version {MinVersions.WINDOWS_IP_ADDRESSES}\n' - f' - {Layers.STATIC} : creates a topology base on the "[{TomlSections.STATIC_CONNECTIONS}]" section in the TOML file\n' + f' - {Layers.CDP} : needs inv_cdp_cache package at least in version {MinVersions.CDP}\n' + f' - {Layers.LLDP} : needs inv_lldp_cache package at least in version {MinVersions.LLDP}\n' + f' - {Layers.L3V4} : needs inv_ip_address package at least in version {MinVersions.SNMP_IP_ADDRESSES} for SNMP based hosts\n' + f' for Linux based hosts inv_lnx_ip_if in version {MinVersions.LINUX_IP_ADDRESSES}\n' + f' for Windows based hosts inv_win_ip_if in version {MinVersions.WINDOWS_IP_ADDRESSES}\n' + f' - {Layers.STATIC} : creates a topology base on the "[{TomlSections.STATIC_CONNECTIONS}]" section in the TOML file\n' ) ) parser.add_argument( - '-u', CliLong.USER_DATA_FILE, type=str, - help='Set the name of the user provided data file\n' - f'Default is ~/local/bin/nvdct/conf/{USER_DATA_FILE}\n', + CliShort.OUTPUT_DIRECTORY, CliLong.OUTPUT_DIRECTORY, type=str, + help='Directory name where to save the topology data.\n' + 'I.e.: my_topology. Default is the actual date/time\n' + f'in "{CliLong.TIME_FORMAT}" format.\n' + 'NOTE: the directory is a sub directory under "~/var/check_mk/topology/data/"\n', ) parser.add_argument( - '-v', CliLong.VERSION, action='version', + CliShort.VERSION, CliLong.VERSION, action='version', version=f'{Path(SCRIPT).name} version: {NVDCT_VERSION}', help='Print version of this script and exit', ) - parser.add_argument( - CliLong.ADJUST_TOML, action='store_const', const=True, # default=False, - help='Adjusts old options in TOML file.', - ) parser.add_argument( CliLong.API_PORT, type=int, # default=False, help='TCP Port to access the REST API. By NVDCT will try to automatically\n' 'detect the site apache port.', ) parser.add_argument( - CliLong.CASE, - choices=[Case.LOWER, Case.UPPER], - # default='NONE', - help='Change L2 neighbour name to all lower/upper case before matching to CMK host', - ) - parser.add_argument( - CliLong.CHECK_USER_DATA_ONLY, action='store_const', const=True, # default=False, - help=f'Only tries to read/parse the user data from {USER_DATA_FILE} and exits.', - ) - parser.add_argument( - CliLong.LOG_FILE, type=str, - help='Set the log file. Default is "~/var/log/nvdct.log"\n', - ) - parser.add_argument( - CliLong.LOG_LEVEL, - # nargs='+', - choices=[ - LogLevels.CRITICAL, - LogLevels.FATAL, - LogLevels.ERROR, - LogLevels.WARNING, - LogLevels.INFO, - LogLevels.DEBUG, - LogLevels.OFF - ], - # default='WARNING', - help=f'Sets the log level. 
The default is "{LogLevels.WARNING}"\n' - ) - parser.add_argument( - CliLong.LOG_TO_STDOUT, action='store_const', const=True, # default=False, - help='Send log to stdout.', - ) - parser.add_argument( - CliLong.DISPLAY_L2_NEIGHBOURS, action='store_const', const=True, # default=False, - help='Use L2 neighbour name as display name in L2 topologies', + CliLong.CHECK_CONFIG, action='store_const', const=True, # default=False, + help=f'Only tries to read/parse the config file from {CONFIG_FILE} and exits.', ) parser.add_argument( CliLong.DONT_COMPARE, action='store_const', const=True, # default=False, @@ -201,69 +168,133 @@ def parse_arguments() -> arg_Namespace: CliLong.FILTER_CUSTOMERS, choices=[IncludeExclude.INCLUDE, IncludeExclude.EXCLUDE], # default='INCLUDE', - help=f'{IncludeExclude.INCLUDE}/{IncludeExclude.EXCLUDE} customer list "[{TomlSections.CUSTOMERS}]" from TOML file.\n' - f'Note: {Backends.MULTISITE} backend only.', + help=f'{IncludeExclude.INCLUDE}/{IncludeExclude.EXCLUDE} customer list "[{TomlSections.FILTER_BY_CUSTOMER}]" from TOML file.' + f'NOTE: {Backends.MULTISITE} backend only.', ) parser.add_argument( CliLong.FILTER_SITES, choices=[IncludeExclude.EXCLUDE, IncludeExclude.EXCLUDE], # default='INCLUDE', - help=f'{IncludeExclude.INCLUDE}/{IncludeExclude.EXCLUDE} site list "[{TomlSections.SITES}]" from TOML file.\n' + help=f'{IncludeExclude.INCLUDE}/{IncludeExclude.EXCLUDE} site list "[{TomlSections.FILTER_BY_SITE}]" from TOML file.' ) parser.add_argument( - CliLong.INCLUDE_L3_HOSTS, action='store_const', const=True, # default=False, - help='Include hosts (single IP objects) in layer 3 topologies', + CliLong.KEEP_MAX_TOPOLOGIES, type=int, + help='Number of topologies to keep. The oldest topologies above keep\n' + 'will be deleted.\n' + 'NOTE: The default/protected topologies will be kept always.' ) parser.add_argument( - CliLong.INCLUDE_L3_LOOPBACK, action='store_const', const=True, # default=False, - help='Include loopback ip-addresses in layer 3 topologies', + CliLong.L2_CASE, + choices=[Case.INSENSITIVE, Case.LOWER, Case.UPPER, Case.AUTO, Case.OFF], + help='Change L2 neighbour name case before matching to Checkmk host.\n' + f'- {Case.OFF} : Do not change the case of the neighbour name.' + f'- {Case.INSENSITIVE} : search for a matching host by ignoring the case of neighbour name and host name\n' + f'- {Case.LOWER} : change to all lower case\n' + f'- {Case.UPPER} : change to all upper case, without the domain part of the neighbour name\n' + f'- {Case.AUTO} : try all the above variants\n' + f'Default is let the case of the neighbour name untouched ("OFF").\n' + f'Takes place after "{CliLong.L2_REMOVE_DOMAIN}" and before "{CliLong.L2_PREFIX}"', ) parser.add_argument( - CliLong.REMOVE_DOMAIN, action='store_const', const=True, # default=False, - help='Remove the domain name from the L2 neighbor name before matching CMK host.', + CliLong.L2_DISPLAY_PORTS, action='store_const', const=True, # default=False, + help='Use L2 port names as display name for interfaces in L2 topologies', ) parser.add_argument( - CliLong.KEEP, type=int, - help='Number of topologies to keep. 
The oldest topologies above keep\n' - 'will be deleted.\n' - 'NOTE: The default/protected topologies will be kept always.\n' + CliLong.L2_DISPLAY_NEIGHBOURS, action='store_const', const=True, # default=False, + help='Use L2 neighbour name as display name in L2 topologies', ) parser.add_argument( - CliLong.MIN_AGE, type=int, - help=f'The minimum number of days before a topology is deleted by "{CliLong.KEEP}"' + CliLong.L2_PREFIX, type=str, + help=f'Prepends each L2 neighbour name with the prefix before matching to a Checkmk host name.\n' + f'Takes place after "{CliLong.L2_REMOVE_DOMAIN}" and "{CliLong.L2_CASE}"' ) parser.add_argument( - CliLong.PRE_FETCH, action='store_const', const=True, # default=False, - help=f'Try to fetch host data, with less API calls. Can improve {Backends.RESTAPI} backend\n' - 'performance', + CliLong.L2_REMOVE_DOMAIN, + choices=[RemoveDomain.OFF, RemoveDomain.ON, RemoveDomain.AUTO], + help=f'Handle the the domain name part of a neighbour name before matching it to a Checkmk host.\n' + f'- {RemoveDomain.OFF} : dont touch the neighbour name, keep host name and domain part\n' + f'- {RemoveDomain.ON} : will remove the domain part from the neighbour name, keep only the host name part\n' + f'- {RemoveDomain.AUTO} : try all of the above variants\n' + f'Default: "{RemoveDomain.OFF}". Takes place after "{CliLong.L2_REMOVE_DOMAIN}" and before "{CliLong.L2_PREFIX}"', ) parser.add_argument( - CliLong.QUIET, action='store_const', const=True, # default=False, - help='Suppress all output to stdtout', + CliLong.L2_SKIP_EXTERNAL, action='store_const', const=True, # default=False, + help='Skip L2 neighbours external to Checkmk (that have no matching host)', ) parser.add_argument( - CliLong.SKIP_L3_CIDR_0, action='store_const', const=True, # default=False, + CliLong.L3_DISPLAY_DEVICES, action='store_const', const=True, # default=False, + help='Use L3 device names as display name for interfaces in L3 topologies', + ) + parser.add_argument( + CliLong.L3_INCLUDE_HOSTS, action='store_const', const=True, # default=False, + help='Include hosts (single IP objects) in layer 3 topologies', + ) + parser.add_argument( + CliLong.L3_INCLUDE_LOOPBACK, action='store_const', const=True, # default=False, + help='Include loopback ip-addresses in layer 3 topologies', + ) + parser.add_argument( + CliLong.L3_SKIP_CIDR_0, action='store_const', const=True, # default=False, help='Skip ip-address with CIDR "/0" in layer 3 topologies', ) parser.add_argument( - CliLong.SKIP_L3_CIDR_32_128, action='store_const', const=True, # default=False, + CliLong.L3_SKIP_CIDR_32_128, action='store_const', const=True, # default=False, help='Skip ip-address with CIDR "/32" or "/128" in layer 3 topologies', ) parser.add_argument( - CliLong.SKIP_L3_IF, action='store_const', const=True, # default=False, + CliLong.L3_SKIP_IF, action='store_const', const=True, # default=False, help='Dont show interface in layer 3 topologies', ) parser.add_argument( - CliLong.SKIP_L3_IP, action='store_const', const=True, # default=False, + CliLong.L3_SKIP_IP, action='store_const', const=True, # default=False, help='Dont show ip-addresses in layer 3 topologies', ) parser.add_argument( - CliLong.SKIP_L3_PUBLIC, action='store_const', const=True, # default=False, + CliLong.L3_SKIP_PUBLIC, action='store_const', const=True, # default=False, help='Skip public ip-addresses in layer 3 topologies', ) + parser.add_argument( + CliLong.LOG_FILE, type=str, + help='Set the log file. 
Default is "~/var/log/nvdct.log"', + ) + parser.add_argument( + CliLong.LOG_LEVEL, + # nargs='+', + choices=[ + LogLevels.CRITICAL, + LogLevels.FATAL, + LogLevels.ERROR, + LogLevels.WARNING, + LogLevels.INFO, + LogLevels.DEBUG, + LogLevels.OFF + ], + # default='WARNING', + help=f'Sets the log level. The default is "{LogLevels.WARNING}"' + ) + parser.add_argument( + CliLong.LOG_TO_STDOUT, action='store_const', const=True, # default=False, + help='Send log to stdout.', + ) + parser.add_argument( + CliLong.MIN_TOPOLOGY_AGE, type=int, + help=f'The minimum number of days before a topology is deleted by "{CliLong.KEEP_MAX_TOPOLOGIES}"' + ) + parser.add_argument( + CliLong.PRE_FETCH, action='store_const', const=True, # default=False, + help=f'Try to fetch host data, with less API calls. Can improve {Backends.RESTAPI} backend\n' + 'performance', + ) + parser.add_argument( + CliLong.QUIET, action='store_const', const=True, # default=False, + help='Suppress all output to stdtout', + ) parser.add_argument( CliLong.TIME_FORMAT, type=str, help=f'Format string to render the time. (default: "{TIME_FORMAT_ARGPARSER}")', ) - + parser.add_argument( + CliLong.UPDATE_CONFIG, action='store_const', const=True, # default=False, + help='Adjusts old options in TOML file.', + ) return parser.parse_args() diff --git a/source/bin/nvdct/lib/backends.py b/source/bin/nvdct/lib/backends.py index 80adf5a63295c9b824dd841482ea1be0c247db03..8fce63c9be89a7c6c98fdcaf29c773ba3c6351ee 100755 --- a/source/bin/nvdct/lib/backends.py +++ b/source/bin/nvdct/lib/backends.py @@ -12,14 +12,18 @@ # 2024-09-25: fixed crash on missing "customer" section in site config file # 2024-12-22: refactoring, leave only backend specific stuff in the backend # removed not strictly needed properties, renamed functions to better understand what the do +# 2025-01-21: added support for RESTAPI post requests (CMK >= 2.3.0p23, Werk #17003) +# fixed REST API query for interface services +# 2025-01-25: fixed check if post can be used from abc import abstractmethod from ast import literal_eval -from collections.abc import Mapping, MutableSequence, Sequence +from collections.abc import Mapping, MutableMapping, MutableSet, MutableSequence, Sequence +from json import dumps from pathlib import Path from requests import session from sys import exit as sys_exit -from typing import Dict, List, Tuple, MutableMapping +from typing import Dict, List, Tuple, Set from livestatus import MultiSiteConnection, SiteConfigurations, SiteId @@ -29,102 +33,135 @@ from lib.constants import ( CacheItems, Case, ExitCodes, + HostFilter, IncludeExclude, InvPaths, + L2InvColumns, + LiveStatusOperator, + MIN_CMK_VERSION_POST, OMD_ROOT, - TomlSections, + RemoveDomain, ) from lib.utils import ( LOGGER, get_data_form_live_status, - get_table_from_inventory, + get_data_from_inventory, + get_local_cmk_version, ) HOST_EXIST: Dict = {'exists': True} -def hosts_to_query(hosts: List[str]) -> Tuple[str, List[str]]: - # WORKAROUND for: Apache HTTP Error 414: Request URI too long - # https://httpd.apache.org/docs/current/mod/core.html#limitrequestfieldsize - # Default: LimitRequestFieldSize 8190 - # max seems to be 8013 (inventory)/7831 (interfaces), so 7800 is a save distance from it - _max_str_len = 7800 - # 8190 - 8013 - 168 = 9 chrs -> Inventory - # 8190 - 7813 - 356 = 21 chrs -> Interfaces - # host (19/19): http://localhost:80 -> sould not count - # uri (57/60): /build/check_mk/api/1.0/domain-types/host/collections/all / /build/check_mk/api/1.0/domain-types/service/collections/all # noqa: E501 
- # query (77/231): ?query=%7B%22op%22%3A+%22~~%22%2C+%22left%22%3A+%22name%22%2C+%22right%22%3A+ / ?query=%7B%22op%22%3A+%22and%22%2C+%22expr%22%3A+%5B%7B%22op%22%3A+%22~%22%2C+%22left%22%3A+%22description%22%2C+%22right%22%3A+%22Interface+%22%7D%2C%7B%22op%22%3A+%22~~%22%2C+%22left%22%3A+%22host_name%22%2C+%22right%22%3A+%22%5E # noqa: E501 - # collumns (34/65): &columns=name&columns=mk_inventory / &columns=host_name&columns=description&columns=long_plugin_output # noqa: E501 - # %22 -> " - # %24 -> $ - # %2C -> , - # %3A -> : - # %5B -> [ - # %5D -> ] - # %7B -> { - # %7D -> } - - temp_hosts = [] - open_hosts = [] - for host in hosts: - temp_hosts.append(f'^{host}$') - hosts_str = '|'.join(temp_hosts) - # 6 comes from 3 chrs ($|^) between hosts, coded as 9 chr(%24%7C%5E) in the URL - # 3 are already in the str, so we need to add 6 for each host - if len(hosts_str) > _max_str_len - len(hosts) * 6: - open_hosts = hosts.copy() - hosts_str = f'^{hosts[0]}$' - hosts = hosts[1:] - count = 1 - for host in hosts: - count += 1 - if len(hosts_str) + 9 + len(host) + (6 * count) < _max_str_len: - hosts_str = hosts_str + f'|^{host}$' - open_hosts.remove(host) - else: - break - - LOGGER.debug(f'hosts len: {len(hosts_str) + (len(hosts) - len(open_hosts)) * 6}') - LOGGER.debug(f'open hosts {open_hosts}') - - return hosts_str, open_hosts class HostCache: def __init__( self, backend: str, pre_fetch: bool, + ): - LOGGER.info('init HOST_CACHE') + LOGGER.info(f'{backend} init HOST_CACHE') self.cache: Dict = {} self.neighbour_to_host: MutableMapping[str, str] = {} - self._inventory_pre_fetch_list: List[str] = [ - InvPaths.INTERFACES, - ] + self._inventory_pre_fetch_list: List[str] = [InvPaths.INTERFACES] self.backend: str = str(backend) self.case: str = '' - self.l2_host_map: Dict[str, str] = {} + self.l2_neighbour_to_host_map: Dict[str, str] = {} self.l2_neighbour_replace_regex: List[Tuple[str, str]] = [] self.pre_fetch: bool = bool(pre_fetch) self.prefix: str = '' - self.remove_domain: bool = False + self.remove_domain: str = RemoveDomain.OFF + self.filter_include: MutableSequence[str] = [] + self.filter_exclude: MutableSequence[str] = [] + self.no_case_host_map: MutableMapping[str, str] = {} + + self.use_post = True + + if self.backend == f'[{Backends.RESTAPI}]' and get_local_cmk_version() < MIN_CMK_VERSION_POST: + self.use_post = False + LOGGER.info( + f'{self.backend} Using get request, Checkmk version {get_local_cmk_version()} < {MIN_CMK_VERSION_POST}' + ) + else: + LOGGER.info( + f'{self.backend} Using post request, Checkmk version {get_local_cmk_version()} >= {MIN_CMK_VERSION_POST}' + ) if self.pre_fetch: for host in self.query_all_hosts(): self.cache[host] = HOST_EXIST.copy() - def init_neighbour_to_host( + def init_filter_lists( + self, + filter_by_folder: Mapping[str, Sequence[str]], + filter_by_host_label: Mapping[str, Sequence[str]], + filter_by_host_tag: Mapping[str, Sequence[str]], + ): + for folder in filter_by_folder[IncludeExclude.INCLUDE]: + self.filter_include += self.query_hosts_by_filter(HostFilter.FOLDER, folder, LiveStatusOperator.SUPERSET) + + for folder in filter_by_folder[IncludeExclude.EXCLUDE]: + self.filter_exclude += self.query_hosts_by_filter(HostFilter.FOLDER, folder, LiveStatusOperator.SUPERSET) + + for host_label in filter_by_host_label[IncludeExclude.INCLUDE]: + self.filter_include += self.query_hosts_by_filter(HostFilter.LABELS, host_label, LiveStatusOperator.EQUAL) + + for host_label in filter_by_host_label[IncludeExclude.EXCLUDE]: + self.filter_exclude += 
self.query_hosts_by_filter(HostFilter.LABELS, host_label, LiveStatusOperator.EQUAL) + + for host_tag in filter_by_host_tag[IncludeExclude.INCLUDE]: + self.filter_include += self.query_hosts_by_filter(HostFilter.TAGS, host_tag, LiveStatusOperator.EQUAL) + + for host_tag in filter_by_host_tag[IncludeExclude.EXCLUDE]: + self.filter_exclude += self.query_hosts_by_filter(HostFilter.TAGS, host_tag, LiveStatusOperator.EQUAL) + + # drop any host from the include list that is on the exclude list + # -> Issue: can render filter_include empty -> disabling include + if self.filter_include: + self.filter_include = [host for host in self.filter_include if host not in self.filter_exclude] + if not self.filter_include: + self.filter_include.append(self.filter_exclude[0]) + + LOGGER.info(f'{self.backend} {IncludeExclude.INCLUDE} # of hosts: {len(self.filter_include)}') + LOGGER.info(f'{self.backend} {IncludeExclude.EXCLUDE} # of hosts: {len(self.filter_exclude)}') + LOGGER.debug(f'{self.backend} {IncludeExclude.INCLUDE} hosts: {self.filter_include}') + LOGGER.debug(f'{self.backend} {IncludeExclude.EXCLUDE} hosts: {self.filter_exclude}') + + def init_neighbour_to_host_map( self, case: str, l2_host_map: Dict[str, str], prefix: str, - remove_domain: bool, + remove_domain: str, ): self.case: str = case - self.l2_host_map: Dict[str, str] = l2_host_map self.prefix: str = prefix - self.remove_domain: bool = remove_domain + self.remove_domain: str = remove_domain + + for host, data in self.cache.items(): + try: + self.neighbour_to_host[data[CacheItems.inventory][InvPaths.LLDP_GLOBAL][L2InvColumns.GLOBALID]] = host + except (KeyError, TypeError): + pass + try: + self.neighbour_to_host[data[CacheItems.inventory][InvPaths.LLDP_GLOBAL][L2InvColumns.GLOBALNAME]] = host + except (KeyError, TypeError): + pass + try: + self.neighbour_to_host[data[CacheItems.inventory][InvPaths.CDP_GLOBAL][L2InvColumns.GLOBALNAME]] = host + except (KeyError, TypeError): + pass + + for neighbour, host in l2_host_map.items(): + self.neighbour_to_host[neighbour] = host + + def filter_host_list(self, host_list: MutableSequence[str] | MutableSet[str]) -> Sequence[str] | MutableSet[str]: + if self.filter_include: + host_list = [host for host in host_list if host in self.filter_include] + if self.filter_exclude: + host_list = [host for host in host_list if host not in self.filter_exclude] + return host_list def get_inventory_data(self, hosts: List[str]) -> Dict[str, Dict]: """ @@ -136,17 +173,13 @@ class HostCache: the inventory data as dictionary """ - inventory_data: Dict[str, Dict | None] = {} # init inventory_data with None - for host in hosts: - inventory_data[host] = None - + inventory_data: Dict[str, Dict | None] = {host: None for host in hosts} open_hosts = hosts.copy() while open_hosts: - hosts_str, open_hosts = hosts_to_query(open_hosts) + hosts_str, open_hosts = self.hosts_to_query(open_hosts) for host, inventory in self.query_inventory_data(hosts_str).items(): inventory_data[host] = inventory - return inventory_data def get_interface_data(self, hosts: List[str]) -> Dict[str, Dict | None]: @@ -165,7 +198,7 @@ class HostCache: host_data[host] = None open_hosts = hosts.copy() while open_hosts: - hosts_str, open_hosts = hosts_to_query(open_hosts) + hosts_str, open_hosts = self.hosts_to_query(open_hosts) host_data.update(self.query_interface_data(hosts_str)) return host_data @@ -179,6 +212,9 @@ class HostCache: except KeyError: pass + if self.pre_fetch: + return False + # get host from CMK and init host in cache if exists := 
self.query_host(host): self.cache[host] = HOST_EXIST.copy() @@ -187,16 +223,13 @@ class HostCache: return exists - def get_hosts_by_label(self, label: str) -> Sequence[str]: - """ - Returns list of hosts from CMK filtered by label - Args: - label: hostlabel to filter by + def is_host_allowed(self, host: str) -> bool: + if self.filter_include and host not in self.filter_include: + return False + if self.filter_exclude and host in self.filter_exclude: + return False - Returns: - List of hosts - """ - return self.query_hosts_by_label(label) + return True def fill_cache(self, hosts: List[str]) -> None: """ @@ -209,6 +242,7 @@ class HostCache: Returns: None, the data is directly writen to self.cache """ + inventory_of_hosts: Mapping[str, Mapping | None] = self.get_inventory_data(hosts=hosts) if inventory_of_hosts: for host, inventory in inventory_of_hosts.items(): @@ -216,14 +250,13 @@ class HostCache: self.cache[host] = HOST_EXIST.copy() self.cache[host][CacheItems.inventory] = {} self.cache[host][CacheItems.inventory].update({ - entry: get_table_from_inventory( + entry: get_data_from_inventory( inventory=inventory, raw_path=entry ) for entry in self._inventory_pre_fetch_list }) interfaces_of_hosts: Mapping[str, Mapping | None] = self.get_interface_data(hosts) - for host, interfaces in interfaces_of_hosts.items(): if host not in self.cache: self.cache[host] = HOST_EXIST.copy() @@ -244,7 +277,7 @@ class HostCache: """ if self.host_exists(host=host): if self.cache[host] == HOST_EXIST: - LOGGER.info(f'fetch data for: {host}') + LOGGER.info(f'{self.backend} fetch data for: {host}') self.fill_cache(hosts=[host]) try: return self.cache[host][item][path] @@ -256,61 +289,161 @@ class HostCache: def add_inventory_path(self, path: str) -> None: self._inventory_pre_fetch_list = list(set(self._inventory_pre_fetch_list + [path])) - def get_host_from_neighbour(self, neighbour: str) -> str | None: + def get_host_from_neighbour( + self, + raw_neighbour: str, + neighbour: str, + neighbour_id: str | None = None, + ) -> str | None: """ Tries to get the CMK host name from a L2 neighbour name. 
It will test: + - the neighbour id + - the raw neighbour name + - the adjusted neighbour name + - map the neighbour to a host via L2_NEIGHBOUR_TO_HOST_MAP - the neighbour without domain name - - map the neighbour to a host via L2_HOST_MAP - the neighbour in UPPER case (without domain) - the neighbour in lower case (including domain) - - the neighbour with prefix + - the neighbour with prefix (UPPER/lower case, with/without domain) Args: + raw_neighbour: the L2 neighbour name "as is" in the HW/SW inventory neighbour: the L2 neighbour name to find a CMK host for + neighbour_id: the L2 neighbour id to find a CMK host for (LLDP only) Returns: The CMK host name for the L2 neighbour or None if no host is found - """ + + try: + match = self.neighbour_to_host[neighbour_id] + LOGGER.debug(f'{self.backend} Match by neighbour id: |{neighbour_id}| -> |{match}|') + return match + except KeyError: + pass + + try: + match = self.neighbour_to_host[raw_neighbour] + LOGGER.debug(f'{self.backend} Match by raw neighbour: |{raw_neighbour}| -> |{match}|') + return match + except KeyError: + pass + try: - return self.neighbour_to_host[neighbour] + match = self.neighbour_to_host[neighbour] + LOGGER.debug(f'{self.backend} Match by neighbour: |{neighbour}| -> |{match}|') + return match except KeyError: pass - host = neighbour + LOGGER.debug(f'{self.backend} neighbour not in list: |{neighbour}|') + + host: str = neighbour - # rewrite neighbour if inventory neighbour and checkmk host don't match - if host in self.l2_host_map: - LOGGER.info(f'Replace neighbour by [{TomlSections.L2_HOST_MAP}]: {neighbour} -> {host}') - host = self.l2_host_map[host] + possible_hosts: Set[str] | List[str] = set() - if self.remove_domain: - LOGGER.debug(f'Remove domain: {host} -> {host.split(".")[0]}') - host = host.split('.')[0] + host_no_domain: str = host.split('.')[0] + match self.remove_domain: + case RemoveDomain.ON: + LOGGER.debug(f'{self.backend} Remove domain: {host} -> {host_no_domain}') + host = host_no_domain + possible_hosts.add(host_no_domain) + case RemoveDomain.AUTO: + possible_hosts.add(host) + possible_hosts.add(host_no_domain) + case RemoveDomain.OFF | _: + possible_hosts.add(host) match self.case: case Case.UPPER: - LOGGER.debug(f'Change neighbour to upper case: {host} -> {host.upper()}') - host = host.upper() - + if not '.' 
in host: # use UPPER only for names without domain + LOGGER.debug(f'{self.backend} Change neighbour to upper case: {host} -> {host.upper()}') + possible_hosts.add(host.upper) case Case.LOWER: - LOGGER.debug(f'Change neighbour to lower case: {host} -> {host.lower()}') - host = host.lower() - case _: - pass + LOGGER.debug(f'{self.backend} Change neighbour to lower case: {host} -> {host.lower()}') + possible_hosts.add(host.lower()) + case Case.AUTO: + possible_hosts.add(host) + possible_hosts.add(host.lower()) + possible_hosts.add(host_no_domain.lower()) + possible_hosts.add(host_no_domain.upper()) + case Case.OFF | _: + possible_hosts.add(host) + + possible_hosts = [f'{self.prefix}{host}' for host in possible_hosts] + possible_hosts = set(self.filter_host_list(possible_hosts)) + # try longest match first + possible_hosts = list(possible_hosts) + possible_hosts.sort(key=len, reverse=True) + + for entry in possible_hosts: + if self.host_exists(entry): + self.neighbour_to_host[neighbour] = entry + LOGGER.debug(f'{self.backend} Matched neighbour to host: |{neighbour}| -> |{entry}|') + return entry + + if self.case in [Case.AUTO, Case.INSENSITIVE]: + if not self.no_case_host_map: + self.no_case_host_map = {host.lower():host for host in self.cache} + for entry in possible_hosts: + if entry.lower() in self.no_case_host_map: + self.neighbour_to_host[neighbour] = self.no_case_host_map[entry.lower()] + LOGGER.debug( + f'{self.backend} Matched neighbour to host: ' + f'|{neighbour}| -> |{self.no_case_host_map[entry.lower()]}|' + ) + return entry + + self.neighbour_to_host[neighbour] = None + LOGGER.debug(f'{self.backend} No match found for neighbour: |{neighbour}|') + return None - if self.prefix: - LOGGER.debug(f'Prepend neighbour with prefix: {host} -> {self.prefix}{host}') - host = f'{self.prefix}{host}' + def hosts_to_query(self, hosts: List[str]) -> Tuple[str, List[str]]: + if self.use_post: + return '|'.join(hosts), [] + + # WORKAROUND for: Apache HTTP Error 414: Request URI too long + # https://httpd.apache.org/docs/current/mod/core.html#limitrequestfieldsize + # Default: LimitRequestFieldSize 8190 + # max seems to be 8013 (inventory)/7831 (interfaces), so 7800 is a save distance from it + _max_str_len = 7800 + + # 8190 - 8013 - 168 = 9 chrs -> Inventory + # 8190 - 7813 - 356 = 21 chrs -> Interfaces + # host (19/19): http://localhost:80 -> sould not count + # uri (57/60): /build/check_mk/api/1.0/domain-types/host/collections/all / /build/check_mk/api/1.0/domain-types/service/collections/all # noqa: E501 + # query (77/231): ?query=%7B%22op%22%3A+%22~~%22%2C+%22left%22%3A+%22name%22%2C+%22right%22%3A+ / ?query=%7B%22op%22%3A+%22and%22%2C+%22expr%22%3A+%5B%7B%22op%22%3A+%22~%22%2C+%22left%22%3A+%22description%22%2C+%22right%22%3A+%22Interface+%22%7D%2C%7B%22op%22%3A+%22~~%22%2C+%22left%22%3A+%22host_name%22%2C+%22right%22%3A+%22%5E # noqa: E501 + # collumns (34/65): &columns=name&columns=mk_inventory / &columns=host_name&columns=description&columns=long_plugin_output # noqa: E501 + # %22 -> " + # %24 -> $ + # %2C -> , + # %3A -> : + # %5B -> [ + # %5D -> ] + # %7B -> { + # %7D -> } + + hosts = [host for host in hosts if self.is_host_allowed(host)] + open_hosts = [] + temp_hosts: Sequence[str] = [f'^{host}$' for host in hosts] + hosts_str = '|'.join(temp_hosts) + # 6 comes from 3 chrs ($|^) between hosts, coded as 9 chr(%24%7C%5E) in the URL + # 3 are already in the str, so we need to add 6 for each host + if len(hosts_str) > _max_str_len - len(hosts) * 6: + open_hosts = hosts.copy() + hosts_str = 
f'^{hosts.pop()}$' + count = 1 + for host in hosts: + count += 1 + if len(hosts_str) + 9 + len(host) + (6 * count) < _max_str_len: + hosts_str = hosts_str + f'|^{host}$' + open_hosts.remove(host) + else: + break + LOGGER.debug(f'{self.backend} hosts len: {len(hosts_str) + (len(hosts) - len(open_hosts)) * 6}') + LOGGER.debug(f'{self.backend} open hosts {open_hosts}') - if self.host_exists(host): - self.neighbour_to_host[neighbour] = host - LOGGER.debug(f'Matched neighbour to host: |{neighbour}| -> |{host}|') - return host - else: - self.neighbour_to_host[neighbour] = None - LOGGER.debug(f'No match found for neighbour: |{neighbour}|') - return None + return hosts_str, open_hosts @abstractmethod def query_host(self, host: str) -> bool: @@ -335,12 +468,13 @@ class HostCache: raise NotImplementedError @abstractmethod - def query_hosts_by_label(self, label: str) -> Sequence[str]: + def query_hosts_by_filter(self, filter_name: str, filter_value: str, filter_operator: str) -> MutableSequence[str]: """ Queries Livestatus for a list of hosts filtered by a host label Args: - label: Host label to filter list of host by - + filter_name: The column name to filter on + filter_value: the value for the attribute to filter on + filter_operator: Live status filter operator Returns: List of hosts """ raise NotImplementedError @@ -353,6 +487,7 @@ class HostCache: def query_interface_data(self, hosts: str) -> Dict[str, Dict]: raise NotImplementedError + class HostCacheLiveStatus(HostCache): def __init__( self, @@ -361,8 +496,8 @@ class HostCacheLiveStatus(HostCache): ): self.backend = backend super().__init__( - pre_fetch = pre_fetch, - backend = self.backend, + pre_fetch=pre_fetch, + backend=self.backend, ) def get_raw_data(self, query: str) -> any: @@ -378,9 +513,9 @@ class HostCacheLiveStatus(HostCache): data: Sequence[Sequence[str]] = self.get_raw_data(query=query) LOGGER.debug(f'{self.backend} data for host {host}: {data}') if [host] in data: - LOGGER.debug(f'{self.backend} Host {host} found in CMK') + LOGGER.debug(f'{self.backend} Host found in Checkmk: {host} ') return True - LOGGER.warning(f'{self.backend} Host {host} not found in CMK') + LOGGER.warning(f'{self.backend} Host not found in Checkmk: {host}') return False def query_all_hosts(self) -> Sequence[str]: @@ -397,20 +532,20 @@ class HostCacheLiveStatus(HostCache): LOGGER.warning(f'{self.backend} no hosts found') return [] - def query_hosts_by_label(self, label: str) -> Sequence[str]: + def query_hosts_by_filter(self, filter_name: str, filter_value: str, filter_operator: str) -> MutableSequence[str]: query = ( 'GET hosts\n' 'Columns: name\n' 'OutputFormat: python3\n' - f'Filter: labels = {label}\n' + f'Filter: {filter_name} {filter_operator} {filter_value}\n' ) data: Sequence[Sequence[str]] = self.get_raw_data(query=query) - LOGGER.debug(f'{self.backend} hosts matching label: {data}') + LOGGER.debug(f'{self.backend} hosts matching filter {filter_value}: {data}') if data: - LOGGER.info(f'{self.backend} # of hosts found: {len(data)}') + LOGGER.info(f'{self.backend} # of hosts matching filter {filter_value}: {len(data)}') return [host[0] for host in data] - LOGGER.warning(f'{self.backend} no hosts found matching label {label}') + LOGGER.warning(f'{self.backend} no hosts found matching filter: {filter_value}') return [] def query_inventory_data(self, hosts: str) -> Dict[str, Dict]: @@ -426,7 +561,7 @@ class HostCacheLiveStatus(HostCache): if data: for host, inventory in data: if not inventory: - LOGGER.warning(f'{self.backend} Device: {host}: no 
inventory data found!') + LOGGER.warning(f'{self.backend} No inventory data found for host: {host}') continue inventory = literal_eval(inventory.decode('utf-8')) inventory_data[host] = inventory @@ -445,7 +580,7 @@ class HostCacheLiveStatus(HostCache): ) interface_data = {} data: List[Tuple[str, str, str]] = self.get_raw_data(query=query) - LOGGER.debug(f'{self.backend} interface data for hosts {hosts}: {data}') + LOGGER.debug(f'{self.backend} interface data for hosts: {hosts}: {data}') if data: for host, description, long_plugin_output in data: if interface_data.get(host) is None: @@ -454,18 +589,19 @@ class HostCacheLiveStatus(HostCache): 'long_plugin_output': long_plugin_output.split('\\n') } else: - LOGGER.warning(f'{self.backend} No Interfaces items found for hosts {hosts}') + LOGGER.warning(f'{self.backend} No Interfaces items found for hosts: {hosts}') return interface_data + class HostCacheMultiSite(HostCacheLiveStatus): def __init__( - self, - pre_fetch: bool, - filter_sites: str | None = None, - sites: List[str] | None = None, - filter_customers: str | None = None, - customers: List[str] = None, + self, + pre_fetch: bool, + filter_sites: str | None = None, + sites: List[str] | None = None, + filter_customers: str | None = None, + customers: List[str] = None, ): if not sites: sites = [] @@ -481,7 +617,7 @@ class HostCacheMultiSite(HostCacheLiveStatus): self.dead_sites = [site['site']['alias'] for site in self.c.dead_sites().values()] if self.dead_sites: dead_sites = ', '.join(self.dead_sites) - LOGGER.warning(f'{self.backend} use of dead site(s) {dead_sites} is disabled') + LOGGER.warning(f'{self.backend} use of dead site(s) is disabled: {dead_sites}') self.c.set_only_sites(self.c.alive_sites()) super().__init__( pre_fetch=pre_fetch, @@ -535,7 +671,7 @@ class HostCacheMultiSite(HostCacheLiveStatus): }}) LOGGER.critical( - f'{self.backend} file {str(sites_mk.absolute())} not found. Fallback to ' + f'{self.backend} file {str(sites_mk.absolute())} not found. Falling back to ' 'local site only. Try -b RESTAPI if you have a distributed environment.' 
) @@ -561,6 +697,7 @@ class HostCacheMultiSite(HostCacheLiveStatus): case _: return + class HostCacheRestApi(HostCache): def __init__( self, @@ -580,7 +717,7 @@ class HostCacheRestApi(HostCache): ).read_text().strip('\n') except FileNotFoundError as e: LOGGER.exception(f'{self.backend} automation.secret not found, {e}') - print(f'{self.backend} automation.secret not found, {e}') + print(f'{self.backend} automation secret not found, {e}') sys_exit(ExitCodes.AUTOMATION_SECRET_NOT_FOUND) self.__api_port = api_port @@ -593,21 +730,30 @@ class HostCacheRestApi(HostCache): self.__session = session() self.__session.headers['Authorization'] = f"Bearer {self.__user} {self.__secret}" self.__session.headers['Accept'] = 'application/json' - - self.sites: MutableSequence[str] = self.query_sites() + self.__session.headers['Content-Type'] = 'application/json' + self.sites: MutableSequence[str] = self.query_sites() self.filter_sites(filter_=filter_sites, sites=sites) - LOGGER.info(f'{self.backend} filtered sites : {self.sites}') + LOGGER.info(f'{self.backend} filtered sites: {self.sites}') super().__init__( pre_fetch=pre_fetch, backend=self.backend, ) - def get_raw_data(self, url: str, params: Mapping[str, object] | None): - resp = self.__session.get( - url=url, - params=params, - ) + def get_raw_data(self, url: str, params: Mapping[str, object] | None, allow_post: bool = False): + if allow_post and self.use_post: + resp = self.__session.post( + url=url, + data=dumps(params), + # timeout=3, + ) + else: + resp = self.__session.get( + url=url, + params=params, + # timeout=3, + ) LOGGER.debug(f'{self.backend} raw data: {resp.text}') + if resp.status_code == 200: return resp.json() else: @@ -617,10 +763,10 @@ class HostCacheRestApi(HostCache): LOGGER.debug(f'{self.backend} get_sites') url = f"{self.__api_url}/domain-types/site_connection/collections/all" sites = [] - if raw_data:= self.get_raw_data(url, None): + if raw_data := self.get_raw_data(url, None): raw_sites = raw_data.get("value") sites = [site.get('id') for site in raw_sites] - LOGGER.debug(f'{self.backend} sites : {sites}') + LOGGER.debug(f'{self.backend} sites: {sites}') else: LOGGER.warning(f'{self.backend} got no site information!') @@ -644,14 +790,15 @@ class HostCacheRestApi(HostCache): 'sites': self.sites, } - if raw_data := self.get_raw_data(url, params): + if raw_data := self.get_raw_data(url, params, self.use_post): try: data = raw_data['value'][0]['extensions']['name'] LOGGER.debug(f'{self.backend} data for host {host}: {data}') except IndexError: - LOGGER.warning(f'Host {host} not found in CMK') + LOGGER.warning(f'{self.backend} Host not found in Checkmk: {host}') else: if data == host: + LOGGER.debug(f'{self.backend} Host found in Checkmk: {host}') return True return False @@ -663,29 +810,31 @@ class HostCacheRestApi(HostCache): 'sites': self.sites, } - if raw_data := self.get_raw_data(url, params): + if raw_data := self.get_raw_data(url, params, self.use_post): if data := raw_data.get('value', []): LOGGER.info(f'{self.backend} # of hosts found: {len(data)}') return [host.get('extensions', {}).get('name') for host in data] + LOGGER.warning(f'{self.backend} no hosts found') return [] - def query_hosts_by_label(self, label: str) -> Sequence[str]: - query = '{"op": "=", "left": "labels", "right": "' + label + '"}' - + def query_hosts_by_filter(self, filter_name: str, filter_value: str, filter_operator: str) -> MutableSequence[str]: + query = '{"op": "' + filter_operator + '", "left": "' + filter_name + '", "right": "' + filter_value + '"}' 
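        # Example (illustrative only): with filter_name="labels", filter_operator="=",
        # and filter_value="hostlabel1:value" (as listed in [FILTER_BY_HOST_LABEL]),
        # the "query" parameter sent to the REST API becomes
        #   {"op": "=", "left": "labels", "right": "hostlabel1:value"}
        # i.e. the same column/operator/value triple the Livestatus backend writes
        # into its "Filter:" header.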
url = f'{self.__api_url}/domain-types/host/collections/all' params = { - 'columns': ['name', 'labels'], + 'columns': ['name'], 'query': query, 'sites': self.sites, } - if raw_data := self.get_raw_data(url, params): + raw_data = self.get_raw_data(url, params, self.use_post) + LOGGER.debug(f'{self.backend} hosts matching filter {filter_value}: {raw_data}') + if raw_data: if data := raw_data.get('value'): - LOGGER.info(f'{self.backend} # of hosts found: {len(data)}') + LOGGER.info(f'{self.backend} # of hosts matching filter {filter_value}: {len(data)}') return [host['extensions']['name'] for host in data] - LOGGER.warning(f'{self.backend} no hosts found matching label {label}') + LOGGER.warning(f'{self.backend} no hosts found matching filter: {filter_value}') return [] def query_inventory_data(self, hosts: str) -> Dict[str, Dict]: @@ -699,23 +848,23 @@ class HostCacheRestApi(HostCache): inventory_data = {} - if raw_data := self.get_raw_data(url, params): - LOGGER.debug(f'{self.backend} raw inventory data: {raw_data}') + raw_data = self.get_raw_data(url, params, self.use_post) + LOGGER.debug(f'{self.backend} raw inventory data: {raw_data}') + if raw_data: if data := raw_data.get('value', []): for raw_host in data: if host := raw_host.get('extensions', {}).get('name'): inventory = raw_host['extensions'].get('mk_inventory') if not inventory: - LOGGER.warning(f'{self.backend} Device: {host}: no inventory data found!') + LOGGER.warning(f'{self.backend} No inventory data found for host: {host}!') inventory_data[host] = inventory return inventory_data def query_interface_data(self, hosts: str) -> Dict[str, Dict]: - query_host = f'{{"op": "~~", "left": "host_name", "right": "{hosts}"}}' - query_item = '{"op": "~", "left": "description", "right": "Interface "}' - query = f'{{"op": "and", "expr": [{query_item},{query_host}]}}' - + query_host = '{"op": "~~", "left": "host_name", "right": "' + hosts + '"}' + query_item = '{"op": "~", "left": "description", "right": "^Interface "}' + query = '{"op": "and", "expr": [' + query_host + ',' + query_item +']}' url = f'{self.__api_url}/domain-types/service/collections/all' params = { 'query': query, @@ -725,15 +874,16 @@ class HostCacheRestApi(HostCache): interface_data = {} - if raw_data := self.get_raw_data(url, params): - LOGGER.debug(f'{self.backend} raw interface data: {raw_data}') - + raw_data = self.get_raw_data(url, params) # , self.use_post -> Endpoint not changed to Post :-( + LOGGER.debug(f'{self.backend} interface data for host(s) {hosts}: {raw_data}') + if raw_data: if data := raw_data.get('value', []): for raw_service in data: - LOGGER.debug(f'{self.backend} data for service : {raw_service}') + LOGGER.debug(f'{self.backend} data for service: {raw_service}') service = raw_service.get('extensions') host, description, long_plugin_output = service.values() if interface_data.get(host) is None: + # LOGGER.warning(f'{self.backend} No Interfaces items found for hosts {host}') interface_data[host] = {} interface_data[host][description[10:]] = { 'long_plugin_output': long_plugin_output.split('\\n') diff --git a/source/bin/nvdct/lib/constants.py b/source/bin/nvdct/lib/constants.py index 4dd6c55b583d28827e6cff236ce345a562cff4ef..579eb0d75050c4dd8b280c62138deecfb5e4e401 100755 --- a/source/bin/nvdct/lib/constants.py +++ b/source/bin/nvdct/lib/constants.py @@ -13,45 +13,47 @@ from os import environ from typing import Final # -NVDCT_VERSION: Final[str] = '0.9.7-20241230' +NVDCT_VERSION: Final[str] = '0.9.8-20250107' # OMD_ROOT: Final[str] = environ["OMD_ROOT"] # 
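The REST API backend above now submits its queries as POST requests when the local Checkmk version is at least `MIN_CMK_VERSION_POST` (declared just below as `2.3.0p23`, per Werk #17003) and falls back to GET with URL parameters otherwise. A minimal sketch of that switch using `requests` directly; the URL, the `use_post` flag and the omitted authentication header are placeholders, not nvdct's actual code:

```python
from json import dumps
from requests import session

def fetch(api_url: str, params: dict, use_post: bool):
    """Send a Checkmk REST API query as POST (JSON body) or GET (URL parameters)."""
    s = session()
    s.headers['Accept'] = 'application/json'
    s.headers['Content-Type'] = 'application/json'  # needed for the POST variant
    if use_post:
        # POST keeps long host expressions out of the URL and thereby avoids
        # Apache's request-line limit (HTTP 414) handled by hosts_to_query() above
        resp = s.post(url=api_url, data=dumps(params))
    else:
        resp = s.get(url=api_url, params=params)
    return resp.json() if resp.status_code == 200 else None
```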
-API_PORT: Final[int] = 5001 +API_PORT_DEFAULT: Final[int] = 5001 CACHE_INTERFACES_DATA: Final[str] = 'interface_data' CMK_SITE_CONF: Final[str] = f'{OMD_ROOT}/etc/omd/site.conf' +CONFIG_FILE: Final[str] = 'nvdct.toml' +DATAPATH: Final[str] = f'{OMD_ROOT}/var/check_mk/topology/data' LOGGER: Logger = getLogger('root)') -LOG_FILE: Final[str] = f'{OMD_ROOT}/var/log/nvdct.log' +LOG_FILE_DEFAULT: Final[str] = f'{OMD_ROOT}/var/log/nvdct.log' +MIN_CMK_VERSION_POST: Final[str] = '2.3.0p23' SCRIPT: Final[str] = '~/local/bin/nvdct/nvdct.py' -TIME_FORMAT: Final[str] = '%Y-%m-%dT%H:%M:%S.%m' TIME_FORMAT_ARGPARSER: Final[str] = '%%Y-%%m-%%dT%%H:%%M:%%S.%%m' -USER_DATA_FILE: Final[str] = 'nvdct.toml' -DATAPATH: Final[str] = f'{OMD_ROOT}/var/check_mk/topology/data' +TIME_FORMAT_DEFAULT: Final[str] = '%Y-%m-%dT%H:%M:%S.%m' -class MyEnum(Enum): +class EnumValue(Enum): def __get__(self, instance, owner): return self.value @unique -class ExitCodes(MyEnum): +class ExitCodes(EnumValue): OK = 0 BAD_OPTION_LIST = auto() BAD_TOML_FORMAT = auto() BACKEND_NOT_IMPLEMENTED = auto() AUTOMATION_SECRET_NOT_FOUND = auto() NO_LAYER_CONFIGURED = auto() + FILE_NOT_FOUND = auto() @unique -class IPVersion(MyEnum): +class IPVersion(EnumValue): IPv4 = 4 IPv6 = 6 @unique -class URLs(MyEnum): +class URLs(EnumValue): NVDCT: Final[str] = 'https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/nvdct' # CDP: Final[str] = 'https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_cdp_cache' # LLDP: Final[str] = 'LLDP: https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_lldp_cach' @@ -63,60 +65,84 @@ class URLs(MyEnum): @unique -class Backends(MyEnum): +class Backends(EnumValue): LIVESTATUS: Final[str] = 'LIVESTATUS' MULTISITE: Final[str] = 'MULTISITE' RESTAPI: Final[str] = 'RESTAPI' @unique -class Case(MyEnum): +class Case(EnumValue): + AUTO: Final[str] = 'AUTO' + INSENSITIVE: Final[str] = 'INSENSITIVE' LOWER: Final[str] = 'LOWER' UPPER: Final[str] = 'UPPER' + OFF: Final[str] = 'OFF' + + +@unique +class RemoveDomain(EnumValue): + ON: Final[str] = 'ON' + OFF: Final[str] = 'OFF' + AUTO: Final[str] = 'AUTO' + @unique -class CacheItems(MyEnum): +class CacheItems(EnumValue): inventory = 'inventory' interfaces = 'interfaces' + @unique -class CliLong(MyEnum): - ADJUST_TOML: Final[str] = '--adjust-toml' +class CliLong(EnumValue): API_PORT: Final[str] = '--api-port' BACKEND: Final[str] = '--backend' - CASE: Final[str] = '--case' - CHECK_USER_DATA_ONLY: Final[str] = '--check-user-data-only' + CHECK_CONFIG: Final[str] = '--check-config' + CONFIG: Final[str] = '--config' DEFAULT: Final[str] = '--default' - DISPLAY_L2_NEIGHBOURS: Final[str] = '--display-l2-neighbours' DONT_COMPARE: Final[str] = '--dont-compare' FILTER_CUSTOMERS: Final[str] = '--filter-customers' FILTER_SITES: Final[str] = '--filter-sites' - INCLUDE_L3_HOSTS: Final[str] = '--include-l3-hosts' - INCLUDE_L3_LOOPBACK: Final[str] = '--include-l3-loopback' - KEEP: Final[str] = '--keep' + KEEP_MAX_TOPOLOGIES: Final[str] = '--keep-max-topologies' + L2_CASE: Final[str] = '--l2-case' + L2_DISPLAY_NEIGHBOURS: Final[str] = '--l2-display-neighbours' + L2_DISPLAY_PORTS: Final[str] = '--l2-display-ports' + L2_PREFIX: Final[str] = '--l2-prefix' + L2_REMOVE_DOMAIN: Final[str] = '--l2-remove-domain' + L2_SKIP_EXTERNAL: Final[str] = '--l2-skip-external' + L3_DISPLAY_DEVICES: Final[str] = '--l3-display-devices' + L3_INCLUDE_HOSTS: Final[str] = '--l3-include-hosts' + L3_INCLUDE_LOOPBACK: Final[str] = '--l3-include-loopback' + L3_SKIP_CIDR_0: Final[str] = 
'--l3-skip-cidr-0' + L3_SKIP_CIDR_32_128: Final[str] = '--l3-skip-cidr-32-128' + L3_SKIP_IF: Final[str] = '--l3-skip-if' + L3_SKIP_IP: Final[str] = '--l3-skip-ip' + L3_SKIP_PUBLIC: Final[str] = '--l3-skip-public' LAYERS: Final[str] = '--layers' LOG_FILE: Final[str] = '--log-file' LOG_LEVEL: Final[str] = '--log-level' LOG_TO_STDOUT: Final[str] = '--log-to-stdout' - MIN_AGE: Final[str] = '--min-age' + MIN_TOPOLOGY_AGE: Final[str] = '--min-topology-age' OUTPUT_DIRECTORY: Final[str] = '--output-directory' - PREFIX: Final[str] = '--prefix' PRE_FETCH: Final[str] = '--pre-fetch' QUIET: Final[str] = '--quiet' - REMOVE_DOMAIN: Final[str] = '--remove-domain' - SEED_DEVICES: Final[str] = '--seed-devices' - SKIP_L3_CIDR_0: Final[str] = '--skip-l3-cidr-0' - SKIP_L3_CIDR_32_128: Final[str] = '--skip-l3-cidr-32-128' - SKIP_L3_IF: Final[str] = '--skip-l3-if' - SKIP_L3_IP: Final[str] = '--skip-l3-ip' - SKIP_L3_PUBLIC: Final[str] = '--skip-l3-public' TIME_FORMAT: Final[str] = '--time-format' - USER_DATA_FILE: Final[str] = '--user-data-file' + UPDATE_CONFIG: Final[str] = '--update-config' VERSION: Final[str] = '--version' @unique -class EmblemNames(MyEnum): +class CliShort(EnumValue): + BACKEND: Final[str] = '-b' + CONFIG: Final[str] = '-c' + DEFAULT: Final[str] = '-d' + LAYERS: Final[str] = '-l' + OUTPUT_DIRECTORY: Final[str] = '-o' + VERSION: Final[str] = '-v' + + +@unique +class EmblemNames(EnumValue): HOST_NODE: Final[str] = 'host_node' IP_ADDRESS: Final[str] = 'ip_address' IP_NETWORK: Final[str] = 'ip_network' @@ -126,7 +152,7 @@ class EmblemNames(MyEnum): @unique -class EmblemValues(MyEnum): +class EmblemValues(EnumValue): ICON_AGGREGATION: Final[str] = 'icon_aggr' ICON_ALERT_UNREACHABLE: Final[str] = 'icon_alert_unreach' ICON_PLUGINS_CLOUD: Final[str] = 'icon_plugins_cloud' @@ -135,7 +161,7 @@ class EmblemValues(MyEnum): @unique -class HostLabels(MyEnum): +class HostLabels(EnumValue): CDP: Final[str] = "'nvdct/has_cdp_neighbours' 'yes'" L3V4_HOSTS: Final[str] = "'nvdct/l3v4_topology' 'host'" L3V4_ROUTER: Final[str] = "'nvdct/l3v4_topology' 'router'" @@ -145,36 +171,53 @@ class HostLabels(MyEnum): @unique -class IncludeExclude(MyEnum): +class HostFilter(EnumValue): + FOLDER: Final[str] = 'filename' + LABELS: Final[str] = 'labels' + TAGS: Final[str] = 'tags' + + +@unique +class LiveStatusOperator(EnumValue): + EQUAL: Final[str] = '=' + SUPERSET: Final[str] = '~' + + +@unique +class IncludeExclude(EnumValue): INCLUDE: Final[str] = 'INCLUDE' EXCLUDE: Final[str] = 'EXCLUDE' @unique -class L2InvColumns(MyEnum): +class L2InvColumns(EnumValue): NEIGHBOUR: Final[str] = 'neighbour_name' LOCALPORT: Final[str] = 'local_port' NEIGHBOURPORT: Final[str] = 'neighbour_port' + NEIGHBOURID: Final[str] = 'neighbour_id' + GLOBALID: Final[str] = 'local_id' + GLOBALNAME: Final[str] = 'local_name' @unique -class L3InvColumns(MyEnum): +class L3InvColumns(EnumValue): ADDRESS: Final[str] = 'address' DEVICE: Final[str] = 'device' CIDR: Final[str] = 'cidr' @unique -class InvPaths(MyEnum): - CDP: Final[str] = 'networking,cdp_cache,neighbours' - INTERFACES: Final[str] = 'networking,interfaces' - L3: Final[str] = 'networking,addresses' - LLDP: Final[str] = 'networking,lldp_cache,neighbours' - LLDP_ATTRIBUTE: Final[str] = 'networking,lldp_cache' +class InvPaths(EnumValue): + CDP: Final[str] = 'networking,cdp_cache,neighbours,Table,Rows' + CDP_GLOBAL: Final[str] = 'networking,cdp_cache,Attributes,Pairs' + INTERFACES: Final[str] = 'networking,interfaces,Table,Rows' + L3: Final[str] = 'networking,addresses,Table,Rows' + LLDP: Final[str] 
= 'networking,lldp_cache,neighbours,Table,Rows' + LLDP_GLOBAL: Final[str] = 'networking,lldp_cache,Attributes,Pairs' @unique -class Layers(MyEnum): +class Layers(EnumValue): CDP: Final[str] = 'CDP' LLDP: Final[str] = 'LLDP' L3V4: Final[str] = 'L3v4' @@ -183,7 +226,7 @@ class Layers(MyEnum): @unique -class LogLevels(MyEnum): +class LogLevels(EnumValue): CRITICAL: Final[str] = 'CRITICAL' FATAL: Final[str] = 'FATAL' ERROR: Final[str] = 'ERROR' @@ -193,7 +236,7 @@ class LogLevels(MyEnum): OFF: Final[str] = 'OFF' -class MinVersions(MyEnum): +class MinVersions(EnumValue): CDP: Final[str] = '0.7.1-20240320' LLDP: Final[str] = '0.9.3-20240320' LINUX_IP_ADDRESSES: Final[str] = '0.0.4-20241210' @@ -202,22 +245,25 @@ class MinVersions(MyEnum): @unique -class TomlSections(MyEnum): - CUSTOMERS: Final[str] = 'CUSTOMERS' +class TomlSections(EnumValue): EMBLEMS: Final[str] = 'EMBLEMS' + FILTER_BY_CUSTOMER: Final[str] = 'FILTER_BY_CUSTOMER' + FILTER_BY_FOLDER: Final[str] = 'FILTER_BY_FOLDER' + FILTER_BY_HOST_LABEL: Final[str] = 'FILTER_BY_HOST_LABEL' + FILTER_BY_HOST_TAG: Final[str] = 'FILTER_BY_HOST_TAG' + FILTER_BY_SITE: Final[str] = 'FILTER_BY_SITE' L2_DROP_NEIGHBOURS: Final[str] = 'L2_DROP_NEIGHBOURS' - L2_HOST_MAP: Final[str] = 'L2_HOST_MAP' L2_NEIGHBOUR_REPLACE_REGEX: Final[str] = 'L2_NEIGHBOUR_REPLACE_REGEX' + L2_NEIGHBOUR_TO_HOST_MAP: Final[str] = 'L2_NEIGHBOUR_TO_HOST_MAP' L2_SEED_DEVICES: Final[str] = 'L2_SEED_DEVICES' L3V4_IGNORE_WILDCARD: Final[str] = 'L3V4_IGNORE_WILDCARD' L3_IGNORE_HOSTS: Final[str] = 'L3_IGNORE_HOSTS' L3_IGNORE_IP: Final[str] = 'L3_IGNORE_IP' - L3_REPLACE: Final[str] = 'L3_REPLACE' + L3_REPLACE_NETWORKS: Final[str] = 'L3_REPLACE_NETWORKS' L3_SUMMARIZE: Final[str] = 'L3_SUMMARIZE' MAP_SPEED_TO_THICKNESS: Final[str] = 'MAP_SPEED_TO_THICKNESS' PROTECTED_TOPOLOGIES: Final[str] = 'PROTECTED_TOPOLOGIES' SETTINGS: Final[str] = 'SETTINGS' - SITES: Final[str] = 'SITES' STATIC_CONNECTIONS: Final[str] = 'STATIC_CONNECTIONS' @@ -226,35 +272,37 @@ def cli_long_to_toml(cli_param: str) -> str: @unique -class TomlSettings(MyEnum): - ADJUST_TOML: Final[str] = cli_long_to_toml(CliLong.ADJUST_TOML) +class TomlSettings(EnumValue): API_PORT: Final[str] = cli_long_to_toml(CliLong.API_PORT) BACKEND: Final[str] = cli_long_to_toml(CliLong.BACKEND) - CASE: Final[str] = cli_long_to_toml(CliLong.CASE) - CHECK_USER_DATA_ONLY: Final[str] = cli_long_to_toml(CliLong.CHECK_USER_DATA_ONLY) + CHECK_CONFIG: Final[str] = cli_long_to_toml(CliLong.CHECK_CONFIG) + CONFIG: Final[str] = cli_long_to_toml(CliLong.CONFIG) DEFAULT: Final[str] = cli_long_to_toml(CliLong.DEFAULT) - DISPLAY_L2_NEIGHBOURS: Final[str] = cli_long_to_toml(CliLong.DISPLAY_L2_NEIGHBOURS) DONT_COMPARE: Final[str] = cli_long_to_toml(CliLong.DONT_COMPARE) FILTER_CUSTOMERS: Final[str] = cli_long_to_toml(CliLong.FILTER_CUSTOMERS) FILTER_SITES: Final[str] = cli_long_to_toml(CliLong.FILTER_SITES) - INCLUDE_L3_HOSTS: Final[str] = cli_long_to_toml(CliLong.INCLUDE_L3_HOSTS) - INCLUDE_L3_LOOPBACK: Final[str] = cli_long_to_toml(CliLong.INCLUDE_L3_LOOPBACK) - KEEP: Final[str] = cli_long_to_toml(CliLong.KEEP) + KEEP_MAX_TOPOLOGIES: Final[str] = cli_long_to_toml(CliLong.KEEP_MAX_TOPOLOGIES) + L2_CASE: Final[str] = cli_long_to_toml(CliLong.L2_CASE) + L2_DISPLAY_NEIGHBOURS: Final[str] = cli_long_to_toml(CliLong.L2_DISPLAY_NEIGHBOURS) + L2_DISPLAY_PORTS: Final[str] = cli_long_to_toml(CliLong.L2_DISPLAY_PORTS) + L2_PREFIX: Final[str] = cli_long_to_toml(CliLong.L2_PREFIX) + L2_REMOVE_DOMAIN: Final[str] = cli_long_to_toml(CliLong.L2_REMOVE_DOMAIN) + 
L2_SKIP_EXTERNAL: Final[str] = cli_long_to_toml(CliLong.L2_SKIP_EXTERNAL) + L3_DISPLAY_DEVICES: Final[str] = cli_long_to_toml(CliLong.L3_DISPLAY_DEVICES) + L3_INCLUDE_HOSTS: Final[str] = cli_long_to_toml(CliLong.L3_INCLUDE_HOSTS) + L3_INCLUDE_LOOPBACK: Final[str] = cli_long_to_toml(CliLong.L3_INCLUDE_LOOPBACK) + L3_SKIP_CIDR_0: Final[str] = cli_long_to_toml(CliLong.L3_SKIP_CIDR_0) + L3_SKIP_CIDR_32_128: Final[str] = cli_long_to_toml(CliLong.L3_SKIP_CIDR_32_128) + L3_SKIP_IF: Final[str] = cli_long_to_toml(CliLong.L3_SKIP_IF) + L3_SKIP_IP: Final[str] = cli_long_to_toml(CliLong.L3_SKIP_IP) + L3_SKIP_PUBLIC: Final[str] = cli_long_to_toml(CliLong.L3_SKIP_PUBLIC) LAYERS: Final[str] = cli_long_to_toml(CliLong.LAYERS) LOG_FILE: Final[str] = cli_long_to_toml(CliLong.LOG_FILE) LOG_LEVEL: Final[str] = cli_long_to_toml(CliLong.LOG_LEVEL) LOG_TO_STDOUT: Final[str] = cli_long_to_toml(CliLong.LOG_TO_STDOUT) - MIN_AGE: Final[str] = cli_long_to_toml(CliLong.MIN_AGE) + MIN_TOPOLOGY_AGE: Final[str] = cli_long_to_toml(CliLong.MIN_TOPOLOGY_AGE) OUTPUT_DIRECTORY: Final[str] = cli_long_to_toml(CliLong.OUTPUT_DIRECTORY) - PREFIX: Final[str] = cli_long_to_toml(CliLong.PREFIX) PRE_FETCH: Final[str] = cli_long_to_toml(CliLong.PRE_FETCH) QUIET: Final[str] = cli_long_to_toml(CliLong.QUIET) - REMOVE_DOMAIN: Final[str] = cli_long_to_toml(CliLong.REMOVE_DOMAIN) - SKIP_L3_CIDR_0: Final[str] = cli_long_to_toml(CliLong.SKIP_L3_CIDR_0) - SKIP_L3_CIDR_32_128: Final[str] = cli_long_to_toml(CliLong.SKIP_L3_CIDR_32_128) - SKIP_L3_IF: Final[str] = cli_long_to_toml(CliLong.SKIP_L3_IF) - SKIP_L3_IP: Final[str] = cli_long_to_toml(CliLong.SKIP_L3_IP) - SKIP_L3_PUBLIC: Final[str] = cli_long_to_toml(CliLong.SKIP_L3_PUBLIC) TIME_FORMAT: Final[str] = cli_long_to_toml(CliLong.TIME_FORMAT) - USER_DATA_FILE: Final[str] = cli_long_to_toml(CliLong.USER_DATA_FILE) - + UPDATE_CONFIG: Final[str] = cli_long_to_toml(CliLong.UPDATE_CONFIG) diff --git a/source/bin/nvdct/lib/settings.py b/source/bin/nvdct/lib/settings.py index 5b82bff0a8ce66f4ad627f4d172239e459ed5165..e069f0ccddcaefac6174632fffcf6c9abef030fe 100755 --- a/source/bin/nvdct/lib/settings.py +++ b/source/bin/nvdct/lib/settings.py @@ -9,6 +9,7 @@ # fixed path to default user data file # 2024-12-17: fixed wrong import for OMD_ROOT (copy&paste) (ThX to BH2005@forum.checkmk.com) +# 2025-01-19: moved --check-toml to utils/get_data_from_toml from collections.abc import Mapping from ipaddress import AddressValueError, NetmaskValueError, ip_address, ip_network @@ -16,32 +17,31 @@ from logging import CRITICAL, DEBUG, ERROR, FATAL, INFO, WARNING from pathlib import Path from sys import exit as sys_exit from time import strftime -from typing import Dict, List, NamedTuple, Tuple +from typing import Dict, List, NamedTuple, Set, Tuple from lib.constants import ( - API_PORT, + API_PORT_DEFAULT, Backends, + CONFIG_FILE, Case, - EmblemValues, EmblemNames, + EmblemValues, ExitCodes, IncludeExclude, LOGGER, - LOG_FILE, + LOG_FILE_DEFAULT, LogLevels, OMD_ROOT, - TIME_FORMAT, + RemoveDomain, + TIME_FORMAT_DEFAULT, TomlSections, TomlSettings, - USER_DATA_FILE, - ) from lib.utils import ( get_data_from_toml, get_local_cmk_api_port, is_valid_customer_name, is_valid_hostname, - is_valid_log_file, is_valid_output_directory, is_valid_site_name, ) @@ -83,36 +83,39 @@ class Settings: ): # cli defaults self.__settings = { - TomlSettings.ADJUST_TOML: False, TomlSettings.API_PORT: None, TomlSettings.BACKEND: Backends.MULTISITE, - TomlSettings.CASE: None, - TomlSettings.CHECK_USER_DATA_ONLY: False, + 
TomlSettings.CHECK_CONFIG: False, + TomlSettings.CONFIG: f'{OMD_ROOT}/local/bin/nvdct/conf/{CONFIG_FILE}', TomlSettings.DEFAULT: False, - TomlSettings.DISPLAY_L2_NEIGHBOURS: False, TomlSettings.DONT_COMPARE: False, TomlSettings.FILTER_CUSTOMERS: None, TomlSettings.FILTER_SITES: None, - TomlSettings.INCLUDE_L3_HOSTS: False, - TomlSettings.INCLUDE_L3_LOOPBACK: False, - TomlSettings.KEEP: False, + TomlSettings.KEEP_MAX_TOPOLOGIES: False, + TomlSettings.L2_CASE: None, + TomlSettings.L2_DISPLAY_PORTS: False, + TomlSettings.L2_DISPLAY_NEIGHBOURS: False, + TomlSettings.L2_PREFIX: None, + TomlSettings.L2_REMOVE_DOMAIN: None, + TomlSettings.L2_SKIP_EXTERNAL: False, + TomlSettings.L3_DISPLAY_DEVICES: False, + TomlSettings.L3_INCLUDE_HOSTS: False, + TomlSettings.L3_INCLUDE_LOOPBACK: False, + TomlSettings.L3_SKIP_CIDR_0: False, + TomlSettings.L3_SKIP_CIDR_32_128: False, + TomlSettings.L3_SKIP_IF: False, + TomlSettings.L3_SKIP_IP: False, + TomlSettings.L3_SKIP_PUBLIC: False, TomlSettings.LAYERS: [], - TomlSettings.LOG_FILE: LOG_FILE, + TomlSettings.LOG_FILE: LOG_FILE_DEFAULT, TomlSettings.LOG_LEVEL: LogLevels.WARNING, TomlSettings.LOG_TO_STDOUT: False, - TomlSettings.MIN_AGE: 0, + TomlSettings.MIN_TOPOLOGY_AGE: 0, TomlSettings.OUTPUT_DIRECTORY: None, - TomlSettings.PREFIX: None, TomlSettings.PRE_FETCH: False, TomlSettings.QUIET: False, - TomlSettings.REMOVE_DOMAIN: False, - TomlSettings.SKIP_L3_CIDR_0: False, - TomlSettings.SKIP_L3_CIDR_32_128: False, - TomlSettings.SKIP_L3_IF: False, - TomlSettings.SKIP_L3_IP: False, - TomlSettings.SKIP_L3_PUBLIC: False, - TomlSettings.TIME_FORMAT: TIME_FORMAT, - TomlSettings.USER_DATA_FILE: f'{OMD_ROOT}/local/bin/nvdct/conf/{USER_DATA_FILE}', + TomlSettings.TIME_FORMAT: TIME_FORMAT_DEFAULT, + TomlSettings.UPDATE_CONFIG: False, } # args in the form {'s, __seed_devices': 'CORE01', 'p, __path_in_inventory': None, ... }} # we will remove 's, __' @@ -121,14 +124,10 @@ class Settings: ) self.__user_data = get_data_from_toml( - file=self.__args.get(TomlSettings.USER_DATA_FILE, self.user_data_file) + file=self.__args.get(TomlSettings.CONFIG, self.config), + check_only=bool(self.__args.get(TomlSettings.CHECK_CONFIG)), ) - if self.__args.get(TomlSettings.CHECK_USER_DATA_ONLY): - LOGGER.info(msg=f'Could read/parse the user data from {self.user_data_file}') - print(f'Could read/parse the user data from {self.user_data_file}') - sys_exit(ExitCodes.OK) - # defaults -> overridden by toml -> overridden by cli self.__settings.update(self.__user_data.get(TomlSections.SETTINGS, {})) self.__settings.update(self.__args) @@ -136,20 +135,23 @@ class Settings: if self.layers: layers = list(set(self.layers)) if len(layers) != len(self.layers): - LOGGER.error( - msg='-l/--layers options must be unique. Don\'t use any layer more than once.' - ) + # logger not initialized here + # LOGGER.fatal('-l/--layers options must be unique. Don\'t use any layer more than once.') print('-l/--layers options must be unique. 
Don\'t use any layer more than once.') sys_exit(ExitCodes.BAD_OPTION_LIST) self.__api_port: int | None = None # init user data with defaults - self.__customers: List[str] | None = None self.__emblems: Emblems | None = None + self.__filter_by_customer: List[str] | None = None + self.__filter_by_folder: Dict[str, Set[str]] | None = None + self.__filter_by_host_label: Dict[str, Set[str]] | None = None + self.__filter_by_host_tag: Dict[str, Set[str]] | None = None + self.__filter_by_site: List[str] | None = None self.__l2_drop_neighbours: List[str] | None = None - self.__l2_host_map: Dict[str, str] | None = None self.__l2_neighbour_replace_regex: List[Tuple[str, str]] | None = None + self.__l2_neighbour_to_host_map: Dict[str, str] | None = None self.__l2_seed_devices: List[str] | None = None self.__l3_ignore_hosts: List[str] | None = None self.__l3_ignore_ip: List[ip_network] | None = None @@ -158,7 +160,6 @@ class Settings: self.__l3v4_ignore_wildcard: List[Wildcard] | None = None self.__map_speed_to_thickness: List[Thickness] | None = None self.__protected_topologies: List[str] | None = None - self.__sites: List[str] | None = None self.__static_connections: List[StaticConnection] | None = None # @@ -172,7 +173,7 @@ class Settings: else: self.__api_port = get_local_cmk_api_port() if self.__api_port is None: - self.__api_port = API_PORT + self.__api_port = API_PORT_DEFAULT return self.__api_port @@ -185,35 +186,24 @@ class Settings: ]: return str(self.__settings[TomlSettings.BACKEND]) else: # fallback to defaukt -> exit ?? - LOGGER.warning( + LOGGER.error( f'Unknown backend: {self.__settings[TomlSettings.BACKEND]}. Accepted backends are: ' - f'{Backends.LIVESTATUS}, {Backends.MULTISITE}, {Backends.RESTAPI}. Fall back to {Backends.MULTISITE}.' + f'{Backends.LIVESTATUS}, {Backends.MULTISITE}, {Backends.RESTAPI}. Falling back to {Backends.MULTISITE}.' ) return Backends.MULTISITE - @property # --case - def case(self) -> str | None: - if self.__settings[TomlSettings.CASE] in [Case.LOWER, Case.UPPER]: - return self.__settings[TomlSettings.CASE] - elif self.__settings[TomlSettings.CASE] is not None: - LOGGER.warning( - f'Unknown case setting {self.__settings[TomlSettings.CASE]}. ' - f'Accepted are {Case.LOWER}|{Case.UPPER}. Fallback to no change.' 
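The two `self.__settings.update(...)` calls in `Settings.__init__` above implement the order noted in the comment "defaults -> overridden by toml -> overridden by cli". A standalone sketch of that precedence with made-up values (only the `update()` order mirrors the real code):

```python
# Sketch of the settings precedence described above; keys exist in nvdct,
# the concrete values here are illustrative only.
defaults = {'backend': 'MULTISITE', 'quiet': False, 'min_topology_age': 0}
toml_settings = {'quiet': True}        # from the [SETTINGS] section of the TOML config
cli_args = {'min_topology_age': 3}     # explicitly given command line options

settings = dict(defaults)
settings.update(toml_settings)         # TOML overrides the built-in defaults
settings.update(cli_args)              # CLI overrides TOML and the defaults

assert settings == {'backend': 'MULTISITE', 'quiet': True, 'min_topology_age': 3}
```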
- ) - return None + @property # --check-config + def check_config(self) -> bool: + return bool(self.__settings[TomlSettings.CHECK_CONFIG]) - @property # --check-user-data-only - def check_user_data_only(self) -> bool: - return bool(self.__settings[TomlSettings.CHECK_USER_DATA_ONLY]) + @property # --config + def config(self) -> str: + return str(self.__settings[TomlSettings.CONFIG]) @property # -d --default def default(self) -> bool: return bool(self.__settings[TomlSettings.DEFAULT]) - @property # --display-l2-neighbours - def display_l2_neighbours(self) -> bool: - return bool(self.__settings[TomlSettings.DISPLAY_L2_NEIGHBOURS]) - @property # --dont-compare def dont_compare(self) -> bool: return bool(self.__settings[TomlSettings.DONT_COMPARE]) @@ -240,27 +230,74 @@ class Settings: ) return None - @property # --include-l3-hosts - def fix_toml(self) -> bool: - return bool(self.__settings[TomlSettings.ADJUST_TOML]) + @property # --l2-case + def l2_case(self) -> str | None: + if self.__settings[TomlSettings.L2_CASE] in [Case.LOWER, Case.UPPER, Case.INSENSITIVE, Case.AUTO, Case.OFF]: + return self.__settings[TomlSettings.L2_CASE] + elif self.__settings[TomlSettings.L2_CASE] is not None: + LOGGER.error( + f'Unknown case setting {self.__settings[TomlSettings.L2_CASE]}. ' + f'Accepted are {Case.LOWER}|{Case.UPPER}|{Case.INSENSITIVE}|{Case.AUTO}|{Case.OFF}. Falling back to "OFF" (no change).' + ) + return None + + @property # --l2-display-ports + def l2_display_ports(self) -> bool: + return bool(self.__settings[TomlSettings.L2_DISPLAY_PORTS]) + + @property # --l2-display-neighbours + def l2_display_neighbours(self) -> bool: + return bool(self.__settings[TomlSettings.L2_DISPLAY_NEIGHBOURS]) + + @property # --l2-prefix + def l2_prefix(self) -> str: + if self.__settings[TomlSettings.L2_PREFIX] is not None: + return str(self.__settings[TomlSettings.L2_PREFIX]) + return '' + + @property # --l2-remove-domain + def l2_remove_domain(self) -> str: + if self.__settings[TomlSettings.L2_REMOVE_DOMAIN] in [RemoveDomain.ON, RemoveDomain.OFF, RemoveDomain.AUTO]: + return self.__settings[TomlSettings.L2_REMOVE_DOMAIN] + else: + self.__settings[TomlSettings.L2_REMOVE_DOMAIN] = RemoveDomain.OFF + return RemoveDomain.OFF + + @property # --l2-skip-external + def l2_skip_external(self) -> bool: + return bool(self.__settings[TomlSettings.L2_SKIP_EXTERNAL]) + + @property # --l3-display-devices + def l3_display_devices(self) -> bool: + return bool(self.__settings[TomlSettings.L3_DISPLAY_DEVICES]) @property # --include-l3-hosts - def include_l3_hosts(self) -> bool: - return bool(self.__settings[TomlSettings.INCLUDE_L3_HOSTS]) + def l3_include_hosts(self) -> bool: + return bool(self.__settings[TomlSettings.L3_INCLUDE_HOSTS]) - @property # --skip-l3-ip - def include_l3_loopback(self) -> bool: - return bool(self.__settings[TomlSettings.INCLUDE_L3_LOOPBACK]) + @property # --l3-skip-ip + def l3_include_loopback(self) -> bool: + return bool(self.__settings[TomlSettings.L3_INCLUDE_LOOPBACK]) - @property # --keep - def keep(self) -> int | None: - if isinstance(self.__settings[TomlSettings.KEEP], int): - return max(self.__settings[TomlSettings.KEEP], 0) - return None + @property # --l3-skip-cidr-0 + def l3_skip_cidr_0(self) -> bool: + return bool(self.__settings[TomlSettings.L3_SKIP_CIDR_0]) + + @property # --l3-skip-cidr-32-128 + def l3_skip_cidr_32_128(self) -> bool: + return bool(self.__settings[TomlSettings.L3_SKIP_CIDR_32_128]) - @property # --keep-domain - def remove_domain(self) -> bool: - return 
bool(self.__settings[TomlSettings.REMOVE_DOMAIN]) + @property # --l3-skip-if + def l3_skip_if(self) -> bool: + return bool(self.__settings[TomlSettings.L3_SKIP_IF]) + + @property # --l3-skip-ip + def l3_skip_ip(self) -> bool: + return bool(self.__settings[TomlSettings.L3_SKIP_IP]) + + @property # --l3-skip-public + def l3_skip_public(self) -> bool: + return bool(self.__settings[TomlSettings.L3_SKIP_PUBLIC]) @property # --layers def layers(self) -> List[str]: @@ -269,11 +306,11 @@ class Settings: @property # --log-file def log_file(self) -> str: raw_log_file = str(Path(str(self.__settings[TomlSettings.LOG_FILE])).expanduser()) - if is_valid_log_file(raw_log_file): - return raw_log_file - else: - LOGGER.error(f'Falling back to {LOG_FILE}') - return LOG_FILE + if not raw_log_file.startswith(f'{OMD_ROOT}/var/log/'): + # logger not ready yet + print(f'\nInvalid log file {raw_log_file}. Falling back to {LOG_FILE_DEFAULT}') + return LOG_FILE_DEFAULT + return raw_log_file @property # --log-level def loglevel(self) -> int: @@ -292,10 +329,16 @@ class Settings: def log_to_stdtout(self) -> bool: return bool(self.__settings[TomlSettings.LOG_TO_STDOUT]) - @property # --min-age - def min_age(self) -> int: - if isinstance(self.__settings[TomlSettings.MIN_AGE], int): - return max(self.__settings[TomlSettings.MIN_AGE], 0) + @property # --keep-max-topologies + def keep_max_topologies(self) -> int | None: + if isinstance(self.__settings[TomlSettings.KEEP_MAX_TOPOLOGIES], int): + return self.__settings[TomlSettings.KEEP_MAX_TOPOLOGIES] + return None + + @property # --min-topology-age + def min_topology_age(self) -> int: + if isinstance(self.__settings[TomlSettings.MIN_TOPOLOGY_AGE], int): + return max(self.__settings[TomlSettings.MIN_TOPOLOGY_AGE], 0) else: return 0 @@ -304,19 +347,14 @@ class Settings: # init output directory with current time if not set if not self.__settings[TomlSettings.OUTPUT_DIRECTORY]: self.__settings[TomlSettings.OUTPUT_DIRECTORY] = f'{strftime(self.__settings[TomlSettings.TIME_FORMAT])}' - if is_valid_output_directory(str(self.__settings[TomlSettings.OUTPUT_DIRECTORY])): - return str(self.__settings[TomlSettings.OUTPUT_DIRECTORY]) + raw_output_directory = str(self.__settings[TomlSettings.OUTPUT_DIRECTORY]) + if is_valid_output_directory(raw_output_directory): + return raw_output_directory else: - LOGGER.error('Falling back to "nvdct"') + LOGGER.error(f'Invalid output directory {raw_output_directory}. 
Falling back to "nvdct".') return 'nvdct' - @property # --prefix - def prefix(self) -> str | None: - if self.__settings[TomlSettings.PREFIX] is not None: - return str(self.__settings[TomlSettings.PREFIX]) - return None - - @property # --pre-fill-cache + @property # --pre-fetch def pre_fetch(self) -> bool: return bool(self.__settings[TomlSettings.PRE_FETCH]) @@ -324,45 +362,17 @@ class Settings: def quiet(self) -> bool: return bool(self.__settings[TomlSettings.QUIET]) - @property # --skip-l3-cidr-0 - def skip_l3_cidr_0(self) -> bool: - return bool(self.__settings[TomlSettings.SKIP_L3_CIDR_0]) - - @property # --skip-l3-cidr-32-128 - def skip_l3_cidr_32_128(self) -> bool: - return bool(self.__settings[TomlSettings.SKIP_L3_CIDR_32_128]) - - @property # --skip-l3-if - def skip_l3_if(self) -> bool: - return bool(self.__settings[TomlSettings.SKIP_L3_IF]) - - @property # --skip-l3-ip - def skip_l3_ip(self) -> bool: - return bool(self.__settings[TomlSettings.SKIP_L3_IP]) - - @property # --skip-l3-public - def skip_l3_public(self) -> bool: - return bool(self.__settings[TomlSettings.SKIP_L3_PUBLIC]) - @property # --time-format def time_format(self) -> str: - return str(self.__settings[TomlSettings.TIME_FORMAT]) + return str(self.__settings[TomlSettings.TIME_FORMAT].replace('/', '_').replace('..', '__')) - @property # --user-data-file - def user_data_file(self) -> str: - return str(self.__settings[TomlSettings.USER_DATA_FILE]) + @property # --update-config + def update_config(self) -> bool: + return bool(self.__settings[TomlSettings.UPDATE_CONFIG]) # # user data setting # - @property - def customers(self) -> List[str]: - if self.__customers is None: - self.__customers = [ - str(customer) for customer in set(self.__user_data.get(TomlSections.CUSTOMERS, [])) - if is_valid_customer_name(customer)] - LOGGER.info(f'Found {len(self.__customers)} to filter on') - return self.__customers @property def emblems(self) -> Emblems: @@ -378,6 +388,82 @@ class Settings: ) return self.__emblems + @property + def filter_by_customer(self) -> List[str]: + if self.__filter_by_customer is None: + self.__filter_by_customer = [ + str(customer) for customer in set(self.__user_data.get(TomlSections.FILTER_BY_CUSTOMER, [])) + if is_valid_customer_name(customer)] + LOGGER.info(f'Found {len(self.__filter_by_customer)} customer(s) to filter on') + return self.__filter_by_customer + + @property + def filter_by_site(self) -> List[str]: + if self.__filter_by_site is None: + self.__filter_by_site = [str(site) for site in set(self.__user_data.get(TomlSections.FILTER_BY_SITE, [])) if is_valid_site_name(site)] + LOGGER.info(f'Found {len(self.__filter_by_site)} site(s) to filter on') + return self.__filter_by_site + + @staticmethod + def parse_key_value_section(section: str, data: Mapping[str, str]) -> Dict[str, set[str]]: + parsed = { + IncludeExclude.INCLUDE: set(), + IncludeExclude.EXCLUDE: set() + } + for key_value, mode in data.items(): + if mode not in [IncludeExclude.INCLUDE, IncludeExclude.EXCLUDE]: + LOGGER.error( + f'Invalid mode in {section} found: {key_value}={mode} -> line ignored' + ) + continue + match section: + case TomlSections.FILTER_BY_HOST_LABEL: + try: + key, value = key_value.split(':', 1) + except ValueError: + LOGGER.error( + f'Invalid host label found missing ":": {key_value}={mode} -> line ignored' + ) + continue + if ':' in value: + LOGGER.error( + f'Invalid host label found can not contain more than one ":": "{key_value}={mode}" -> line ignored' + ) + continue + parsed[mode].add(f"'{key}' '{value}'") + case 
TomlSections.FILTER_BY_FOLDER: + parsed[mode].add(f'^/wato/{key_value.strip("/")}/') + case _: + parsed[mode].add(key_value) + return parsed + + @property + def filter_by_folder(self) -> Dict[str, set[str]]: + if self.__filter_by_folder is None: + self.__filter_by_folder = self.parse_key_value_section( + section=TomlSections.FILTER_BY_FOLDER, + data=self.__user_data.get(TomlSections.FILTER_BY_FOLDER, {}) + ) + return self.__filter_by_folder + + @property + def filter_by_host_label(self) -> Dict[str, Set[str]]: + if self.__filter_by_host_label is None: + self.__filter_by_host_label = self.parse_key_value_section( + section=TomlSections.FILTER_BY_HOST_LABEL, + data=self.__user_data.get(TomlSections.FILTER_BY_HOST_LABEL, {}) + ) + return self.__filter_by_host_label + + @property + def filter_by_host_tag(self) -> Dict[str, Set[str]]: + if self.__filter_by_host_tag is None: + self.__filter_by_host_tag = self.parse_key_value_section( + section=TomlSections.FILTER_BY_HOST_TAG, + data=self.__user_data.get(TomlSections.FILTER_BY_HOST_TAG, {}) + ) + return self.__filter_by_host_tag + @property def l2_drop_neighbours(self) -> List[str]: if self.__l2_drop_neighbours is None: @@ -392,14 +478,14 @@ class Settings: return self.__l2_seed_devices @property - def l2_host_map(self) -> Dict[str, str]: - if self.__l2_host_map is None: - self.__l2_host_map = { - str(host): str(replace_host) for host, replace_host in self.__user_data.get( - TomlSections.L2_HOST_MAP, {} - ).items() if is_valid_hostname(host) + def l2_neighbour_to_host_map(self) -> Dict[str, str]: + if self.__l2_neighbour_to_host_map is None: + self.__l2_neighbour_to_host_map = { + str(neighbour): str(host) for neighbour, host in self.__user_data.get( + TomlSections.L2_NEIGHBOUR_TO_HOST_MAP, {} + ).items() if is_valid_hostname(neighbour) } - return self.__l2_host_map + return self.__l2_neighbour_to_host_map @property def l2_neighbour_replace_regex(self) -> List[Tuple[str, str]] | None: @@ -426,7 +512,7 @@ class Settings: for raw_ip_network in self.__user_data.get(TomlSections.L3_IGNORE_IP, []): try: self.__l3_ignore_ip.append(ip_network(raw_ip_network, strict=False)) - except (AddressValueError, NetmaskValueError): + except (AddressValueError, NetmaskValueError, ValueError): LOGGER.error( f'Invalid entry in {TomlSections.L3_IGNORE_IP} found: {raw_ip_network} -> ignored' ) @@ -454,6 +540,7 @@ class Settings: inverted_wildcard = '.'.join( [str(255 - int(octet)) for octet in wildcard.split('.')] ) + self.__l3v4_ignore_wildcard.append(Wildcard( int_ip_address=int(ip_address(raw_ip_address)), int_wildcard=int(ip_address(inverted_wildcard)), @@ -463,7 +550,7 @@ class Settings: ip_address(inverted_wildcard) ) )) - except (AddressValueError, NetmaskValueError): + except (AddressValueError, NetmaskValueError, ValueError): LOGGER.error( f'Invalid entry in {TomlSections.L3V4_IGNORE_WILDCARD} -> {entry} -> ignored' ) @@ -475,24 +562,24 @@ class Settings: return self.__l3v4_ignore_wildcard @property - def l3_replace(self) -> Dict[str, str]: + def l3_replace_networks(self) -> Dict[str, str]: if self.__l3_replace is None: self.__l3_replace = {} - for raw_ip_network, node in self.__user_data.get(TomlSections.L3_REPLACE, {}).items(): + for raw_ip_network, node in self.__user_data.get(TomlSections.L3_REPLACE_NETWORKS, {}).items(): try: _ip_network = ip_network(raw_ip_network) # noqa: F841 - except (AddressValueError, NetmaskValueError): + except (AddressValueError, NetmaskValueError, ValueError): LOGGER.error( - f'Invalid entry in {TomlSections.L3_REPLACE} found: 
{raw_ip_network} -> line ignored' + f'Invalid entry in {TomlSections.L3_REPLACE_NETWORKS} found: "{raw_ip_network}" = "{node}" -> line ignored' ) continue if not is_valid_hostname(node): - LOGGER.error(f'Invalid node name found: {node} -> line ignored ') + LOGGER.error(f'Invalid node name found: {node} -> line ignored') continue self.__l3_replace[raw_ip_network] = str(node) LOGGER.info( - f'Valid entries in {TomlSections.L3_REPLACE} found: {len(self.__l3_replace)}/' - f'{len(self.__user_data.get(TomlSections.L3_REPLACE, {}))}' + f'Valid entries in {TomlSections.L3_REPLACE_NETWORKS} found: {len(self.__l3_replace)}/' + f'{len(self.__user_data.get(TomlSections.L3_REPLACE_NETWORKS, {}))}' ) return self.__l3_replace @@ -503,7 +590,7 @@ class Settings: for raw_ip_network in self.__user_data.get(TomlSections.L3_SUMMARIZE, []): try: self.__l3_summarize.append(ip_network(raw_ip_network, strict=False)) - except (AddressValueError, NetmaskValueError): + except (AddressValueError, NetmaskValueError, ValueError): LOGGER.error( f'Invalid entry in {TomlSections.L3_SUMMARIZE} -> {raw_ip_network} -> ignored' ) @@ -555,11 +642,11 @@ class Settings: left_host, left_service, right_service, right_host = connection except ValueError: LOGGER.error( - f'Wrong entry in {TomlSections.STATIC_CONNECTIONS} -> {connection} -> ignored' + f'Invalid entry in {TomlSections.STATIC_CONNECTIONS} needs to be 4 tuples: -> {connection} -> ignored' ) continue if not right_host or not left_host: - LOGGER.warning(f'Both hosts must be set, got {connection}') + LOGGER.warning(f'Both hosts must be set, got: {connection} -> ignored') continue if not is_valid_hostname(right_host) or not is_valid_hostname(left_host): continue @@ -575,9 +662,4 @@ class Settings: ) return self.__static_connections - @property - def sites(self) -> List[str]: - if self.__sites is None: - self.__sites = [str(site) for site in set(self.__user_data.get(TomlSections.SITES, [])) if is_valid_site_name(site)] - LOGGER.info(f'Found {len(self.__sites)} to filter on') - return self.__sites + diff --git a/source/bin/nvdct/lib/topologies.py b/source/bin/nvdct/lib/topologies.py index 8594526d6b65ce6b5439b7d2f572e9e899d4e2a5..7a4634f156a401fd898ecec2cb210fb06a0a5529 100755 --- a/source/bin/nvdct/lib/topologies.py +++ b/source/bin/nvdct/lib/topologies.py @@ -6,11 +6,12 @@ # Author: thl-cmk[at]outlook[dot]com # URL : https://thl-cmk.hopto.org # Date : 2024-06-09 -# File : lib/topologies.py +# File : nvdct/lib/topologies.py # 2024-12-22: refactoring topology creation into classes # made L3 topology IP version independent # 2024-12-25: refactoring, moved function into classes +# 2025-01-22: changed: show interface service in L2 and L3 topology instead of Port/Device from HW/SW inventory from abc import abstractmethod from collections.abc import Mapping, MutableMapping, Sequence, MutableSet @@ -24,12 +25,14 @@ from lib.backends import ( ) from lib.constants import ( CACHE_INTERFACES_DATA, + HostFilter, HostLabels, IPVersion, InvPaths, - LOGGER, L2InvColumns, L3InvColumns, + LOGGER, + LiveStatusOperator, TomlSections, ) from lib.settings import ( @@ -57,10 +60,22 @@ class NvObjects: self, host: str, emblem: str | None = None, - name: str | None = None, + display_name: str | None = None, ) -> None: - if name and host in self.nv_objects: - self.nv_objects[host]['name'] = name + """ + Adds a host object to the topology. 
+ - if there is no display name, "host" will be used as display_name + - the emblem for the host will only be used if the host is not a Checkmk host object + - if there is no "emblem" the Checkmk will use its default emblem for missing objects (white question mark on blue ground) + Args: + host: the name of the host + emblem: the emblem for the object + display_name: the name for the host to show in the topology + Returns: + None + """ + if display_name and host in self.nv_objects: + self.nv_objects[host]['name'] = display_name if host not in self.nv_objects: self.host_count += 1 @@ -69,7 +84,7 @@ class NvObjects: metadata: Dict = {} # LOGGER.debug(f'host: {host}, {host_cache.host_exists(host=host)}') if self.host_cache.host_exists(host=host) is True: - LOGGER.debug(f'host: {host} exists') + LOGGER.debug(f'host exists : {host}') link = {'core': host} else: if emblem is not None: @@ -84,7 +99,7 @@ class NvObjects: } self.nv_objects[host] = { - 'name': name if name is not None else host, + 'name': display_name if display_name is not None else host, 'link': link, 'metadata': metadata, } @@ -96,12 +111,29 @@ class NvObjects: service: str, emblem: str | None = None, metadata: Dict | None = None, - name: str | None = None, + display_name: str | None = None, ) -> None: + """ + Adds a service object to the topology. + - the service object will be added as "service@host" to the topology + - service should be the complete service name as in Checkmk, i.e. "Interface X440G2-48p-10G4 Port 52" + - if the host is a Checkmk host object it will be linked to the core + - if there is no display name, "service" will be used as display_name + - the emblem for the service will only be used if the host is not a Checkmk host object + - if there is no "emblem" the Checkmk will use its default emblem for missing objects (white question mark on blue ground) + Args: + host: the name of the host + service: the name of the service + emblem: the emblem for the object + metadata: additional data for the service + display_name: the name for the host to show in the topology + Returns: + None + """ if metadata is None: metadata = {} - if name is None: - name = service + if display_name is None: + display_name = service self.add_host(host=host) service_object = f'{service}@{host}' @@ -119,7 +151,7 @@ class NvObjects: }) self.nv_objects[service_object] = { - 'name': name, + 'name': display_name, 'link': link, 'metadata': metadata, } @@ -133,12 +165,32 @@ class NvObjects: item: str | None, emblem: str | None = None, metadata: Dict | None = None, - name: str | None = None, + display_name: str | None = None, ) -> None: + """ + Adds an interface service object to the topology. + - the interface service object will be added as "service@host" to the topology + - service ist the interface name from the H/W-inventory + - "item" should be the interface item as in Checkmk, i.e. 
"X440G2-48p-10G4 Port 52" + - if there is no "item" service will be used as "item" + - if the host is a Checkmk host object it will be linked to the core + - if there is no display name, "service" will be used as display_name + - the "emblem" for the service will only be used if the host is not a Checkmk host object + - if there is no "emblem" the Checkmk will use its default emblem for missing objects (white question mark on blue ground) + Args: + host: the name of the host + service: the name of the service + item: the interface name as in Checkmk + emblem: the emblem for the object + metadata: additional data for the service + display_name: the name for the host to show in the topology + Returns: + None + """ if metadata is None: metadata = {} - if name is None: - name = service + if display_name is None: + display_name = service speed = None self.add_host(host=host) @@ -167,7 +219,7 @@ class NvObjects: metadata.update({'native_speed': speed}) self.nv_objects[service_object] = { - 'name': name, + 'name': display_name, 'link': link, 'metadata': metadata, } @@ -181,6 +233,19 @@ class NvObjects: emblem: str, interface: str | None, ) -> None: + """ + Adds an ip address object to the topology. + - the ip address object will be added as "raw_ip_address@interface@host" to the topology + - if interface is None (--l2-skip-if) the object is added as "raw_ip_address@host" + Args: + host: the name of the Checkmk host + raw_ip_address: the ip address as string (i.e. "10.10.10.10") + emblem: the emblem for the object + interface: the interface of the host where the ip address belongs to + Returns: + None + """ + if interface is not None: service_object = f'{raw_ip_address}@{interface}@{host}' else: @@ -251,7 +316,20 @@ class NvObjects: return operational_data return None - def add_ip_network(self, network: str, emblem: str, ) -> None: + def add_ip_network( + self, + network: str, + emblem: str, + ) -> None: + """ + Adds an ip network object to the topology. + - the ip network object will be added as "network" to the topology + Args: + network: the network as string (i.e. 
"10.10.10.0/24") + emblem: the emblem for the object + Returns: + None + """ if network not in self.nv_objects: self.nv_objects[network] = { 'name': network, @@ -269,7 +347,7 @@ class NvObjects: } } - def add_tooltip_quickinfo(self, nv_object: str, name: str, value: str) -> Dict: + def add_tooltip_quickinfo(self, nv_object: str, display_name: str, value: str) -> Dict: metadata = self.nv_objects[nv_object]['metadata'] if metadata.get('tooltip') is None: metadata['tooltip'] = {} @@ -277,7 +355,7 @@ class NvObjects: metadata['tooltip']['quickinfo'] = [] metadata['tooltip']['quickinfo'].append({ - 'name': name, + 'name': display_name, 'value': value, }) @@ -343,8 +421,8 @@ class NvConnections: # metadata = add_tooltip_quickinfo(metadata, right, right_speed_str) LOGGER.warning( - f'Connection speed mismatch: {left} (speed: {left_speed_str})' - f'<->{right} (speed: {right_speed_str})' + f'Connection speed mismatch: {left} ({left_speed_str})' + f'<->{right} ({right_speed_str})' ) # for duplex/native vlan it might be a good idea to change left/right @@ -358,8 +436,8 @@ class NvConnections: ) LOGGER.warning( - f'Connection duplex mismatch: {left} (duplex: {left_duplex})' - f'<->{right} (duplex: {right_duplex})' + f'Connection duplex mismatch: {left} ({left_duplex})' + f'<->{right} ({right_duplex})' ) if left_native_vlan and right_native_vlan: if left_native_vlan != '0' and right_native_vlan != '0': # ignore VLAN 0 (Native VLAN on routed ports) @@ -372,7 +450,7 @@ class NvConnections: LOGGER.warning( f'Connection native vlan mismatch: ' - f'{left} (vlan: {left_native_vlan})<->{right} (vlan: {right_native_vlan})' + f'{left} ({left_native_vlan})<->{right} ({right_native_vlan})' ) if warning: metadata['line_config'].update({ @@ -452,9 +530,9 @@ class Topology: # try to find the item for an interface def match_entry_with_item(interface_entry: Mapping[str, str], services: Sequence[str]) -> str | None: values = [ - interface_entry.get('name'.strip()), - interface_entry.get('description'.strip()), - interface_entry.get('alias').strip() + interface_entry.get('name', '').strip(), + interface_entry.get('description','').strip(), + interface_entry.get('alias', '').strip() ] for value in values: if value in services: @@ -465,13 +543,13 @@ class Topology: # try alias+index alias_index = str(interface_entry.get('alias')).strip() + ' ' + index if alias_index in services: - LOGGER.info(f'{self.topology} match found by alias-index|{interface_entry}| <-> |{alias_index}|') + LOGGER.info(f'{self.topology} match found by alias-index |{interface_entry}| <-> |{alias_index}|') return alias_index # try description+index description_index = str(interface_entry.get('description')).strip() + ' ' + index if description_index in services: - LOGGER.info(f'{self.topology} match found by description-index|{interface_entry}| <-> |{description_index}|') + LOGGER.info(f'{self.topology} match found by description-index |{interface_entry}| <-> |{description_index}|') return description_index # for index try with padding @@ -489,7 +567,7 @@ class Topology: if f'{value} {index_padded}' in services: return f'{value} {index_padded}' - LOGGER.warning(f'{self.topology} no match found |{interface_entry}| <-> |{services}|') + LOGGER.warning(f'{self.topology} no match found |{interface_entry} | <-> |{services}|') return None # empty host/neighbour should never happen here @@ -534,7 +612,7 @@ class Topology: entry.get('name')) == str(interface).lower(): # Cisco NXOS return match_entry_with_item(entry, interface_items) - 
LOGGER.warning(msg=f'{self.topology} Device: {host}: service for interface |{interface}| not found') + LOGGER.warning(f'{self.topology} Service for interface not found: {host}, |{interface}|') class TopologyStatic(Topology): @@ -553,7 +631,7 @@ class TopologyStatic(Topology): def create(self): for connection in self.connections: - LOGGER.debug(msg=f'{self.topology} connection from {TomlSections.STATIC_CONNECTIONS}: {connection}') + LOGGER.debug(f'{self.topology} connection from {TomlSections.STATIC_CONNECTIONS}: {connection}') self.nv_objects.add_host( host=connection.right_host, emblem=self.emblems.host_node @@ -609,29 +687,33 @@ class TopologyStatic(Topology): class TopologyL2(Topology): def __init__( self, + display_neighbours: bool, + display_ports: bool, + drop_neighbours: List[str], emblems: Emblems, host_cache: HostCache, - l2_drop_neighbours: List[str], - l2_neighbour_replace_regex: List[Tuple[str, str]], label: str, + neighbour_replace_regex: List[Tuple[str, str]], path_in_inventory: str, seed_devices: Sequence[str], - display_l2_neighbours: bool, + skip_external: bool, ): super().__init__( emblems=emblems, host_cache=host_cache, topology = f'[L2 {label}]', ) - self.l2_drop_neighbours: List[str] = l2_drop_neighbours + self.display_neighbours: bool = display_neighbours + self.display_ports: bool = display_ports + self.drop_neighbours: List[str] = drop_neighbours + self.hosts_done: MutableSet[str] = set() + self.hosts_to_go: MutableSet[str] = set(seed_devices) self.label: str = label + self.neighbour_replace_regex: List[Tuple[str, str]] = neighbour_replace_regex self.neighbour_to_host: MutableMapping[str, str] = {} self.path_in_inventory: str = path_in_inventory - self.hosts_to_go: MutableSet[str] = set(seed_devices) self.raw_neighbour_to_neighbour: Dict[str, str] = {} - self.l2_neighbour_replace_regex: List[Tuple[str, str]] = l2_neighbour_replace_regex - self.hosts_done: MutableSet[str] = set() - self.display_l2_neighbours: bool = display_l2_neighbours + self.skip_external: bool = skip_external def create(self): if not self.hosts_to_go: @@ -641,6 +723,9 @@ class TopologyL2(Topology): while self.hosts_to_go: host = self.hosts_to_go.pop() self.hosts_done.add(host) + if not self.host_cache.is_host_allowed(host): + LOGGER.info(f'{self.topology} host dropped by filter: {host}') + continue topo_data: Sequence[Mapping[str, str]] = self.host_cache.get_data( host=host, item=CacheItems.inventory, path=self.path_in_inventory @@ -651,7 +736,7 @@ class TopologyL2(Topology): inv_data=topo_data, ) - LOGGER.info(msg=f'{self.topology} host done : {host}') + LOGGER.info(f'{self.topology} host done: {host}') def host_from_inventory( self, @@ -661,48 +746,62 @@ class TopologyL2(Topology): for topo_neighbour in inv_data: # check if required data are not empty if not (raw_neighbour := topo_neighbour.get(L2InvColumns.NEIGHBOUR)): - LOGGER.warning(f'{self.topology} incomplete data: neighbour missing {topo_neighbour}') - continue - if not (raw_local_port := topo_neighbour.get(L2InvColumns.LOCALPORT)): - LOGGER.warning(f'{self.topology} incomplete data: local port missing {topo_neighbour}') - continue - if not (raw_neighbour_port := topo_neighbour.get(L2InvColumns.NEIGHBOURPORT)): - LOGGER.warning(f'{self.topology} incomplete data: neighbour port missing {topo_neighbour}') + LOGGER.warning(f'{self.topology} incomplete data neighbour missing: {topo_neighbour}') continue + # stop here if neighbour is dropped anyway... 
if not (neighbour := self.adjust_raw_neighbour(raw_neighbour)): continue - if neighbour_host := self.host_cache.get_host_from_neighbour(neighbour): - if neighbour_host not in self.hosts_done: - self.hosts_to_go.add(neighbour_host) - else: - neighbour_host = neighbour + if not (raw_local_port := topo_neighbour.get(L2InvColumns.LOCALPORT)): + LOGGER.warning(f'{self.topology} incomplete data local port missing: {topo_neighbour}') + continue + + if not (raw_neighbour_port := topo_neighbour.get(L2InvColumns.NEIGHBOURPORT)): + LOGGER.warning(f'{self.topology} incomplete data neighbour port missing: {topo_neighbour}') + continue - # getting/checking interfaces - local_port = self.get_service_by_interface(host, raw_local_port) - if not local_port: + # get local interface service + if not (local_port := self.get_service_by_interface(host, raw_local_port)): local_port = raw_local_port - LOGGER.warning(msg=f'{self.topology} service not found for local_port: {host}, {raw_local_port}') + LOGGER.warning(f'{self.topology} service not found for local_port: {host}, {raw_local_port}') elif local_port != raw_local_port: - # local_port = raw_local_port # don't reset local_port LOGGER.info( - msg=f'{self.topology} map raw_local_port -> local_port: {host}, {raw_local_port} -> {local_port}' + f'{self.topology} map raw_local_port -> local_port: {host}, {raw_local_port} -> {local_port}' ) - neighbour_port = self.get_service_by_interface(neighbour_host, raw_neighbour_port) - if not neighbour_port: - neighbour_port = raw_neighbour_port - LOGGER.warning( - msg=f'{self.topology} service not found for neighbour port: {neighbour_host}, {raw_neighbour_port}' - ) - elif neighbour_port != raw_neighbour_port: - # neighbour_port = raw_neighbour_port # don't reset neighbour_port - LOGGER.info( - msg=f'{self.topology} map raw_neighbour_port -> neighbour_port: {neighbour_host}, {raw_neighbour_port} ' - f'-> {neighbour_port}' - ) + if neighbour_host := self.host_cache.get_host_from_neighbour( + neighbour=neighbour, + neighbour_id=topo_neighbour.get(L2InvColumns.NEIGHBOURID), + raw_neighbour=raw_neighbour, + ): + if not self.host_cache.is_host_allowed(neighbour_host): + LOGGER.info(f'{self.topology} neighbour dropped by include/exclude filter: {neighbour_host}') + continue + if neighbour_host not in self.hosts_done: + self.hosts_to_go.add(neighbour_host) + + if not (neighbour_port := self.get_service_by_interface(neighbour_host, raw_neighbour_port)): + neighbour_port = raw_neighbour_port + LOGGER.warning( + f'{self.topology} service not found for neighbour port: {neighbour_host}, {raw_neighbour_port}' + ) + elif neighbour_port != raw_neighbour_port: + # neighbour_port = raw_neighbour_port # don't reset neighbour_port + LOGGER.info( + f'{self.topology} map raw_neighbour_port -> ' + f'neighbour_port: {neighbour_host}, {raw_neighbour_port} -> {neighbour_port}' + ) + neighbour_name = raw_neighbour if self.display_neighbours else None + else: + # neighbour is external to cmk, use neighbour id if available as neighbour + if self.skip_external: + continue + if not (neighbour_host := topo_neighbour.get(L2InvColumns.NEIGHBOURID)): + neighbour_host = neighbour + neighbour_port = raw_neighbour_port + neighbour_name = raw_neighbour if self.display_neighbours else neighbour metadata = { 'duplex': topo_neighbour.get('duplex'), 'native_vlan': topo_neighbour.get('native_vlan'), @@ -714,21 +813,21 @@ class TopologyL2(Topology): ) self.nv_objects.add_host( host=neighbour_host, - name=raw_neighbour if self.display_l2_neighbours else None, + 
display_name=neighbour_name, emblem=self.emblems.host_node, ) self.nv_objects.add_interface( host=str(host), service=str(local_port), metadata=metadata, - name=str(raw_local_port), + display_name=str(raw_local_port) if self.display_ports else str(local_port), item=str(local_port), emblem=self.emblems.service_node, ) self.nv_objects.add_interface( host=str(neighbour_host), service=str(neighbour_port), - name=str(raw_neighbour_port), + display_name=str(raw_neighbour_port) if self.display_ports else str(neighbour_port), item=str(neighbour_port), emblem=self.emblems.service_node, ) @@ -760,22 +859,28 @@ class TopologyL2(Topology): except KeyError: pass - if raw_neighbour in self.l2_drop_neighbours: - LOGGER.info(msg=f'{self.topology} drop in {TomlSections.L2_DROP_NEIGHBOURS}: {raw_neighbour}') + if raw_neighbour in self.drop_neighbours: + LOGGER.info(f'{self.topology} drop in {TomlSections.L2_DROP_NEIGHBOURS}: {raw_neighbour}') self.neighbour_to_host[raw_neighbour] = None return None adjusted_neighbour = raw_neighbour - if self.l2_neighbour_replace_regex: - for re_str, replace_str in self.l2_neighbour_replace_regex: + if self.neighbour_replace_regex: + for re_str, replace_str in self.neighbour_replace_regex: re_neighbour = re_sub(re_str, replace_str, adjusted_neighbour) if not re_neighbour: - LOGGER.info(f'{self.topology} removed by {TomlSections.L2_NEIGHBOUR_REPLACE_REGEX}: (|{adjusted_neighbour}|, |{re_str}|, |{replace_str}|)') + LOGGER.info( + f'{self.topology} removed by {TomlSections.L2_NEIGHBOUR_REPLACE_REGEX}: ' + f'(|{adjusted_neighbour}|, |{re_str}|, |{replace_str}|)' + ) self.neighbour_to_host[raw_neighbour] = None return None if re_neighbour != adjusted_neighbour: - LOGGER.info(f'{self.topology} changed by {TomlSections.L2_NEIGHBOUR_REPLACE_REGEX} |{adjusted_neighbour}| to |{re_neighbour}|') + LOGGER.info( + f'{self.topology} changed by {TomlSections.L2_NEIGHBOUR_REPLACE_REGEX}: ' + f'|{adjusted_neighbour}| to |{re_neighbour}|' + ) adjusted_neighbour = re_neighbour self.neighbour_to_host[raw_neighbour] = adjusted_neighbour @@ -785,6 +890,7 @@ class TopologyL2(Topology): class TopologyL3(Topology): def __init__( self, + display_devices: bool, emblems: Emblems, host_cache: HostCache, ignore_hosts: Sequence[str], @@ -806,42 +912,58 @@ class TopologyL3(Topology): host_cache=host_cache, topology=f'[L3 IPv{version}]' ) + self.diplay_devices = display_devices self.ignore_hosts: Sequence[str] = ignore_hosts self.ignore_ips: Sequence[ip_network] = ignore_ips self.ignore_wildcard: Sequence[Wildcard] = ignore_wildcard self.include_hosts: bool = include_hosts self.replace: Mapping[str, str] = replace + self.show_loopback: bool = include_loopback self.skip_cidr_0: bool = skip_cidr_0 self.skip_cidr_32_128: bool = skip_cidr_32_128 self.skip_if: bool = skip_if self.skip_ip: bool = skip_ip self.skip_public: bool = skip_public - self.show_loopback: bool = include_loopback self.summarize: Sequence[ip_network] = summarize self.version = version def create(self): match self.version: case IPVersion.IPv4: - host_list: Sequence[str] = self.host_cache.get_hosts_by_label(HostLabels.L3V4_ROUTER) + host_list: Sequence[str] = ( + self.host_cache.query_hosts_by_filter( + HostFilter.LABELS, HostLabels.L3V4_ROUTER, LiveStatusOperator.EQUAL + ) + ) if self.include_hosts: - host_list += self.host_cache.get_hosts_by_label(HostLabels.L3V4_HOSTS) + host_list += self.host_cache.query_hosts_by_filter( + HostFilter.LABELS, HostLabels.L3V4_HOSTS, LiveStatusOperator.EQUAL + ) case IPVersion.IPv6: - host_list: Sequence[str] 
= self.host_cache.get_hosts_by_label(HostLabels.L3V6_ROUTER) + host_list: Sequence[str] = self.host_cache.query_hosts_by_filter( + HostFilter.LABELS, HostLabels.L3V6_ROUTER, LiveStatusOperator.EQUAL + ) if self.include_hosts: - host_list += self.host_cache.get_hosts_by_label(HostLabels.L3V6_HOSTS) + host_list += self.host_cache.query_hosts_by_filter( + HostFilter.LABELS, HostLabels.L3V6_HOSTS, LiveStatusOperator.EQUAL + ) case _: host_list = [] - LOGGER.debug(f'{self.topology} host to work on: {host_list}') + # check against filter list (host labels/attributes include/exclude) + pre_filter_len = len(host_list) + host_list = [host for host in host_list if self.host_cache.is_host_allowed(host)] + LOGGER.info(f'{self.topology} # hosts allowed: {len(host_list)}/{pre_filter_len}') + + LOGGER.debug(f'{self.topology} host(s) to work on: {host_list}') if not host_list: LOGGER.error( - msg=f'{self.topology} No (routing capable) host found. Check if "inv_ip_addresses.mkp" ' - 'added/enabled and inventory and host label discovery has run.' + f'{self.topology} No (routing capable) host found. Check if "inv_ip_addresses.mkp" ' + 'added/enabled and inventory and host label discovery has run.' ) return @@ -898,7 +1020,7 @@ class TopologyL3(Topology): continue if self.is_ignore_ip(interface_address.ip.compressed): - LOGGER.info(f'{self.topology} rop IP in {TomlSections.L3_IGNORE_IP}: {host}, {interface_address.compressed}') + LOGGER.info(f'{self.topology} drop IP in {TomlSections.L3_IGNORE_IP}: {host}, {interface_address.compressed}') continue if self.is_ignore_wildcard(interface_address.ip.compressed): @@ -914,7 +1036,7 @@ class TopologyL3(Topology): network = f'{interface_address.network.compressed}' if network in self.replace.keys(): - LOGGER.info(f'{self.topology} Replaced network in {TomlSections.L3_REPLACE}: {network} -> {self.replace[network]}') + LOGGER.info(f'{self.topology} Replaced network in {TomlSections.L3_REPLACE_NETWORKS}: {network} -> {self.replace[network]}') network = self.replace[network] emblem = self.emblems.l3_replace @@ -939,7 +1061,12 @@ class TopologyL3(Topology): self.nv_connections.add_connection(left=f'{host}', right=f'{interface_address.ip.compressed}@{host}') self.nv_connections.add_connection(left=network, right=f'{interface_address.ip.compressed}@{host}') elif self.skip_if is False and self.skip_ip is True: - self.nv_objects.add_interface(host=host, service=device, item=item) + self.nv_objects.add_interface( + display_name=device if self.diplay_devices else item, + host=host, + item=item, + service=device, + ) self.nv_objects.add_tooltip_quickinfo( f'{device}@{host}', 'IP-address', interface_address.ip.compressed ) @@ -952,7 +1079,12 @@ class TopologyL3(Topology): raw_ip_address=interface_address.ip.compressed, emblem=self.emblems.ip_address, ) - self.nv_objects.add_interface(host=host, service=device, item=item) + self.nv_objects.add_interface( + display_name=device if self.diplay_devices else item, + host=host, + item=item, + service=device, + ) self.nv_connections.add_connection( left=host, right=f'{device}@{host}') self.nv_connections.add_connection( @@ -967,7 +1099,7 @@ class TopologyL3(Topology): for network in self.summarize: try: if ip_network(raw_ip_address).subnet_of(network): - LOGGER.debug(f'{self.topology} IP address {raw_ip_address} is in subnet -> ({network})') + LOGGER.debug(f'{self.topology} IP address is in subnet: {raw_ip_address} -> ({network})') return network.compressed except TypeError: pass @@ -977,7 +1109,7 @@ class TopologyL3(Topology): for ip in 
self.ignore_ips: try: if ip_network(raw_ip_address).subnet_of(ip): - LOGGER.debug(f'{self.topology} IP address {raw_ip_address} is in ignore list -> ({ip})') + LOGGER.debug(f'{self.topology} IP address is in ignore list: {raw_ip_address} -> ({ip})') return True except TypeError: continue @@ -988,8 +1120,8 @@ class TopologyL3(Topology): for wildcard in self.ignore_wildcard: if int_ip_address & wildcard.int_wildcard == wildcard.bit_pattern: LOGGER.debug( - f'{self.topology} IP address {raw_ip_address} matches ignore wildcard ' - f'list ({wildcard.ip_address}/{wildcard.wildcard})' + f'{self.topology} IP address matches ignore wildcard list: ' + f'{raw_ip_address} -> ({wildcard.ip_address}/{wildcard.wildcard})' ) return True return False diff --git a/source/bin/nvdct/lib/utils.py b/source/bin/nvdct/lib/utils.py index 9d1514931eba64b82f260820bb880e328fe33a8b..4eb62eeee370b7a302cf9b3812d6ec1f55a9ea69 100755 --- a/source/bin/nvdct/lib/utils.py +++ b/source/bin/nvdct/lib/utils.py @@ -13,7 +13,7 @@ from json import dumps, loads from logging import disable as log_off, Formatter, getLogger, StreamHandler from logging.handlers import RotatingFileHandler from pathlib import Path -from re import match as re_match, findall as re_findall, sub as re_sub +from re import match as re_match from socket import socket, AF_UNIX, AF_INET, SOCK_STREAM, SHUT_WR from sys import stdout, exit as sys_exit from time import time as now_time @@ -21,18 +21,12 @@ from tomllib import loads as toml_loads, TOMLDecodeError from typing import Dict, List, TextIO from lib.constants import ( - Backends, CMK_SITE_CONF, - Case, + CONFIG_FILE, DATAPATH, - EmblemValues, - EmblemNames, ExitCodes, LOGGER, - LogLevels, OMD_ROOT, - TomlSections, - TomlSettings, ) @@ -64,7 +58,7 @@ def get_data_form_live_status(query: str) -> Dict | List | None: return None -def get_data_from_toml(file: str) -> Dict: +def get_data_from_toml(file: str, check_only:bool = False) -> Dict| bool: data = {} toml_file = Path(file) if toml_file.exists(): @@ -72,22 +66,28 @@ def get_data_from_toml(file: str) -> Dict: data = toml_loads(toml_file.read_text()) except TOMLDecodeError as e: LOGGER.exception( - msg=f'ERROR: data file {toml_file} is not in valid TOML format! ({e}),' - f' (see https://toml.io/en/)' + f'Config file {toml_file} is not in valid TOML format! ({e}),' + f' (see https://toml.io/en/)' ) sys_exit(ExitCodes.BAD_TOML_FORMAT) - + if check_only: + message: str = f'Could read/parse the data from {toml_file}' + # will not be logged as logger si not initialized yet + # LOGGER.info(message) + print(message) + sys_exit(ExitCodes.OK) else: - LOGGER.error(msg=f'WARNING: User data {file} not found.') - LOGGER.info(msg=f'TOML file read: {file}') - LOGGER.debug(msg=f'Data from toml file: {data}') + LOGGER.error(f'\nConfig {file} not found. Falling back to default config file "{CONFIG_FILE}"') + # will not be logged as logger ist not initialized. 
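The `is_ignore_wildcard` check above compares an address against the `L3V4_IGNORE_WILDCARD` entries: each 0 bit of the wildcard must equal the pattern, each 1 bit is ignored. A standalone sketch of that idea, assuming the wildcard is first inverted into a netmask-style value as in `Settings.l3v4_ignore_wildcard` (the sample entry below is made up):

```python
# Sketch of the wildcard match used for L3V4_IGNORE_WILDCARD entries.
from ipaddress import ip_address

def matches_wildcard(ip: str, pattern: str, wildcard: str) -> bool:
    # Invert the wildcard into a mask, then compare only the bits the
    # wildcard marks as "must match" (its 0 bits).
    inverted = '.'.join(str(255 - int(octet)) for octet in wildcard.split('.'))
    mask = int(ip_address(inverted))
    return (int(ip_address(ip)) & mask) == (int(ip_address(pattern)) & mask)

# Hypothetical entry ["10.0.0.1", "0.0.255.0"]: ignore every address ending
# in .1 inside 10.0.0.0/16, regardless of the third octet.
assert matches_wildcard('10.0.42.1', '10.0.0.1', '0.0.255.0')
assert not matches_wildcard('10.0.42.2', '10.0.0.1', '0.0.255.0')
```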
+ # LOGGER.info(f'Config file read: {file}') + # LOGGER.debug(f'Data from config file: {data}') return data def rm_tree(root: Path) -> None: # safety if not str(root).startswith(DATAPATH): - LOGGER.warning(msg=f"WARNING: bad path to remove, {str(root)}, don\'t delete it.") + LOGGER.warning(f"Bad path to remove, {str(root)}, don\'t delete it.") return for p in root.iterdir(): if p.is_dir(): @@ -98,6 +98,8 @@ def rm_tree(root: Path) -> None: def remove_old_data(keep: int, min_age: int, raw_path: str, protected: Sequence[str]) -> None: + if not keep: + return path: Path = Path(raw_path) default_topo = path.joinpath('default') directories = [str(directory) for directory in list(path.iterdir())] @@ -119,7 +121,7 @@ def remove_old_data(keep: int, min_age: int, raw_path: str, protected: Sequence[ except ValueError: pass else: - LOGGER.info(msg=f'Protected topology: {directory}, will not be deleted.') + LOGGER.info(f'Protected topology: {directory}, will not be deleted.') if len(directories) < keep < 1: return @@ -137,8 +139,8 @@ def remove_old_data(keep: int, min_age: int, raw_path: str, protected: Sequence[ entry = topo_age.pop() if min_age * 86400 > now_time() - entry: LOGGER.info( - msg=f'Topology "{Path(topo_by_age[entry]).name}' - f'" not older then {min_age} day(s). not deleted.' + f'Topology "{Path(topo_by_age[entry]).name}' + f'" not older then {min_age} day(s). not deleted.' ) return LOGGER.info(f'delete old topology: {topo_by_age[entry]}') @@ -182,26 +184,6 @@ def save_data_to_file( Path(f'{DATAPATH}/default').symlink_to(target=Path(path), target_is_directory=True) -def is_mac_address(mac_address: str) -> bool: - """ - Checks if mac_address is a valid MAC address. - Will only accept MAC address in the form "AA:BB:CC:DD:EE:FF" (lower case is also ok). - Args: - mac_address: the MAC address to check - - Returns: - True if mac_address is a valid MAC address - False if mac_address not a valid MAC address - """ - re_mac_pattern = '([0-9A-Z]{2}\\:){5}[0-9A-Z]{2}' - if re_match(re_mac_pattern, mac_address.upper()): - LOGGER.debug(msg=f'mac: {mac_address}, match') - return True - else: - LOGGER.debug(msg=f'mac: {mac_address}, no match') - return False - - def is_list_of_str_equal(list1: List[str], list2: List[str]) -> bool: """ Compares two list of strings. Before comparing the list will internal sorted. @@ -228,7 +210,7 @@ def is_valid_hostname(host: str) -> bool: if re_match(re_host_pattern, host): return True else: - LOGGER.error(f'Invalid hostname found: {host}') + LOGGER.error(f'Invalid hostname found: |{host}|') return False @@ -237,7 +219,7 @@ def is_valid_site_name(site: str) -> bool: if re_match(re_host_pattern, site): return True else: - LOGGER.error(f'Invalid site name found: {site}') + LOGGER.error(f'Invalid site name found: |{site}|') return False @@ -246,7 +228,7 @@ def is_valid_customer_name(customer: str) -> bool: if re_match(re_host_pattern, customer): return True else: - LOGGER.error(f'Invalid customer name found: {customer}') + LOGGER.error(f'Invalid customer name found: |{customer}|') return False @@ -255,18 +237,12 @@ def is_valid_output_directory(directory: str) -> bool: re_host_pattern = r'^[0-9a-z-A-Z\.\-\_\:]{1,30}$' if re_match(re_host_pattern, directory): return True + return True else: LOGGER.error(f'Invalid output directory name found: {directory}') return False -def is_valid_log_file(log_file: str) -> bool: - if not log_file.startswith(f'{OMD_ROOT}/var/log/'): - LOGGER.error(f'Logg file needs to be under "{OMD_ROOT}/var/log/"! 
Got {Path(log_file).absolute()}') - return False - return True - - def compare_dicts(dict1: Mapping, dict2: Mapping) -> bool: # check top level keys if not is_list_of_str_equal(list(dict1.keys()), list(dict2.keys())): @@ -317,24 +293,15 @@ def is_equal_with_default(data: Mapping, file: str) -> bool: return compare_dicts(data, default_data) return False -def get_attributes_from_inventory(inventory: Dict[str, object], raw_path: str): - # print(inventory['Nodes']['networking']['Nodes']['lldp_cache']['Attributes']['Pairs']) - path: List[str] = ('Nodes,' + ',Nodes,'.join(raw_path.split(',')) + ',Attributes,Pairs').split(',') - try: - table = inventory.copy() - except AttributeError: - return None - for m in path: - try: - table = table[m] - except KeyError: - LOGGER.info(msg=f'Inventory attributes for {path} not found') - return None - return dict(table) +def get_data_from_inventory(inventory: Dict[str, object], raw_path: str) -> List | Dict | None: + # path: List[str] = ('Nodes,' + ',Nodes,'.join(raw_path.split(',')) + ',Table,Rows').split(',') + split_path: List[str] = raw_path.split(',') + path: MutableSequence[str] = [] + for entry in split_path[0:-2]: + path += ['Nodes', entry] + path += split_path[-2:] -def get_table_from_inventory(inventory: Dict[str, object], raw_path: str) -> List | None: - path: List[str] = ('Nodes,' + ',Nodes,'.join(raw_path.split(',')) + ',Table,Rows').split(',') try: table = inventory.copy() except AttributeError: @@ -343,9 +310,10 @@ def get_table_from_inventory(inventory: Dict[str, object], raw_path: str) -> Lis try: table = table[m] except KeyError: - LOGGER.info(msg=f'Inventory table for {path} not found') + LOGGER.info(f'Inventory table not found for: {path}') return None - return list(table) + + return table def configure_logger(log_file: str, log_level: int, log_to_console: bool) -> None: @@ -396,66 +364,3 @@ class StdoutQuiet: def flush(self): self._org_stdout.flush() - - -def adjust_toml(toml_file: str): - fix_options = { - 'DROP_HOSTS': TomlSections.L2_DROP_NEIGHBOURS, - 'HOST_MAP': TomlSections.L2_HOST_MAP, - 'L2_DROP_HOSTS': TomlSections.L2_DROP_NEIGHBOURS, # needs to be before DROP_HOST - 'L3V4_IGNORE_HOSTS': TomlSections.L3_IGNORE_HOSTS, - 'L3V4_IGNORE_IP': TomlSections.L3_IGNORE_IP, - 'L3V4_IRNORE_WILDCARD': TomlSections.L3V4_IGNORE_WILDCARD, - 'L3V4_REPLACE': TomlSections.L3_REPLACE, - 'L3V3_REPLACE': TomlSections.L3_REPLACE, - 'L3V4_SUMMARIZE': TomlSections.L3_SUMMARIZE, - 'SEED_DEVICES': TomlSections.L2_SEED_DEVICES, - 'icon_missinc': EmblemValues.ICON_ALERT_UNREACHABLE, - 'icon_missing': EmblemValues.ICON_ALERT_UNREACHABLE, - 'l3v4_replace': EmblemNames.L3_REPLACE, - 'l3v4_summarize': EmblemNames.L3_SUMMARIZE, - 'keep_domain = true': f'{TomlSettings.REMOVE_DOMAIN} = false', - 'keep_domain = false': f'{TomlSettings.REMOVE_DOMAIN} = true', - } - old_options = { - 'lowercase': f'{TomlSettings.CASE} = {Case.LOWER}', - 'uppercase': f'{TomlSettings.CASE} = {Case.UPPER}', - f'FILESYSTEM': {Backends.MULTISITE}, - 'debug': f'{TomlSettings.LOG_LEVEL} = {LogLevels.DEBUG}', - 'keep_domain': f'{TomlSettings.REMOVE_DOMAIN} = true/false' - } - changed: bool = False - org_file = Path(toml_file) - if org_file.exists(): - print(f'Checking file.: {org_file.name}') - org_content: str = org_file.read_text() - content: str = org_content - for old, new in fix_options.items(): - re_pattern = f'\\b{old}\\b' - count = len(re_findall(re_pattern, org_content)) - if count > 0: - changed = True - content = re_sub(re_pattern, new, content) - print(f'Found value...: "{old}" 
{count} times, replaced by "{new}"')
-
-    for old, new in old_options.items():
-        re_pattern = f'\\b{old}\\b'
-        count = len(re_findall(re_pattern, org_content))
-        if count > 0:
-            print(f'Obsolete......: "{old}", use "{new}" instead')
-
-    if changed:
-        backup_file = Path(f'{toml_file}.backup')
-        if not backup_file.exists():
-            org_file.rename(backup_file)
-            print(f'Renamed TOML..: {backup_file.name}')
-            new_file = Path(toml_file)
-            new_file.open('w').write(content)
-            print(f'Written fixed.: {new_file.name}')
-        else:
-            print(
-                f'Can not create backup file {backup_file.name}, file exists. Aborting!\n'
-                f'Nothing has changed.'
-            )
-    else:
-        print('Finished......: Nothing found to fix.')
diff --git a/source/bin/nvdct/nvdct.py b/source/bin/nvdct/nvdct.py
index 8d93b27f1fab784df07622d786bfaf99cf23915d..48d47168211fa332708f76952c2b8c6ba03f47c3 100755
--- a/source/bin/nvdct/nvdct.py
+++ b/source/bin/nvdct/nvdct.py
@@ -6,7 +6,7 @@
 #  Author: thl-cmk[at]outlook[dot]com
 #  URL   : https://thl-cmk.hopto.org
 #  Date  : 2023-10-08
-#  File  : nvdct_data.py
+#  File  : nvdct/nvdct.py

 # 2023-10-10: initial release
 # 2023-10-16: added options --keep-max and --min-age
@@ -171,8 +171,56 @@
 # fixed: cleanup -> remove the oldest topologies not the newest
 # INCOMPATIBLE: removed: CUSTOM_LAYERS
 # refactoring constants
-#
-
+# 2024-12-30: added support for lldp device id/name and cdp name from global device data to map L2 neighbour to CMK host
+# 2025-01-01: added support for filter by host label/tag
+#             INCOMPATIBLE:
+#             cli options:
+#             changed:
+#                 "--case" to "--l2-case"
+#                 "--display-l2-neighbours" to "--l2-display-neighbours"
+#                 "--include-l3-hosts" to "--l3-include-hosts"
+#                 "--include-l3-loopback" to "--l3-include-loopback"
+#                 "--keep" to "--keep-max-topologies"
+#                 "--min-age" to "--min-topology-age"
+#                 "--prefix" to "--l2-prefix"
+#                 "--remove-domain" to "--l2-remove-domain"
+#                 "--skip-l3-cidr-0" to "--l3-skip-cidr-0"
+#                 "--skip-l3-cidr-32-128" to "--l3-skip-cidr-32-128"
+#                 "--skip-l3-if" to "--l3-skip-if"
+#                 "--skip-l3-ip" to "--l3-skip-ip"
+#                 "--skip-l3-public" to "--l3-skip-public"
+#                 "--check-user-data-only" to "--check-config"
+#                 "-u"/"--user-data-file" to "-c"/"--config"
+#             removed "-p", use "--l2-prefix" instead
+#             TOML file:
+#             changed:
+#                 "L2_HOST_MAP" to "L2_NEIGHBOUR_TO_HOST_MAP"
+#                 "L3_REPLACE" to "L3_REPLACE_NETWORKS"
+#                 "case" to "l2_case"
+#                 "display_l2_neighbours" to "l2_display_neighbours"
+#                 "include_l3_hosts" to "l3_include_hosts"
+#                 "include_l3_loopback" to "l3_include_loopback"
+#                 "keep" to "keep_max_topologies"
+#                 "min-age" to "min_topology_age"
+#                 "prefix" to "l2_prefix"
+#                 "remove_domain" to "l2_remove_domain"
+#                 "skip_l3_cidr_0" to "l3_skip_cidr_0"
+#                 "skip_l3_cidr_32_128" to "l3_skip_cidr_32_128"
+#                 "skip_l3_if" to "l3_skip_if"
+#                 "skip_l3_ip" to "l3_skip_ip"
+#                 "skip_l3_public" to "l3_skip_public"
+# 2025-01-05: added "AUTO" to --l2-case parameters
+#             INCOMPATIBLE: changed --l2-remove-domain from bool to "OFF" | "ON" | "AUTO"
+#             added support for filter by folder
+# 2025-01-06: added option "--l2-skip-external"
+# 2025-01-07: added option "INSENSITIVE" to --l2-case
+# 2025-01-11: INCOMPATIBLE: changed "--adjust-toml" -> "--update-config"
+# 2025-01-18: INCOMPATIBLE: changed "CUSTOMERS = []" -> "FILTER_BY_CUSTOMER = []"
+#                           "SITES = []" -> "FILTER_BY_SITE = []"
+# 2025-01-21: added support for Post requests (Werk #17003)
+#             fixed REST API query for interface services
+# 2025-01-24: added option --l2-display-ports, --l3-display-devices
+# 2025-02-05: added option "OFF" to --l2-case
 #
 # creating
topology data json from inventory data # @@ -181,12 +229,12 @@ # https://forum.checkmk.com/t/network-visualization/41680 (from 2023-10-05) # https://exchange.checkmk.com/p/network-visualization (from 2023-10-05) # -# NOTE: the topology_data configuration (layout etc.) is saved under ~/var/check_mk/topology +# NOTE: the topology_data configuration (layout etc.) is saved under ~/var/check_mk/topology/ # # The inventory data could be created with my CDP/LLDP/IP Address/Interface nane inventory plugins: # CDP.....: https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_cdp_cache # LLDP....: https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_lldp_cache -# L3v4....: https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_ip_addresses +# L3......: https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_ip_addresses # : https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_lnx_if_ip # : https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_win_if_ip # IF Name.: https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_ifname @@ -277,6 +325,8 @@ from time import strftime, time_ns from typing import List from lib.args import parse_arguments + +from lib.update_config import update_config from lib.backends import ( HostCache, HostCacheLiveStatus, @@ -286,15 +336,18 @@ from lib.backends import ( from lib.constants import ( Backends, DATAPATH, - URLs, + ExitCodes, + HostFilter, HostLabels, IPVersion, InvPaths, LOGGER, Layers, + LiveStatusOperator, NVDCT_VERSION, TomlSections, TomlSettings, + URLs, ) from lib.settings import Settings from lib.topologies import ( @@ -303,9 +356,7 @@ from lib.topologies import ( TopologyStatic, ) from lib.utils import ( - ExitCodes, StdoutQuiet, - adjust_toml, configure_logger, remove_old_data, ) @@ -323,7 +374,7 @@ def main(): log_level=settings.loglevel, ) # always logg start and end of a session (except --log-level OFF) - LOGGER.critical(msg='Data creation started') + LOGGER.critical('Data creation started') print('') print( @@ -334,8 +385,8 @@ def main(): print('') print(f'Start time....: {strftime(settings.time_format)}') - if settings.fix_toml: - adjust_toml(settings.user_data_file) + if settings.update_config: + update_config(settings.config) print(f'Time taken....: {(time_ns() - start_time) / 1e9}/s') print(f'End time......: {strftime(settings.time_format)}') print('') @@ -344,37 +395,30 @@ def main(): sys.exit() match settings.backend: + case Backends.MULTISITE: + host_cache: HostCache = HostCacheMultiSite( + customers=settings.filter_by_customer, + filter_customers=settings.filter_customers, + filter_sites=settings.filter_sites, + pre_fetch=settings.pre_fetch, + sites=settings.filter_by_site, + ) case Backends.RESTAPI: host_cache: HostCache = HostCacheRestApi( - pre_fetch=settings.pre_fetch, api_port=settings.api_port, filter_sites=settings.filter_sites, - sites=settings.sites, - ) - case Backends.MULTISITE: - host_cache: HostCache = HostCacheMultiSite( pre_fetch=settings.pre_fetch, - filter_sites=settings.filter_sites, - sites=settings.sites, - filter_customers=settings.filter_customers, - customers=settings.customers, + sites=settings.filter_by_site, ) case Backends.LIVESTATUS: host_cache: HostCache = HostCacheLiveStatus( pre_fetch=settings.pre_fetch, ) case _: - LOGGER.error(msg=f'Backend {settings.backend} not implemented') + LOGGER.error(f'Backend {settings.backend} not implemented') host_cache: HostCache | None = None # to 
keep linter happy sys.exit(ExitCodes.BACKEND_NOT_IMPLEMENTED) - host_cache.init_neighbour_to_host( - case=settings.case, - l2_host_map=settings.l2_host_map, - prefix=settings.prefix, - remove_domain=settings.remove_domain, - ) - jobs: MutableSequence = [] pre_fetch_layers: List[str] = [] pre_fetch_host_list: List[str] = [] @@ -390,6 +434,7 @@ def main(): case Layers.CDP | Layers.LLDP: jobs.append(layer) host_cache.add_inventory_path(InvPaths.CDP if layer == Layers.CDP else InvPaths.LLDP) + host_cache.add_inventory_path(InvPaths.CDP_GLOBAL if layer == Layers.CDP else InvPaths.LLDP_GLOBAL) pre_fetch_layers.append(HostLabels.CDP if layer == Layers.CDP else HostLabels.LLDP) case _: LOGGER.warning(f'Unknown layer {layer} dropped.') @@ -398,16 +443,25 @@ def main(): if not jobs: message = ( f'No layer to work on. Please configura at least one layer (i.e. CLI option "-l {Layers.CDP}")\n' - f'See {settings.user_data_file} -> {TomlSections.SETTINGS} -> {TomlSettings.LAYERS}' + f'See {settings.config} -> {TomlSections.SETTINGS} -> {TomlSettings.LAYERS}' ) LOGGER.warning(message) print(message) sys.exit(ExitCodes.NO_LAYER_CONFIGURED) + # init filter lists before pre-fetch + host_cache.init_filter_lists( + filter_by_folder=settings.filter_by_folder, + filter_by_host_label=settings.filter_by_host_label, + filter_by_host_tag=settings.filter_by_host_tag, + ) + if settings.pre_fetch: LOGGER.info('Pre fill cache...') for host_label in pre_fetch_layers: - if host_list := host_cache.get_hosts_by_label(host_label): + if host_list := host_cache.filter_host_list( + host_cache.query_hosts_by_filter(HostFilter.LABELS, host_label, LiveStatusOperator.EQUAL) + ): pre_fetch_host_list = list(set(pre_fetch_host_list + list(host_list))) LOGGER.info(f'Fetching data for {len(pre_fetch_host_list)} hosts start') print(f'Prefetch start: {strftime(settings.time_format)}') @@ -416,6 +470,14 @@ def main(): LOGGER.info(f'Fetching data for {len(pre_fetch_host_list)} hosts end') print(f'Prefetch end..: {strftime(settings.time_format)}') + # must not be used before pre-fetch is done + host_cache.init_neighbour_to_host_map( + case=settings.l2_case, + l2_host_map=settings.l2_neighbour_to_host_map, + prefix=settings.l2_prefix, + remove_domain=settings.l2_remove_domain, + ) + for job in jobs: match job: case Layers.STATIC: @@ -428,21 +490,22 @@ def main(): case Layers.L3V4: label = job topology = TopologyL3( + display_devices=settings.l3_display_devices, emblems=settings.emblems, host_cache=host_cache, ignore_hosts=settings.l3_ignore_hosts, ignore_ips=settings.l3_ignore_ips, ignore_wildcard=settings.l3v4_ignore_wildcard, - include_hosts=settings.include_l3_hosts, - replace=settings.l3_replace, - skip_cidr_0=settings.skip_l3_cidr_0, - skip_cidr_32_128=settings.skip_l3_cidr_32_128, - skip_if=settings.skip_l3_if, - skip_ip=settings.skip_l3_ip, - skip_public=settings.skip_l3_public, - include_loopback=settings.include_l3_loopback, + include_hosts=settings.l3_include_hosts, + include_loopback=settings.l3_include_loopback, + replace=settings.l3_replace_networks, + skip_cidr_0=settings.l3_skip_cidr_0, + skip_cidr_32_128=settings.l3_skip_cidr_32_128, + skip_if=settings.l3_skip_if, + skip_ip=settings.l3_skip_ip, + skip_public=settings.l3_skip_public, summarize=settings.l3_summarize, - version=IPVersion.IPv4 if job == Layers.L3V4 else IPVersion.IPv6 + version=IPVersion.IPv4 if job == Layers.L3V4 else IPVersion.IPv6, ) case Layers.CDP | Layers.LLDP: label = job @@ -453,16 +516,20 @@ def main(): host_label = HostLabels.LLDP inv_path = 
InvPaths.LLDP if not (seed_devices := settings.l2_seed_devices): - seed_devices = host_cache.get_hosts_by_label(host_label) + seed_devices = host_cache.query_hosts_by_filter( + HostFilter.LABELS, host_label, LiveStatusOperator.EQUAL + ) topology = TopologyL2( + display_neighbours=settings.l2_display_neighbours, + display_ports = settings.l2_display_ports, + drop_neighbours=settings.l2_drop_neighbours, emblems=settings.emblems, host_cache=host_cache, - l2_drop_neighbours=settings.l2_drop_neighbours, - l2_neighbour_replace_regex=settings.l2_neighbour_replace_regex, label=label, + neighbour_replace_regex=settings.l2_neighbour_replace_regex, path_in_inventory=inv_path, seed_devices=seed_devices, - display_l2_neighbours=settings.display_l2_neighbours, + skip_external=settings.l2_skip_external, ) case _: LOGGER.warning(f'Unknown layer {job}, ignoring.') @@ -487,13 +554,13 @@ def main(): f'Devices/Objects/Connections added {topology.nv_objects.host_count}/' f'{len(topology.nv_objects.nv_objects)}/{len(topology.nv_connections.nv_connections)}' ) - LOGGER.info(msg=f'{pre_message} {message}') + LOGGER.info(f'{pre_message} {message}') print(message) - if settings.keep: + if settings.keep_max_topologies: remove_old_data( - keep=settings.keep, - min_age=settings.min_age, + keep=settings.keep_max_topologies, + min_age=settings.min_topology_age, raw_path=DATAPATH, protected=settings.protected_topologies, ) diff --git a/source/packages/nvdct b/source/packages/nvdct index 32ad046946260c2eb190ea09a64f9b71d419de41..d277e55d94d95f7fc2f230b44f2e30ff6993ff98 100644 --- a/source/packages/nvdct +++ b/source/packages/nvdct @@ -47,7 +47,7 @@ 'htdocs/images/icons/location_80.png']}, 'name': 'nvdct', 'title': 'Network Visualization Data Creation Tool (NVDCT)', - 'version': '0.9.7-20241230', + 'version': '0.9.8-20250205', 'version.min_required': '2.3.0b1', 'version.packaged': 'cmk-mkp-tool 0.2.0', 'version.usable_until': '2.4.0p1'}
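Side note on the `TopologyL3` hunk further up: the check `int_ip_address & wildcard.int_wildcard == wildcard.bit_pattern` is a wildcard-mask comparison, i.e. the address is compared to the ignore pattern only on the bit positions selected by the mask, while the remaining positions are treated as "don't care". Below is a minimal, self-contained sketch of that idea; the helper name, the sample addresses and the integer handling are illustrative only and do not claim to mirror the internal `Wildcard` representation of the package.

```python
from ipaddress import IPv4Address


def wildcard_match(address: str, pattern: str, wildcard: str) -> bool:
    """Cisco-style wildcard test: 0 bits in the wildcard must match the
    pattern exactly, 1 bits are ignored (sketch only, not NVDCT code)."""
    addr = int(IPv4Address(address))
    pat = int(IPv4Address(pattern))
    care = int(IPv4Address(wildcard)) ^ 0xFFFFFFFF  # invert mask: 1 = bit must match
    return (addr & care) == (pat & care)


# hypothetical example: treat every address ending in .5 within 10.20.0.0/16 as a match
print(wildcard_match('10.20.77.5', '10.20.0.5', '0.0.255.0'))  # True
print(wildcard_match('10.20.77.6', '10.20.0.5', '0.0.255.0'))  # False
```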