diff --git a/README.md b/README.md index 075d2465fc415b0fd135719de11880d240634ef1..f4a91797c992be185ee01b408002fd1ea8e05d1c 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[PACKAGE]: ../../raw/master/mkp/nvdct-0.9.5-20241217.mkp "nvdct-0.9.5-20241217.mkp" +[PACKAGE]: ../../raw/master/mkp/nvdct-0.9.6-20241222.mkp "nvdct-0.9.6-20241222.mkp" # Network Visualization Data Creation Tool (NVDCT) This script creates the topology data file needed for the [Checkmk Exchange Network visualization](https://exchange.checkmk.com/p/network-visualization) plugin.\ diff --git a/mkp/nvdct-0.9.6-20241222.mkp b/mkp/nvdct-0.9.6-20241222.mkp new file mode 100644 index 0000000000000000000000000000000000000000..a245e33f052657a47f08c939ec5ed9bfe5ec7575 Binary files /dev/null and b/mkp/nvdct-0.9.6-20241222.mkp differ diff --git a/source/bin/nvdct/conf/nvdct.toml b/source/bin/nvdct/conf/nvdct.toml index 414861aab4269dcd02d9d3255bb0158edfa258e5..e907197e6ce3c6c378211553d06c4ed0b19b3b3e 100755 --- a/source/bin/nvdct/conf/nvdct.toml +++ b/source/bin/nvdct/conf/nvdct.toml @@ -26,25 +26,27 @@ L2_DROP_HOSTS = [ # "a nother invalid name", ] -# hosts will be ignored in L3v4 topology +# hosts will be ignored in L3 topologies # [0-9-a-zA-Z\.\_\-]{1,253} -> host -L3V4_IGNORE_HOSTS = [ +L3_IGNORE_HOSTS = [ # "host1", # "host2", ] -# drop IP address that match network -L3V4_IGNORE_IP = [ +# drop IP address that matches ip/network +L3_IGNORE_IP = [ # "192.168.100.231", # "192.168.100.0/16", # "192.168.150.0/255.255.255.0", + # "fd00::1" + # "fd00::/8" ] # ignore IPs by wildcard # if comparing an ip address: # each 0 bit in the wildcad has to be exacly as in the pattern # each 1 bit in the wildacrd will be ignored -L3V4_IRNORE_WILDCARD = [ +L3V4_IGNORE_WILDCARD = [ # [ pattern , wildcard ] # ["172.17.0.1", "0.0.255.0"], # ignore all IPs ending with 1 from 172.17.128.0/16 # ["172.17.128.0", "0.0.127.3"], # ignore all IPs ending with 0-3 from 172.17.128.0/17 @@ -52,11 +54,12 @@ L3V4_IRNORE_WILDCARD = [ ] # networks to summarize -L3V4_SUMMARIZE = [ +L3_SUMMARIZE = [ # "10.193.172.0/24", # "10.194.8.0/23", # "10.194.12.0/24", # "10.194.115.0/255.255.255.0", + # "fd00::/8" ] # topologies will not be deleted by "--keep" @@ -120,11 +123,12 @@ SITES = [ # replace network objects (takes place after summarize) # [0-9-a-zA-Z\.\_\-]{1,253} -> host -[L3V4_REPLACE] +[L3_REPLACE] # "10.193.172.0/24" = "MPLS" # "10.194.8.0/23" = "MPLS" # "10.194.12.0/24" = "MPLS" # "10.194.115.0/24" = "MPLS" +# "fc00::/7" = "Unique-local" [EMBLEMS] # can use misc icons from CMK or upload your own in the misc category @@ -133,8 +137,8 @@ SITES = [ # "host_node" = "icon_missinc" # "ip_address" = "ip-address_80" # "ip_network" = "ip-network_80" -# "l3v4_replace" = "icon_plugins_cloud" -# "l3v4_summarize" = "icon_aggr" +# "l3_replace" = "icon_plugins_cloud" +# "l3_summarize" = "icon_aggr" # "service_node" = "icon_missing" [MAP_SPEED_TO_THICKNESS] @@ -158,13 +162,13 @@ SITES = [ # filter_customers = "INCLUDE" |"EXCLUDE" # filter_sites = "INCLUDE" | "EXCLUDE" # include_l3_hosts = false -# keep = 0 -# layers = ["LLDP", "CDP", L3v4, "STATIC", "CUSTOM"] +keep = 10 +# layers = ["LLDP", "CDP", "L3v4", "STATIC", "CUSTOM"] # log_file = "~/var/log/nvdct.log" # log_level = "WARNING" # log_to_stdout = false -# min_age = 0 -# output_directory = '' # +min_age = 1 +output_directory = 'nvdct' # remove to get date formated directory # pre_fetch = false # prefix = "" # quiet = true diff --git a/source/bin/nvdct/lib/args.py b/source/bin/nvdct/lib/args.py index 
cf77c81bea79ee7e545e426450ad836b9469159c..5ad06bd4dd8d1ae0953c9f1994722490f491cecf 100755 --- a/source/bin/nvdct/lib/args.py +++ b/source/bin/nvdct/lib/args.py @@ -45,18 +45,18 @@ from argparse import ( from pathlib import Path from lib.constants import ( + ExitCodes, HOME_URL, MIN_CDP_VERSION, MIN_LINUX_IP_ADDRESSES, + MIN_LLDP_VERSION, MIN_SNMP_IP_ADDRESSES, MIN_WINDOWS_IP_ADDRESSES, - MIN_LLDP_VERSION, NVDCT_VERSION, SCRIPT, TIME_FORMAT_ARGPARSER, USER_DATA_FILE, ) -from lib.utils import ExitCodes def parse_arguments() -> arg_Namespace: @@ -80,10 +80,12 @@ def parse_arguments() -> arg_Namespace: ), formatter_class=RawTextHelpFormatter, epilog='Exit codes:\n' - f' {ExitCodes.OK.value} - No error\n' - f' {ExitCodes.BAD_OPTION_LIST.value} - Bad options list\n' - f' {ExitCodes.BACKEND_NOT_IMPLEMENTED.value} - Backend not implemented\n' - f' {ExitCodes.AUTOMATION_SECRET_NOT_FOUND.value} - Automation secret not found\n' + f' {ExitCodes.OK} - No error\n' + f' {ExitCodes.BAD_OPTION_LIST} - Bad options list\n' + f' {ExitCodes.BAD_TOML_FORMAT} - Bad TOML file format\n' + f' {ExitCodes.BACKEND_NOT_IMPLEMENTED} - Backend not implemented\n' + f' {ExitCodes.AUTOMATION_SECRET_NOT_FOUND} - Automation secret not found\n' + f' {ExitCodes.NO_LAYER_CONFIGURED} - No layer to work on\n' '\nUsage:\n' f'{SCRIPT} -u ~/local/bin/nvdct/conf/my_{USER_DATA_FILE} \n\n' ) @@ -121,16 +123,22 @@ def parse_arguments() -> arg_Namespace: parser.add_argument( '-l', '--layers', nargs='+', - choices=['CDP', 'CUSTOM', 'LLDP', 'STATIC', 'L3v4'], + choices=[ + 'CDP', + 'CUSTOM', + 'L3v4', + 'LLDP', + 'STATIC', + ], # default=['CDP'], help=( - f' - CDP : needs inv_cdp_cache package at least in version {MIN_CDP_VERSION}\n' - f' - LLDP : needs inv_lldp_cache package at least in version {MIN_LLDP_VERSION}\n' - f' - L3v4 : needs inv_ip_address package at least in version {MIN_SNMP_IP_ADDRESSES} for SNMP based hosts\n' - f' for Linux based hosts inv_lnx_ip_if in version {MIN_LINUX_IP_ADDRESSES}\n' - f' for Windows based hosts inv_win_ip_if in version {MIN_WINDOWS_IP_ADDRESSES}\n' - f' - STATIC: creates a topology base on the "STATIC_CONNECTIONS" in the toml file\n' - f' - CUSTOM: (deprecated)\n' + f' - CDP : needs inv_cdp_cache package at least in version {MIN_CDP_VERSION}\n' + f' - LLDP : needs inv_lldp_cache package at least in version {MIN_LLDP_VERSION}\n' + f' - L3v4 : needs inv_ip_address package at least in version {MIN_SNMP_IP_ADDRESSES} for SNMP based hosts\n' + f' for Linux based hosts inv_lnx_ip_if in version {MIN_LINUX_IP_ADDRESSES}\n' + f' for Windows based hosts inv_win_ip_if in version {MIN_WINDOWS_IP_ADDRESSES}\n' + f' - STATIC : creates a topology base on the "STATIC_CONNECTIONS" in the toml file\n' + f' - CUSTOM : (deprecated)\n' ) ) parser.add_argument( diff --git a/source/bin/nvdct/lib/backends.py b/source/bin/nvdct/lib/backends.py index 64cce3ae7b7075834fdc26052821ba65e4c60754..78a7d4bdbad83e006c5eea5bf18cff7b89217bf4 100755 --- a/source/bin/nvdct/lib/backends.py +++ b/source/bin/nvdct/lib/backends.py @@ -10,32 +10,33 @@ # 2024-06-18: fixed host_exist returns always True if host was in host_cache, even with host=None # 2024-09-25: fixed crash on missing "customer" section in site config file +# 2024-12-22: refactoring, leave only backend specific stuff in the backend +# removed not strictly needed properties, renamed functions to better understand what the do -from collections.abc import Mapping, Sequence from abc import abstractmethod from ast import literal_eval +from collections.abc import Mapping, 
MutableSequence, Sequence from enum import Enum, unique from pathlib import Path from requests import session -from time import time_ns -from typing import Dict, List, Tuple from sys import exit as sys_exit +from typing import Dict, List, Tuple from livestatus import MultiSiteConnection, SiteConfigurations, SiteId from lib.constants import ( CACHE_INTERFACES_DATA, + ExitCodes, OMD_ROOT, PATH_INTERFACES, ) from lib.utils import ( - ExitCodes, + LOGGER, get_data_form_live_status, get_table_from_inventory, - LOGGER, - ) +HOST_EXIST: Dict = {'exists': True} def hosts_to_query(hosts: List[str]) -> Tuple[str, List[str]]: # WORKAROUND for: Apache HTTP Error 414: Request URI too long @@ -89,6 +90,8 @@ class CacheItems(Enum): inventory = 'inventory' interfaces = 'interfaces' + def __get__(self, instance, owner): + return self.value class HostCache: def __init__( @@ -96,247 +99,287 @@ class HostCache: pre_fetch: bool, backend: str, ): - self._cache: Dict = {} + LOGGER.info('init HOST_CACHE') + + self.cache: Dict = {} self._inventory_pre_fetch_list: List[str] = [ PATH_INTERFACES, ] - self._count: int = 0 - self._pre_fetch = pre_fetch - self._backend = backend - LOGGER.info('init HOST_CACHE') - @abstractmethod - def get_inventory_data(self, hosts: Sequence[str]) -> Dict[str, Dict | None]: + self.pre_fetch: bool = bool(pre_fetch) + self.backend: str = str(backend) + + if self.pre_fetch: + for host in self.query_all_hosts(): + self.cache[host] = HOST_EXIST.copy() + + def get_inventory_data(self, hosts: Sequence[str]) -> Dict[str, Dict]: """ + Returns a dictionary of hosts and their inventory data. Args: - hosts: the host name to return the inventory data for + hosts: list of host names to return the inventory data for Returns: the inventory data as dictionary """ - raise NotImplementedError() - @abstractmethod + inventory_data: Dict[str, Dict | None] = {} + # init inventory_data with None + for host in hosts: + inventory_data[host] = None + + open_hosts = hosts.copy() + while open_hosts: + hosts_str, open_hosts = hosts_to_query(open_hosts) + for host, inventory in self.query_inventory_data(hosts_str).items(): + inventory_data[host] = inventory + + return inventory_data + def get_interface_data(self, hosts: Sequence[str]) -> Dict[str, Dict | None]: """ - + Returns a dictionary of hosts and their interface services from CMK.
+ The interface information consists of the "Item", the "Description (summary)" and the service details Args: hosts: lit of host names to return the interface data for Returns: - dictionary of the interface data with the item as key + dictionary of the interface data with the host -> item as key """ - raise NotImplementedError() + host_data: Dict[str, any] = {} # to make pylint happy + # init host_data with None + for host in hosts: + host_data[host] = None + open_hosts = hosts.copy() + while open_hosts: + hosts_str, open_hosts = hosts_to_query(open_hosts) + host_data.update(self.query_interface_data(hosts_str)) - @abstractmethod - def host_exists(self, host: str) -> bool: - raise NotImplementedError() + return host_data - @abstractmethod - def get_hosts_by_label(self, label: str) -> List[str] | None: - raise NotImplementedError() + def host_exists(self, host: str) -> bool: + """ + Returns True if host exists in CMK, else False + """ + try: + return bool(self.cache[host]) + except KeyError: + pass - @abstractmethod - def pre_fetch_hosts(self): - raise NotImplementedError() + # get host from CMK and init host in cache + if exists := self.query_host(host): + self.cache[host] = HOST_EXIST.copy() + else: + self.cache[host] = None - @property - def cache(self) -> Dict: - return self._cache + return exists - @property - def backend(self) -> str: - return self._backend + def get_hosts_by_label(self, label: str) -> Sequence[str]: + """ + Returns list of hosts from CMK filtered by label + Args: + label: hostlabel to filter by - @property - def pre_fetch(self) -> bool: - return self._pre_fetch + Returns: + List of hosts + """ + return self.query_hosts_by_label(label) - def stop_host(self, host: str) -> None: - if host not in self._cache: - self._cache[host] = None + def fill_cache(self, hosts: Sequence[str]) -> None: + """ + Gets the host data from CMK and puts them in the host cache. 
Data collected: + - inventory + - interfaces - def pre_fetch_cache(self, hosts: Sequence[str]) -> None: - # pre fill inventory data - self._count += 1 - _pre_query = time_ns() + Args: + hosts: List of hosts to get data for from CMK + Returns: None, the data is written directly to self.cache + """ inventory_of_hosts: Mapping[str, Mapping | None] = self.get_inventory_data(hosts=hosts) - LOGGER.debug(f'{(time_ns() - _pre_query) / 1e9}|{self._count:0>4}|inventory|{hosts}') - if inventory_of_hosts: for host, inventory in inventory_of_hosts.items(): - if host not in self._cache: - self._cache[host] = {} - self._cache[host][CacheItems.inventory.value] = {} - self._cache[host][CacheItems.inventory.value].update({ + if host not in self.cache: + self.cache[host] = HOST_EXIST.copy() + self.cache[host][CacheItems.inventory] = {} + self.cache[host][CacheItems.inventory].update({ entry: get_table_from_inventory( inventory=inventory, raw_path=entry ) for entry in self._inventory_pre_fetch_list }) - _pre_query = time_ns() - interfaces_of_hosts: Mapping[str, Mapping | None] = self.get_interface_data(hosts) for host, interfaces in interfaces_of_hosts.items(): - if host not in self._cache: - self._cache[host] = {} - if not self._cache[host].get(CacheItems.interfaces.value): - self._cache[host][CacheItems.interfaces.value] = {} - self._cache[host][CacheItems.interfaces.value][CACHE_INTERFACES_DATA] = interfaces - - LOGGER.debug(f'{(time_ns() - _pre_query) / 1e9}|{self._count:0>4}|items|{host}') + if host not in self.cache: + self.cache[host] = HOST_EXIST.copy() + if not self.cache[host].get(CacheItems.interfaces): + self.cache[host][CacheItems.interfaces] = {} + self.cache[host][CacheItems.interfaces][CACHE_INTERFACES_DATA] = interfaces def get_data(self, host: str, item: CacheItems, path: str) -> Dict[str, any] | None: + """ + Returns data from self.cache.
If the cache for "host" is empty, data will be fetched from CMK + Args: + host: host to get data from cache + item: item in cache (inventory/interface) + path: path in cache item + + Returns: + the requested data or None + """ if self.host_exists(host=host): - if host not in self._cache: - self._cache[host] = {} - LOGGER.info(f'Host not in cache: {host}') - self.pre_fetch_cache(hosts=[host]) + if self.cache[host] == HOST_EXIST: + LOGGER.info(f'fetch data for: {host}') + self.fill_cache(hosts=[host]) try: - return self._cache[host][item.value][path] + return self.cache[host][item][path] except (KeyError, TypeError): return None - else: - self._cache[host] = None + return None - def add_inventory_prefetch_path(self, path: str) -> None: + def add_inventory_path(self, path: str) -> None: self._inventory_pre_fetch_list = list(set(self._inventory_pre_fetch_list + [path])) + @abstractmethod + def query_host(self, host: str) -> bool: + """ + Query Livestatus for "host" + + Args: + host: CMK host name to query livestus for + Returns: + True: if host was found + False: if host is not found + """ + raise NotImplementedError + + @abstractmethod + def query_all_hosts(self) -> Sequence[str]: + """ + Queries Livestatus for a list of all hosts + Returns: + List of all hosts + """ + raise NotImplementedError + + @abstractmethod + def query_hosts_by_label(self, label: str) -> Sequence[str]: + """ + Queries Livestatus for a list of hosts filtered by a host label + Args: + label: Host label to filter list of host by + + Returns: List of hosts + """ + raise NotImplementedError + + @abstractmethod + def query_inventory_data(self, hosts: str) -> Dict[str, Dict]: + raise NotImplementedError + + @abstractmethod + def query_interface_data(self, hosts: str) -> Dict[str, Dict]: + raise NotImplementedError class HostCacheLiveStatus(HostCache): def __init__(self, pre_fetch: bool, backend: str = '[LIVESTATUS]'): super().__init__(pre_fetch, backend) - if self.pre_fetch: - self.pre_fetch_hosts() def get_raw_data(self, query: str) -> any: return get_data_form_live_status(query=query) - def get_inventory_data(self, hosts: List[str]) -> Dict[str, Dict | None]: - host_data: Dict[str, Dict | None] = {} - # int host_data with None - for host in hosts: - host_data[host] = None - open_hosts = hosts.copy() - while open_hosts: - hosts_str, open_hosts = hosts_to_query(open_hosts) - query = ( - 'GET hosts\n' - 'Columns: host_name mk_inventory\n' - 'OutputFormat: python3\n' - f'Filter: host_name ~~ {hosts_str}\n' - ) - data: Sequence[Tuple[str, bytes]] = self.get_raw_data(query=query) - LOGGER.debug(f'{self.backend} data for hosts {hosts}: {data}') - if data: - for host, inventory in data: - if inventory == b'': - LOGGER.warning(f'{self.backend} Device: {hosts}: no inventory data found!') - continue - try: - host_data[host] = literal_eval(inventory.decode('utf-8')) - except SyntaxError as e: - LOGGER.exception(f'inventory: |{inventory!r}|') # output raw data - LOGGER.exception(f'type: {type(inventory)}') - LOGGER.exception(f'exception: {e}') - continue - else: - LOGGER.warning(f'{self.backend} Device: {hosts}: no inventory data found!') - return host_data - - def get_interface_data(self, hosts: List[str]) -> Dict[str, Dict | None]: - # host_data: Dict[str, Dict[str, Dict[str, List[str]] ] | None] = {} - host_data: Dict[str, any] = {} # to make pylint happy - # int host_data with None - for host in hosts: - host_data[host] = None - open_hosts = hosts.copy() - while open_hosts: - hosts_str, open_hosts = hosts_to_query(open_hosts) - 
query = ( - 'GET services\n' - 'Columns: host_name description long_plugin_output\n' - 'Filter: description ~ ^Interface\n' - f'Filter: host_name ~~ {hosts_str}\n' - 'OutputFormat: python3\n' - ) - data: List[Tuple[str, str, str]] = self.get_raw_data(query=query) - LOGGER.debug(f'{self.backend} data for host {hosts}: {data}') - if data: - for host, description, long_plugin_output in data: - if host_data.get(host) is None: - host_data[host] = {} - host_data[host][description[10:]] = { # remove 'Interface ' from description - 'long_plugin_output': long_plugin_output.split('\\n') - } - else: - LOGGER.warning(f'{self.backend} No Interfaces items found for hosts {hosts}') - - return host_data - - def host_exists(self, host: str) -> bool: + def query_host(self, host: str) -> bool: query = ( 'GET hosts\n' 'Columns: host_name\n' 'OutputFormat: python3\n' f'Filter: host_name = {host}\n' ) - if self.cache.get(host) is not None: - return True - elif host in self.cache: - return False - # if self.pre_fetch: - # LOGGER.warning(f'{self.backend} pre_fetch host not found in cache {host}') - # return False data: Sequence[Sequence[str]] = self.get_raw_data(query=query) LOGGER.debug(f'{self.backend} data for host {host}: {data}') if [host] in data: LOGGER.debug(f'{self.backend} Host {host} found in CMK') return True - LOGGER.warning(f'{self.backend} Host {host} not found in CMK') - self.stop_host(host) - return False - def get_hosts_by_label(self, label: str) -> List[str] | None: + def query_all_hosts(self) -> Sequence[str]: + query = ( + 'GET hosts\n' + 'Columns: host_name\n' + 'OutputFormat: python3\n' + ) + data: Sequence[Sequence[str]] = self.get_raw_data(query=query) + if data: + LOGGER.info(f'{self.backend} # of hosts found: {len(data)}') + return [host[0] for host in data] + + LOGGER.warning(f'{self.backend} no hosts found') + return [] + + def query_hosts_by_label(self, label: str) -> Sequence[str]: query = ( 'GET hosts\n' 'Columns: name\n' 'OutputFormat: python3\n' - # f'Filter: label_names ~ {label}\n' f'Filter: labels = {label}\n' ) data: Sequence[Sequence[str]] = self.get_raw_data(query=query) - LOGGER.debug(f'{self.backend} routing capable hosts: {data}') + LOGGER.debug(f'{self.backend} hosts matching label: {data}') if data: - hosts = [] - for host in data: - hosts.append(host[0]) - return hosts + LOGGER.info(f'{self.backend} # of hosts found: {len(data)}') + return [host[0] for host in data] - LOGGER.debug(f'{self.backend} no routing capable hosts found') - return None + LOGGER.warning(f'{self.backend} no hosts found matching label {label}') + return [] - def pre_fetch_hosts(self): - LOGGER.debug(f'{self.backend} pre_fetch_hosts') + def query_inventory_data(self, hosts: str) -> Dict[str, Dict]: query = ( 'GET hosts\n' - 'Columns: host_name\n' + 'Columns: host_name mk_inventory\n' 'OutputFormat: python3\n' + f'Filter: host_name ~~ {hosts}\n' ) - data: Sequence[Sequence[str]] = self.get_raw_data(query=query) + inventory_data = {} + data: Sequence[Tuple[str, bytes]] = self.get_raw_data(query=query) + LOGGER.debug(f'{self.backend} data for hosts {hosts}: {data}') if data: - for host in data: - self._cache[host[0]] = {} - LOGGER.debug(f'{self.backend} # of host found: {len(self.cache.keys())}') + for host, inventory in data: + if not inventory: + LOGGER.warning(f'{self.backend} Device: {host}: no inventory data found!') + continue + inventory = literal_eval(inventory.decode('utf-8')) + inventory_data[host] = inventory else: - LOGGER.warning(f'{self.backend} no hosts found') + 
LOGGER.warning(f'{self.backend} Device: {hosts}: no inventory data found!') + return inventory_data + + def query_interface_data(self, hosts: str) -> Dict[str, Dict]: + query = ( + 'GET services\n' + 'Columns: host_name description long_plugin_output\n' + 'Filter: description ~ ^Interface\n' + f'Filter: host_name ~~ {hosts}\n' + 'OutputFormat: python3\n' + ) + interface_data = {} + data: List[Tuple[str, str, str]] = self.get_raw_data(query=query) + LOGGER.debug(f'{self.backend} interface data for hosts {hosts}: {data}') + if data: + for host, description, long_plugin_output in data: + if interface_data.get(host) is None: + interface_data[host] = {} + interface_data[host][description[10:]] = { # remove 'Interface ' from description + 'long_plugin_output': long_plugin_output.split('\\n') + } + else: + LOGGER.warning(f'{self.backend} No Interfaces items found for hosts {hosts}') + + return interface_data class HostCacheMultiSite(HostCacheLiveStatus): def __init__( @@ -347,7 +390,7 @@ class HostCacheMultiSite(HostCacheLiveStatus): filter_customers: str | None = None, customers: List[str] = None, ): - self._backend = '[MULTISITE]' + self.backend = '[MULTISITE]' self.sites: SiteConfigurations = SiteConfigurations({}) self.get_sites() self.filter_sites(filter_sites, sites) @@ -361,9 +404,10 @@ class HostCacheMultiSite(HostCacheLiveStatus): dead_sites = ', '.join(self.dead_sites) LOGGER.warning(f'{self.backend} WARNING: use of dead site(s) {dead_sites} is disabled') self.c.set_only_sites(self.c.alive_sites()) - super().__init__(pre_fetch, self._backend) - if self.pre_fetch: - self.pre_fetch_hosts() + super().__init__(pre_fetch, self.backend) + + def get_raw_data(self, query: str) -> object: + return self.c.query(query=query) # https://github.com/Checkmk/checkmk/blob/master/packages/cmk-livestatus-client/example_multisite.py def get_sites(self): @@ -435,10 +479,6 @@ class HostCacheMultiSite(HostCacheLiveStatus): case _: return - def get_raw_data(self, query: str) -> object: - return self.c.query(query=query) - - class HostCacheRestApi(HostCache): def __init__( self, @@ -447,10 +487,9 @@ class HostCacheRestApi(HostCache): filter_sites: str | None = None, sites: List[str] = [], ): - super().__init__(pre_fetch, '[RESTAPI]') + self.backend = '[RESTAPI]' LOGGER.debug(f'{self.backend} init backend') - self._api_port = api_port - self.sites = [] + try: self.__secret = Path( f'{OMD_ROOT}/var/check_mk/web/automation/automation.secret' @@ -458,33 +497,47 @@ class HostCacheRestApi(HostCache): except FileNotFoundError as e: LOGGER.exception(f'{self.backend} automation.secret not found, {e}') print(f'{self.backend} automation.secret not found, {e}') - sys_exit(ExitCodes.AUTOMATION_SECRET_NOT_FOUND.value) + sys_exit(ExitCodes.AUTOMATION_SECRET_NOT_FOUND) + + self.__api_port = api_port self.__hostname = 'localhost' self.__site = OMD_ROOT.split('/')[-1] - self.__api_url = f"http://{self.__hostname}:{self._api_port}/{self.__site}/check_mk/api/1.0" + self.__api_url = f"http://{self.__hostname}:{self.__api_port}/{self.__site}/check_mk/api/1.0" self.__user = 'automation' + LOGGER.info(f'{self.backend} Create REST API session') self.__session = session() self.__session.headers['Authorization'] = f"Bearer {self.__user} {self.__secret}" self.__session.headers['Accept'] = 'application/json' - self.get_sites() + self.sites: MutableSequence[str] = self.query_sites() self.filter_sites(filter_=filter_sites, sites=sites) LOGGER.info(f'{self.backend} filtered sites : {self.sites}') + super().__init__(pre_fetch, self.backend) - 
if self.pre_fetch: - self.pre_fetch_hosts() + def get_raw_data(self, url: str, params: Mapping[str, object] | None): + resp = self.__session.get( + url=url, + params=params, + ) + LOGGER.debug(f'{self.backend} raw data: {resp.text}') + if resp.status_code == 200: + return resp.json() + else: + LOGGER.warning(f'{self.backend} response: {resp.status_code}') - def get_sites(self): + def query_sites(self) -> MutableSequence[str]: LOGGER.debug(f'{self.backend} get_sites') - resp = self.__session.get(url=f"{self.__api_url}/domain-types/site_connection/collections/all") - if resp.status_code == 200: - sites = resp.json().get("value") - self.sites = [site.get('id') for site in sites] - LOGGER.debug(f'{self.backend} sites : {self.sites}') + url = f"{self.__api_url}/domain-types/site_connection/collections/all" + sites = [] + if raw_data:= self.get_raw_data(url, None): + raw_sites = raw_data.get("value") + sites = [site.get('id') for site in raw_sites] + LOGGER.debug(f'{self.backend} sites : {sites}') else: - LOGGER.warning(f'{self.backend} got no site information! status code {resp.status_code}') - LOGGER.debug(f'{self.backend} response text: {resp.text}') + LOGGER.warning(f'{self.backend} got no site information!') + + return sites def filter_sites(self, filter_: str | None, sites: List[str]): match filter_: @@ -495,170 +548,108 @@ class HostCacheRestApi(HostCache): case _: return - def get_inventory_data(self, hosts: List[str]) -> Dict[str, Dict | None]: - LOGGER.debug(f'{self.backend} get_inventory_data {hosts}') - host_data: Dict[str, Dict | None] = {} - # init host_data with None - for host in hosts: - host_data[host] = None - open_hosts = hosts.copy() - while open_hosts: - hosts_str, open_hosts = hosts_to_query(open_hosts) - LOGGER.debug(f'{self.backend} open hosts: {open_hosts}, len: {len(open_hosts)}') - query = '{"op": "~~", "left": "name", "right": "' + hosts_str + '"}' - resp = self.__session.get( - url=f"{self.__api_url}/domain-types/host/collections/all", - params={ - 'query': query, - 'columns': ['name', 'mk_inventory'], - 'sites': self.sites, - }, - ) - if resp.status_code == 200: - LOGGER.debug(f'{self.backend} {resp.elapsed}|{self._count:0>4}|inventory|{hosts}') - data = resp.json().get('value', []) - for raw_host in data: - host = raw_host.get('extensions', {}).get('name') - if host: - host_data[host] = raw_host['extensions'].get('mk_inventory') + def query_host(self, host: str) -> bool: + query = '{"op": "=", "left": "name", "right": "' + host + '"}' + url = f'{self.__api_url}/domain-types/host/collections/all' + params = { + 'query': query, + 'columns': ['name'], + 'sites': self.sites, + } + + if raw_data := self.get_raw_data(url, params): + try: + data = raw_data['value'][0]['extensions']['name'] + LOGGER.debug(f'{self.backend} data for host {host}: {data}') + except IndexError: + LOGGER.warning(f'Host {host} not found in CMK') else: - LOGGER.warning( - f'{self.backend} got no inventory data found!, status code {resp.status_code}' - ) - LOGGER.debug(f'{self.backend} response query: {query}') - LOGGER.debug(f'{self.backend} response text: {resp.text}') - LOGGER.debug(f'{self.backend} response url: {resp.url}, len: {len(resp.url)}') + if data == host: + return True - return host_data + return False - def get_interface_data(self, hosts: List[str]) -> Dict[str, Dict | None]: - LOGGER.debug(f'{self.backend} get_interface_data {hosts}') - host_data: Dict[str, Dict | None] = {} - # init host_data with None - for host in hosts: - host_data[host] = None - open_hosts = hosts.copy() - 
while open_hosts: - hosts_str, open_hosts = hosts_to_query(open_hosts) + def query_all_hosts(self) -> Sequence[str]: + url = f'{self.__api_url}/domain-types/host/collections/all' + params = { + 'columns': ['name'], + 'sites': self.sites, + } - query_host = f'{{"op": "~~", "left": "host_name", "right": "{hosts_str}"}}' - query_item = '{"op": "~", "left": "description", "right": "Interface "}' - query = f'{{"op": "and", "expr": [{query_item},{query_host}]}}' - - resp = self.__session.get( - url=f'{self.__api_url}/domain-types/service/collections/all', - params={ - 'query': query, - 'columns': ['host_name', 'description', 'long_plugin_output'], - 'sites': self.sites, - }, - ) + if raw_data := self.get_raw_data(url, params): + if data := raw_data.get('value', []): + LOGGER.info(f'{self.backend} # of hosts found: {len(data)}') + return [host.get('extensions', {}).get('name') for host in data] + + return [] + + def query_hosts_by_label(self, label: str) -> Sequence[str]: + query = '{"op": "=", "left": "labels", "right": "' + label + '"}' + + url = f'{self.__api_url}/domain-types/host/collections/all' + params = { + 'columns': ['name', 'labels'], + 'query': query, + 'sites': self.sites, + } + + if raw_data := self.get_raw_data(url, params): + if data := raw_data.get('value'): + LOGGER.info(f'{self.backend} # of hosts found: {len(data)}') + return [host['extensions']['name'] for host in data] + + LOGGER.warning(f'{self.backend} no hosts found matching label {label}') + return [] + + def query_inventory_data(self, hosts: str) -> Dict[str, Dict]: + query = '{"op": "~~", "left": "name", "right": "' + hosts + '"}' + url = f"{self.__api_url}/domain-types/host/collections/all" + params = { + 'query': query, + 'columns': ['name', 'mk_inventory'], + 'sites': self.sites, + } + + inventory_data = {} + + if raw_data := self.get_raw_data(url, params): + LOGGER.debug(f'{self.backend} raw inventory data: {raw_data}') + if data := raw_data.get('value', []): + for raw_host in data: + if host := raw_host.get('extensions', {}).get('name'): + inventory = raw_host['extensions'].get('mk_inventory') + if not inventory: + LOGGER.warning(f'{self.backend} Device: {host}: no inventory data found!') + inventory_data[host] = inventory + + return inventory_data + + def query_interface_data(self, hosts: str) -> Dict[str, Dict]: + query_host = f'{{"op": "~~", "left": "host_name", "right": "{hosts}"}}' + query_item = '{"op": "~", "left": "description", "right": "Interface "}' + query = f'{{"op": "and", "expr": [{query_item},{query_host}]}}' + + url = f'{self.__api_url}/domain-types/service/collections/all' + params = { + 'query': query, + 'columns': ['host_name', 'description', 'long_plugin_output'], + 'sites': self.sites, + } - if resp.status_code == 200: - LOGGER.debug(f'{resp.elapsed}|{self._count:0>4}|items|{hosts}') + interface_data = {} - data = resp.json().get('value', []) + if raw_data := self.get_raw_data(url, params): + LOGGER.debug(f'{self.backend} raw interface data: {raw_data}') + + if data := raw_data.get('value', []): for raw_service in data: LOGGER.debug(f'{self.backend} data for service : {raw_service}') service = raw_service.get('extensions') host, description, long_plugin_output = service.values() - host = service['host_name'] - description = service['description'] - if host_data.get(host) is None: - host_data[host] = {} - host_data[host][description[10:]] = { + if interface_data.get(host) is None: + interface_data[host] = {} + interface_data[host][description[10:]] = { 'long_plugin_output': 
long_plugin_output.split('\\n') } - else: - LOGGER.warning( - f'{self.backend} got no interface data, response code {resp.status_code}' - ) - LOGGER.debug(f'{self.backend} response query: {query}') - LOGGER.debug(f'{self.backend} response text: {resp.text}') - LOGGER.debug(f'{self.backend} response url: {resp.url}, len: {len(resp.url)}') - - return host_data - - def host_exists(self, host: str) -> bool: - LOGGER.debug(f'{self.backend} host_exists {host}') - if self.cache.get(host) is not None: - LOGGER.debug(f'{self.backend} host found in cache {host}') - return True - elif host in self.cache: - return False - - # if self.pre_fetch: - # LOGGER.warning(f'{self.backend} pre_fetch host not found in cache {host}') - # return False - query = '{"op": "=", "left": "name", "right": "' + host + '"}' - resp = self.__session.get( - url=f'{self.__api_url}/domain-types/host/collections/all', - params={ - 'query': query, - 'columns': ['name'], - 'sites': self.sites, - }, - ) - if resp.status_code == 200: - LOGGER.debug(f'{resp.elapsed}|{self._count:0>4}|name|{host}') - try: - data = resp.json()['value'][0]['extensions']['name'] - LOGGER.debug(f'{self.backend} data for host {host}: {resp.json()}') - except IndexError: - LOGGER.warning(f'Host {host} not found in CMK') - self.stop_host(host) - return False - if data == host: - return True - else: - LOGGER.warning(f'{self.backend} response: {resp.status_code}') - return False - - def get_hosts_by_label(self, label: str) -> List[str] | None: - LOGGER.debug(f'{self.backend} get_hosts_by_label {label}') - query = '{"op": "=", "left": "labels", "right": "' + label + '"}' - - resp = self.__session.get( - url=f'{self.__api_url}/domain-types/host/collections/all', - params={ - 'columns': ['name', 'labels'], - 'query': query, - 'sites': self.sites, - }, - ) - if resp.status_code == 200: - LOGGER.debug(f'{self.backend} data for routing_capable: {resp.json()}') - LOGGER.debug(f'{resp.elapsed}|{self._count:0>4}|name|routing_capable') - try: - data = resp.json().get('value') - hosts = [] - for host in data: - hosts.append(host['extensions']['name']) - LOGGER.debug(f'{self.backend} host list {hosts}') - return hosts - except IndexError: - LOGGER.debug(f'{self.backend} no routing capable hosts found') - return None - else: - LOGGER.warning(f'{self.backend} response: {resp.status_code}') - return None - def pre_fetch_hosts(self): - LOGGER.debug(f'{self.backend} pre_fetch_hosts') - LOGGER.critical(f'{self.backend} pre_fetch_hosts sites {self.sites}') - resp = self.__session.get( - url=f'{self.__api_url}/domain-types/host/collections/all', - params={ - 'columns': ['name'], - 'sites': self.sites, - }, - ) - if resp.status_code == 200: - data = resp.json().get('value', []) - for raw_host in data: - host = raw_host.get('extensions', {}).get('name') - if host: - self._cache[host] = {} - LOGGER.debug(f'{self.backend} # of host found: {len(self.cache.keys())}') - else: - LOGGER.warning(f'{self.backend} respons: {resp.text}') + return interface_data \ No newline at end of file diff --git a/source/bin/nvdct/lib/constants.py b/source/bin/nvdct/lib/constants.py index 4bf5240ab6154dcef3d0839ad7e9cdc9637dac76..590cba11358c77cdc061e9f0bc76976868dcb784 100755 --- a/source/bin/nvdct/lib/constants.py +++ b/source/bin/nvdct/lib/constants.py @@ -8,11 +8,42 @@ # File : nvdct/lib/constants.py +from dataclasses import dataclass +from enum import Enum, unique, auto from logging import getLogger from os import environ from typing import Final -NVDCT_VERSION: Final[str] = '0.9.5-20241217' +# 
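The ExitCodes and IPVersion enums defined just below (and CacheItems in lib/backends.py) override __get__ so that plain member access already yields the underlying value, which is why call sites such as sys_exit(ExitCodes.OK) and the argparse epilog no longer append .value. A minimal standalone sketch of that pattern, assuming a current CPython where enum members are stored as class attributes; the Codes name is purely illustrative and not part of the MKP:

```python
from enum import Enum, auto, unique


@unique
class Codes(Enum):
    OK = 0
    BAD_OPTION_LIST = auto()

    def __get__(self, instance, owner):
        # Descriptor protocol: attribute access returns the raw value
        # instead of the member object.
        return self.value


print(f'{Codes.OK} - No error')                       # -> "0 - No error"
print(f'{Codes.BAD_OPTION_LIST} - Bad options list')  # -> "1 - Bad options list"
# Trade-off: Codes.OK is now a plain int, so Codes.OK.value would raise
# AttributeError - hence the patch drops every ".value" suffix.
```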
+NVDCT_VERSION: Final[str] = '0.9.6-20241222' +# +@unique +class ExitCodes(Enum): + OK = 0 + BAD_OPTION_LIST = auto() + BAD_TOML_FORMAT = auto() + BACKEND_NOT_IMPLEMENTED = auto() + AUTOMATION_SECRET_NOT_FOUND = auto() + NO_LAYER_CONFIGURED = auto() + + def __get__(self, instance, owner): + return self.value + +@unique +class IPVersion(Enum): + IPv4 = 4 + IPv6 = 6 + + def __get__(self, instance, owner): + return self.value + +@dataclass(frozen=True) +class Layer: + path: str + columns: str + label: str + host_label: str + # OMD_ROOT: Final[str] = environ["OMD_ROOT"] # @@ -20,17 +51,21 @@ API_PORT: Final[int] = 5001 CACHE_INTERFACES_DATA: Final[str] = 'interface_data' CMK_SITE_CONF: Final[str] = f'{OMD_ROOT}/etc/omd/site.conf' COLUMNS_CDP: Final[str] = 'neighbour_name,local_port,neighbour_port' -COLUMNS_L3v4: Final[str] = 'address,device,cidr,network,type' +COLUMNS_L3: Final[str] = 'address,device,cidr,network,type' COLUMNS_LLDP: Final[str] = 'neighbour_name,local_port,neighbour_port' DATAPATH: Final[str] = f'{OMD_ROOT}/var/check_mk/topology/data' HOME_URL: Final[str] = 'https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/nvdct' HOST_LABEL_CDP: Final[str] = "'nvdct/has_cdp_neighbours' 'yes'" HOST_LABEL_L3V4_HOSTS: Final[str] = "'nvdct/l3v4_topology' 'host'" HOST_LABEL_L3V4_ROUTER: Final[str] = "'nvdct/l3v4_topology' 'router'" +HOST_LABEL_L3V6_HOSTS: Final[str] = "'nvdct/l3v6_topology' 'host'" +HOST_LABEL_L3V6_ROUTER: Final[str] = "'nvdct/l3v6_topology' 'router'" HOST_LABEL_LLDP: Final[str] = "'nvdct/has_lldp_neighbours' 'yes'" LABEL_CDP: Final[str] = 'CDP' -LABEL_L3v4: Final[str] = 'LAYER3v4' +LABEL_L3v4: Final[str] = 'L3v4' +LABEL_L3v6: Final[str] = 'L3v6' LABEL_LLDP: Final[str] = 'LLDP' +LABEL_STATIC: Final[str] = 'STATIC' LOGGER: Final[str] = getLogger('root)') LOG_FILE: Final[str] = f'{OMD_ROOT}/var/log/nvdct.log' MIN_CDP_VERSION: Final[str] = '0.7.1-20240320' @@ -40,9 +75,55 @@ MIN_WINDOWS_IP_ADDRESSES: Final[str] = '0.0.3-20241210' MIN_LLDP_VERSION: Final[str] = '0.9.3-20240320' PATH_CDP: Final[str] = 'networking,cdp_cache,neighbours' PATH_INTERFACES: Final[str] = 'networking,interfaces' -PATH_L3v4: Final[str] = 'networking,addresses' +PATH_L3: Final[str] = 'networking,addresses' PATH_LLDP: Final[str] = 'networking,lldp_cache,neighbours' SCRIPT: Final[str] = '~/local/bin/nvdct/nvdct.py' TIME_FORMAT: Final[str] = '%Y-%m-%dT%H:%M:%S.%m' TIME_FORMAT_ARGPARSER: Final[str] = '%%Y-%%m-%%dT%%H:%%M:%%S.%%m' USER_DATA_FILE: Final[str] = 'nvdct.toml' +# +TOML_CUSTOMERS : Final[str] = 'CUSTOMERS' +TOML_CUSTOM_LAYERS : Final[str] = 'CUSTOM_LAYERS' +TOML_EMBLEMS : Final[str] = 'EMBLEMS' +TOML_L2_DROP_HOSTS: Final[str] = 'L2_DROP_HOSTS' +TOML_L2_HOST_MAP : Final[str] = 'L2_HOST_MAP' +TOML_L2_NEIGHBOUR_REPLACE_REGEX : Final[str] = 'L2_NEIGHBOUR_REPLACE_REGEX' +TOML_L2_SEED_DEVICES: Final[str] = 'L2_SEED_DEVICES' +TOML_L3V4_IGNORE_WILDCARD : Final[str] = 'L3V4_IGNORE_WILDCARD' +TOML_L3_IGNORE_HOSTS : Final[str] = 'L3_IGNORE_HOSTS' +TOML_L3_IGNORE_IP : Final[str] = 'L3_IGNORE_IP' +TOML_L3_REPLACE : Final[str] = 'L3_REPLACE' +TOML_L3_SUMMARIZE : Final[str] = 'L3_SUMMARIZE' +TOML_MAP_SPEED_TO_THICKNESS : Final[str] = 'MAP_SPEED_TO_THICKNESS' +TOML_PROTECTED_TOPOLOGIES : Final[str] = 'PROTECTED_TOPOLOGIES' +TOML_SETTINGS : Final[str] = 'SETTINGS' +TOML_SITES : Final[str] = 'SITES' +TOML_STATIC_CONNECTIONS : Final[str] = 'STATIC_CONNECTIONS' +# +LAYERS = { + 'CDP': Layer( + path=PATH_CDP, + columns=COLUMNS_CDP, + label=LABEL_CDP, + host_label=HOST_LABEL_CDP, + ), + 'LLDP': Layer( + path=PATH_LLDP, 
+ columns=COLUMNS_LLDP, + label=LABEL_LLDP, + host_label=HOST_LABEL_LLDP, + ), + 'L3v4': Layer( + path=PATH_L3, + columns='', + label=LABEL_L3v4, + host_label=HOST_LABEL_L3V4_ROUTER, + ), + 'L3v6': Layer( + path=PATH_L3, + columns='', + label=LABEL_L3v6, + host_label=HOST_LABEL_L3V6_ROUTER, + ), +} + diff --git a/source/bin/nvdct/lib/settings.py b/source/bin/nvdct/lib/settings.py index 0252f749e54df5b170b60baeefa2ffbca1708891..e625634eab31494d676ba7b3392e0ccb2380bb33 100755 --- a/source/bin/nvdct/lib/settings.py +++ b/source/bin/nvdct/lib/settings.py @@ -11,7 +11,7 @@ # 2024-12-17: fixed wrong import for OMD_ROOT (copy&paste) (ThX to BH2005@forum.checkmk.com) from collections.abc import Mapping -from ipaddress import AddressValueError, IPv4Address, IPv4Network, NetmaskValueError +from ipaddress import AddressValueError, NetmaskValueError, ip_address, ip_network from logging import CRITICAL, FATAL, ERROR, WARNING, INFO, DEBUG from sys import exit as sys_exit from time import strftime @@ -20,15 +20,33 @@ from pathlib import Path from lib.constants import ( API_PORT, + ExitCodes, LOGGER, LOG_FILE, + Layer, OMD_ROOT, TIME_FORMAT, USER_DATA_FILE, + + TOML_CUSTOMERS, + TOML_CUSTOM_LAYERS, + TOML_EMBLEMS, + TOML_L2_DROP_HOSTS, + TOML_L2_HOST_MAP, + TOML_L2_NEIGHBOUR_REPLACE_REGEX, + TOML_L2_SEED_DEVICES, + TOML_L3V4_IGNORE_WILDCARD, + TOML_L3_IGNORE_HOSTS, + TOML_L3_IGNORE_IP, + TOML_L3_REPLACE, + TOML_L3_SUMMARIZE, + TOML_MAP_SPEED_TO_THICKNESS, + TOML_PROTECTED_TOPOLOGIES, + TOML_SETTINGS, + TOML_SITES, + TOML_STATIC_CONNECTIONS, ) from lib.utils import ( - ExitCodes, - Layer, get_data_from_toml, get_local_cmk_api_port, is_valid_customer_name, @@ -43,8 +61,8 @@ class Emblems(NamedTuple): host_node: str ip_address: str ip_network: str - l3v4_replace: str - l3v4_summarize: str + l3_replace: str + l3_summarize: str service_node: str @@ -113,10 +131,10 @@ class Settings: if self.__args.get('check_user_data_only'): LOGGER.info(msg=f'Could read/parse the user data from {self.user_data_file}') print(f'Could read/parse the user data from {self.user_data_file}') - sys_exit(ExitCodes.OK.value) + sys_exit(ExitCodes.OK) # defaults -> overridden by toml -> overridden by cli - self.__settings.update(self.__user_data.get('SETTINGS', {})) + self.__settings.update(self.__user_data.get(TOML_SETTINGS, {})) self.__settings.update(self.__args) if self.layers: @@ -126,7 +144,7 @@ class Settings: msg='-l/--layers options must be unique. Don\'t use any layer more than once.' ) print('-l/--layers options must be unique. 
Don\'t use any layer more than once.') - sys_exit(ExitCodes.BAD_OPTION_LIST.value) + sys_exit(ExitCodes.BAD_OPTION_LIST) self.__api_port: int | None = None @@ -138,11 +156,11 @@ class Settings: self.__l2_host_map: Dict[str, str] | None = None self.__l2_neighbour_replace_regex: List[Tuple[str, str]] | None = None self.__l2_seed_devices: List[str] | None = None - self.__l3v4_ignore_hosts: List[str] | None = None - self.__l3v4_ignore_ip: List[IPv4Network] | None = None + self.__l3_ignore_hosts: List[str] | None = None + self.__l3_ignore_ip: List[ip_network] | None = None self.__l3v4_ignore_wildcard: List[Wildcard] | None = None - self.__l3v4_replace: Dict[str, str] | None = None - self.__l3v4_summarize: List[IPv4Network] | None = None + self.__l3_replace: Dict[str, str] | None = None + self.__l3_summarize: List[ip_network] | None = None self.__map_speed_to_thickness: List[Thickness] | None = None self.__protected_topologies: List[str] | None = None self.__sites: List[str] | None = None @@ -318,7 +336,7 @@ class Settings: def customers(self) -> List[str]: if self.__customers is None: self.__customers = [ - str(customer) for customer in set(self.__user_data.get('CUSTOMERS', [])) + str(customer) for customer in set(self.__user_data.get(TOML_CUSTOMERS, [])) if is_valid_customer_name(customer)] LOGGER.info(f'Found {len(self.__customers)} to filter on') return self.__customers @@ -327,7 +345,7 @@ class Settings: def custom_layers(self) -> List[Layer]: if self.__custom_layers is None: self.__custom_layers = [] - for _layer in self.__user_data.get('CUSTOM_LAYERS', []): + for _layer in self.__user_data.get(TOML_CUSTOM_LAYERS, []): try: self.__custom_layers.append(Layer( path=_layer['path'], @@ -337,25 +355,25 @@ class Settings: )) except KeyError: LOGGER.error( - f'Invalid entry in CUSTOM_LAYERS -> {_layer} -> ignored' + f'Invalid entry in {TOML_CUSTOM_LAYERS} -> {_layer} -> ignored' ) continue LOGGER.critical( - f'Valid entries in CUSTOM_LAYERS found: {len(self.__custom_layers)}/' - f'{len(self.__user_data.get("CUSTOM_LAYERS", []))}' + f'Valid entries in {TOML_CUSTOM_LAYERS} found: {len(self.__custom_layers)}/' + f'{len(self.__user_data.get(TOML_CUSTOM_LAYERS, []))}' ) return self.__custom_layers @property def emblems(self) -> Emblems: if self.__emblems is None: - raw_emblems = self.__user_data.get('EMBLEMS', {}) + raw_emblems = self.__user_data.get(TOML_EMBLEMS, {}) self.__emblems = Emblems( host_node=str(raw_emblems.get('host_node', 'icon_missing')), ip_address=str(raw_emblems.get('ip_address', 'ip-address_80')), ip_network=str(raw_emblems.get('ip_network', 'ip-network_80')), - l3v4_replace=str(raw_emblems.get('l3v4_replace', 'icon_plugins_cloud')), - l3v4_summarize=str(raw_emblems.get('l3v4_summarize', 'icon_aggr')), + l3_replace=str(raw_emblems.get('l3_replace', 'icon_plugins_cloud')), + l3_summarize=str(raw_emblems.get('l3_summarize', 'icon_aggr')), service_node=str(raw_emblems.get('service_node', 'icon_missing')), ) return self.__emblems @@ -363,14 +381,14 @@ class Settings: @property def l2_drop_hosts(self) -> List[str]: if self.__l2_drop_host is None: - self.__l2_drop_host = [str(host) for host in set(self.__user_data.get('L2_DROP_HOSTS', []))] + self.__l2_drop_host = [str(host) for host in set(self.__user_data.get(TOML_L2_DROP_HOSTS, []))] return self.__l2_drop_host @property def l2_seed_devices(self) -> List[str]: if self.__l2_seed_devices is None: self.__l2_seed_devices = list(set(str(host) for host in ( - self.__user_data.get('L2_SEED_DEVICES', [])) if is_valid_hostname(host))) + 
self.__user_data.get(TOML_L2_SEED_DEVICES, [])) if is_valid_hostname(host))) return self.__l2_seed_devices @property @@ -378,7 +396,7 @@ class Settings: if self.__l2_host_map is None: self.__l2_host_map = { str(host): str(replace_host) for host, replace_host in self.__user_data.get( - 'L2_HOST_MAP', {} + TOML_L2_HOST_MAP, {} ).items() if is_valid_hostname(host) } return self.__l2_host_map @@ -389,47 +407,47 @@ class Settings: self.__l2_neighbour_replace_regex = [ ( str(regex), str(replace) - ) for regex, replace in self.__user_data.get('L2_NEIGHBOUR_REPLACE_REGEX', {}).items() + ) for regex, replace in self.__user_data.get(TOML_L2_NEIGHBOUR_REPLACE_REGEX, {}).items() ] return self.__l2_neighbour_replace_regex @property - def l3v4_ignore_hosts(self) -> List[str]: - if self.__l3v4_ignore_hosts is None: - self.__l3v4_ignore_hosts = [str(host) for host in set(self.__user_data.get( - 'L3V4_IGNORE_HOSTS', [] + def l3_ignore_hosts(self) -> List[str]: + if self.__l3_ignore_hosts is None: + self.__l3_ignore_hosts = [str(host) for host in set(self.__user_data.get( + TOML_L3_IGNORE_HOSTS, [] )) if is_valid_hostname(host)] - return self.__l3v4_ignore_hosts + return self.__l3_ignore_hosts @property - def l3v4_ignore_ips(self) -> List[IPv4Network]: - if self.__l3v4_ignore_ip is None: - self.__l3v4_ignore_ip = [] - for ip_network in self.__user_data.get('L3V4_IGNORE_IP', []): + def l3_ignore_ips(self) -> List[ip_network]: + if self.__l3_ignore_ip is None: + self.__l3_ignore_ip = [] + for raw_ip_network in self.__user_data.get(TOML_L3_IGNORE_IP, []): try: - self.__l3v4_ignore_ip.append(IPv4Network(ip_network, strict=False)) + self.__l3_ignore_ip.append(ip_network(raw_ip_network, strict=False)) except (AddressValueError, NetmaskValueError): LOGGER.error( - f'Invalid entry in L3V4_IGNORE_IP found: {ip_network} -> ignored' + f'Invalid entry in {TOML_L3_IGNORE_IP} found: {raw_ip_network} -> ignored' ) continue LOGGER.info( - f'Valid entries in L3V4_IGNORE_IP found: {len(self.__l3v4_ignore_ip)}/' - f'{len(self.__user_data.get("L3V4_IGNORE_IP", []))}' + f'Valid entries in {TOML_L3_IGNORE_IP} found: {len(self.__l3_ignore_ip)}/' + f'{len(self.__user_data.get(TOML_L3_IGNORE_IP, []))}' ) - return self.__l3v4_ignore_ip + return self.__l3_ignore_ip @property def l3v4_ignore_wildcard(self) -> List[Wildcard]: if self.__l3v4_ignore_wildcard is None: self.__l3v4_ignore_wildcard = [] - for entry in self.__user_data.get('L3V4_IRNORE_WILDCARD', []): + for entry in self.__user_data.get(TOML_L3V4_IGNORE_WILDCARD, []): try: - ip_address, wildcard = entry + raw_ip_address, wildcard = entry except ValueError: LOGGER.error( - f'Invalid entry in L3V4_IRNORE_WILDCARD -> {entry} -> ignored' + f'Invalid entry in {TOML_L3V4_IGNORE_WILDCARD} -> {entry} -> ignored' ) continue try: @@ -438,71 +456,71 @@ class Settings: [str(255 - int(octet)) for octet in wildcard.split('.')] ) self.__l3v4_ignore_wildcard.append(Wildcard( - int_ip_address=int(IPv4Address(ip_address)), - int_wildcard=int(IPv4Address(inverted_wildcard)), - ip_address=ip_address, + int_ip_address=int(ip_address(raw_ip_address)), + int_wildcard=int(ip_address(inverted_wildcard)), + ip_address=raw_ip_address, wildcard=wildcard, - bit_pattern=int(IPv4Address(ip_address)) & int( - IPv4Address(inverted_wildcard) + bit_pattern=int(ip_address(raw_ip_address)) & int( + ip_address(inverted_wildcard) ) )) except (AddressValueError, NetmaskValueError): LOGGER.error( - f'Invalid entry in L3V4_IRNORE_WILDCARD -> {entry} -> ignored' + f'Invalid entry in {TOML_L3V4_IGNORE_WILDCARD} -> 
{entry} -> ignored' ) continue LOGGER.info( - f'Valid entries in L3V4_IRNORE_WILDCARD found: {len(self.__l3v4_ignore_wildcard)}/' - f'{len(self.__user_data.get("L3V4_IRNORE_WILDCARD", []))}' + f'Valid entries in {TOML_L3V4_IGNORE_WILDCARD} found: {len(self.__l3v4_ignore_wildcard)}/' + f'{len(self.__user_data.get(TOML_L3V4_IGNORE_WILDCARD, []))}' ) return self.__l3v4_ignore_wildcard @property - def l3v4_replace(self) -> Dict[str, str]: - if self.__l3v4_replace is None: - self.__l3v4_replace = {} - for ip_network, node in self.__user_data.get('L3V4_REPLACE', {}).items(): + def l3_replace(self) -> Dict[str, str]: + if self.__l3_replace is None: + self.__l3_replace = {} + for raw_ip_network, node in self.__user_data.get(TOML_L3_REPLACE, {}).items(): try: - _ip_network = IPv4Network(ip_network) # noqa: F841 + _ip_network = ip_network(raw_ip_network) # noqa: F841 except (AddressValueError, NetmaskValueError): LOGGER.error( - f'Invalid entry in L3V4_REPLACE found: {ip_network} -> line ignored' + f'Invalid entry in {TOML_L3_REPLACE} found: {raw_ip_network} -> line ignored' ) continue if not is_valid_hostname(node): LOGGER.error(f'Invalid node name found: {node} -> line ignored ') continue - self.__l3v4_replace[ip_network] = str(node) + self.__l3_replace[raw_ip_network] = str(node) LOGGER.info( - f'Valid entries in L3V4_REPLACE found: {len(self.__l3v4_replace)}/' - f'{len(self.__user_data.get("L3V4_REPLACE", {}))}' + f'Valid entries in {TOML_L3_REPLACE} found: {len(self.__l3_replace)}/' + f'{len(self.__user_data.get(TOML_L3_REPLACE, {}))}' ) - return self.__l3v4_replace + return self.__l3_replace @property - def l3v4_summarize(self) -> List[IPv4Network]: - if self.__l3v4_summarize is None: - self.__l3v4_summarize = [] - for ip_network in self.__user_data.get('L3V4_SUMMARIZE', []): + def l3_summarize(self) -> List[ip_network]: + if self.__l3_summarize is None: + self.__l3_summarize = [] + for raw_ip_network in self.__user_data.get(TOML_L3_SUMMARIZE, []): try: - self.__l3v4_summarize.append(IPv4Network(ip_network, strict=False)) + self.__l3_summarize.append(ip_network(raw_ip_network, strict=False)) except (AddressValueError, NetmaskValueError): LOGGER.error( - f'Invalid entry in L3V4_SUMMARIZE -> {ip_network} -> ignored' + f'Invalid entry in {TOML_L3_SUMMARIZE} -> {raw_ip_network} -> ignored' ) continue LOGGER.info( - f'Valid entries in L3V4_SUMMARIZE found: {len(self.__l3v4_summarize)}/' - f'{len(self.__user_data.get("L3V4_SUMMARIZE", []))}' + f'Valid entries in {TOML_L3_SUMMARIZE} found: {len(self.__l3_summarize)}/' + f'{len(self.__user_data.get(TOML_L3_SUMMARIZE, []))}' ) - return self.__l3v4_summarize + return self.__l3_summarize @property def map_speed_to_thickness(self) -> List[Thickness]: if self.__map_speed_to_thickness is None: self.__map_speed_to_thickness = [] map_speed_to_thickness = self.__user_data.get( - 'MAP_SPEED_TO_THICKNESS', {} + TOML_MAP_SPEED_TO_THICKNESS, {} ) for speed, thickness in map_speed_to_thickness.items(): try: @@ -512,12 +530,12 @@ class Settings: )) except ValueError: LOGGER.error( - f'Invalid entry in MAP_SPEED_TO_THICKNESS -> {speed}={thickness} -> ignored' + f'Invalid entry in {TOML_MAP_SPEED_TO_THICKNESS} -> {speed}={thickness} -> ignored' ) continue LOGGER.info( - f'Valid entries in MAP_SPEED_TO_THICKNESS found: {len(self.__map_speed_to_thickness)}' # noqa: E501 - f'/{len(self.__user_data.get("MAP_SPEED_TO_THICKNESS", []))}' + f'Valid entries in {TOML_MAP_SPEED_TO_THICKNESS} found: {len(self.__map_speed_to_thickness)}' # noqa: E501 + 
f'/{len(self.__user_data.get(TOML_MAP_SPEED_TO_THICKNESS, []))}' ) return self.__map_speed_to_thickness @@ -525,7 +543,7 @@ class Settings: def protected_topologies(self) -> List[str]: if self.__protected_topologies is None: self.__protected_topologies = [str(topology) for topology in self.__user_data.get( - 'PROTECTED_TOPOLOGIES', [] + TOML_PROTECTED_TOPOLOGIES, [] )] return self.__protected_topologies @@ -533,12 +551,12 @@ class Settings: def static_connections(self) -> List[StaticConnection]: if self.__static_connections is None: self.__static_connections = [] - for connection in self.__user_data.get('STATIC_CONNECTIONS', []): + for connection in self.__user_data.get(TOML_STATIC_CONNECTIONS, []): try: left_host, left_service, right_service, right_host = connection except ValueError: LOGGER.error( - f'Wrong entry in STATIC_CONNECTIONS -> {connection} -> ignored' + f'Wrong entry in {TOML_STATIC_CONNECTIONS} -> {connection} -> ignored' ) continue if not right_host or not left_host: @@ -553,14 +571,14 @@ class Settings: left_host=str(left_host), )) LOGGER.info( - f'Valid entries in STATIC_CONNECTIONS found: {len(self.__static_connections)}/' - f'{len(self.__user_data.get("STATIC_CONNECTIONS", []))}' + f'Valid entries in {TOML_STATIC_CONNECTIONS} found: {len(self.__static_connections)}/' + f'{len(self.__user_data.get(TOML_STATIC_CONNECTIONS, []))}' ) return self.__static_connections @property def sites(self) -> List[str]: if self.__sites is None: - self.__sites = [str(site) for site in set(self.__user_data.get('SITES', [])) if is_valid_site_name(site)] + self.__sites = [str(site) for site in set(self.__user_data.get(TOML_SITES, [])) if is_valid_site_name(site)] LOGGER.info(f'Found {len(self.__sites)} to filter on') return self.__sites diff --git a/source/bin/nvdct/lib/topologies.py b/source/bin/nvdct/lib/topologies.py index 32710db0219f2adfb5b5400fce911a47b0966c2e..3fa4cbb028ca607b96a6c3db529bfd9b0f05bb6a 100755 --- a/source/bin/nvdct/lib/topologies.py +++ b/source/bin/nvdct/lib/topologies.py @@ -2,16 +2,21 @@ # -*- coding: utf-8 -*- # # License: GNU General Public License v2 +import sys # Author: thl-cmk[at]outlook[dot]com # URL : https://thl-cmk.hopto.org # Date : 2024-06-09 # File : lib/topologies.py -from collections.abc import Mapping, Sequence -from ipaddress import IPv4Address, IPv4Network -from typing import Dict, List, Tuple +# 2024-12-22: refactoring topology creation into classes +# made L3 topology IP version independent + +from abc import abstractmethod +from collections.abc import Mapping, MutableMapping, Sequence +from ipaddress import ip_address, ip_network, ip_interface from re import sub as re_sub +from typing import Dict, List, Tuple from lib.backends import ( CacheItems, @@ -21,9 +26,13 @@ from lib.constants import ( CACHE_INTERFACES_DATA, HOST_LABEL_L3V4_HOSTS, HOST_LABEL_L3V4_ROUTER, + HOST_LABEL_L3V6_HOSTS, + HOST_LABEL_L3V6_ROUTER, + IPVersion, LOGGER, PATH_INTERFACES, - PATH_L3v4, + PATH_L3, + DATAPATH, ) from lib.settings import ( Emblems, @@ -33,8 +42,9 @@ from lib.settings import ( ) from lib.utils import ( InventoryColumns, - Ipv4Info, - is_valid_hostname, + IpInfo, + # is_valid_hostname, + save_data_to_file, ) @@ -165,21 +175,21 @@ class NvObjects: elif metadata is not {}: self.nv_objects[service_object]['metadata'].update(metadata) - def add_ipv4_address( + def add_ip_address( self, host: str, - ipv4_address: str, + raw_ip_address: str, emblem: str, interface: str | None, ) -> None: if interface is not None: - service_object = 
f'{ipv4_address}@{interface}@{host}' + service_object = f'{raw_ip_address}@{interface}@{host}' else: - service_object = f'{ipv4_address}@{host}' + service_object = f'{raw_ip_address}@{host}' if service_object not in self.nv_objects: self.nv_objects[service_object] = { - 'name': ipv4_address, + 'name': raw_ip_address, 'link': {}, 'metadata': { 'images': { @@ -188,13 +198,13 @@ class NvObjects: }, 'tooltip': { 'quickinfo': [ - {'name': 'IP-Address', 'value': ipv4_address}, + {'name': 'IP-Address', 'value': raw_ip_address}, ] } } } - def add_ipv4_network(self, network: str, emblem: str, ) -> None: + def add_ip_network(self, network: str, emblem: str, ) -> None: if network not in self.nv_objects: self.nv_objects[network] = { 'name': network, @@ -306,17 +316,18 @@ class NvConnections: f'<->{right} (duplex: {right_duplex})' ) if left_native_vlan and right_native_vlan: - if left_native_vlan != right_native_vlan: - warning = True - - metadata = add_tooltip_html( - metadata, 'Native<br>VLAN', left, left_native_vlan, right, right_native_vlan - ) - - LOGGER.warning( - f'Connection with native vlan mismatch: ' - f'{left} (vlan: {left_native_vlan})<->{right} (vlan: {right_native_vlan})' - ) + if left_native_vlan != '0' and right_native_vlan != '0': # ignore VLAN 0 (Native VLAN on routed ports) + if left_native_vlan != right_native_vlan: + warning = True + + metadata = add_tooltip_html( + metadata, 'Native<br>VLAN', left, left_native_vlan, right, right_native_vlan + ) + + LOGGER.warning( + f'Connection with native vlan mismatch: ' + f'{left} (vlan: {left_native_vlan})<->{right} (vlan: {right_native_vlan})' + ) if warning: metadata['line_config'].update({ 'color': 'red', @@ -329,6 +340,481 @@ class NvConnections: connection.append(metadata) +class Topology: + def __init__( + self, + emblems: Emblems, + host_cache: HostCache, + ): + self.nv_objects: NvObjects = NvObjects() + self.nv_connections: NvConnections = NvConnections() + self.emblems: Emblems = emblems + self.host_cache: HostCache = host_cache + + @abstractmethod + def create(self): + raise NotImplementedError + + def save(self, label:str, output_directory: str, make_default: bool): + data = { + 'version': 1, + 'name': label, + 'objects': dict(sorted(self.nv_objects.nv_objects.items())), + 'connections': sorted(self.nv_connections.nv_connections) + } + save_data_to_file( + data=data, + path=( + f'{DATAPATH}/{output_directory}' + ), + file=f'data_{label}.json', + make_default=make_default, + ) + +class TopologyStatic(Topology): + def __init__( + self, + connections: Sequence[StaticConnection], + emblems: Emblems, + host_cache: HostCache, + ): + super().__init__( + emblems=emblems, + host_cache=host_cache, + ) + self.connections: Sequence[StaticConnection] = connections + + def create(self): + for connection in self.connections: + LOGGER.info(msg=f'connection: {connection}') + self.nv_objects.add_host( + host=connection.right_host, + host_cache=self.host_cache, + emblem=self.emblems.host_node + ) + self.nv_objects.add_host( + host=connection.left_host, + host_cache=self.host_cache, + emblem=self.emblems.host_node + ) + if connection.right_service: + self.nv_objects.add_service( + host=connection.right_host, + host_cache=self.host_cache, + emblem=self.emblems.service_node, + service=connection.right_service + ) + self.nv_connections.add_connection( + left=connection.right_host, + right=f'{connection.right_service}@{connection.right_host}', + ) + + if connection.left_service: + self.nv_objects.add_service( + host=connection.left_host, + 
host_cache=self.host_cache, + emblem=self.emblems.service_node, + service=connection.left_service + ) + self.nv_connections.add_connection( + left=connection.left_host, + right=f'{connection.left_service}@{connection.left_host}', + ) + + if connection.right_service and connection.left_service: + self.nv_connections.add_connection( + left=f'{connection.right_service}@{connection.right_host}', + right=f'{connection.left_service}@{connection.left_host}', + ) + elif connection.right_service: # connect right_service with left_host + self.nv_connections.add_connection( + left=f'{connection.right_service}@{connection.right_host}', + right=f'{connection.left_host}', + ) + elif connection.left_service: # connect left_service with right_host + self.nv_connections.add_connection( + left=f'{connection.right_host}', + right=f'{connection.left_service}@{connection.left_host}', + ) + else: # connect right_host with left_host + self.nv_connections.add_connection( + left=f'{connection.right_host}', + right=f'{connection.left_host}', + ) + +class TopologyL2(Topology): + def __init__( + self, + emblems: Emblems, + host_cache: HostCache, + case: str, + inv_columns: InventoryColumns, + l2_drop_hosts: List[str], + l2_host_map: Dict[str, str], + l2_neighbour_replace_regex: List[Tuple[str, str]], + label: str, + path_in_inventory: str, + prefix: str, + remove_domain: bool, + seed_devices: Sequence[str], + ): + super().__init__( + emblems=emblems, + host_cache=host_cache, + ) + self.case: str = case + self.inv_columns: InventoryColumns = inv_columns + self.l2_drop_hosts: List[str] = l2_drop_hosts + self.l2_host_map: Dict[str, str] = l2_host_map + self.l2_neighbour_replace_regex: List[Tuple[str, str]] = l2_neighbour_replace_regex + self.label: str = label + self.neighbour_to_host: MutableMapping[str, str] = {} + self.path_in_inventory: str = path_in_inventory + self.prefix: str = prefix + self.remove_domain: bool = remove_domain + self.seed_devices: Sequence[str] = seed_devices + + def create(self): + if not (devices_to_go := list(set(self.seed_devices))): # remove duplicates + LOGGER.error('No seed devices configured!') + return + + devices_done = [] + + while devices_to_go: + device = devices_to_go[0] + + if device in self.l2_host_map.keys(): + try: + devices_to_go.remove(device) + except ValueError: + pass + device = self.l2_host_map[device] + if device in devices_done: + continue + + topo_data = self.host_cache.get_data( + host=device, item=CacheItems.inventory, path=self.path_in_inventory + ) + if topo_data: + self.device_from_inventory( + host=device, + inv_data=topo_data, + ) + + for _entry in self.nv_objects.host_list: + if _entry not in devices_done: + devices_to_go.append(_entry) + + devices_to_go = list(set(devices_to_go)) + devices_done.append(device) + devices_to_go.remove(device) + LOGGER.info(msg=f'Device done: {device}, source: {self.label}') + + def device_from_inventory( + self, + host: str, + inv_data, + ): + for topo_neighbour in inv_data: + # check if required data are not empty + if not (neighbour := topo_neighbour.get(self.inv_columns.neighbour)): + LOGGER.warning(f'incomplete data, neighbour missing {topo_neighbour}') + continue + if not (raw_local_port := topo_neighbour.get(self.inv_columns.local_port)): + LOGGER.warning(f'incomplete data, local port missing {topo_neighbour}') + continue + if not (raw_neighbour_port := topo_neighbour.get(self.inv_columns.neighbour_port)): + LOGGER.warning(f'incomplete data, neighbour port missing {topo_neighbour}') + continue + + if not (neighbour:= 
self.get_host_from_neighbour(neighbour)): + continue + + # getting/checking interfaces + local_port = get_service_by_interface(host, raw_local_port, self.host_cache) + if not local_port: + local_port = raw_local_port + LOGGER.warning(msg=f'service not found: host: {host}, raw_local_port: {raw_local_port}') + elif local_port != raw_local_port: + # local_port = raw_local_port # don't reset local_port + LOGGER.info( + msg=f'host: {host}, raw_local_port: {raw_local_port} -> local_port: {local_port}' + ) + + neighbour_port = get_service_by_interface(neighbour, raw_neighbour_port, self.host_cache) + if not neighbour_port: + neighbour_port = raw_neighbour_port + LOGGER.warning( + msg=f'service not found: neighbour: {neighbour}, ' + f'raw_neighbour_port: {raw_neighbour_port}' + ) + elif neighbour_port != raw_neighbour_port: + # neighbour_port = raw_neighbour_port # don't reset neighbour_port + LOGGER.info( + msg=f'neighbour: {neighbour}, raw_neighbour_port {raw_neighbour_port} ' + f'-> neighbour_port {neighbour_port}' + ) + + metadata = { + 'duplex': topo_neighbour.get('duplex'), + 'native_vlan': topo_neighbour.get('native_vlan'), + } + + self.nv_objects.add_host(host=host, host_cache=self.host_cache) + self.nv_objects.add_host(host=neighbour, host_cache=self.host_cache) + self.nv_objects.add_interface( + host=str(host), + service=str(local_port), + host_cache=self.host_cache, + metadata=metadata, + name=str(raw_local_port), + item=str(local_port) + ) + self.nv_objects.add_interface( + host=str(neighbour), + service=str(neighbour_port), + host_cache=self.host_cache, + name=str(raw_neighbour_port), + item=str(neighbour_port) + ) + self.nv_connections.add_connection( + left=str(host), + right=f'{local_port}@{host}', + ) + self.nv_connections.add_connection( + left=str(neighbour), + right=f'{neighbour_port}@{neighbour}', + ) + self.nv_connections.add_connection( + left=f'{local_port}@{host}', + right=f'{neighbour_port}@{neighbour}', + ) + + def get_host_from_neighbour(self, neighbour: str) -> str | None: + try: + return self.neighbour_to_host[neighbour] + except KeyError: + pass + + if neighbour in self.l2_drop_hosts: + LOGGER.info(msg=f'drop neighbour: {neighbour}') + self.neighbour_to_host[neighbour] = None + return None + + if self.l2_neighbour_replace_regex: + for re_str, replace_str in self.l2_neighbour_replace_regex: + re_neighbour = re_sub(re_str, replace_str, neighbour) + if re_neighbour != neighbour: + LOGGER.info(f'regex changed Neighbor |{neighbour}| to |{re_neighbour}|') + neighbour = re_neighbour + if not neighbour: + LOGGER.info(f'Neighbour removed by regex (|{neighbour}|, |{re_str}|, |{replace_str}|)') + break + if not neighbour: + self.neighbour_to_host[neighbour] = None + return None + + if self.remove_domain: + neighbour = neighbour.split('.')[0] + + # drop neighbour after domain split + if neighbour in self.l2_drop_hosts: + LOGGER.info(msg=f'drop neighbour: {neighbour}') + self.neighbour_to_host[neighbour] = None + return None + + if self.case == 'UPPER': + neighbour = neighbour.upper() + LOGGER.debug(f'Changed neighbour to upper case: {neighbour}') + elif self.case == 'LOWER': + neighbour = neighbour.lower() + LOGGER.debug(f'Changed neighbour to lower case: {neighbour}') + + if self.prefix: + neighbour = f'{self.prefix}{neighbour}' + # rewrite neighbour if inventory neighbour and checkmk host don't match + if neighbour in self.l2_host_map.keys(): + neighbour = self.l2_host_map[neighbour] + + return neighbour + +class TopologyL3(Topology): + def __init__( + self, + emblems: 
Emblems, + host_cache: HostCache, + ignore_hosts: Sequence[str], + ignore_ips: Sequence[ip_network], + ignore_wildcard: Sequence[Wildcard], + include_hosts: bool, + replace: Mapping[str, str], + skip_if: bool, + skip_ip: bool, + summarize: Sequence[ip_network], + version: int + ): + super().__init__( + emblems=emblems, + host_cache=host_cache, + ) + self.ignore_hosts: Sequence[str] = ignore_hosts + self.ignore_ips: Sequence[ip_network] = ignore_ips + self.ignore_wildcard: Sequence[Wildcard] = ignore_wildcard + self.include_hosts: bool = include_hosts + self.replace: Mapping[str, str] = replace + self.skip_if: bool = skip_if + self.skip_ip: bool = skip_ip + self.summarize: Sequence[ip_network] = summarize + self.version = version + + def create(self): + match self.version: + case IPVersion.IPv4: + host_list: Sequence[str] = self.host_cache.get_hosts_by_label(HOST_LABEL_L3V4_ROUTER) + + if self.include_hosts: + host_list += self.host_cache.get_hosts_by_label(HOST_LABEL_L3V4_HOSTS) + + case IPVersion.IPv6: + host_list: Sequence[str] = self.host_cache.get_hosts_by_label(HOST_LABEL_L3V6_ROUTER) + + if self.include_hosts: + host_list += self.host_cache.get_hosts_by_label(HOST_LABEL_L3V6_HOSTS) + + case _: + host_list = [] + + LOGGER.debug(f'host list: {host_list}') + if not host_list: + LOGGER.warning( + msg='No (routing capable) host found. Check if "inv_ip_addresses.mkp" ' + 'added/enabled and inventory and host label discovery has run.' + ) + return + + LOGGER.debug(f'L3 ignore hosts: {self.ignore_hosts}') + for raw_host in host_list: + host = raw_host + if host in self.ignore_hosts: + LOGGER.info(f'L3 host {host} ignored') + continue + if not (inv_ip_addresses := self.host_cache.get_data( + host=host, item=CacheItems.inventory, path=PATH_L3) + ): + LOGGER.warning(f'No IP address inventory found for host: {host}') + continue + + self.nv_objects.add_host(host=host, host_cache=self.host_cache) + for inv_ip_address in inv_ip_addresses: + emblem = self.emblems.ip_network + try: + ip_info = IpInfo( + address=inv_ip_address['address'], + device=inv_ip_address['device'], + broadcast=inv_ip_address['broadcast'], + cidr=inv_ip_address['cidr'], + netmask=inv_ip_address['netmask'], + network=inv_ip_address['network'], + type=inv_ip_address['type'], + scope_id=inv_ip_address.get('scope_id'), # this is an optional field + ) + except KeyError: + LOGGER.warning(f'Drop IP address data for host: {host}, data: {inv_ip_address}') + continue + + interface_address: ip_interface = ip_interface(f'{ip_info.address}/{ip_info.cidr}') + if interface_address.version != self.version: + LOGGER.info( + f'host: {host} dropped non IPv{self.version} address: {ip_info.address},' + f' type: {ip_info.type}' + ) + continue + + if interface_address.is_loopback: + LOGGER.info(f'host: {host} dropped loopback address: {ip_info.address}') + continue + + if interface_address.is_link_local: + LOGGER.info(f'host: {host} dropped link-local address: {ip_info.address}') + continue + + # if interface_address.network.prefixlen == 32 or interface_address.network.prefixlen == 128: # drop host addresses + # LOGGER.info( + # f'host: {host} dropped host address: {ip_info.address}/{ip_info.cidr}' + # ) + # continue + + if is_ignore_ip(ip_info.address, self.ignore_ips): + LOGGER.info(f'host: {host} dropped ignore address: {ip_info.address}') + continue + + if is_ignore_wildcard(ip_info.address, self.ignore_wildcard): + LOGGER.info(f'host: {host} dropped wildcard address: {ip_info.address}') + continue + + if network := get_network_summary( + 
raw_ip_address=ip_info.address,
+                        summarize=self.summarize,
+                ):
+                    emblem = self.emblems.l3_summarize
+                    LOGGER.info(
+                        f'Network summarized: {ip_info.network}/{ip_info.cidr} -> {network}'
+                    )
+                else:
+                    network = f'{ip_info.network}/{ip_info.cidr}'
+
+                if network in self.replace.keys():
+                    LOGGER.info(f'Replaced network {network} with {self.replace[network]}')
+                    network = self.replace[network]
+                    emblem = self.emblems.l3_replace
+
+                self.nv_objects.add_ip_network(network=network, emblem=emblem)
+
+                if self.skip_if is True and self.skip_ip is True:
+                    self.nv_connections.add_connection(left=host, right=network)
+                elif self.skip_if is True and self.skip_ip is False:
+                    self.nv_objects.add_ip_address(
+                        host=host,
+                        interface=None,
+                        raw_ip_address=ip_info.address,
+                        emblem=self.emblems.ip_address,
+                    )
+                    self.nv_objects.add_tooltip_quickinfo(
+                        f'{ip_info.address}@{host}', 'Interface', ip_info.device
+                    )
+                    self.nv_connections.add_connection(left=f'{host}', right=f'{ip_info.address}@{host}')
+                    self.nv_connections.add_connection(left=network, right=f'{ip_info.address}@{host}')
+                elif self.skip_if is False and self.skip_ip is True:
+                    self.nv_objects.add_interface(
+                        host=host, service=ip_info.device, host_cache=self.host_cache
+                    )
+                    self.nv_objects.add_tooltip_quickinfo(
+                        f'{ip_info.device}@{host}', 'IP-address', ip_info.address
+                    )
+                    self.nv_connections.add_connection(left=f'{host}', right=f'{ip_info.device}@{host}')
+                    self.nv_connections.add_connection(left=network, right=f'{ip_info.device}@{host}')
+                else:
+                    self.nv_objects.add_ip_address(
+                        host=host,
+                        interface=ip_info.device,
+                        raw_ip_address=ip_info.address,
+                        emblem=self.emblems.ip_address,
+                    )
+                    self.nv_objects.add_interface(
+                        host=host, service=ip_info.device, host_cache=self.host_cache,
+                    )
+                    self.nv_connections.add_connection(
+                        left=host, right=f'{ip_info.device}@{host}')
+                    self.nv_connections.add_connection(
+                        left=f'{ip_info.device}@{host}',
+                        right=f'{ip_info.address}@{ip_info.device}@{host}',
+                    )
+                    self.nv_connections.add_connection(
+                        left=network, right=f'{ip_info.address}@{ip_info.device}@{host}',
+                    )
+
+
 def map_speed_to_thickness(speed_to_map: int, speed_map: Sequence[Thickness]) -> int:
     thickness: int = 1  # use in case of empty MAP_SPEED_TO_THICKNESS
     for speed, thickness in speed_map:
@@ -584,27 +1070,33 @@ def close_tooltip_html(metadata: Dict) -> Dict:
     return metadata
 
 
-def get_network_summary(ipv4_address: str, summarize: Sequence[IPv4Network]) -> str | None:
+def get_network_summary(raw_ip_address: str, summarize: Sequence[ip_network]) -> str | None:
     for network in summarize:
-        if IPv4Network(ipv4_address).subnet_of(network):
-            return network.exploded
+        try:
+            if ip_network(raw_ip_address).subnet_of(network):
+                return network.compressed
+        except TypeError:
+            pass
     return None
 
 
-def is_ignore_ipv4(ip_address: str, ignore_ips: Sequence[IPv4Network]) -> bool:
+def is_ignore_ip(raw_ip_address: str, ignore_ips: Sequence[ip_network]) -> bool:
     for ip in ignore_ips:
-        if IPv4Network(ip_address).subnet_of(ip):
-            LOGGER.info(f'IP address {ip_address} is in ignore list -> ({ip})')
-            return True
+        try:
+            if ip_network(raw_ip_address).subnet_of(ip):
+                LOGGER.info(f'IP address {raw_ip_address} is in ignore list -> ({ip})')
+                return True
+        except TypeError:
+            continue
     return False
 
 
-def is_ignore_wildcard(ip_address: str, ignore_wildcard: Sequence[Wildcard]) -> bool:
-    int_ip_address = int(IPv4Address(ip_address))
+def is_ignore_wildcard(raw_ip_address: str, ignore_wildcard: Sequence[Wildcard]) -> bool:
+    int_ip_address = 
int(ip_address(raw_ip_address)) for wildcard in ignore_wildcard: if int_ip_address & wildcard.int_wildcard == wildcard.bit_pattern: LOGGER.info( - f'IP address {ip_address} matches ignore wildcard ' + f'IP address {raw_ip_address} matches ignore wildcard ' f'list ({wildcard.ip_address}/{wildcard.wildcard})' ) return True @@ -616,387 +1108,3 @@ def get_list_of_devices(data: Mapping) -> List[str]: for connection in data.values(): devices.append(connection[0]) return list(set(devices)) - - -def create_static_connections( - connections_: Sequence[StaticConnection], - emblems: Emblems, - host_cache: HostCache, - nv_connections: NvConnections, - nv_objects: NvObjects, -): - for connection in connections_: - LOGGER.info(msg=f'connection: {connection}') - nv_objects.add_host( - host=connection.right_host, - host_cache=host_cache, - emblem=emblems.host_node - ) - nv_objects.add_host( - host=connection.left_host, - host_cache=host_cache, - emblem=emblems.host_node - ) - if connection.right_service: - nv_objects.add_service( - host=connection.right_host, - host_cache=host_cache, - emblem=emblems.service_node, - service=connection.right_service - ) - nv_connections.add_connection( - left=connection.right_host, - right=f'{connection.right_service}@{connection.right_host}', - ) - - if connection.left_service: - nv_objects.add_service( - host=connection.left_host, - host_cache=host_cache, - emblem=emblems.service_node, - service=connection.left_service - ) - nv_connections.add_connection( - left=connection.left_host, - right=f'{connection.left_service}@{connection.left_host}', - ) - - if connection.right_service and connection.left_service: - nv_connections.add_connection( - left=f'{connection.right_service}@{connection.right_host}', - right=f'{connection.left_service}@{connection.left_host}', - ) - elif connection.right_service: # connect right_service with left_host - nv_connections.add_connection( - left=f'{connection.right_service}@{connection.right_host}', - right=f'{connection.left_host}', - ) - elif connection.left_service: # connect left_service with right_host - nv_connections.add_connection( - left=f'{connection.right_host}', - right=f'{connection.left_service}@{connection.left_host}', - ) - else: # connect right_host with left_host - nv_connections.add_connection( - left=f'{connection.right_host}', - right=f'{connection.left_host}', - ) - - -def create_l2_device_from_inv( - case: str, - host: str, - host_cache: HostCache, - inv_columns: InventoryColumns, - inv_data: Sequence[Mapping[str, str]], - l2_drop_hosts: List, - l2_host_map: Dict[str, str], - l2_neighbour_replace_regex: List[Tuple[str, str]] | None, - nv_connections: NvConnections, - nv_objects: NvObjects, - prefix: str, - remove_domain: bool, -) -> None: - for topo_neighbour in inv_data: - # check if required data are not empty - if not (neighbour := topo_neighbour.get(inv_columns.neighbour)): - LOGGER.warning(f'incomplete data, neighbour missing {topo_neighbour}') - continue - if not (raw_local_port := topo_neighbour.get(inv_columns.local_port)): - LOGGER.warning(f'incomplete data, local port missing {topo_neighbour}') - continue - if not (raw_neighbour_port := topo_neighbour.get(inv_columns.neighbour_port)): - LOGGER.warning(f'incomplete data, neighbour port missing {topo_neighbour}') - continue - - # drop neighbour before domain split - if neighbour in l2_drop_hosts: - LOGGER.info(msg=f'drop neighbour: {neighbour}') - continue - - if l2_neighbour_replace_regex: - for re_str, replace_str in l2_neighbour_replace_regex: - re_neighbour 
= re_sub(re_str, replace_str, neighbour) - if re_neighbour != neighbour: - LOGGER.info(f'regex changed Neighbor |{neighbour}| to |{re_neighbour}|') - neighbour = re_neighbour - if not neighbour: - LOGGER.info(f'Neighbour removed by regex (|{neighbour}|, |{re_str}|, |{replace_str}|)') - break - if not neighbour: - continue - - if remove_domain: - neighbour = neighbour.split('.')[0] - - # drop neighbour after domain split - if neighbour in l2_drop_hosts: - LOGGER.info(msg=f'drop neighbour: {neighbour}') - continue - - if case == 'UPPER': - neighbour = neighbour.upper() - LOGGER.debug(f'Changed neighbour to upper case: {neighbour}') - elif case == 'LOWER': - neighbour = neighbour.lower() - LOGGER.debug(f'Changed neighbour to lower case: {neighbour}') - - if prefix: - neighbour = f'{prefix}{neighbour}' - # rewrite neighbour if inventory neighbour and checkmk host don't match - if neighbour in l2_host_map.keys(): - neighbour = l2_host_map[neighbour] - - # getting/checking interfaces - local_port = get_service_by_interface(host, raw_local_port, host_cache) - if not local_port: - local_port = raw_local_port - LOGGER.warning(msg=f'service not found: host: {host}, raw_local_port: {raw_local_port}') - elif local_port != raw_local_port: - # local_port = raw_local_port # don't reset local_port - LOGGER.info( - msg=f'host: {host}, raw_local_port: {raw_local_port} -> local_port: {local_port}' - ) - - neighbour_port = get_service_by_interface(neighbour, raw_neighbour_port, host_cache) - if not neighbour_port: - neighbour_port = raw_neighbour_port - LOGGER.warning( - msg=f'service not found: neighbour: {neighbour}, ' - f'raw_neighbour_port: {raw_neighbour_port}' - ) - elif neighbour_port != raw_neighbour_port: - # neighbour_port = raw_neighbour_port # don't reset neighbour_port - LOGGER.info( - msg=f'neighbour: {neighbour}, raw_neighbour_port {raw_neighbour_port} ' - f'-> neighbour_port {neighbour_port}' - ) - - metadata = { - 'duplex': topo_neighbour.get('duplex'), - 'native_vlan': topo_neighbour.get('native_vlan'), - } - - nv_objects.add_host(host=host, host_cache=host_cache) - nv_objects.add_host(host=neighbour, host_cache=host_cache) - nv_objects.add_interface( - host=str(host), - service=str(local_port), - host_cache=host_cache, - metadata=metadata, - name=str(raw_local_port), - item=str(local_port) - ) - nv_objects.add_interface( - host=str(neighbour), - service=str(neighbour_port), - host_cache=host_cache, - name=str(raw_neighbour_port), - item=str(neighbour_port) - ) - nv_connections.add_connection( - left=str(host), - right=f'{local_port}@{host}', - ) - nv_connections.add_connection( - left=str(neighbour), - right=f'{neighbour_port}@{neighbour}', - ) - nv_connections.add_connection( - left=f'{local_port}@{host}', - right=f'{neighbour_port}@{neighbour}', - ) - - -def create_l2_topology( - case: str, - host_cache: HostCache, - inv_columns: InventoryColumns, - l2_drop_hosts: List[str], - l2_host_map: Dict[str, str], - l2_neighbour_replace_regex: List[Tuple[str, str]], - label_: str, - nv_connections: NvConnections, - nv_objects: NvObjects, - path_in_inventory: str, - prefix: str, - remove_domain: bool, - seed_devices: Sequence[str], -) -> None: - devices_to_go = list(set(seed_devices)) # remove duplicates - devices_done = [] - - while devices_to_go: - device = devices_to_go[0] - - if device in l2_host_map.keys(): - try: - devices_to_go.remove(device) - except ValueError: - pass - device = l2_host_map[device] - if device in devices_done: - continue - - topo_data = host_cache.get_data( - host=device, 
item=CacheItems.inventory, path=path_in_inventory - ) - if topo_data: - create_l2_device_from_inv( - host=device, - inv_data=topo_data, - inv_columns=inv_columns, - l2_host_map=l2_host_map, - l2_drop_hosts=l2_drop_hosts, - l2_neighbour_replace_regex=l2_neighbour_replace_regex, - host_cache=host_cache, - nv_objects=nv_objects, - nv_connections=nv_connections, - case=case, - prefix=prefix, - remove_domain=remove_domain - ) - - for _entry in nv_objects.host_list: - if _entry not in devices_done: - devices_to_go.append(_entry) - - devices_to_go = list(set(devices_to_go)) - devices_done.append(device) - devices_to_go.remove(device) - LOGGER.info(msg=f'Device done: {device}, source: {label_}') - - -def create_l3v4_topology( - emblems: Emblems, - host_cache: HostCache, - ignore_hosts: Sequence[str], - ignore_ips: Sequence[IPv4Network], - ignore_wildcard: Sequence[Wildcard], - include_hosts: bool, - nv_connections: NvConnections, - nv_objects: NvObjects, - replace: Mapping[str, str], - skip_if: bool, - skip_ip: bool, - summarize: Sequence[IPv4Network], -) -> None: - host_list: Sequence[str] = host_cache.get_hosts_by_label(HOST_LABEL_L3V4_ROUTER) - - if include_hosts: - host_list += host_cache.get_hosts_by_label(HOST_LABEL_L3V4_HOSTS) - - LOGGER.debug(f'host list: {host_list}') - if not host_list: - LOGGER.warning( - msg='No (routing capable) host found. Check if "inv_ip_addresses.mkp" ' - 'added/enabled and inventory and host label discovery has run.' - ) - return - - LOGGER.debug(f'L3v4 ignore hosts: {ignore_hosts}') - for raw_host in host_list: - host = raw_host - if host in ignore_hosts: - LOGGER.info(f'L3v4 host {host} ignored') - continue - if not (ipv4_addresses := host_cache.get_data( - host=host, item=CacheItems.inventory, path=PATH_L3v4) - ): - LOGGER.warning(f'No IPv4 address inventory found for host: {host}') - continue - - nv_objects.add_host(host=host, host_cache=host_cache) - for _entry in ipv4_addresses: - emblem = emblems.ip_network - try: - ipv4_info = Ipv4Info(**_entry) - except TypeError: # as e - LOGGER.warning(f'Drop IPv4 address data for host: {host}, data: {_entry}') - continue - - if ipv4_info.address.startswith('127.'): # drop loopback addresses - LOGGER.info(f'host: {host} dropped loopback address: {ipv4_info.address}') - continue - - if ipv4_info.cidr == 32: # drop host addresses - LOGGER.info( - f'host: {host} dropped host address: {ipv4_info.address}/{ipv4_info.cidr}' - ) - continue - - if ipv4_info.type.lower() != 'ipv4': # drop if not ipv4 - LOGGER.warning( - f'host: {host} dropped non ipv4 address: {ipv4_info.address},' - f' type: {ipv4_info.type}' - ) - continue - - if is_ignore_ipv4(ipv4_info.address, ignore_ips): - LOGGER.info(f'host: {host} dropped ignore address: {ipv4_info.address}') - continue - - if is_ignore_wildcard(ipv4_info.address, ignore_wildcard): - LOGGER.info(f'host: {host} dropped wildcard address: {ipv4_info.address}') - continue - - if network := get_network_summary( - ipv4_address=ipv4_info.address, - summarize=summarize, - ): - emblem = emblems.l3v4_summarize - LOGGER.info( - f'Network summarized: {ipv4_info.network}/{ipv4_info.cidr} -> {network}' - ) - else: - network = f'{ipv4_info.network}/{ipv4_info.cidr}' - - if network in replace.keys(): - LOGGER.info(f'Replaced network {network} with {replace[network]}') - network = replace[network] - emblem = emblems.l3v4_replace - - nv_objects.add_ipv4_network(network=network, emblem=emblem) - - if skip_if is True and skip_ip is True: - nv_connections.add_connection(left=host, right=network) - elif 
skip_if is True and skip_ip is False: - nv_objects.add_ipv4_address( - host=host, - interface=None, - ipv4_address=ipv4_info.address, - emblem=emblems.ip_address, - ) - nv_objects.add_tooltip_quickinfo( - '{ipv4_info.address}@{host}', 'Interface', ipv4_info.device - ) - nv_connections.add_connection(left=f'{host}', right=f'{ipv4_info.address}@{host}') - nv_connections.add_connection(left=network, right=f'{ipv4_info.address}@{host}') - elif skip_if is False and skip_ip is True: - nv_objects.add_interface( - host=host, service=ipv4_info.device, host_cache=host_cache - ) - nv_objects.add_tooltip_quickinfo( - f'{ipv4_info.device}@{host}', 'IP-address', ipv4_info.address - ) - nv_connections.add_connection(left=f'{host}', right=f'{ipv4_info.device}@{host}') - nv_connections.add_connection(left=network, right=f'{ipv4_info.device}@{host}') - else: - nv_objects.add_ipv4_address( - host=host, - interface=ipv4_info.device, - ipv4_address=ipv4_info.address, - emblem=emblems.ip_address, - ) - nv_objects.add_interface( - host=host, service=ipv4_info.device, host_cache=host_cache, - ) - nv_connections.add_connection( - left=host, right=f'{ipv4_info.device}@{host}') - nv_connections.add_connection( - left=f'{ipv4_info.device}@{host}', - right=f'{ipv4_info.address}@{ipv4_info.device}@{host}', - ) - nv_connections.add_connection( - left=network, right=f'{ipv4_info.address}@{ipv4_info.device}@{host}', - ) diff --git a/source/bin/nvdct/lib/utils.py b/source/bin/nvdct/lib/utils.py index 4bd7f573474d8f75a4463ad77e5e089fa1a2c6ba..c5049225044bfd0d16c151777508ffcec9ff324f 100755 --- a/source/bin/nvdct/lib/utils.py +++ b/source/bin/nvdct/lib/utils.py @@ -7,10 +7,9 @@ # Date : 2023-10-12 # File : nvdct/lib/utils.py -from collections.abc import Mapping, Sequence from ast import literal_eval +from collections.abc import Mapping, Sequence from dataclasses import dataclass -from enum import Enum, unique from json import dumps from logging import disable as log_off, Formatter, getLogger, StreamHandler from logging.handlers import RotatingFileHandler @@ -24,33 +23,15 @@ from typing import List, Dict, TextIO from lib.constants import ( CMK_SITE_CONF, - COLUMNS_CDP, - COLUMNS_LLDP, - HOST_LABEL_CDP, - HOST_LABEL_L3V4_ROUTER, - HOST_LABEL_LLDP, - LABEL_CDP, - LABEL_L3v4, - LABEL_LLDP, + DATAPATH, + ExitCodes, LOGGER, OMD_ROOT, - PATH_CDP, - PATH_L3v4, - PATH_LLDP, - DATAPATH, ) -@unique -class ExitCodes(Enum): - OK = 0 - BAD_OPTION_LIST = 1 - BAD_TOML_FORMAT = 2 - BACKEND_NOT_IMPLEMENTED = 3 - AUTOMATION_SECRET_NOT_FOUND = 4 - NO_LAYER_CONFIGURED = 5 @dataclass(frozen=True) -class Ipv4Info: +class IpInfo: address: str device: str broadcast: str @@ -58,6 +39,7 @@ class Ipv4Info: netmask: str network: str type: str + scope_id: str | None @dataclass(frozen=True) class InventoryColumns: @@ -65,35 +47,6 @@ class InventoryColumns: local_port: str neighbour_port: str -@dataclass(frozen=True) -class Layer: - path: str - columns: str - label: str - host_label: str - -LAYERS = { - 'CDP': Layer( - path=PATH_CDP, - columns=COLUMNS_CDP, - label=LABEL_CDP, - host_label=HOST_LABEL_CDP, - ), - 'LLDP': Layer( - path=PATH_LLDP, - columns=COLUMNS_LLDP, - label=LABEL_LLDP, - host_label=HOST_LABEL_LLDP, - ), - 'L3v4': Layer( - path=PATH_L3v4, - columns='', - label=LABEL_L3v4, - host_label=HOST_LABEL_L3V4_ROUTER, - ), -} - - def get_local_cmk_version() -> str: return Path(f'{OMD_ROOT}/version').readlink().name @@ -133,7 +86,7 @@ def get_data_from_toml(file: str) -> Dict: msg=f'ERROR: data file {toml_file} is not in valid TOML format! 
({e}),' f' (see https://toml.io/en/)' ) - sys_exit(ExitCodes.BAD_TOML_FORMAT.value) + sys_exit(ExitCodes.BAD_TOML_FORMAT) else: LOGGER.error(msg=f'WARNING: User data {file} not found.') @@ -414,7 +367,6 @@ def get_table_from_inventory(inventory: Dict[str, object], raw_path: str) -> Lis return None for m in path: try: - # print(raw_table[m]) table = table[m] except KeyError: LOGGER.info(msg=f'Inventory table for {path} not found') diff --git a/source/bin/nvdct/nvdct.py b/source/bin/nvdct/nvdct.py index 8966b104060df4a5b6e7cbc0ba359fd788eb8fd3..ffd846826268393d412350be70890c5955070d1f 100755 --- a/source/bin/nvdct/nvdct.py +++ b/source/bin/nvdct/nvdct.py @@ -82,7 +82,7 @@ # added duplex mismatch ('yellow') # added native vlan mismatch ('orange') # 2024-03-25: added --pre-fetch, this will speed up the RESTAPI backend performance -# min inv_cdp_cahe version: 0.7.1-20240320 -> host label nvdct/has_cdp_neighbours +# min inv_cdp_cache version: 0.7.1-20240320 -> host label nvdct/has_cdp_neighbours # min inv_lldp_cache version: 0.9.3-20240320 -> host label nvdct/has_lldp_neighbours # 2024-03-27: added option --api-port, defaults to 80 # 2024-03-28: changed restapi get_interface_data to use one call to fetch all data @@ -107,38 +107,37 @@ # added IP-Address/IP-Network as quickinfo # 2024-05-18: fixed crash non empty neighbour port (ThX to andreas doehler) # 2024-06-05: fixed interface index padding -# 2024-06-06: added interfcae detection alias+index +# 2024-06-06: added interface detection alias+index # added interface detection description + index -# 2024-06-09: moved topologiy helpers to lib/topologies.py +# 2024-06-09: moved topology helpers to lib/topologies.py # moved (default) config file(s) to ./conf/ # 2024-06-14: added debug code for bad IPv4 address data # 2024-06-17: fixed bad IPv4 address data (just drop it) -# 2024-09-23: incompatible replaced options --lowercase/--uppercase with --case LOWER|UPPER +# 2024-09-23: INCOMPATIBLE: replaced options --lowercase/--uppercase with --case LOWER|UPPER # changed version output from settings to argparse action -# incompatible removed backend FILESYSTEM -> will fallback to MULTISITE -# incompatible -# removed support for CMK2.2.x file format (removed option --new-format) +# INCOMPATIBLE: removed backend FILESYSTEM -> will fallback to MULTISITE +# INCOMPATIBLE: removed support for CMK2.2.x file format (removed option --new-format) # 2024-09-24: added site filter for multisite deployments (MULTISITE only), option --filter-sites # and SITES section in toml file # added customer filter for MSP deployments (MULTISITE only), option --filter-customers # and section CUSTOMERS in toml file # 2024-11-16: added better logging for missing L2 data # 2024-11-17: added L2_NEIGHBOUR_REPLACE_REGEX (ThX to Frankb@checkmk forum for the base idea) -# incompatible removed DROP_HOST_REGEX -> use L2_NEIGHBOUR_REPLACE_REGEX instead -# incompatible changed section names in TOML file to better distinguish between L2 and L3v4 +# INCOMPATIBLE: removed DROP_HOST_REGEX -> use L2_NEIGHBOUR_REPLACE_REGEX instead +# INCOMPATIBLE: changed section names in TOML file to better distinguish between L2 and L3v4 # HOST_MAP -> L2_HOST_MAP # DROP_HOSTS -> L2_DROP_HOSTS # SEED_DEVICES -> L2_SEED_DEVICES -# incompatible removed option -s, --seed-devices from CLI -> use TOML section L2_SEED_DEVICES instead -# incompatible changed the option keep-domain to remove-domain -> don't mess with neighbor names by default -# 2024-12-08: incompatible: changed hostlabel for L3v4 topology to 
nvdct/l3v4_topology
+#             INCOMPATIBLE: removed option -s, --seed-devices from CLI -> use TOML section L2_SEED_DEVICES instead
+#             INCOMPATIBLE: changed the option keep-domain to remove-domain -> don't mess with neighbor names by default
+# 2024-12-08: INCOMPATIBLE: changed hostlabel for L3v4 topology to nvdct/l3v4_topology
 #             needs at least inv_ip_address inv_ip_address-0.0.5-20241209.mkp
 # 2024-12-09: added option --include-l3-hosts
 #             added site filter for RESTAPI backend
 #             enabled customer filter for MULTISITE backend
 # 2024-12-10: refactoring: moved topology code to topologies, removed all global variables, created main() function
-# 2024-12-11: incompatible: changed default layers to None -> use the CLI option -l CDP or the configfile instead
-#             incompatible: reworked static topology -> can now be used for each service, host/service name has to be
+# 2024-12-11: INCOMPATIBLE: changed default layers to None -> use the CLI option -l CDP or the configfile instead
+#             INCOMPATIBLE: reworked static topology -> can now be used for each service, host/service name has to be
 #             exactly like in CMK. See ~/local/bin/nvdct/conf/nfdct.toml
 #             moved string constants to lib/constants.py
 #
@@ -146,6 +145,18 @@
 # - inv_lnx_if_ip-0.0.4-20241210.mkp
 # - inv_ip_address-0.0.6-20241210.mkp
 # - inv_win_if_ip-0.0.3-20241210.mkp
+# 2024-12-20: fixed typo in TOML L3v4 -> "L3v4" (ThX to BH2005@checkmk_forum)
+#             fixed crash in topologies (devices_to_go.remove(device) ValueError if device not in list) (ThX to BH2005)
+# 2024-12-23: streamlined L3v4 in preparation for L3v6 topology
+#             INCOMPATIBLE: changes in TOML:
+#             L3V4_IGNORE_HOSTS -> L3_IGNORE_HOSTS
+#             L3V4_IGNORE_IP -> L3_IGNORE_IP
+#             L3V4_SUMMARIZE -> L3_SUMMARIZE
+#             L3V4_REPLACE -> L3_REPLACE
+#             L3V4_IRNORE_WILDCARD -> L3V4_IGNORE_WILDCARD # Typo
+#             [EMBLEMS]
+#             l3v4_replace -> l3_replace
+#             l3v4_summarize -> l3_summarize
 
 # creating topology data json from inventory data
 #
@@ -243,7 +254,6 @@ __data = {
 """
 
 import sys
-from logging import DEBUG
 from time import strftime, time_ns
 from typing import List
 
@@ -255,37 +265,33 @@ from lib.backends import (
     HostCacheRestApi,
 )
 from lib.constants import (
+    DATAPATH,
     HOME_URL,
+    IPVersion,
+    LABEL_L3v4,
+    LAYERS,
     LOGGER,
+    Layer,
     NVDCT_VERSION,
-    DATAPATH,
 )
 from lib.settings import Settings
 from lib.topologies import (
-    NvConnections,
-    NvObjects,
-    create_l2_topology,
-    create_l3v4_topology,
-    create_static_connections,
+    TopologyL2,
+    TopologyL3,
+    TopologyStatic,
 )
 from lib.utils import (
     ExitCodes,
     InventoryColumns,
-    LAYERS,
-    Layer,
     StdoutQuiet,
     configure_logger,
     remove_old_data,
-    save_data_to_file,
 )
 
 
 def main():
     start_time = time_ns()
 
-    nv_connections = NvConnections()
-    nv_objects = NvObjects()
-
     settings: Settings = Settings(vars(parse_arguments()))
 
     sys.stdout = StdoutQuiet(quiet=settings.quiet)
@@ -329,36 +335,38 @@ def main():
         case _:
             LOGGER.error(msg=f'Backend {settings.backend} not (yet) implemented')
            host_cache: HostCache | None = None  # to keep linter happy
-            sys.exit(ExitCodes.BACKEND_NOT_IMPLEMENTED.value)
+            sys.exit(ExitCodes.BACKEND_NOT_IMPLEMENTED)
 
     jobs: List[Layer] = []
     pre_fetch_layers: List[str] = []
     pre_fetch_host_list: List[str] = []
 
     for layer in settings.layers:
-        if layer == 'STATIC':
-            jobs.append(layer)
-        if layer == 'L3v4':
-            jobs.append(layer)
-            host_cache.add_inventory_prefetch_path(path=LAYERS[layer].path)
-            pre_fetch_layers.append(LAYERS[layer].host_label)
-
-        elif layer in LAYERS:
-            jobs.append(LAYERS[layer])
-            host_cache.add_inventory_prefetch_path(path=LAYERS[layer].path)
-            
pre_fetch_layers.append(LAYERS[layer].host_label) - - elif layer == 'CUSTOM': - for entry in settings.custom_layers: - jobs.append(entry) - host_cache.add_inventory_prefetch_path(entry.path) + match layer: + case 'STATIC': + jobs.append(layer) + case 'L3v4': + jobs.append(layer) + host_cache.add_inventory_path(path=LAYERS[layer].path) + pre_fetch_layers.append(LAYERS[layer].host_label) + case 'CUSTOM': + for entry in settings.custom_layers: + jobs.append(entry) + host_cache.add_inventory_path(entry.path) + case 'CDP' | 'LLDP': + jobs.append(LAYERS[layer]) + host_cache.add_inventory_path(path=LAYERS[layer].path) + pre_fetch_layers.append(LAYERS[layer].host_label) + case _: + LOGGER.warning(f'Unknown layer {layer} dropped.') + continue if not jobs: message = ('No layer to work on. Please configura at least one layer (i.e. CLI option "-l CDP")\n' 'See ~/local/bin/nvdct/conf/nvdct.toml -> SETTINGS -> layers') LOGGER.warning(message) print(message) - sys.exit(ExitCodes.NO_LAYER_CONFIGURED.value) + sys.exit(ExitCodes.NO_LAYER_CONFIGURED) if settings.pre_fetch: LOGGER.info('Pre fill cache...') @@ -367,95 +375,81 @@ def main(): pre_fetch_host_list = list(set(pre_fetch_host_list + _host_list)) LOGGER.info(f'Fetching data for {len(pre_fetch_host_list)} hosts start') print(f'Prefetch start: {strftime(settings.time_format)}') - print(f'Prefetch hosts: {len(pre_fetch_host_list)} of {len(host_cache.cache.keys())}') - host_cache.pre_fetch_cache(pre_fetch_host_list) + print(f'Prefetch hosts: {len(pre_fetch_host_list)} of {len(host_cache.cache)}') + host_cache.fill_cache(pre_fetch_host_list) LOGGER.info(f'Fetching data for {len(pre_fetch_host_list)} hosts end') print(f'Prefetch end..: {strftime(settings.time_format)}') for job in jobs: match job: case 'STATIC': - label = 'STATIC' - create_static_connections( - connections_=settings.static_connections, + label = job + topology = TopologyStatic( + connections=settings.static_connections, emblems=settings.emblems, host_cache=host_cache, - nv_objects=nv_objects, - nv_connections=nv_connections, ) + topology.create() + case 'L3v4': - label = 'L3v4' - create_l3v4_topology( - ignore_hosts=settings.l3v4_ignore_hosts, - ignore_ips=settings.l3v4_ignore_ips, + label = job + topology = TopologyL3( + emblems=settings.emblems, + host_cache=host_cache, + ignore_hosts=settings.l3_ignore_hosts, + ignore_ips=settings.l3_ignore_ips, ignore_wildcard=settings.l3v4_ignore_wildcard, include_hosts=settings.include_l3_hosts, - replace=settings.l3v4_replace, + replace=settings.l3_replace, skip_if=settings.skip_l3_if, skip_ip=settings.skip_l3_ip, - summarize=settings.l3v4_summarize, - emblems=settings.emblems, - host_cache=host_cache, - nv_objects=nv_objects, - nv_connections=nv_connections, + summarize=settings.l3_summarize, + version=IPVersion.IPv4 if job == LABEL_L3v4 else IPVersion.IPv6 ) + topology.create() + case _: label = job.label.upper() columns = job.columns.split(',') - create_l2_topology( - seed_devices=settings.l2_seed_devices, - path_in_inventory=job.path, + topology = TopologyL2( + case=settings.case, + emblems=settings.emblems, + host_cache=host_cache, inv_columns=InventoryColumns( neighbour=columns[0], local_port=columns[1], neighbour_port=columns[2] ), - label_=label, l2_drop_hosts=settings.l2_drop_hosts, l2_host_map=settings.l2_host_map, l2_neighbour_replace_regex=settings.l2_neighbour_replace_regex, - host_cache=host_cache, - nv_objects=nv_objects, - nv_connections=nv_connections, - case=settings.case, + label=label, + path_in_inventory=job.path, 
prefix=settings.prefix, remove_domain=settings.remove_domain, + seed_devices=settings.l2_seed_devices, ) + topology.create() + - nv_connections.add_meta_data_to_connections( - nv_objects=nv_objects, + topology.nv_connections.add_meta_data_to_connections( + nv_objects=topology.nv_objects, speed_map=settings.map_speed_to_thickness, ) - _data = { - 'version': 1, - 'name': label, - 'objects': nv_objects.nv_objects if not settings.loglevel == DEBUG else dict( - sorted(nv_objects.nv_objects.items()) - ), - 'connections': nv_connections.nv_connections if not settings.loglevel == DEBUG else sorted( - nv_connections.nv_connections - ) - } - save_data_to_file( - data=_data, - path=( - f'{DATAPATH}/{settings.output_directory}' - ), - file=f'data_{label}.json', - make_default=settings.default, + topology.save( + label=label, + output_directory=settings.output_directory, + make_default=settings.default ) message = ( - f'Layer {label:.<8s}: Devices/Objects/Connections added {nv_objects.host_count}/' - f'{len(nv_objects.nv_objects)}/{len(nv_connections.nv_connections)}' + f'Layer {label:.<8s}: Devices/Objects/Connections added {topology.nv_objects.host_count}/' + f'{len(topology.nv_objects.nv_objects)}/{len(topology.nv_connections.nv_connections)}' ) LOGGER.info(msg=message) print(message) - nv_objects = NvObjects() - nv_connections = NvConnections() - if settings.keep: remove_old_data( keep=settings.keep, diff --git a/source/packages/nvdct b/source/packages/nvdct index a41f73e6981757ab73a9344482db1285e9afbae0..329c316c4d67f1236001a95535f01b1a214b0213 100644 --- a/source/packages/nvdct +++ b/source/packages/nvdct @@ -47,7 +47,7 @@ 'htdocs/images/icons/location_80.png']}, 'name': 'nvdct', 'title': 'Network Visualization Data Creation Tool (NVDCT)', - 'version': '0.9.5-20241217', + 'version': '0.9.6-20241222', 'version.min_required': '2.3.0b1', 'version.packaged': 'cmk-mkp-tool 0.2.0', 'version.usable_until': '2.4.0p1'}
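
A minimal, self-contained sketch of the bit-wise check that is_ignore_wildcard() performs (illustrative only: matches_wildcard is a hypothetical helper, and the pre-computed mask/pattern are assumed to correspond to the int_wildcard/bit_pattern fields carried by the Wildcard entries used above):

    from ipaddress import IPv4Address

    def matches_wildcard(addr: str, pattern: str, wildcard: str) -> bool:
        # every 0-bit in the wildcard must be identical in addr and pattern,
        # every 1-bit in the wildcard is ignored
        mask = ~int(IPv4Address(wildcard)) & 0xFFFFFFFF
        return int(IPv4Address(addr)) & mask == int(IPv4Address(pattern)) & mask

    # example: 172.17.55.1 is matched by pattern 172.17.0.1 with wildcard 0.0.255.0
    assert matches_wildcard('172.17.55.1', '172.17.0.1', '0.0.255.0')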