diff --git a/README.md b/README.md
index f4a91797c992be185ee01b408002fd1ea8e05d1c..9ba3b1d709026ac3e3c057dfa157b897344a309e 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-[PACKAGE]: ../../raw/master/mkp/nvdct-0.9.6-20241222.mkp "nvdct-0.9.6-20241222.mkp"
+[PACKAGE]: ../../raw/master/mkp/nvdct-0.9.7-20241230.mkp "nvdct-0.9.7-20241230.mkp"
 # Network Visualization Data Creation Tool (NVDCT)
 
 This script creates the topology data file needed for the [Checkmk Exchange Network visualization](https://exchange.checkmk.com/p/network-visualization) plugin.\
diff --git a/mkp/nvdct-0.9.7-20241230.mkp b/mkp/nvdct-0.9.7-20241230.mkp
new file mode 100644
index 0000000000000000000000000000000000000000..6d8891a76db8f05493e079d6b60d689cc0533ed5
Binary files /dev/null and b/mkp/nvdct-0.9.7-20241230.mkp differ
diff --git a/source/bin/nvdct/conf/nvdct.toml b/source/bin/nvdct/conf/nvdct.toml
index e907197e6ce3c6c378211553d06c4ed0b19b3b3e..d31f64ef5324aaa0a7f4f30d3febc13f7684f8d7 100755
--- a/source/bin/nvdct/conf/nvdct.toml
+++ b/source/bin/nvdct/conf/nvdct.toml
@@ -12,7 +12,7 @@
 # contains the user data and settings for nvdct.py
 #
 
-# list of (additional to -s/--seed-devices) seed devices
+# list of CDP/LLDP seed devices (if empty, all CDP/LLDP devices will be used)
 # [0-9-a-zA-Z\.\_\-]{1,253} -> host
 L2_SEED_DEVICES = [
     # "CORE01",
@@ -20,8 +20,8 @@ L2_SEED_DEVICES = [
     # "LOCATION02",
 ]
 
-# drop neighbours with invalid names (only L2 Topologies (i.e. CDP, LLDP, CUSTOM)
-L2_DROP_HOSTS = [
+# drop CDP/LLDP neighbours with these names
+L2_DROP_NEIGHBOURS = [
     # "not advertised",
     #  "a nother invalid name",
 ]
@@ -53,7 +53,7 @@ L3V4_IGNORE_WILDCARD = [
     # ["172.17.128.3", "0.0.127.0"],  # ignore all IPs ending with 3 from 172.17.128.0/17
 ]
 
-# networks to summarize
+# IP _networks_ to summarize
 L3_SUMMARIZE = [
    # "10.193.172.0/24",
    # "10.194.8.0/23",
@@ -82,17 +82,7 @@ STATIC_CONNECTIONS = [
     # connection: "left_host"<->"right_host"
 ]
 
-# THIS OPTION IS DEPRECATED
-# optional custom layers use option -l/--layers CUSTOM to include these layers
-# don't use --pre-fetch without a host_label that matches all host you want to add
-# THIS OPTION IS DEPRECATED
-CUSTOM_LAYERS = [
-#    { path = "path,in,inventory", columns = "columns from inventory", label = "label for the layer", host_label = "CMK host label to find matching hosts" },
-#    { path = "networking,lldp_cache,neighbours", columns = "neighbour_name,local_port,neighbour_port", label = "custom_LLDP", host_label = "nvdct/has_lldp_neighbours" },
-#    { path = "networking,cdp_cache,neighbours", columns = "neighbour_name,local_port,neighbour_port", label = "custom_CDP", host_label = "nvdct/has_cdp_neighbours" },
-]
-
-# list customers to include/excluse, use option --filter-costumers INCLUDE/EXCLUDE
+# list of customers to include/exclude, use with option --filter-customers INCLUDE/EXCLUDE
 # [0-9-a-zA-Z\.\_\-]{1,16} -> customer
 CUSTOMERS = [
     # "customer1",
@@ -100,7 +90,7 @@ CUSTOMERS = [
     # "customer3",
 ]
 
-# list site to include/excluse, use option --filter-sites INCLUDE/EXCLUDE
+# list of sites to include/exclude, use with option --filter-sites INCLUDE/EXCLUDE
 # [0-9-a-zA-Z\.\_\-]{1,16} -> site
 SITES = [
     # "site1",
@@ -108,20 +98,21 @@ SITES = [
     # "site3",
 ]
 
-# map inventory neighbour name to Checkmk host name
+# map inventory CDP/LLDP neighbour name to Checkmk host name
 # [0-9-a-zA-Z\.\_\-]{1,253} -> host
 [L2_HOST_MAP]
-# inventory_neighbour1 = "cmk_host1"
-# inventory_neighbour2 = "cmk_host2"
-# inventory_neighbour3 = "cmk_host3"
+# "inventory_neighbour1" = "cmk_host1"
+# "inventory_neighbour2" = "cmk_host2"
+# "inventory_neighbour3" = "cmk_host3"
 
+# modify CDP/LLDP neighbour names with regex before mapping them to CMK host names
 [L2_NEIGHBOUR_REPLACE_REGEX]
 # "regex string to replace" = "string to replace with"
 # "^(([0-9a-fA-F]){2}[:.-]?){5}([0-9a-fA-F]){2}$" = ""
 # "\\([0-9a-zA-Z]+\\)$" = ""
 # "^Meraki.*\\s-\\s" = ""
 
-# replace network objects (takes place after summarize)
+# replace _network objects_ in L3 topologies (takes place after summarize)
 # [0-9-a-zA-Z\.\_\-]{1,253} -> host
 [L3_REPLACE]
 # "10.193.172.0/24" = "MPLS"
@@ -134,17 +125,18 @@ SITES = [
 # can use misc icons from CMK or upload your own in the misc category
 # for built-in icons use "icon_" as prefix to the name from CMK
 # max size 80x80px
-# "host_node" = "icon_missinc"
+# emblems will only be used for non-CMK objects
+# "host_node" = "icon_alert_unreach"
 # "ip_address" = "ip-address_80"
 # "ip_network" = "ip-network_80"
 # "l3_replace" = "icon_plugins_cloud"
 # "l3_summarize" = "icon_aggr"
-# "service_node" = "icon_missing"
+# "service_node" = "icon_alert_unreach"
 
 [MAP_SPEED_TO_THICKNESS]
 # must be sorted from slower to faster speed
-# use only one entry to have all conections with the same thickness
-# bits per second = thickness
+# use only one entry (or none) to give all connections the same thickness
+# "bits per second" = thickness
 # "2000000" = 1  # 2 mbit
 # "5000000" = 2  # 5 mbit
 # "1e7" = 3      # 10 mbit
@@ -158,10 +150,12 @@ SITES = [
 # backend = "MULTISITE" | "RESTAPI" | "LIVESTATUS"
 # case = "LOWER" | "UPPER"
 # default = false
+# display_l2_neighbours = false
 # dont_compare = false
 # filter_customers = "INCLUDE" |"EXCLUDE"
 # filter_sites = "INCLUDE" | "EXCLUDE"
 # include_l3_hosts = false
+# include_l3_loopback = false  # most likely dropped from inventory (SNMP) before
 keep = 10
 # layers = ["LLDP", "CDP", "L3v4", "STATIC", "CUSTOM"]
 # log_file = "~/var/log/nvdct.log"
@@ -173,6 +167,9 @@ output_directory = 'nvdct'  # remove to get date formated directory
 # prefix = ""
 # quiet = true
 # remove_domain = false
+# skip_l3_cidr_0 = false
+# skip_l3_cidr_32_128 = false
 # skip_l3_if = false
 # skip_l3_ip = false
+# skip_l3_public = false
 # time_format = "%Y-%m-%dT%H:%M:%S.%m"
diff --git a/source/bin/nvdct/lib/args.py b/source/bin/nvdct/lib/args.py
index 5ad06bd4dd8d1ae0953c9f1994722490f491cecf..1d1be87c70b9d3d706106a3ca7d1d9dbb93349a8 100755
--- a/source/bin/nvdct/lib/args.py
+++ b/source/bin/nvdct/lib/args.py
@@ -11,18 +11,19 @@
 # options used
 # -b --backend
 # -d --default
-# -l --layer
+# -l --layers
 # -o --output-directory
 # -p --prefix
-# # -s --seed-devices
 # -u --user-data-file
 # -v --version
 # --api-port (deprecated ?)
 # --case
 # --check-user-data-only
+# --display-l2-neighbours
 # --dont-compare
 # --filter-customers
 # --filter-sites
+# --adjust-toml
 # --include-l3-hosts
 # --keep
 # --log-file
@@ -45,16 +46,19 @@ from argparse import (
 from pathlib import Path
 
 from lib.constants import (
+    Backends,
+    Case,
+    CliLong,
     ExitCodes,
-    HOME_URL,
-    MIN_CDP_VERSION,
-    MIN_LINUX_IP_ADDRESSES,
-    MIN_LLDP_VERSION,
-    MIN_SNMP_IP_ADDRESSES,
-    MIN_WINDOWS_IP_ADDRESSES,
+    IncludeExclude,
+    Layers,
+    LogLevels,
+    MinVersions,
     NVDCT_VERSION,
     SCRIPT,
     TIME_FORMAT_ARGPARSER,
+    TomlSections,
+    URLs,
     USER_DATA_FILE,
 )
 
@@ -63,20 +67,15 @@ def parse_arguments() -> arg_Namespace:
     parser = ArgumentParser(
         prog='nvdct.py',
         description=(
-            'This script creates the topology data file needed for the Checkmk "network_visualization"\n'  # noqa: E501
-            'plugin by Andreas Boesl and schnetz. For more information see\n'
-            'the announcement from schnetz: '
-            'https://forum.checkmk.com/t/network-visualization/41680\n'
-            'and the plugin on the Exchange: '
-            'https://exchange.checkmk.com/p/network-visualization .\n'
+            'This script creates the topology data file needed for the Checkmk Network Visualization.\n'  # noqa: E501
+            'For more information see the announcement from schnetz in the Checkmk forum:\n'
+            f'{URLs.FORUM_SCHNETZ}\n'
             '\n'
-            'The required inventory data can be created with my inventory plugins:\n'
-            'CDP: https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_cdp_cache\n'  # noqa: E501
-            'LLDP: https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_lldp_cache\n'  # noqa: E501
-            'L3v4: https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_ip_address\n'  # noqa: E501
+            'The required plugins to create the inventory data can be found here:\n'
+            f'{URLs.TOPIC_NV}\n'
             '\n'
             f'\nVersion: {NVDCT_VERSION} | Written by: thl-cmk\n'
-            f'for more information see: {HOME_URL}'
+            f'for more information see: {URLs.NVDCT}'
         ),
         formatter_class=RawTextHelpFormatter,
         epilog='Exit codes:\n'
@@ -91,156 +90,180 @@ def parse_arguments() -> arg_Namespace:
     )
 
     parser.add_argument(
-        '-b', '--backend',
-        choices=['LIVESTATUS', 'MULTISITE', 'RESTAPI'],
+        '-b', CliLong.BACKEND,
+        choices=[Backends.LIVESTATUS, Backends.MULTISITE, Backends.RESTAPI],
         # default='MULTISITE',
         help='Backend used to retrieve the topology data\n'
-             ' - LIVESTATUS : fetches data via local Livestatus (local site only)\n'
-             ' - MULTISITE  : like LIVESTATUS but for distributed environments (default)\n'
-             ' - RESTAPI    : uses the CMK REST API.',
+             f' - {Backends.LIVESTATUS} : fetches data via local Livestatus (local site only)\n'
+             f' - {Backends.MULTISITE}  : like LIVESTATUS but for distributed environments (default)\n'
+             f' - {Backends.RESTAPI}    : uses the CMK REST API.',
     )
     parser.add_argument(
-        '-d', '--default', action='store_const', const=True,  # default=False,
+        '-d', CliLong.DEFAULT, action='store_const', const=True,  # default=False,
         help='Set the created topology data as default. Will be created automatically\n'
-             'if it doesn\'t exists.',
+             'if it doesn\'t exist.',
     )
     parser.add_argument(
-        '-o', '--output-directory', type=str,
+        '-o', CliLong.OUTPUT_DIRECTORY, type=str,
         help='Directory name where to save the topology data.\n'
              'I.e.: my_topology. Default is the actual date/time\n'
-             'in "--time-format" format.\n'
+             f'in "{CliLong.TIME_FORMAT}" format.\n'
              'NOTE: the directory is a sub directory under "~/var/check_mk/topology/data/"\n',
     )
-    # parser.add_argument(
-    #     '-s', '--seed-devices', type=str, nargs='+',
-    #     help=f'List of devices to start the topology discovery from.\n'
-    #          f'I.e. {SAMPLE_SEEDS}',
-    # )
     parser.add_argument(
-        '-p', '--prefix', type=str,
-        help='Prepends each host with the prefix. (Needs testing)\n'
+        '-p', CliLong.PREFIX, type=str,
+        help='Prepends each host with the prefix. (Needs more testing)\n'
     )
     parser.add_argument(
-        '-l', '--layers',
+        '-l', CliLong.LAYERS,
         nargs='+',
         choices=[
-            'CDP',
-            'CUSTOM',
-            'L3v4',
-            'LLDP',
-            'STATIC',
+            Layers.CDP,
+            Layers.LLDP,
+            Layers.L3V4,
+            Layers.STATIC,
         ],
         # default=['CDP'],
         help=(
-            f' - CDP      : needs inv_cdp_cache package at least in version {MIN_CDP_VERSION}\n'
-            f' - LLDP     : needs inv_lldp_cache package at least in version {MIN_LLDP_VERSION}\n'
-            f' - L3v4     : needs inv_ip_address package at least in version {MIN_SNMP_IP_ADDRESSES} for SNMP based hosts\n'
-            f'              for Linux based hosts inv_lnx_ip_if in version {MIN_LINUX_IP_ADDRESSES}\n'
-            f'              for Windows based hosts inv_win_ip_if in version {MIN_WINDOWS_IP_ADDRESSES}\n'
-            f' - STATIC   : creates a topology base on the "STATIC_CONNECTIONS" in the toml file\n'
-            f' - CUSTOM   : (deprecated)\n'
+            f' - {Layers.CDP}      : needs inv_cdp_cache package at least in version {MinVersions.CDP}\n'
+            f' - {Layers.LLDP}     : needs inv_lldp_cache package at least in version {MinVersions.LLDP}\n'
+            f' - {Layers.L3V4}     : needs inv_ip_address package at least in version {MinVersions.SNMP_IP_ADDRESSES} for SNMP based hosts\n'
+            f'              for Linux based hosts inv_lnx_ip_if in version {MinVersions.LINUX_IP_ADDRESSES}\n'
+            f'              for Windows based hosts inv_win_ip_if in version {MinVersions.WINDOWS_IP_ADDRESSES}\n'
+            f' - {Layers.STATIC}   : creates a topology based on the "[{TomlSections.STATIC_CONNECTIONS}]" section in the TOML file\n'
         )
     )
     parser.add_argument(
-        '-u', '--user-data-file', type=str,
-        help='Set the name uf the user provided data file\n'
+        '-u', CliLong.USER_DATA_FILE, type=str,
+        help='Set the name of the user provided data file\n'
              f'Default is ~/local/bin/nvdct/conf/{USER_DATA_FILE}\n',
     )
     parser.add_argument(
-        '-v', '--version', action='version',
+        '-v', CliLong.VERSION, action='version',
         version=f'{Path(SCRIPT).name} version: {NVDCT_VERSION}',
         help='Print version of this script and exit',
     )
     parser.add_argument(
-        '--api-port', type=int,  # default=False,
-        help='TCP Port to access the REST API. Default is 80. NVDCT will try to automatically\n'
+        CliLong.ADJUST_TOML, action='store_const', const=True,  # default=False,
+        help='Adjusts old options in the TOML file.',
+    )
+    parser.add_argument(
+        CliLong.API_PORT, type=int,  # default=False,
+        help='TCP Port to access the REST API. By default, NVDCT will try to automatically\n'
              'detect the site apache port.',
     )
     parser.add_argument(
-        '--case',
-        choices=['LOWER', 'UPPER'],
+        CliLong.CASE,
+        choices=[Case.LOWER, Case.UPPER],
         # default='NONE',
-        help='Change neighbour name to all lower/upper case',
+        help='Change L2 neighbour name to all lower/upper case before matching to CMK host',
     )
     parser.add_argument(
-        '--check-user-data-only', action='store_const', const=True,  # default=False,
+        CliLong.CHECK_USER_DATA_ONLY, action='store_const', const=True,  # default=False,
         help=f'Only tries to read/parse the user data from {USER_DATA_FILE} and exits.',
     )
     parser.add_argument(
-        '--log-file', type=str,
-        help='Set the log file. Default is ~/var/log/nvdct.log\n',
+        CliLong.LOG_FILE, type=str,
+        help='Set the log file. Default is "~/var/log/nvdct.log"\n',
     )
     parser.add_argument(
-        '--log-level',
+        CliLong.LOG_LEVEL,
         # nargs='+',
-        choices=['CRITICAL', 'FATAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'OFF'],
+        choices=[
+            LogLevels.CRITICAL,
+            LogLevels.FATAL,
+            LogLevels.ERROR,
+            LogLevels.WARNING,
+            LogLevels.INFO,
+            LogLevels.DEBUG,
+            LogLevels.OFF
+        ],
         # default='WARNING',
-        help='Sets the log level. The default is "WARNING"\n'
+        help=f'Sets the log level. The default is "{LogLevels.WARNING}"\n'
     )
     parser.add_argument(
-        '--log-to-stdout', action='store_const', const=True,  # default=False,
+        CliLong.LOG_TO_STDOUT, action='store_const', const=True,  # default=False,
         help='Send log to stdout.',
     )
     parser.add_argument(
-        '--dont-compare', action='store_const', const=True,  # default=False,
+        CliLong.DISPLAY_L2_NEIGHBOURS, action='store_const', const=True,  # default=False,
+        help='Use L2 neighbour name as display name in L2 topologies',
+    )
+    parser.add_argument(
+        CliLong.DONT_COMPARE, action='store_const', const=True,  # default=False,
         help='Do not compare the actual topology data with the default topology\n'
              'data. By default, the actual topology is compared with the default\n'
              'topology. If the data matches, the actual topology is not saved.\n'
              'So, if you run this tool in a cron job, a new topology will be\n'
-             'created only if there was a change, unless you use "--dont-compare".'
+             f'created only if there was a change, unless you use "{CliLong.DONT_COMPARE}".'
     )
     parser.add_argument(
-        '--filter-customers',
-        choices=['INCLUDE', 'EXCLUDE'],
+        CliLong.FILTER_CUSTOMERS,
+        choices=[IncludeExclude.INCLUDE, IncludeExclude.EXCLUDE],
         # default='INCLUDE',
-        help='INCLUDE/EXCLUDE customer list from config file.\n'
-             'Note: MULTISITE backend only.',
+        help=f'{IncludeExclude.INCLUDE}/{IncludeExclude.EXCLUDE} customer list "[{TomlSections.CUSTOMERS}]" from TOML file.\n'
+             f'Note: {Backends.MULTISITE} backend only.',
     )
     parser.add_argument(
-        '--filter-sites',
-        choices=['INCLUDE', 'EXCLUDE'],
+        CliLong.FILTER_SITES,
+        choices=[IncludeExclude.INCLUDE, IncludeExclude.EXCLUDE],
         # default='INCLUDE',
-        help='INCLUDE/EXCLUDE site list from config file.\n'
-             'Note: MULTISITE backend only.',
+        help=f'{IncludeExclude.INCLUDE}/{IncludeExclude.EXCLUDE} site list "[{TomlSections.SITES}]" from TOML file.\n'
+    )
+    parser.add_argument(
+        CliLong.INCLUDE_L3_HOSTS, action='store_const', const=True,  # default=False,
+        help='Include hosts (single IP objects) in layer 3 topologies',
     )
     parser.add_argument(
-        '--include-l3-hosts', action='store_const', const=True,  # default=False,
-        help='Include hosts (single IP objects) in layer 3 topology',
+        CliLong.INCLUDE_L3_LOOPBACK, action='store_const', const=True,  # default=False,
+        help='Include loopback IP addresses in layer 3 topologies',
     )
     parser.add_argument(
-        '--remove-domain', action='store_const', const=True,  # default=False,
-        help='Remove the domain name from the neighbor name',
+        CliLong.REMOVE_DOMAIN, action='store_const', const=True,  # default=False,
+        help='Remove the domain name from the L2 neighbour name before matching to the CMK host.',
     )
     parser.add_argument(
-        '--keep', type=int,
+        CliLong.KEEP, type=int,
         help='Number of topologies to keep. The oldest topologies above keep\n'
-             'max will be deleted.\n'
-             'NOTE: The default topologies will be always kept.\n'
+             'will be deleted.\n'
+             'NOTE: The default/protected topologies will always be kept.\n'
     )
     parser.add_argument(
-        '--min-age', type=int,
-        help='The minimum number of days before a topology is deleted by "--keep".'
+        CliLong.MIN_AGE, type=int,
+        help=f'The minimum number of days before a topology is deleted by "{CliLong.KEEP}"'
     )
     parser.add_argument(
-        '--pre-fetch', action='store_const', const=True,  # default=False,
-        help='Try to fetch host data, with less API calls. Can improve RESTAPI backend\n'
+        CliLong.PRE_FETCH, action='store_const', const=True,  # default=False,
+        help=f'Try to fetch host data with fewer API calls. Can improve {Backends.RESTAPI} backend\n'
              'performance',
     )
     parser.add_argument(
-        '--quiet', action='store_const', const=True,  # default=False,
-        help='Suppress output to stdtout',
+        CliLong.QUIET, action='store_const', const=True,  # default=False,
+        help='Suppress all output to stdout',
+    )
+    parser.add_argument(
+        CliLong.SKIP_L3_CIDR_0, action='store_const', const=True,  # default=False,
+        help='Skip ip-address with CIDR "/0" in layer 3 topologies',
+    )
+    parser.add_argument(
+        CliLong.SKIP_L3_CIDR_32_128, action='store_const', const=True,  # default=False,
+        help='Skip ip-address with CIDR "/32" or "/128" in layer 3 topologies',
+    )
+    parser.add_argument(
+        CliLong.SKIP_L3_IF, action='store_const', const=True,  # default=False,
+        help='Don\'t show interfaces in layer 3 topologies',
     )
     parser.add_argument(
-        '--skip-l3-if', action='store_const', const=True,  # default=False,
-        help='Skip interface in layer 3 topology',
+        CliLong.SKIP_L3_IP, action='store_const', const=True,  # default=False,
+        help='Don\'t show IP addresses in layer 3 topologies',
     )
     parser.add_argument(
-        '--skip-l3-ip', action='store_const', const=True,  # default=False,
-        help='Skip ip-address in layer 3 topology',
+        CliLong.SKIP_L3_PUBLIC, action='store_const', const=True,  # default=False,
+        help='Skip public IP addresses in layer 3 topologies',
     )
     parser.add_argument(
-        '--time-format', type=str,
-        help=f'Format string to render the time. (default: {TIME_FORMAT_ARGPARSER})',
+        CliLong.TIME_FORMAT, type=str,
+        help=f'Format string to render the time. (default: "{TIME_FORMAT_ARGPARSER}")',
     )
 
     return parser.parse_args()
diff --git a/source/bin/nvdct/lib/backends.py b/source/bin/nvdct/lib/backends.py
index 78a7d4bdbad83e006c5eea5bf18cff7b89217bf4..80adf5a63295c9b824dd841482ea1be0c247db03 100755
--- a/source/bin/nvdct/lib/backends.py
+++ b/source/bin/nvdct/lib/backends.py
@@ -16,19 +16,23 @@
 from abc import abstractmethod
 from ast import literal_eval
 from collections.abc import Mapping, MutableSequence, Sequence
-from enum import Enum, unique
 from pathlib import Path
 from requests import session
 from sys import exit as sys_exit
-from typing import Dict, List, Tuple
+from typing import Dict, List, MutableMapping, Tuple
 
 from livestatus import MultiSiteConnection, SiteConfigurations, SiteId
 
 from lib.constants import (
+    Backends,
     CACHE_INTERFACES_DATA,
+    CacheItems,
+    Case,
     ExitCodes,
+    IncludeExclude,
+    InvPaths,
     OMD_ROOT,
-    PATH_INTERFACES,
+    TomlSections,
 )
 from lib.utils import (
     LOGGER,
@@ -84,36 +88,45 @@ def hosts_to_query(hosts: List[str]) -> Tuple[str, List[str]]:
 
     return hosts_str, open_hosts
 
-
-@unique
-class CacheItems(Enum):
-    inventory = 'inventory'
-    interfaces = 'interfaces'
-
-    def __get__(self, instance, owner):
-        return self.value
-
 class HostCache:
     def __init__(
             self,
-            pre_fetch: bool,
             backend: str,
+            pre_fetch: bool,
     ):
         LOGGER.info('init HOST_CACHE')
 
         self.cache: Dict = {}
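+        # resolved L2 neighbour names -> CMK host name
+        # (None is stored when no matching host was found)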
+        self.neighbour_to_host: MutableMapping[str, str | None] = {}
         self._inventory_pre_fetch_list: List[str] = [
-            PATH_INTERFACES,
+            InvPaths.INTERFACES,
         ]
 
-        self.pre_fetch: bool = bool(pre_fetch)
         self.backend: str = str(backend)
+        self.case: str = ''
+        self.l2_host_map: Dict[str, str] = {}
+        self.l2_neighbour_replace_regex: List[Tuple[str, str]] = []
+        self.pre_fetch: bool = bool(pre_fetch)
+        self.prefix: str = ''
+        self.remove_domain: bool = False
 
         if self.pre_fetch:
             for host in self.query_all_hosts():
                 self.cache[host] = HOST_EXIST.copy()
 
-    def get_inventory_data(self, hosts: Sequence[str]) -> Dict[str, Dict]:
+    def init_neighbour_to_host(
+            self,
+            case: str,
+            l2_host_map: Dict[str, str],
+            prefix: str,
+            remove_domain: bool,
+    ):
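+        """
+        Stores the options used by get_host_from_neighbour() to match L2 neighbour
+        names to CMK host names (L2_HOST_MAP, case change, domain removal, prefix).
+        """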
+        self.case: str = case
+        self.l2_host_map: Dict[str, str] = l2_host_map
+        self.prefix: str = prefix
+        self.remove_domain: bool = remove_domain
+
+    def get_inventory_data(self, hosts: List[str]) -> Dict[str, Dict]:
         """
         Returns a dictionary of hosts and their inventory data.
         Args:
@@ -136,7 +149,7 @@ class HostCache:
 
         return inventory_data
 
-    def get_interface_data(self, hosts: Sequence[str]) -> Dict[str, Dict | None]:
+    def get_interface_data(self, hosts: List[str]) -> Dict[str, Dict | None]:
         """
         Returns a dictionary of hosts and their interface services from CMK.
         The interface information consists of the "Item", the "Description (summary)" and the service details
@@ -185,7 +198,7 @@ class HostCache:
         """
         return self.query_hosts_by_label(label)
 
-    def fill_cache(self, hosts: Sequence[str]) -> None:
+    def fill_cache(self, hosts: List[str]) -> None:
         """
         Gets the host data from CMK and puts them in the host cache. Data collected:
         - inventory
@@ -218,13 +231,13 @@ class HostCache:
                 self.cache[host][CacheItems.interfaces] = {}
             self.cache[host][CacheItems.interfaces][CACHE_INTERFACES_DATA] = interfaces
 
-    def get_data(self, host: str, item: CacheItems, path: str) -> Dict[str, any] | None:
+    def get_data(self, host: str, item: CacheItems, path: str) -> Mapping | Sequence | None:
         """
         Returns data from self.cache. If the cache for "host" is empty, data will be fetched from CMK
         Args:
             host: host to get data from cache
             item: item in cache (inventory/interface)
-            path: path in cache item
+            path: path in cache to data
 
         Returns:
             the requested data or None
@@ -243,6 +256,62 @@ class HostCache:
     def add_inventory_path(self, path: str) -> None:
         self._inventory_pre_fetch_list = list(set(self._inventory_pre_fetch_list + [path]))
 
+    def get_host_from_neighbour(self, neighbour: str) -> str | None:
+        """
+        Tries to get the CMK host name from an L2 neighbour name. The neighbour is
+        transformed in this order before the lookup in CMK:
+        - map the neighbour to a host via L2_HOST_MAP
+        - remove the domain name (--remove-domain)
+        - change the name to UPPER/lower case (--case)
+        - prepend the prefix (--prefix)
+        The result (also when no host is found) is cached per neighbour.
+        Args:
+            neighbour: the L2 neighbour name to find a CMK host for
+
+        Returns:
+            The CMK host name for the L2 neighbour or None if no host is found
+
+        """
+        try:
+            return self.neighbour_to_host[neighbour]
+        except KeyError:
+            pass
+
+        host = neighbour
+
+        # rewrite neighbour if inventory neighbour and checkmk host don't match
+        if host in self.l2_host_map:
+            host = self.l2_host_map[host]
+            LOGGER.info(f'Replace neighbour by [{TomlSections.L2_HOST_MAP}]: {neighbour} -> {host}')
+
+        if self.remove_domain:
+            LOGGER.debug(f'Remove domain: {host} -> {host.split(".")[0]}')
+            host = host.split('.')[0]
+
+        match self.case:
+            case Case.UPPER:
+                LOGGER.debug(f'Change neighbour to upper case: {host} -> {host.upper()}')
+                host = host.upper()
+
+            case Case.LOWER:
+                LOGGER.debug(f'Change neighbour to lower case: {host} -> {host.lower()}')
+                host = host.lower()
+            case _:
+                pass
+
+        if self.prefix:
+            LOGGER.debug(f'Prepend neighbour with prefix: {host} -> {self.prefix}{host}')
+            host = f'{self.prefix}{host}'
+
+        if self.host_exists(host):
+            self.neighbour_to_host[neighbour] = host
+            LOGGER.debug(f'Matched neighbour to host: |{neighbour}| -> |{host}|')
+            return host
+        else:
+            self.neighbour_to_host[neighbour] = None
+            LOGGER.debug(f'No match found for neighbour: |{neighbour}|')
+            return None
+
     @abstractmethod
     def query_host(self, host: str) -> bool:
         """
@@ -285,8 +354,16 @@ class HostCache:
         raise NotImplementedError
 
 class HostCacheLiveStatus(HostCache):
-    def __init__(self, pre_fetch: bool, backend: str = '[LIVESTATUS]'):
-        super().__init__(pre_fetch, backend)
+    def __init__(
+            self,
+            pre_fetch: bool,
+            backend: str = f'[{Backends.LIVESTATUS}]',
+    ):
+        self.backend = backend
+        super().__init__(
+            pre_fetch=pre_fetch,
+            backend=self.backend,
+        )
 
     def get_raw_data(self, query: str) -> any:
         return get_data_form_live_status(query=query)
@@ -294,9 +371,9 @@ class HostCacheLiveStatus(HostCache):
     def query_host(self, host: str) -> bool:
         query = (
             'GET hosts\n'
-            'Columns: host_name\n'
+            'Columns: name\n'
             'OutputFormat: python3\n'
-            f'Filter: host_name = {host}\n'
+            f'Filter: name = {host}\n'
         )
         data: Sequence[Sequence[str]] = self.get_raw_data(query=query)
         LOGGER.debug(f'{self.backend} data for host {host}: {data}')
@@ -309,7 +386,7 @@ class HostCacheLiveStatus(HostCache):
     def query_all_hosts(self) -> Sequence[str]:
         query = (
             'GET hosts\n'
-            'Columns: host_name\n'
+            'Columns: name\n'
             'OutputFormat: python3\n'
         )
         data: Sequence[Sequence[str]] = self.get_raw_data(query=query)
@@ -339,9 +416,9 @@ class HostCacheLiveStatus(HostCache):
     def query_inventory_data(self, hosts: str) -> Dict[str, Dict]:
         query = (
             'GET hosts\n'
-            'Columns: host_name mk_inventory\n'
+            'Columns: name mk_inventory\n'
             'OutputFormat: python3\n'
-            f'Filter: host_name ~~ {hosts}\n'
+            f'Filter: name ~~ {hosts}\n'
         )
         inventory_data = {}
         data: Sequence[Tuple[str, bytes]] = self.get_raw_data(query=query)
@@ -386,11 +463,13 @@ class HostCacheMultiSite(HostCacheLiveStatus):
         self,
         pre_fetch: bool,
         filter_sites: str | None = None,
-        sites: List[str] = [],
+        sites: List[str] | None = None,
         filter_customers: str | None = None,
         customers: List[str] = None,
     ):
-        self.backend = '[MULTISITE]'
+        if not sites:
+            sites = []
+        self.backend = f'[{Backends.MULTISITE}]'
         self.sites: SiteConfigurations = SiteConfigurations({})
         self.get_sites()
         self.filter_sites(filter_sites, sites)
@@ -402,9 +481,12 @@ class HostCacheMultiSite(HostCacheLiveStatus):
         self.dead_sites = [site['site']['alias'] for site in self.c.dead_sites().values()]
         if self.dead_sites:
             dead_sites = ', '.join(self.dead_sites)
-            LOGGER.warning(f'{self.backend} WARNING: use of dead site(s) {dead_sites} is disabled')
+            LOGGER.warning(f'{self.backend} use of dead site(s) {dead_sites} is disabled')
             self.c.set_only_sites(self.c.alive_sites())
-        super().__init__(pre_fetch, self.backend)
+        super().__init__(
+            pre_fetch=pre_fetch,
+            backend=self.backend,
+        )
 
     def get_raw_data(self, query: str) -> object:
         return self.c.query(query=query)
@@ -459,20 +541,20 @@ class HostCacheMultiSite(HostCacheLiveStatus):
 
     def filter_sites(self, filter_: str | None, sites: List[str]):
         match filter_:
-            case 'INCLUDE':
+            case IncludeExclude.INCLUDE:
                 self.sites = {site: data for site, data in self.sites.items() if site in sites}
-            case 'EXCLUDE':
+            case IncludeExclude.EXCLUDE:
                 self.sites = {site: data for site, data in self.sites.items() if site not in sites}
             case _:
                 return
 
     def filter_costumers(self, filter_: str | None, costumers: List[str]):
         match filter_:
-            case 'INCLUDE':
+            case IncludeExclude.INCLUDE:
                 self.sites = {
                     site: data for site, data in self.sites.items() if data.get('customer') in costumers
                 }
-            case 'EXCLUDE':
+            case IncludeExclude.EXCLUDE:
                 self.sites = {
                     site: data for site, data in self.sites.items() if data.get('customer') not in costumers
                 }
@@ -485,9 +567,11 @@ class HostCacheRestApi(HostCache):
             pre_fetch: bool,
             api_port: int,
             filter_sites: str | None = None,
-            sites: List[str] = [],
+            sites: List[str] | None = None,
     ):
-        self.backend = '[RESTAPI]'
+        if not sites:
+            sites = []
+        self.backend = f'[{Backends.RESTAPI}]'
         LOGGER.debug(f'{self.backend} init backend')
 
         try:
@@ -513,7 +597,10 @@ class HostCacheRestApi(HostCache):
         self.sites: MutableSequence[str]  = self.query_sites()
         self.filter_sites(filter_=filter_sites, sites=sites)
         LOGGER.info(f'{self.backend} filtered sites : {self.sites}')
-        super().__init__(pre_fetch, self.backend)
+        super().__init__(
+            pre_fetch=pre_fetch,
+            backend=self.backend,
+        )
 
     def get_raw_data(self, url: str, params: Mapping[str, object] | None):
         resp = self.__session.get(
@@ -541,9 +628,9 @@ class HostCacheRestApi(HostCache):
 
     def filter_sites(self, filter_: str | None, sites: List[str]):
         match filter_:
-            case 'INCLUDE':
+            case IncludeExclude.INCLUDE:
                 self.sites = [site for site in self.sites if site in sites]
-            case 'EXCLUDE':
+            case IncludeExclude.EXCLUDE:
                 self.sites = [site for site in self.sites if site not in sites]
             case _:
                 return
@@ -652,4 +739,4 @@ class HostCacheRestApi(HostCache):
                         'long_plugin_output': long_plugin_output.split('\\n')
                     }
 
-        return interface_data
\ No newline at end of file
+        return interface_data
diff --git a/source/bin/nvdct/lib/constants.py b/source/bin/nvdct/lib/constants.py
index 590cba11358c77cdc061e9f0bc76976868dcb784..4dd6c55b583d28827e6cff236ce345a562cff4ef 100755
--- a/source/bin/nvdct/lib/constants.py
+++ b/source/bin/nvdct/lib/constants.py
@@ -7,18 +7,35 @@
 # Date  : 2024-12-11
 # File  : nvdct/lib/constants.py
 
-
-from dataclasses import dataclass
 from enum import Enum, unique, auto
-from logging import getLogger
+from logging import Logger, getLogger
 from os import environ
 from typing import Final
 
 #
-NVDCT_VERSION: Final[str] = '0.9.6-20241222'
+NVDCT_VERSION: Final[str] = '0.9.7-20241230'
+#
+OMD_ROOT: Final[str] = environ["OMD_ROOT"]
 #
+API_PORT: Final[int] = 5001
+CACHE_INTERFACES_DATA: Final[str] = 'interface_data'
+CMK_SITE_CONF: Final[str] = f'{OMD_ROOT}/etc/omd/site.conf'
+LOGGER: Logger = getLogger('root')
+LOG_FILE: Final[str] = f'{OMD_ROOT}/var/log/nvdct.log'
+SCRIPT: Final[str] = '~/local/bin/nvdct/nvdct.py'
+TIME_FORMAT: Final[str] = '%Y-%m-%dT%H:%M:%S.%m'
+TIME_FORMAT_ARGPARSER: Final[str] = '%%Y-%%m-%%dT%%H:%%M:%%S.%%m'
+USER_DATA_FILE: Final[str] = 'nvdct.toml'
+DATAPATH: Final[str] = f'{OMD_ROOT}/var/check_mk/topology/data'
+
+
+class MyEnum(Enum):
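+    # Members double as descriptors: accessing a member on the class
+    # (e.g. Backends.MULTISITE) returns its plain value instead of the
+    # Enum member, so members can be used directly as str/int constants.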
+    def __get__(self, instance, owner):
+        return self.value
+
+
 @unique
-class ExitCodes(Enum):
+class ExitCodes(MyEnum):
     OK = 0
     BAD_OPTION_LIST = auto()
     BAD_TOML_FORMAT = auto()
@@ -26,104 +43,218 @@ class ExitCodes(Enum):
     AUTOMATION_SECRET_NOT_FOUND = auto()
     NO_LAYER_CONFIGURED = auto()
 
-    def __get__(self, instance, owner):
-        return self.value
 
 @unique
-class IPVersion(Enum):
+class IPVersion(MyEnum):
     IPv4 = 4
     IPv6 = 6
 
-    def __get__(self, instance, owner):
-        return self.value
 
-@dataclass(frozen=True)
-class Layer:
-    path: str
-    columns: str
-    label: str
-    host_label: str
+@unique
+class URLs(MyEnum):
+    NVDCT: Final[str] = 'https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/nvdct'
+    # CDP: Final[str] = 'https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_cdp_cache'
+    # LLDP: Final[str] = 'https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_lldp_cache'
+    # SNMP_IP_ADDRESS: Final[str] = 'https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_ip_address'
+    # LINUX_IP_ADDRESS: Final[str] = 'https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_lnx_if_ip'
+    # WINDOWS_IP_ADDRESS: Final[str] = 'https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/inventory/inv_win_if_ip'
+    TOPIC_NV: Final[str] = 'https://thl-cmk.hopto.org/gitlab/explore/projects/topics/Network%20Visualization'
+    FORUM_SCHNETZ: Final[str] = 'https://forum.checkmk.com/t/network-visualization/41680'
 
-#
-OMD_ROOT: Final[str] = environ["OMD_ROOT"]
-#
-API_PORT: Final[int] = 5001
-CACHE_INTERFACES_DATA: Final[str] = 'interface_data'
-CMK_SITE_CONF: Final[str] = f'{OMD_ROOT}/etc/omd/site.conf'
-COLUMNS_CDP: Final[str] = 'neighbour_name,local_port,neighbour_port'
-COLUMNS_L3: Final[str] = 'address,device,cidr,network,type'
-COLUMNS_LLDP: Final[str] = 'neighbour_name,local_port,neighbour_port'
-DATAPATH: Final[str] = f'{OMD_ROOT}/var/check_mk/topology/data'
-HOME_URL: Final[str] = 'https://thl-cmk.hopto.org/gitlab/checkmk/vendor-independent/nvdct'
-HOST_LABEL_CDP: Final[str] = "'nvdct/has_cdp_neighbours' 'yes'"
-HOST_LABEL_L3V4_HOSTS: Final[str] = "'nvdct/l3v4_topology' 'host'"
-HOST_LABEL_L3V4_ROUTER: Final[str] = "'nvdct/l3v4_topology' 'router'"
-HOST_LABEL_L3V6_HOSTS: Final[str] = "'nvdct/l3v6_topology' 'host'"
-HOST_LABEL_L3V6_ROUTER: Final[str] = "'nvdct/l3v6_topology' 'router'"
-HOST_LABEL_LLDP: Final[str] = "'nvdct/has_lldp_neighbours' 'yes'"
-LABEL_CDP: Final[str] = 'CDP'
-LABEL_L3v4: Final[str] = 'L3v4'
-LABEL_L3v6: Final[str] = 'L3v6'
-LABEL_LLDP: Final[str] = 'LLDP'
-LABEL_STATIC: Final[str] = 'STATIC'
-LOGGER: Final[str] = getLogger('root)')
-LOG_FILE: Final[str] = f'{OMD_ROOT}/var/log/nvdct.log'
-MIN_CDP_VERSION: Final[str] = '0.7.1-20240320'
-MIN_LINUX_IP_ADDRESSES: Final[str] = '0.0.4-20241210'
-MIN_SNMP_IP_ADDRESSES: Final[str] = '0.0.6-20241210'
-MIN_WINDOWS_IP_ADDRESSES: Final[str] = '0.0.3-20241210'
-MIN_LLDP_VERSION: Final[str] = '0.9.3-20240320'
-PATH_CDP: Final[str] = 'networking,cdp_cache,neighbours'
-PATH_INTERFACES: Final[str] = 'networking,interfaces'
-PATH_L3: Final[str] = 'networking,addresses'
-PATH_LLDP: Final[str] = 'networking,lldp_cache,neighbours'
-SCRIPT: Final[str] = '~/local/bin/nvdct/nvdct.py'
-TIME_FORMAT: Final[str] = '%Y-%m-%dT%H:%M:%S.%m'
-TIME_FORMAT_ARGPARSER: Final[str] = '%%Y-%%m-%%dT%%H:%%M:%%S.%%m'
-USER_DATA_FILE: Final[str] = 'nvdct.toml'
-#
-TOML_CUSTOMERS : Final[str] = 'CUSTOMERS'
-TOML_CUSTOM_LAYERS : Final[str] = 'CUSTOM_LAYERS'
-TOML_EMBLEMS : Final[str] = 'EMBLEMS'
-TOML_L2_DROP_HOSTS: Final[str] = 'L2_DROP_HOSTS'
-TOML_L2_HOST_MAP : Final[str] = 'L2_HOST_MAP'
-TOML_L2_NEIGHBOUR_REPLACE_REGEX : Final[str] = 'L2_NEIGHBOUR_REPLACE_REGEX'
-TOML_L2_SEED_DEVICES: Final[str] = 'L2_SEED_DEVICES'
-TOML_L3V4_IGNORE_WILDCARD : Final[str] = 'L3V4_IGNORE_WILDCARD'
-TOML_L3_IGNORE_HOSTS : Final[str] = 'L3_IGNORE_HOSTS'
-TOML_L3_IGNORE_IP : Final[str] = 'L3_IGNORE_IP'
-TOML_L3_REPLACE : Final[str] = 'L3_REPLACE'
-TOML_L3_SUMMARIZE : Final[str] = 'L3_SUMMARIZE'
-TOML_MAP_SPEED_TO_THICKNESS : Final[str] = 'MAP_SPEED_TO_THICKNESS'
-TOML_PROTECTED_TOPOLOGIES : Final[str] = 'PROTECTED_TOPOLOGIES'
-TOML_SETTINGS : Final[str] = 'SETTINGS'
-TOML_SITES : Final[str] = 'SITES'
-TOML_STATIC_CONNECTIONS : Final[str] = 'STATIC_CONNECTIONS'
-#
-LAYERS = {
-    'CDP': Layer(
-        path=PATH_CDP,
-        columns=COLUMNS_CDP,
-        label=LABEL_CDP,
-        host_label=HOST_LABEL_CDP,
-    ),
-    'LLDP': Layer(
-        path=PATH_LLDP,
-        columns=COLUMNS_LLDP,
-        label=LABEL_LLDP,
-        host_label=HOST_LABEL_LLDP,
-    ),
-    'L3v4': Layer(
-        path=PATH_L3,
-        columns='',
-        label=LABEL_L3v4,
-        host_label=HOST_LABEL_L3V4_ROUTER,
-    ),
-    'L3v6': Layer(
-        path=PATH_L3,
-        columns='',
-        label=LABEL_L3v6,
-        host_label=HOST_LABEL_L3V6_ROUTER,
-    ),
-}
+
+@unique
+class Backends(MyEnum):
+    LIVESTATUS: Final[str] = 'LIVESTATUS'
+    MULTISITE: Final[str] = 'MULTISITE'
+    RESTAPI: Final[str] = 'RESTAPI'
+
+
+@unique
+class Case(MyEnum):
+    LOWER: Final[str] = 'LOWER'
+    UPPER: Final[str] = 'UPPER'
+
+@unique
+class CacheItems(MyEnum):
+    inventory = 'inventory'
+    interfaces = 'interfaces'
+
+@unique
+class CliLong(MyEnum):
+    ADJUST_TOML: Final[str] = '--adjust-toml'
+    API_PORT: Final[str] = '--api-port'
+    BACKEND: Final[str] = '--backend'
+    CASE: Final[str] = '--case'
+    CHECK_USER_DATA_ONLY: Final[str] = '--check-user-data-only'
+    DEFAULT: Final[str] = '--default'
+    DISPLAY_L2_NEIGHBOURS: Final[str] = '--display-l2-neighbours'
+    DONT_COMPARE: Final[str] = '--dont-compare'
+    FILTER_CUSTOMERS: Final[str] = '--filter-customers'
+    FILTER_SITES: Final[str] = '--filter-sites'
+    INCLUDE_L3_HOSTS: Final[str] = '--include-l3-hosts'
+    INCLUDE_L3_LOOPBACK: Final[str] = '--include-l3-loopback'
+    KEEP: Final[str] = '--keep'
+    LAYERS: Final[str] = '--layers'
+    LOG_FILE: Final[str] = '--log-file'
+    LOG_LEVEL: Final[str] = '--log-level'
+    LOG_TO_STDOUT: Final[str] = '--log-to-stdout'
+    MIN_AGE: Final[str] = '--min-age'
+    OUTPUT_DIRECTORY: Final[str] = '--output-directory'
+    PREFIX: Final[str] = '--prefix'
+    PRE_FETCH: Final[str] = '--pre-fetch'
+    QUIET: Final[str] = '--quiet'
+    REMOVE_DOMAIN: Final[str] = '--remove-domain'
+    SEED_DEVICES: Final[str] = '--seed-devices'
+    SKIP_L3_CIDR_0: Final[str] = '--skip-l3-cidr-0'
+    SKIP_L3_CIDR_32_128: Final[str] = '--skip-l3-cidr-32-128'
+    SKIP_L3_IF: Final[str] = '--skip-l3-if'
+    SKIP_L3_IP: Final[str] = '--skip-l3-ip'
+    SKIP_L3_PUBLIC: Final[str] = '--skip-l3-public'
+    TIME_FORMAT: Final[str] = '--time-format'
+    USER_DATA_FILE: Final[str] = '--user-data-file'
+    VERSION: Final[str] = '--version'
+
+
+@unique
+class EmblemNames(MyEnum):
+    HOST_NODE: Final[str] = 'host_node'
+    IP_ADDRESS: Final[str] = 'ip_address'
+    IP_NETWORK: Final[str] = 'ip_network'
+    L3_REPLACE: Final[str] = 'l3_replace'
+    L3_SUMMARIZE: Final[str] = 'l3_summarize'
+    SERVICE_NODE: Final[str] = 'service_node'
+
+
+@unique
+class EmblemValues(MyEnum):
+    ICON_AGGREGATION: Final[str] = 'icon_aggr'
+    ICON_ALERT_UNREACHABLE: Final[str] = 'icon_alert_unreach'
+    ICON_PLUGINS_CLOUD: Final[str] = 'icon_plugins_cloud'
+    IP_ADDRESS_80: Final[str] = 'ip-address_80'
+    IP_NETWORK_80: Final[str] = 'ip-network_80'
+
+
+@unique
+class HostLabels(MyEnum):
+    CDP: Final[str] = "'nvdct/has_cdp_neighbours' 'yes'"
+    L3V4_HOSTS: Final[str] = "'nvdct/l3v4_topology' 'host'"
+    L3V4_ROUTER: Final[str] = "'nvdct/l3v4_topology' 'router'"
+    L3V6_HOSTS: Final[str] = "'nvdct/l3v6_topology' 'host'"
+    L3V6_ROUTER: Final[str] = "'nvdct/l3v6_topology' 'router'"
+    LLDP: Final[str] = "'nvdct/has_lldp_neighbours' 'yes'"
+
+
+@unique
+class IncludeExclude(MyEnum):
+    INCLUDE: Final[str] = 'INCLUDE'
+    EXCLUDE: Final[str] = 'EXCLUDE'
+
+
+@unique
+class L2InvColumns(MyEnum):
+    NEIGHBOUR: Final[str] = 'neighbour_name'
+    LOCALPORT: Final[str] = 'local_port'
+    NEIGHBOURPORT: Final[str] = 'neighbour_port'
+
+
+@unique
+class L3InvColumns(MyEnum):
+    ADDRESS: Final[str] = 'address'
+    DEVICE: Final[str] = 'device'
+    CIDR: Final[str] = 'cidr'
+
+
+@unique
+class InvPaths(MyEnum):
+    CDP: Final[str] = 'networking,cdp_cache,neighbours'
+    INTERFACES: Final[str] = 'networking,interfaces'
+    L3: Final[str] = 'networking,addresses'
+    LLDP: Final[str] = 'networking,lldp_cache,neighbours'
+    LLDP_ATTRIBUTE: Final[str] = 'networking,lldp_cache'
+
+
+@unique
+class Layers(MyEnum):
+    CDP: Final[str] = 'CDP'
+    LLDP: Final[str] = 'LLDP'
+    L3V4: Final[str] = 'L3v4'
+    L3V6: Final[str] = 'L3v6'
+    STATIC: Final[str] = 'STATIC'
+
+
+@unique
+class LogLevels(MyEnum):
+    CRITICAL: Final[str] = 'CRITICAL'
+    FATAL: Final[str] = 'FATAL'
+    ERROR: Final[str] = 'ERROR'
+    WARNING: Final[str] = 'WARNING'
+    INFO: Final[str] = 'INFO'
+    DEBUG: Final[str] = 'DEBUG'
+    OFF: Final[str] = 'OFF'
+
+
+class MinVersions(MyEnum):
+    CDP: Final[str] = '0.7.1-20240320'
+    LLDP: Final[str] = '0.9.3-20240320'
+    LINUX_IP_ADDRESSES: Final[str] = '0.0.4-20241210'
+    SNMP_IP_ADDRESSES: Final[str] = '0.0.6-20241210'
+    WINDOWS_IP_ADDRESSES: Final[str] = '0.0.3-20241210'
+
+
+@unique
+class TomlSections(MyEnum):
+    CUSTOMERS: Final[str] = 'CUSTOMERS'
+    EMBLEMS: Final[str] = 'EMBLEMS'
+    L2_DROP_NEIGHBOURS: Final[str] = 'L2_DROP_NEIGHBOURS'
+    L2_HOST_MAP: Final[str] = 'L2_HOST_MAP'
+    L2_NEIGHBOUR_REPLACE_REGEX: Final[str] = 'L2_NEIGHBOUR_REPLACE_REGEX'
+    L2_SEED_DEVICES: Final[str] = 'L2_SEED_DEVICES'
+    L3V4_IGNORE_WILDCARD: Final[str] = 'L3V4_IGNORE_WILDCARD'
+    L3_IGNORE_HOSTS: Final[str] = 'L3_IGNORE_HOSTS'
+    L3_IGNORE_IP: Final[str] = 'L3_IGNORE_IP'
+    L3_REPLACE: Final[str] = 'L3_REPLACE'
+    L3_SUMMARIZE: Final[str] = 'L3_SUMMARIZE'
+    MAP_SPEED_TO_THICKNESS: Final[str] = 'MAP_SPEED_TO_THICKNESS'
+    PROTECTED_TOPOLOGIES: Final[str] = 'PROTECTED_TOPOLOGIES'
+    SETTINGS: Final[str] = 'SETTINGS'
+    SITES: Final[str] = 'SITES'
+    STATIC_CONNECTIONS: Final[str] = 'STATIC_CONNECTIONS'
+
+
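+# Maps a long CLI option to the corresponding TOML settings key,
+# e.g. cli_long_to_toml('--skip-l3-cidr-0') -> 'skip_l3_cidr_0'.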
+def cli_long_to_toml(cli_param: str) -> str:
+    return cli_param.strip('-').replace('-', '_')
+
+
+@unique
+class TomlSettings(MyEnum):
+    ADJUST_TOML: Final[str] = cli_long_to_toml(CliLong.ADJUST_TOML)
+    API_PORT: Final[str] = cli_long_to_toml(CliLong.API_PORT)
+    BACKEND: Final[str] = cli_long_to_toml(CliLong.BACKEND)
+    CASE: Final[str] = cli_long_to_toml(CliLong.CASE)
+    CHECK_USER_DATA_ONLY: Final[str] = cli_long_to_toml(CliLong.CHECK_USER_DATA_ONLY)
+    DEFAULT: Final[str] = cli_long_to_toml(CliLong.DEFAULT)
+    DISPLAY_L2_NEIGHBOURS: Final[str] = cli_long_to_toml(CliLong.DISPLAY_L2_NEIGHBOURS)
+    DONT_COMPARE: Final[str] = cli_long_to_toml(CliLong.DONT_COMPARE)
+    FILTER_CUSTOMERS: Final[str] = cli_long_to_toml(CliLong.FILTER_CUSTOMERS)
+    FILTER_SITES: Final[str] = cli_long_to_toml(CliLong.FILTER_SITES)
+    INCLUDE_L3_HOSTS: Final[str] = cli_long_to_toml(CliLong.INCLUDE_L3_HOSTS)
+    INCLUDE_L3_LOOPBACK: Final[str] = cli_long_to_toml(CliLong.INCLUDE_L3_LOOPBACK)
+    KEEP: Final[str] = cli_long_to_toml(CliLong.KEEP)
+    LAYERS: Final[str] = cli_long_to_toml(CliLong.LAYERS)
+    LOG_FILE: Final[str] = cli_long_to_toml(CliLong.LOG_FILE)
+    LOG_LEVEL: Final[str] = cli_long_to_toml(CliLong.LOG_LEVEL)
+    LOG_TO_STDOUT: Final[str] = cli_long_to_toml(CliLong.LOG_TO_STDOUT)
+    MIN_AGE: Final[str] = cli_long_to_toml(CliLong.MIN_AGE)
+    OUTPUT_DIRECTORY: Final[str] = cli_long_to_toml(CliLong.OUTPUT_DIRECTORY)
+    PREFIX: Final[str] = cli_long_to_toml(CliLong.PREFIX)
+    PRE_FETCH: Final[str] = cli_long_to_toml(CliLong.PRE_FETCH)
+    QUIET: Final[str] = cli_long_to_toml(CliLong.QUIET)
+    REMOVE_DOMAIN: Final[str] = cli_long_to_toml(CliLong.REMOVE_DOMAIN)
+    SKIP_L3_CIDR_0: Final[str] = cli_long_to_toml(CliLong.SKIP_L3_CIDR_0)
+    SKIP_L3_CIDR_32_128: Final[str] = cli_long_to_toml(CliLong.SKIP_L3_CIDR_32_128)
+    SKIP_L3_IF: Final[str] = cli_long_to_toml(CliLong.SKIP_L3_IF)
+    SKIP_L3_IP: Final[str] = cli_long_to_toml(CliLong.SKIP_L3_IP)
+    SKIP_L3_PUBLIC: Final[str] = cli_long_to_toml(CliLong.SKIP_L3_PUBLIC)
+    TIME_FORMAT: Final[str] = cli_long_to_toml(CliLong.TIME_FORMAT)
+    USER_DATA_FILE: Final[str] = cli_long_to_toml(CliLong.USER_DATA_FILE)
 
diff --git a/source/bin/nvdct/lib/settings.py b/source/bin/nvdct/lib/settings.py
index e625634eab31494d676ba7b3392e0ccb2380bb33..5b82bff0a8ce66f4ad627f4d172239e459ed5165 100755
--- a/source/bin/nvdct/lib/settings.py
+++ b/source/bin/nvdct/lib/settings.py
@@ -12,39 +12,29 @@
 
 from collections.abc import Mapping
 from ipaddress import AddressValueError, NetmaskValueError, ip_address, ip_network
-from logging import CRITICAL, FATAL, ERROR, WARNING, INFO, DEBUG
+from logging import CRITICAL, DEBUG, ERROR, FATAL, INFO, WARNING
+from pathlib import Path
 from sys import exit as sys_exit
 from time import strftime
 from typing import Dict, List, NamedTuple, Tuple
-from pathlib import Path
 
 from lib.constants import (
     API_PORT,
+    Backends,
+    Case,
+    EmblemValues,
+    EmblemNames,
     ExitCodes,
+    IncludeExclude,
     LOGGER,
     LOG_FILE,
-    Layer,
+    LogLevels,
     OMD_ROOT,
     TIME_FORMAT,
+    TomlSections,
+    TomlSettings,
     USER_DATA_FILE,
 
-    TOML_CUSTOMERS,
-    TOML_CUSTOM_LAYERS,
-    TOML_EMBLEMS,
-    TOML_L2_DROP_HOSTS,
-    TOML_L2_HOST_MAP,
-    TOML_L2_NEIGHBOUR_REPLACE_REGEX,
-    TOML_L2_SEED_DEVICES,
-    TOML_L3V4_IGNORE_WILDCARD,
-    TOML_L3_IGNORE_HOSTS,
-    TOML_L3_IGNORE_IP,
-    TOML_L3_REPLACE,
-    TOML_L3_SUMMARIZE,
-    TOML_MAP_SPEED_TO_THICKNESS,
-    TOML_PROTECTED_TOPOLOGIES,
-    TOML_SETTINGS,
-    TOML_SITES,
-    TOML_STATIC_CONNECTIONS,
 )
 from lib.utils import (
     get_data_from_toml,
@@ -72,18 +62,18 @@ class Thickness(NamedTuple):
 
 
 class StaticConnection(NamedTuple):
+    left_host: str
+    left_service: str
     right_host: str
     right_service: str
-    left_service: str
-    left_host: str
 
 
 class Wildcard(NamedTuple):
+    bit_pattern: int
     int_ip_address: int
     int_wildcard: int
     ip_address: str
     wildcard: str
-    bit_pattern: int
 
 
 class Settings:
@@ -93,30 +83,36 @@ class Settings:
     ):
         # cli defaults
         self.__settings = {
-            # 'api_port': 80,
-            'backend': 'MULTISITE',
-            'case': None,
-            'check_user_data_only': False,
-            'default': False,
-            'dont_compare': False,
-            'filter_customers': None,
-            'filter_sites': None,
-            'include_l3_hosts': False,
-            'keep': False,
-            'remove_domain': False,
-            'layers': [],
-            'log_file': LOG_FILE,
-            'log_level': 'WARNING',
-            'log_to_stdout': False,
-            'min_age': 0,
-            'output_directory': None,
-            'prefix': None,
-            'quiet': False,
-            'pre_fetch': False,
-            'skip_l3_if': False,
-            'skip_l3_ip': False,
-            'time_format': TIME_FORMAT,
-            'user_data_file': f'{OMD_ROOT}/local/bin/nvdct/conf/{USER_DATA_FILE}',
+            TomlSettings.ADJUST_TOML: False,
+            TomlSettings.API_PORT: None,
+            TomlSettings.BACKEND: Backends.MULTISITE,
+            TomlSettings.CASE: None,
+            TomlSettings.CHECK_USER_DATA_ONLY: False,
+            TomlSettings.DEFAULT: False,
+            TomlSettings.DISPLAY_L2_NEIGHBOURS: False,
+            TomlSettings.DONT_COMPARE: False,
+            TomlSettings.FILTER_CUSTOMERS: None,
+            TomlSettings.FILTER_SITES: None,
+            TomlSettings.INCLUDE_L3_HOSTS: False,
+            TomlSettings.INCLUDE_L3_LOOPBACK: False,
+            TomlSettings.KEEP: False,
+            TomlSettings.LAYERS: [],
+            TomlSettings.LOG_FILE: LOG_FILE,
+            TomlSettings.LOG_LEVEL: LogLevels.WARNING,
+            TomlSettings.LOG_TO_STDOUT: False,
+            TomlSettings.MIN_AGE: 0,
+            TomlSettings.OUTPUT_DIRECTORY: None,
+            TomlSettings.PREFIX: None,
+            TomlSettings.PRE_FETCH: False,
+            TomlSettings.QUIET: False,
+            TomlSettings.REMOVE_DOMAIN: False,
+            TomlSettings.SKIP_L3_CIDR_0: False,
+            TomlSettings.SKIP_L3_CIDR_32_128: False,
+            TomlSettings.SKIP_L3_IF: False,
+            TomlSettings.SKIP_L3_IP: False,
+            TomlSettings.SKIP_L3_PUBLIC: False,
+            TomlSettings.TIME_FORMAT: TIME_FORMAT,
+            TomlSettings.USER_DATA_FILE: f'{OMD_ROOT}/local/bin/nvdct/conf/{USER_DATA_FILE}',
         }
         # args in the form {'s, __seed_devices': 'CORE01', 'p, __path_in_inventory': None, ... }}
         # we will remove 's, __'
@@ -125,16 +121,16 @@ class Settings:
         )
 
         self.__user_data = get_data_from_toml(
-            file=self.__args.get('user_data_file', self.user_data_file)
+            file=self.__args.get(TomlSettings.USER_DATA_FILE, self.user_data_file)
         )
 
-        if self.__args.get('check_user_data_only'):
+        if self.__args.get(TomlSettings.CHECK_USER_DATA_ONLY):
             LOGGER.info(msg=f'Could read/parse the user data from {self.user_data_file}')
             print(f'Could read/parse the user data from {self.user_data_file}')
             sys_exit(ExitCodes.OK)
 
         # defaults -> overridden by toml -> overridden by cli
-        self.__settings.update(self.__user_data.get(TOML_SETTINGS, {}))
+        self.__settings.update(self.__user_data.get(TomlSections.SETTINGS, {}))
         self.__settings.update(self.__args)
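+        # e.g. backend: the default MULTISITE is overridden by 'backend = "RESTAPI"'
+        # in [SETTINGS], which in turn is overridden by '-b LIVESTATUS' on the CLI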
 
         if self.layers:
@@ -149,18 +145,17 @@ class Settings:
         self.__api_port: int | None = None
 
         # init user data with defaults
-        self.__custom_layers: List[StaticConnection] | None = None
         self.__customers: List[str] | None = None
         self.__emblems: Emblems | None = None
-        self.__l2_drop_host: List[str] | None = None
+        self.__l2_drop_neighbours: List[str] | None = None
         self.__l2_host_map: Dict[str, str] | None = None
         self.__l2_neighbour_replace_regex: List[Tuple[str, str]] | None = None
         self.__l2_seed_devices: List[str] | None = None
         self.__l3_ignore_hosts: List[str] | None = None
         self.__l3_ignore_ip: List[ip_network] | None = None
-        self.__l3v4_ignore_wildcard: List[Wildcard] | None = None
         self.__l3_replace: Dict[str, str] | None = None
         self.__l3_summarize: List[ip_network] | None = None
+        self.__l3v4_ignore_wildcard: List[Wildcard] | None = None
         self.__map_speed_to_thickness: List[Thickness] | None = None
         self.__protected_topologies: List[str] | None = None
         self.__sites: List[str] | None = None
@@ -172,92 +167,108 @@ class Settings:
     @property  # --api-port
     def api_port(self) -> int:
         if self.__api_port is None:
-            if self.__settings.get('api_port'):
-                self.__api_port = int(self.__settings.get('api_port'))
+            if self.__settings.get(TomlSettings.API_PORT):
+                self.__api_port = int(self.__settings.get(TomlSettings.API_PORT))
             else:
                 self.__api_port = get_local_cmk_api_port()
             if self.__api_port is None:
-                self.__api_port = API_PORT
+                self.__api_port = API_PORT
 
         return self.__api_port
 
     @property  # -b --backend
     def backend(self) -> str:
-        if str(self.__settings['backend']) in ['LIVESTATUS', 'MULTISITE', 'RESTAPI']:
-            return str(self.__settings['backend'])
+        if str(self.__settings[TomlSettings.BACKEND]) in [
+            Backends.LIVESTATUS,
+            Backends.MULTISITE,
+            Backends.RESTAPI
+        ]:
+            return str(self.__settings[TomlSettings.BACKEND])
         else:  # fallback to default -> exit ??
             LOGGER.warning(
-                f'Unknown backend: {self.__settings["backend"]}. Accepted backends are: '
-                'LIVESTATUS, MULTISITE, RESTAPI. Fall back zo MULTISITE.'
+                f'Unknown backend: {self.__settings[TomlSettings.BACKEND]}. Accepted backends are: '
+                f'{Backends.LIVESTATUS}, {Backends.MULTISITE}, {Backends.RESTAPI}. Fall back to {Backends.MULTISITE}.'
             )
-            return 'MULTISITE'
+            return Backends.MULTISITE
 
     @property  # --case
     def case(self) -> str | None:
-        if self.__settings['case'] in ['LOWER', 'UPPER']:
-            return self.__settings['case']
-        elif self.__settings['case'] is not None:
+        if self.__settings[TomlSettings.CASE] in [Case.LOWER, Case.UPPER]:
+            return self.__settings[TomlSettings.CASE]
+        elif self.__settings[TomlSettings.CASE] is not None:
             LOGGER.warning(
-                    f'Unknon case setting {self.__settings["case"]}. '
-                    'Accepted are LOWER|UPPER. Fallback to no change.'
+                    f'Unknown case setting {self.__settings[TomlSettings.CASE]}. '
+                    f'Accepted are {Case.LOWER}|{Case.UPPER}. Fallback to no change.'
                 )
         return None
 
     @property  # --check-user-data-only
     def check_user_data_only(self) -> bool:
-        return bool(self.__settings['check_user_data_only'])
+        return bool(self.__settings[TomlSettings.CHECK_USER_DATA_ONLY])
 
     @property  # -d --default
     def default(self) -> bool:
-        return bool(self.__settings['default'])
+        return bool(self.__settings[TomlSettings.DEFAULT])
+
+    @property  # --display-l2-neighbours
+    def display_l2_neighbours(self) -> bool:
+        return bool(self.__settings[TomlSettings.DISPLAY_L2_NEIGHBOURS])
 
     @property  # --dont-compare
     def dont_compare(self) -> bool:
-        return bool(self.__settings['dont_compare'])
+        return bool(self.__settings[TomlSettings.DONT_COMPARE])
 
     @property  # --filter-customers
     def filter_customers(self) -> str | None:
-        if self.__settings['filter_customers'] in ['INCLUDE', 'EXCLUDE']:
-            return self.__settings['filter_customers']
-        elif self.__settings['filter_customers'] is not None:
+        if self.__settings[TomlSettings.FILTER_CUSTOMERS] in [IncludeExclude.INCLUDE, IncludeExclude.EXCLUDE]:
+            return self.__settings[TomlSettings.FILTER_CUSTOMERS]
+        elif self.__settings[TomlSettings.FILTER_CUSTOMERS] is not None:
             LOGGER.error(
-                f'Wrong setting for "filter_customers": '
-                f'{self.__settings["filter_customers"]}, supported settings INCLUDE|EXCLUDE.'
+                f'Wrong setting for "{TomlSettings.FILTER_CUSTOMERS}": '
+                f'{self.__settings[TomlSettings.FILTER_CUSTOMERS]}, supported settings {IncludeExclude.INCLUDE}|{IncludeExclude.EXCLUDE}.'
             )
         return None
 
     @property  # --filter-sites
     def filter_sites(self) -> str | None:
-        if self.__settings['filter_sites'] in ['INCLUDE', 'EXCLUDE']:
-            return self.__settings['filter_sites']
-        elif self.__settings['filter_sites'] is not None:
+        if self.__settings[TomlSettings.FILTER_SITES] in [IncludeExclude.INCLUDE, IncludeExclude.EXCLUDE]:
+            return self.__settings[TomlSettings.FILTER_SITES]
+        elif self.__settings[TomlSettings.FILTER_SITES] is not None:
             LOGGER.error(
-                f'Wrong setting for "filter_sites": '
-                f'{self.__settings["filter_sites"]}, supported settings INCLUDE|EXCLUDE.'
+                f'Wrong setting for "{TomlSettings.FILTER_SITES}": '
+                f'{self.__settings[TomlSettings.FILTER_SITES]}, supported settings {IncludeExclude.INCLUDE}|{IncludeExclude.EXCLUDE}.'
             )
         return None
 
+    @property  # --fix-toml
+    def fix_toml(self) -> bool:
+        return bool(self.__settings[TomlSettings.ADJUST_TOML])
+
     @property  # --include-l3-hosts
     def include_l3_hosts(self) -> bool:
-        return bool(self.__settings['include_l3_hosts'])
+        return bool(self.__settings[TomlSettings.INCLUDE_L3_HOSTS])
+
+    @property  # --include-l3-loopback
+    def include_l3_loopback(self) -> bool:
+        return bool(self.__settings[TomlSettings.INCLUDE_L3_LOOPBACK])
 
     @property  # --keep
     def keep(self) -> int | None:
-        if isinstance(self.__settings['keep'], int):
-            return max(self.__settings['keep'], 0)
+        if isinstance(self.__settings[TomlSettings.KEEP], int):
+            return max(self.__settings[TomlSettings.KEEP], 0)
         return None
 
     @property  # --keep-domain
     def remove_domain(self) -> bool:
-        return bool(self.__settings['remove_domain'])
+        return bool(self.__settings[TomlSettings.REMOVE_DOMAIN])
 
     @property  # --layers
     def layers(self) -> List[str]:
-        return self.__settings['layers']
+        return self.__settings[TomlSettings.LAYERS]
 
     @property  # --log-file
     def log_file(self) -> str:
-        raw_log_file = str(Path(str(self.__settings['log_file'])).expanduser())
+        raw_log_file = str(Path(str(self.__settings[TomlSettings.LOG_FILE])).expanduser())
         if is_valid_log_file(raw_log_file):
             return raw_log_file
         else:
@@ -267,67 +278,79 @@ class Settings:
     @property  # --log-level
     def loglevel(self) -> int:
         log_levels = {
-            'DEBUG': DEBUG,
-            'INFO': INFO,
-            'WARNING': WARNING,
-            'ERROR': ERROR,
-            'FATAL': FATAL,
-            'CRITICAL': CRITICAL,
-            'OFF': -1,
+            LogLevels.DEBUG: DEBUG,
+            LogLevels.INFO: INFO,
+            LogLevels.WARNING: WARNING,
+            LogLevels.ERROR: ERROR,
+            LogLevels.FATAL: FATAL,
+            LogLevels.CRITICAL: CRITICAL,
+            LogLevels.OFF: -1,
         }
-        return log_levels.get(self.__settings['log_level'], WARNING)
+        return log_levels.get(self.__settings[TomlSettings.LOG_LEVEL], WARNING)
 
     @property  # --log-to-stdout
     def log_to_stdtout(self) -> bool:
-        return bool(self.__settings['log_to_stdout'])
+        return bool(self.__settings[TomlSettings.LOG_TO_STDOUT])
 
     @property  # --min-age
     def min_age(self) -> int:
-        if isinstance(self.__settings['min_age'], int):
-            return max(self.__settings['min_age'], 0)
+        if isinstance(self.__settings[TomlSettings.MIN_AGE], int):
+            return max(self.__settings[TomlSettings.MIN_AGE], 0)
         else:
             return 0
 
     @property  # --output-directory
     def output_directory(self) -> str:
         # init output directory with current time if not set
-        if not self.__settings['output_directory']:
-            self.__settings['output_directory'] = f'{strftime(self.__settings["time_format"])}'
-        if is_valid_output_directory(str(self.__settings['output_directory'])):
-            return str(self.__settings['output_directory'])
+        if not self.__settings[TomlSettings.OUTPUT_DIRECTORY]:
+            self.__settings[TomlSettings.OUTPUT_DIRECTORY] = f'{strftime(self.__settings[TomlSettings.TIME_FORMAT])}'
+        if is_valid_output_directory(str(self.__settings[TomlSettings.OUTPUT_DIRECTORY])):
+            return str(self.__settings[TomlSettings.OUTPUT_DIRECTORY])
         else:
             LOGGER.error('Falling back to "nvdct"')
             return 'nvdct'
 
     @property  # --prefix
     def prefix(self) -> str | None:
-        if self.__settings['prefix'] is not None:
-            return str(self.__settings['prefix'])
+        if self.__settings[TomlSettings.PREFIX] is not None:
+            return str(self.__settings[TomlSettings.PREFIX])
         return None
 
     @property  # --pre-fill-cache
     def pre_fetch(self) -> bool:
-        return bool(self.__settings['pre_fetch'])
+        return bool(self.__settings[TomlSettings.PRE_FETCH])
 
     @property  # --quiet
     def quiet(self) -> bool:
-        return bool(self.__settings['quiet'])
+        return bool(self.__settings[TomlSettings.QUIET])
+
+    @property  # --skip-l3-cidr-0
+    def skip_l3_cidr_0(self) -> bool:
+        return bool(self.__settings[TomlSettings.SKIP_L3_CIDR_0])
+
+    @property  # --skip-l3-cidr-32-128
+    def skip_l3_cidr_32_128(self) -> bool:
+        return bool(self.__settings[TomlSettings.SKIP_L3_CIDR_32_128])
 
     @property  # --skip-l3-if
     def skip_l3_if(self) -> bool:
-        return bool(self.__settings['skip_l3_if'])
+        return bool(self.__settings[TomlSettings.SKIP_L3_IF])
 
     @property  # --skip-l3-ip
     def skip_l3_ip(self) -> bool:
-        return bool(self.__settings['skip_l3_ip'])
+        return bool(self.__settings[TomlSettings.SKIP_L3_IP])
+
+    @property  # --skip-l3-public
+    def skip_l3_public(self) -> bool:
+        return bool(self.__settings[TomlSettings.SKIP_L3_PUBLIC])
 
     @property  # --time-format
     def time_format(self) -> str:
-        return str(self.__settings['time_format'])
+        return str(self.__settings[TomlSettings.TIME_FORMAT])
 
     @property  # --user-data-file
     def user_data_file(self) -> str:
-        return str(self.__settings['user_data_file'])
+        return str(self.__settings[TomlSettings.USER_DATA_FILE])
 
     #
     #  user data setting
@@ -336,59 +359,36 @@ class Settings:
     def customers(self) -> List[str]:
         if self.__customers is None:
             self.__customers = [
-                str(customer) for customer in set(self.__user_data.get(TOML_CUSTOMERS, []))
+                str(customer) for customer in set(self.__user_data.get(TomlSections.CUSTOMERS, []))
                 if is_valid_customer_name(customer)]
             LOGGER.info(f'Found {len(self.__customers)} customers to filter on')
         return self.__customers
 
-    @property
-    def custom_layers(self) -> List[Layer]:
-        if self.__custom_layers is None:
-            self.__custom_layers = []
-            for _layer in self.__user_data.get(TOML_CUSTOM_LAYERS, []):
-                try:
-                    self.__custom_layers.append(Layer(
-                        path=_layer['path'],
-                        columns=_layer['columns'],
-                        label=_layer['label'],
-                        host_label=_layer['host_label']
-                    ))
-                except KeyError:
-                    LOGGER.error(
-                        f'Invalid entry in {TOML_CUSTOM_LAYERS} -> {_layer} -> ignored'
-                    )
-                    continue
-            LOGGER.critical(
-                f'Valid entries in {TOML_CUSTOM_LAYERS} found: {len(self.__custom_layers)}/'
-                f'{len(self.__user_data.get(TOML_CUSTOM_LAYERS, []))}'
-            )
-        return self.__custom_layers
-
     @property
     def emblems(self) -> Emblems:
         if self.__emblems is None:
-            raw_emblems = self.__user_data.get(TOML_EMBLEMS, {})
+            raw_emblems = self.__user_data.get(TomlSections.EMBLEMS, {})
             self.__emblems = Emblems(
-                host_node=str(raw_emblems.get('host_node', 'icon_missing')),
-                ip_address=str(raw_emblems.get('ip_address', 'ip-address_80')),
-                ip_network=str(raw_emblems.get('ip_network', 'ip-network_80')),
-                l3_replace=str(raw_emblems.get('l3_replace', 'icon_plugins_cloud')),
-                l3_summarize=str(raw_emblems.get('l3_summarize', 'icon_aggr')),
-                service_node=str(raw_emblems.get('service_node', 'icon_missing')),
+                host_node=str(raw_emblems.get(EmblemNames.HOST_NODE, EmblemValues.ICON_ALERT_UNREACHABLE)),
+                ip_address=str(raw_emblems.get(EmblemNames.IP_ADDRESS, EmblemValues.IP_ADDRESS_80)),
+                ip_network=str(raw_emblems.get(EmblemNames.IP_NETWORK, EmblemValues.IP_NETWORK_80)),
+                l3_replace=str(raw_emblems.get(EmblemNames.L3_REPLACE, EmblemValues.ICON_PLUGINS_CLOUD)),
+                l3_summarize=str(raw_emblems.get(EmblemNames.L3_SUMMARIZE, EmblemValues.ICON_AGGREGATION)),
+                service_node=str(raw_emblems.get(EmblemNames.SERVICE_NODE, EmblemValues.ICON_ALERT_UNREACHABLE)),
             )
         return self.__emblems
 
     @property
-    def l2_drop_hosts(self) -> List[str]:
-        if self.__l2_drop_host is None:
-            self.__l2_drop_host = [str(host) for host in set(self.__user_data.get(TOML_L2_DROP_HOSTS, []))]
-        return self.__l2_drop_host
+    def l2_drop_neighbours(self) -> List[str]:
+        if self.__l2_drop_neighbours is None:
+            self.__l2_drop_neighbours = [str(host) for host in set(self.__user_data.get(TomlSections.L2_DROP_NEIGHBOURS, []))]
+        return self.__l2_drop_neighbours
 
     @property
     def l2_seed_devices(self) -> List[str]:
         if self.__l2_seed_devices is None:
             self.__l2_seed_devices = list(set(str(host) for host in (
-                self.__user_data.get(TOML_L2_SEED_DEVICES, [])) if is_valid_hostname(host)))
+                self.__user_data.get(TomlSections.L2_SEED_DEVICES, [])) if is_valid_hostname(host)))
         return self.__l2_seed_devices
 
     @property
@@ -396,7 +396,7 @@ class Settings:
         if self.__l2_host_map is None:
             self.__l2_host_map = {
                 str(host): str(replace_host) for host, replace_host in self.__user_data.get(
-                    TOML_L2_HOST_MAP, {}
+                    TomlSections.L2_HOST_MAP, {}
                 ).items() if is_valid_hostname(host)
             }
         return self.__l2_host_map
@@ -407,7 +407,7 @@ class Settings:
             self.__l2_neighbour_replace_regex = [
                 (
                     str(regex), str(replace)
-                ) for regex, replace in self.__user_data.get(TOML_L2_NEIGHBOUR_REPLACE_REGEX, {}).items()
+                ) for regex, replace in self.__user_data.get(TomlSections.L2_NEIGHBOUR_REPLACE_REGEX, {}).items()
             ]
         return self.__l2_neighbour_replace_regex
 
@@ -415,7 +415,7 @@ class Settings:
     def l3_ignore_hosts(self) -> List[str]:
         if self.__l3_ignore_hosts is None:
             self.__l3_ignore_hosts = [str(host) for host in set(self.__user_data.get(
-                TOML_L3_IGNORE_HOSTS, []
+                TomlSections.L3_IGNORE_HOSTS, []
             )) if is_valid_hostname(host)]
         return self.__l3_ignore_hosts
 
@@ -423,31 +423,30 @@ class Settings:
     def l3_ignore_ips(self) -> List[ip_network]:
         if self.__l3_ignore_ip is None:
             self.__l3_ignore_ip = []
-            for raw_ip_network in self.__user_data.get(TOML_L3_IGNORE_IP, []):
+            for raw_ip_network in self.__user_data.get(TomlSections.L3_IGNORE_IP, []):
                 try:
                     self.__l3_ignore_ip.append(ip_network(raw_ip_network, strict=False))
                 except (AddressValueError, NetmaskValueError):
                     LOGGER.error(
-                        f'Invalid entry in {TOML_L3_IGNORE_IP} found: {raw_ip_network} -> ignored'
+                        f'Invalid entry in {TomlSections.L3_IGNORE_IP} found: {raw_ip_network} -> ignored'
                     )
                     continue
             LOGGER.info(
-                f'Valid entries in {TOML_L3_IGNORE_IP} found: {len(self.__l3_ignore_ip)}/'
-                f'{len(self.__user_data.get(TOML_L3_IGNORE_IP, []))}'
+                f'Valid entries in {TomlSections.L3_IGNORE_IP} found: {len(self.__l3_ignore_ip)}/'
+                f'{len(self.__user_data.get(TomlSections.L3_IGNORE_IP, []))}'
             )
-
         return self.__l3_ignore_ip
 
     @property
     def l3v4_ignore_wildcard(self) -> List[Wildcard]:
         if self.__l3v4_ignore_wildcard is None:
             self.__l3v4_ignore_wildcard = []
-            for entry in self.__user_data.get(TOML_L3V4_IGNORE_WILDCARD, []):
+            for entry in self.__user_data.get(TomlSections.L3V4_IGNORE_WILDCARD, []):
                 try:
                     raw_ip_address, wildcard = entry
                 except ValueError:
                     LOGGER.error(
-                        f'Invalid entry in {TOML_L3V4_IGNORE_WILDCARD} -> {entry} -> ignored'
+                        f'Invalid entry in {TomlSections.L3V4_IGNORE_WILDCARD} -> {entry} -> ignored'
                     )
                     continue
                 try:
@@ -466,12 +465,12 @@ class Settings:
                     ))
                 except (AddressValueError, NetmaskValueError):
                     LOGGER.error(
-                        f'Invalid entry in {TOML_L3V4_IGNORE_WILDCARD} -> {entry} -> ignored'
+                        f'Invalid entry in {TomlSections.L3V4_IGNORE_WILDCARD} -> {entry} -> ignored'
                     )
                     continue
             LOGGER.info(
-                f'Valid entries in {TOML_L3V4_IGNORE_WILDCARD} found: {len(self.__l3v4_ignore_wildcard)}/'
-                f'{len(self.__user_data.get(TOML_L3V4_IGNORE_WILDCARD, []))}'
+                f'Valid entries in {TomlSections.L3V4_IGNORE_WILDCARD} found: {len(self.__l3v4_ignore_wildcard)}/'
+                f'{len(self.__user_data.get(TomlSections.L3V4_IGNORE_WILDCARD, []))}'
             )
         return self.__l3v4_ignore_wildcard
 
@@ -479,12 +478,12 @@ class Settings:
     def l3_replace(self) -> Dict[str, str]:
         if self.__l3_replace is None:
             self.__l3_replace = {}
-            for raw_ip_network, node in self.__user_data.get(TOML_L3_REPLACE, {}).items():
+            for raw_ip_network, node in self.__user_data.get(TomlSections.L3_REPLACE, {}).items():
                 try:
                     _ip_network = ip_network(raw_ip_network)  # noqa: F841
                 except (AddressValueError, NetmaskValueError):
                     LOGGER.error(
-                        f'Invalid entry in {TOML_L3_REPLACE} found: {raw_ip_network} -> line ignored'
+                        f'Invalid entry in {TomlSections.L3_REPLACE} found: {raw_ip_network} -> line ignored'
                     )
                     continue
                 if not is_valid_hostname(node):
@@ -492,8 +491,8 @@ class Settings:
                     continue
                 self.__l3_replace[raw_ip_network] = str(node)
             LOGGER.info(
-                f'Valid entries in {TOML_L3_REPLACE} found: {len(self.__l3_replace)}/'
-                f'{len(self.__user_data.get(TOML_L3_REPLACE, {}))}'
+                f'Valid entries in {TomlSections.L3_REPLACE} found: {len(self.__l3_replace)}/'
+                f'{len(self.__user_data.get(TomlSections.L3_REPLACE, {}))}'
             )
         return self.__l3_replace
 
@@ -501,17 +500,17 @@ class Settings:
     def l3_summarize(self) -> List[ip_network]:
         if self.__l3_summarize is None:
             self.__l3_summarize = []
-            for raw_ip_network in self.__user_data.get(TOML_L3_SUMMARIZE, []):
+            for raw_ip_network in self.__user_data.get(TomlSections.L3_SUMMARIZE, []):
                 try:
                     self.__l3_summarize.append(ip_network(raw_ip_network, strict=False))
                 except (AddressValueError, NetmaskValueError):
                     LOGGER.error(
-                        f'Invalid entry in {TOML_L3_SUMMARIZE} -> {raw_ip_network} -> ignored'
+                        f'Invalid entry in {TomlSections.L3_SUMMARIZE} -> {raw_ip_network} -> ignored'
                     )
                     continue
             LOGGER.info(
-                f'Valid entries in {TOML_L3_SUMMARIZE} found: {len(self.__l3_summarize)}/'
-                f'{len(self.__user_data.get(TOML_L3_SUMMARIZE, []))}'
+                f'Valid entries in {TomlSections.L3_SUMMARIZE} found: {len(self.__l3_summarize)}/'
+                f'{len(self.__user_data.get(TomlSections.L3_SUMMARIZE, []))}'
             )
         return self.__l3_summarize
 
@@ -520,7 +519,7 @@ class Settings:
         if self.__map_speed_to_thickness is None:
             self.__map_speed_to_thickness = []
             map_speed_to_thickness = self.__user_data.get(
-                TOML_MAP_SPEED_TO_THICKNESS, {}
+                TomlSections.MAP_SPEED_TO_THICKNESS, {}
             )
             for speed, thickness in map_speed_to_thickness.items():
                 try:
@@ -530,12 +529,12 @@ class Settings:
                     ))
                 except ValueError:
                     LOGGER.error(
-                        f'Invalid entry in {TOML_MAP_SPEED_TO_THICKNESS} -> {speed}={thickness} -> ignored'
+                        f'Invalid entry in {TomlSections.MAP_SPEED_TO_THICKNESS} -> {speed}={thickness} -> ignored'
                     )
                     continue
             LOGGER.info(
-                f'Valid entries in {TOML_MAP_SPEED_TO_THICKNESS} found: {len(self.__map_speed_to_thickness)}'  # noqa: E501
-                f'/{len(self.__user_data.get(TOML_MAP_SPEED_TO_THICKNESS, []))}'
+                f'Valid entries in {TomlSections.MAP_SPEED_TO_THICKNESS} found: {len(self.__map_speed_to_thickness)}'
+                f'/{len(self.__user_data.get(TomlSections.MAP_SPEED_TO_THICKNESS, []))}'
             )
         return self.__map_speed_to_thickness
 
@@ -543,7 +542,7 @@ class Settings:
     def protected_topologies(self) -> List[str]:
         if self.__protected_topologies is None:
             self.__protected_topologies = [str(topology) for topology in self.__user_data.get(
-                TOML_PROTECTED_TOPOLOGIES, []
+                TomlSections.PROTECTED_TOPOLOGIES, []
             )]
         return self.__protected_topologies
 
@@ -551,12 +550,12 @@ class Settings:
     def static_connections(self) -> List[StaticConnection]:
         if self.__static_connections is None:
             self.__static_connections = []
-            for connection in self.__user_data.get(TOML_STATIC_CONNECTIONS, []):
+            for connection in self.__user_data.get(TomlSections.STATIC_CONNECTIONS, []):
                 try:
                     left_host, left_service, right_service, right_host = connection
                 except ValueError:
                     LOGGER.error(
-                        f'Wrong entry in {TOML_STATIC_CONNECTIONS} -> {connection} -> ignored'
+                        f'Wrong entry in {TomlSections.STATIC_CONNECTIONS} -> {connection} -> ignored'
                     )
                     continue
                 if not right_host or not left_host:
@@ -571,14 +570,14 @@ class Settings:
                     left_host=str(left_host),
                 ))
             LOGGER.info(
-                f'Valid entries in {TOML_STATIC_CONNECTIONS} found: {len(self.__static_connections)}/'
-                f'{len(self.__user_data.get(TOML_STATIC_CONNECTIONS, []))}'
+                f'Valid entries in {TomlSections.STATIC_CONNECTIONS} found: {len(self.__static_connections)}/'
+                f'{len(self.__user_data.get(TomlSections.STATIC_CONNECTIONS, []))}'
             )
         return self.__static_connections
 
     @property
     def sites(self) -> List[str]:
         if self.__sites is None:
-            self.__sites = [str(site) for site in set(self.__user_data.get(TOML_SITES, [])) if is_valid_site_name(site)]
+            self.__sites = [str(site) for site in set(self.__user_data.get(TomlSections.SITES, [])) if is_valid_site_name(site)]
             LOGGER.info(f'Found {len(self.__sites)} sites to filter on')
         return self.__sites
diff --git a/source/bin/nvdct/lib/topologies.py b/source/bin/nvdct/lib/topologies.py
index 3fa4cbb028ca607b96a6c3db529bfd9b0f05bb6a..8594526d6b65ce6b5439b7d2f572e9e899d4e2a5 100755
--- a/source/bin/nvdct/lib/topologies.py
+++ b/source/bin/nvdct/lib/topologies.py
@@ -2,7 +2,6 @@
 # -*- coding: utf-8 -*-
 #
 # License: GNU General Public License v2
-import sys
 
 # Author: thl-cmk[at]outlook[dot]com
 # URL   : https://thl-cmk.hopto.org
@@ -11,10 +10,11 @@ import sys
 
 # 2024-12-22: refactoring topology creation into classes
 #             made L3 topology IP version independent
+# 2024-12-25: refactoring, moved functions into classes
 
 from abc import abstractmethod
-from collections.abc import Mapping, MutableMapping, Sequence
-from ipaddress import ip_address, ip_network, ip_interface
+from collections.abc import Mapping, MutableMapping, MutableSet, Sequence
+from ipaddress import ip_address, ip_interface, ip_network
 from re import sub as re_sub
 from typing import Dict, List, Tuple
 
@@ -24,15 +24,13 @@ from lib.backends import (
 )
 from lib.constants import (
     CACHE_INTERFACES_DATA,
-    HOST_LABEL_L3V4_HOSTS,
-    HOST_LABEL_L3V4_ROUTER,
-    HOST_LABEL_L3V6_HOSTS,
-    HOST_LABEL_L3V6_ROUTER,
+    HostLabels,
     IPVersion,
+    InvPaths,
     LOGGER,
-    PATH_INTERFACES,
-    PATH_L3,
-    DATAPATH,
+    L2InvColumns,
+    L3InvColumns,
+    TomlSections,
 )
 from lib.settings import (
     Emblems,
@@ -41,32 +39,36 @@ from lib.settings import (
     Wildcard,
 )
 from lib.utils import (
-    InventoryColumns,
-    IpInfo,
-    # is_valid_hostname,
     save_data_to_file,
 )
 
 
 class NvObjects:
-    def __init__(self) -> None:
+    def __init__(
+            self,
+            host_cache: HostCache
+    ) -> None:
         self.nv_objects: Dict[str, any] = {}
         self.host_count: int = 0
         self.host_list: List[str] = []
+        self.host_cache = host_cache
 
     def add_host(
-        self,
-        host: str,
-        host_cache: HostCache,
-        emblem: str | None = None
+            self,
+            host: str,
+            emblem: str | None = None,
+            name: str | None = None,
     ) -> None:
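+        # if the host node already exists, just refresh its display name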
+        if name and host in self.nv_objects:
+            self.nv_objects[host]['name'] = name
+
         if host not in self.nv_objects:
             self.host_count += 1
             self.host_list.append(host)
             link: Dict = {}
             metadata: Dict = {}
             # LOGGER.debug(f'host: {host}, {host_cache.host_exists(host=host)}')
-            if host_cache.host_exists(host=host) is True:
+            if self.host_cache.host_exists(host=host) is True:
                 LOGGER.debug(f'host: {host} exists')
                 link = {'core': host}
             else:
@@ -82,31 +84,30 @@ class NvObjects:
                     }
 
             self.nv_objects[host] = {
-                'name': host,
+                'name': name if name is not None else host,
                 'link': link,
                 'metadata': metadata,
             }
             LOGGER.debug(f'host: {host}, link: {link}, metadata: {metadata}')
 
     def add_service(
-        self,
-        host: str,
-        service: str,
-        host_cache: HostCache,
-        emblem: str | None = None,
-        metadata: Dict | None = None,
-        name: str | None = None,
+            self,
+            host: str,
+            service: str,
+            emblem: str | None = None,
+            metadata: Dict | None = None,
+            name: str | None = None,
     ) -> None:
         if metadata is None:
             metadata = {}
         if name is None:
             name = service
 
-        self.add_host(host=host, host_cache=host_cache)
+        self.add_host(host=host)
         service_object = f'{service}@{host}'
         if service_object not in self.nv_objects:
             link: Dict = {}
-            if host_cache.host_exists(host=host):
+            if self.host_cache.host_exists(host=host):
                 link = {'core': [host, service]}
             elif emblem is not None:
                 metadata.update({
@@ -125,16 +126,14 @@ class NvObjects:
         elif metadata is not {}:
             self.nv_objects[service_object]['metadata'].update(metadata)
 
-
     def add_interface(
-        self,
-        host: str,
-        service: str,
-        host_cache: HostCache,
-        emblem: str | None = None,
-        metadata: Dict | None = None,
-        name: str | None = None,
-        item: str | None = None,
+            self,
+            host: str,
+            service: str,
+            item: str | None,
+            emblem: str | None = None,
+            metadata: Dict | None = None,
+            name: str | None = None,
     ) -> None:
         if metadata is None:
             metadata = {}
@@ -142,16 +141,16 @@ class NvObjects:
             name = service
         speed = None
 
-        self.add_host(host=host, host_cache=host_cache)
+        self.add_host(host=host)
         service_object = f'{service}@{host}'
         if service_object not in self.nv_objects:
             link: Dict = {}
-            if item is None:
-                item = get_service_by_interface(host, service.lstrip('0'), host_cache)
-            if item and host_cache.host_exists(host=host):
+            # if item is None:
+            #     item = get_service_by_interface(host, service.lstrip('0'), host_cache)
+            if item and self.host_cache.host_exists(host=host):
                 service_long = f'Interface {item}'
                 link = {'core': [host, service_long]}
-                if op_data := get_operational_interface_data(host, item, host_cache):
+                if op_data := self.get_operational_interface_data(host, item):
                     metadata.update(op_data)
                     speed = op_data.get('op_speed_int')
             elif emblem is not None:
@@ -176,11 +175,11 @@ class NvObjects:
             self.nv_objects[service_object]['metadata'].update(metadata)
 
     def add_ip_address(
-        self,
-        host: str,
-        raw_ip_address: str,
-        emblem: str,
-        interface: str | None,
+            self,
+            host: str,
+            raw_ip_address: str,
+            emblem: str,
+            interface: str | None,
     ) -> None:
         if interface is not None:
             service_object = f'{raw_ip_address}@{interface}@{host}'
@@ -204,6 +203,54 @@ class NvObjects:
                 }
             }
 
+    def get_operational_interface_data(
+            self,
+            host: str,
+            item: str,
+    ) -> Dict[str, str | int] | None:
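+        # factors to convert the "Speed" value from the interface service output into bit/s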
+        unit_to_bits_per_second = {
+            'Bit/s': 1,
+            'kBit/s': 1000,
+            'Kbps': 1000,
+            'MBit/s': 1000000,
+            'Mbps': 1000000,
+            'GBit/s': 1000000000,
+            'Gbps': 1000000000,
+        }
+
+        # get dict of interfaces with the item as key
+        interface_data: Dict[str, any] | None = self.host_cache.get_data(
+            host=host, item=CacheItems.interfaces, path=CACHE_INTERFACES_DATA
+        )
+        try:
+            raw_operational_data = interface_data[item]['long_plugin_output']
+        except (KeyError, TypeError):
+            return None
+
+        if raw_operational_data:
+            operational_data: Dict[str, str | int] = {}
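+            # each entry looks like "<key>: <value>", e.g. "MAC: 6C:DD:30:DD:51:8B" or "Speed: 1 GBit/s"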
+            for _entry in raw_operational_data:
+                try:
+                    key, value = _entry.split(': ', 1)  # split only at the first colon
+                except ValueError:
+                    continue
+                value = value.strip(' ')
+                match key:
+                    case 'MAC':
+                        if len(value) == 17:  # valid MAC: 6C:DD:30:DD:51:8B
+                            operational_data['mac'] = value
+                    case 'Speed':
+                        try:  # *_ ignores the rest of the string, e.g. "(expected: 1 GBit/s)WARN"
+                            speed, unit, *_ = value.split(' ')
+                        except ValueError:
+                            pass
+                        else:
+                            operational_data['op_speed_str'] = f'{speed} {unit}'
+                            operational_data['op_speed_int'] = int(float(speed) * unit_to_bits_per_second[unit])
+
+            return operational_data
+        return None
+
     def add_ip_network(self, network: str, emblem: str, ) -> None:
         if network not in self.nv_objects:
             self.nv_objects[network] = {
@@ -254,9 +301,9 @@ class NvConnections:
             self.nv_connections.append([connection])
 
     def add_meta_data_to_connections(
-        self,
-        nv_objects: NvObjects,
-        speed_map: Sequence[Thickness],
+            self,
+            nv_objects: NvObjects,
+            speed_map: Sequence[Thickness],
     ):
         for connection in self.nv_connections:
             warning = False
@@ -281,7 +328,6 @@ class NvConnections:
             left_native_vlan = nv_objects.nv_objects[left].get('metadata', {}).get('native_vlan')
             right_native_vlan = nv_objects.nv_objects[right].get('metadata', {}).get('native_vlan')
 
-
             if right_speed and left_speed:
                 right_thickness = map_speed_to_thickness(right_speed, speed_map)
                 # left_thickness = map_speed_to_thickness(left_speed, speed_map)
@@ -297,7 +343,7 @@ class NvConnections:
                     # metadata = add_tooltip_quickinfo(metadata, right, right_speed_str)
 
                     LOGGER.warning(
-                        f'Connection with speed mismatch: {left} (speed: {left_speed_str})'
+                        f'Connection speed mismatch: {left} (speed: {left_speed_str})'
                         f'<->{right} (speed: {right_speed_str})'
                     )
 
@@ -312,7 +358,7 @@ class NvConnections:
                     )
 
                     LOGGER.warning(
-                        f'Connection with duplex mismatch: {left} (duplex: {left_duplex})'
+                        f'Connection duplex mismatch: {left} (duplex: {left_duplex})'
                         f'<->{right} (duplex: {right_duplex})'
                     )
             if left_native_vlan and right_native_vlan:
@@ -325,13 +371,13 @@ class NvConnections:
                         )
 
                         LOGGER.warning(
-                            f'Connection with native vlan mismatch: '
+                            f'Connection native vlan mismatch: '
                             f'{left} (vlan: {left_native_vlan})<->{right} (vlan: {right_native_vlan})'
                         )
             if warning:
                 metadata['line_config'].update({
-                        'color': 'red',
-                        'thickness': 5,
+                    'color': 'red',
+                    'thickness': 5,
                 })
                 metadata['line_config']['css_styles']['stroke-dasharray'] = '10 5'
                 nv_objects.add_icon_to_object(left, 'icon_warning')
@@ -345,17 +391,20 @@ class Topology:
             self,
             emblems: Emblems,
             host_cache: HostCache,
+            topology: str,
     ):
-        self.nv_objects: NvObjects = NvObjects()
+        self.nv_objects: NvObjects = NvObjects(host_cache=host_cache)
         self.nv_connections: NvConnections = NvConnections()
         self.emblems: Emblems = emblems
         self.host_cache: HostCache = host_cache
+        self.topology = topology
 
     @abstractmethod
     def create(self):
         raise NotImplementedError
 
-    def save(self, label:str, output_directory: str, make_default: bool):
+    def save(self, label: str, output_directory: str, make_default: bool, dont_compare: bool):
+        LOGGER.info(f'{self.topology} saving...')
         data = {
             'version': 1,
             'name': label,
@@ -364,13 +413,130 @@ class Topology:
         }
         save_data_to_file(
             data=data,
-            path=(
-                f'{DATAPATH}/{output_directory}'
-            ),
+            path=output_directory,
             file=f'data_{label}.json',
             make_default=make_default,
+            dont_compare=dont_compare,
         )
 
+    def get_service_by_interface(self, host: str, interface: str) -> str | None:
+        """
+        Returns:
+            the matching interface service item, or None if no match is found
+        """
+
+        short_if_names = {
+            'ethernet': 'eth',
+            # 'fastethernet': 'Fa',
+            # 'gigabitethernet': 'gi',
+            # 'tengigabitethernet': 'te',
+            # 'fortygigabitethernet': 'Fo',
+            # 'hundredgigabitethernet': 'Hu',
+            # 'management': 'Ma',
+        }
+
+        def get_short_if_name(long_interface: str) -> str:
+            """
+            Returns the short interface name for a long interface name.
+            long_interface: the long interface name
+            """
+            if not long_interface:
+                return long_interface
+            for interface_prefix in short_if_names.keys():
+                if long_interface.lower().startswith(interface_prefix.lower()):
+                    interface_short = short_if_names[interface_prefix]
+                    return long_interface.lower().replace(interface_prefix.lower(), interface_short, 1)
+            return long_interface
+
+        # try to find the item for an interface
+        def match_entry_with_item(interface_entry: Mapping[str, str], services: Sequence[str]) -> str | None:
+            values = [
+                str(interface_entry.get('name', '')).strip(),
+                str(interface_entry.get('description', '')).strip(),
+                str(interface_entry.get('alias', '')).strip(),
+            ]
+            for value in values:
+                if value in services:
+                    return value
+
+            index = str(interface_entry.get('index'))
+
+            # try alias+index
+            alias_index = str(interface_entry.get('alias')).strip() + ' ' + index
+            if alias_index in services:
+                LOGGER.info(f'{self.topology} match found by alias-index: |{interface_entry}| <-> |{alias_index}|')
+                return alias_index
+
+            # try description+index
+            description_index = str(interface_entry.get('description')).strip() + ' ' + index
+            if description_index in services:
+                LOGGER.info(f'{self.topology} match found by description-index: |{interface_entry}| <-> |{description_index}|')
+                return description_index
+
+            # for index try with padding
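+            # e.g. inventory index "5" may be monitored as item "05" or "005"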
+            pad_services: List[str] = [x for x in services if x.isdigit()]
+            if pad_services:
+                max_pad: int = len(max(pad_services, key=len)) + 1
+                min_pad: int = len(min(pad_services, key=len))
+                for i in range(min_pad, max_pad):
+                    index_padded = f'{index:0>{i}}'
+                    if index_padded in pad_services:
+                        return index_padded
+                    # still not found try values + index
+                    for value in values:
+                        if f'{value} {index_padded}' in services:
+                            return f'{value} {index_padded}'
+
+            LOGGER.warning(f'{self.topology} no match found |{interface_entry}| <-> |{services}|')
+            return None
+
+        # empty host/neighbour should never happen here
+        if not host:
+            LOGGER.warning(f'{self.topology} no host name |{host}|')
+            return None
+
+        # get dict of interfaces with the item as key
+        interface_data: Mapping[str, Mapping[str, object]] = self.host_cache.get_data(
+            host=host, item=CacheItems.interfaces, path=CACHE_INTERFACES_DATA
+        )
+        if not interface_data:
+            LOGGER.warning(f'{self.topology} no interface data for: {host}')
+            return None
+
+        # try to find the interface in the host interface inventory list
+        inventory = self.host_cache.get_data(
+            host=host, item=CacheItems.inventory, path=InvPaths.INTERFACES
+        )
+        if not inventory:
+            LOGGER.warning(f'{self.topology} no interface inventory for: {host}')
+            return None
+
+        interface_items: Sequence[str] = list(interface_data.keys())
+
+        # the easy case
+        if interface in interface_items:
+            return interface
+
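+        # otherwise look the interface up in the inventory (by name, description, alias,
+        # index or MAC address) and map that entry back to a monitored service item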
+        for entry in inventory:
+            if interface in [
+                entry.get('name'),
+                entry.get('description'),
+                entry.get('alias'),
+                str(entry.get('index')),
+                entry.get('phys_address'),
+            ]:
+                return match_entry_with_item(entry, interface_items)
+            elif f'1:{interface}' == entry.get('name'):  # Extreme non-stack
+                return match_entry_with_item(entry, interface_items)
+            elif entry.get('name') is not None and get_short_if_name(
+                    entry.get('name')) == str(interface).lower():  # Cisco NXOS
+                return match_entry_with_item(entry, interface_items)
+
+        LOGGER.warning(msg=f'{self.topology} Device: {host}: service for interface |{interface}| not found')
+
+
 class TopologyStatic(Topology):
     def __init__(
             self,
@@ -381,26 +547,24 @@ class TopologyStatic(Topology):
         super().__init__(
             emblems=emblems,
             host_cache=host_cache,
+            topology='[STATIC]',
         )
         self.connections: Sequence[StaticConnection] = connections
 
     def create(self):
         for connection in self.connections:
-            LOGGER.info(msg=f'connection: {connection}')
+            LOGGER.debug(msg=f'{self.topology} connection from {TomlSections.STATIC_CONNECTIONS}: {connection}')
             self.nv_objects.add_host(
                 host=connection.right_host,
-                host_cache=self.host_cache,
                 emblem=self.emblems.host_node
             )
             self.nv_objects.add_host(
                 host=connection.left_host,
-                host_cache=self.host_cache,
                 emblem=self.emblems.host_node
             )
             if connection.right_service:
                 self.nv_objects.add_service(
                     host=connection.right_host,
-                    host_cache=self.host_cache,
                     emblem=self.emblems.service_node,
                     service=connection.right_service
                 )
@@ -412,7 +576,6 @@ class TopologyStatic(Topology):
             if connection.left_service:
                 self.nv_objects.add_service(
                     host=connection.left_host,
-                    host_cache=self.host_cache,
                     emblem=self.emblems.service_node,
                     service=connection.left_service
                 )
@@ -442,118 +605,102 @@ class TopologyStatic(Topology):
                     right=f'{connection.left_host}',
                 )
 
+
 class TopologyL2(Topology):
     def __init__(
             self,
             emblems: Emblems,
             host_cache: HostCache,
-            case: str,
-            inv_columns: InventoryColumns,
-            l2_drop_hosts: List[str],
-            l2_host_map: Dict[str, str],
+            l2_drop_neighbours: List[str],
             l2_neighbour_replace_regex: List[Tuple[str, str]],
             label: str,
             path_in_inventory: str,
-            prefix: str,
-            remove_domain: bool,
             seed_devices: Sequence[str],
+            display_l2_neighbours: bool,
     ):
         super().__init__(
             emblems=emblems,
             host_cache=host_cache,
+            topology=f'[L2 {label}]',
         )
-        self.case: str = case
-        self.inv_columns: InventoryColumns = inv_columns
-        self.l2_drop_hosts: List[str] = l2_drop_hosts
-        self.l2_host_map: Dict[str, str] = l2_host_map
-        self.l2_neighbour_replace_regex: List[Tuple[str, str]] = l2_neighbour_replace_regex
+        self.l2_drop_neighbours: List[str] = l2_drop_neighbours
         self.label: str = label
         self.neighbour_to_host: MutableMapping[str, str] = {}
         self.path_in_inventory: str = path_in_inventory
-        self.prefix: str = prefix
-        self.remove_domain: bool = remove_domain
-        self.seed_devices: Sequence[str] = seed_devices
+        self.hosts_to_go: MutableSet[str] = set(seed_devices)
+        self.raw_neighbour_to_neighbour: Dict[str, str] = {}
+        self.l2_neighbour_replace_regex: List[Tuple[str, str]] = l2_neighbour_replace_regex
+        self.hosts_done: MutableSet[str] = set()
+        self.display_l2_neighbours: bool = display_l2_neighbours
 
     def create(self):
-        if not (devices_to_go := list(set(self.seed_devices))):  # remove duplicates
-            LOGGER.error('No seed devices configured!')
+        if not self.hosts_to_go:
+            LOGGER.error(f'{self.topology} no seed devices configured!')
             return
 
-        devices_done = []
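+        # walk the topology starting from the seed devices; neighbours found on the way are queued in hosts_to_go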
+        while self.hosts_to_go:
+            host = self.hosts_to_go.pop()
+            self.hosts_done.add(host)
 
-        while devices_to_go:
-            device = devices_to_go[0]
-
-            if device in self.l2_host_map.keys():
-                try:
-                    devices_to_go.remove(device)
-                except ValueError:
-                    pass
-                device = self.l2_host_map[device]
-                if device in devices_done:
-                    continue
-
-            topo_data = self.host_cache.get_data(
-                host=device, item=CacheItems.inventory, path=self.path_in_inventory
+            topo_data: Sequence[Mapping[str, str]] = self.host_cache.get_data(
+                host=host, item=CacheItems.inventory, path=self.path_in_inventory
             )
             if topo_data:
-                self.device_from_inventory(
-                    host=device,
+                self.host_from_inventory(
+                    host=host,
                     inv_data=topo_data,
                 )
 
-                for _entry in self.nv_objects.host_list:
-                    if _entry not in devices_done:
-                        devices_to_go.append(_entry)
-
-            devices_to_go = list(set(devices_to_go))
-            devices_done.append(device)
-            devices_to_go.remove(device)
-            LOGGER.info(msg=f'Device done: {device}, source: {self.label}')
+            LOGGER.info(msg=f'{self.topology} host done: {host}')
 
-    def device_from_inventory(
+    def host_from_inventory(
             self,
             host: str,
-            inv_data,
+            inv_data: Sequence[Mapping[str, str]],
     ):
         for topo_neighbour in inv_data:
             # check if required data are not empty
-            if not (neighbour := topo_neighbour.get(self.inv_columns.neighbour)):
-                LOGGER.warning(f'incomplete data, neighbour missing {topo_neighbour}')
+            if not (raw_neighbour := topo_neighbour.get(L2InvColumns.NEIGHBOUR)):
+                LOGGER.warning(f'{self.topology} incomplete data: neighbour missing {topo_neighbour}')
                 continue
-            if not (raw_local_port := topo_neighbour.get(self.inv_columns.local_port)):
-                LOGGER.warning(f'incomplete data, local port missing {topo_neighbour}')
+            if not (raw_local_port := topo_neighbour.get(L2InvColumns.LOCALPORT)):
+                LOGGER.warning(f'{self.topology} incomplete data: local port missing {topo_neighbour}')
                 continue
-            if not (raw_neighbour_port := topo_neighbour.get(self.inv_columns.neighbour_port)):
-                LOGGER.warning(f'incomplete data, neighbour port missing {topo_neighbour}')
+            if not (raw_neighbour_port := topo_neighbour.get(L2InvColumns.NEIGHBOURPORT)):
+                LOGGER.warning(f'{self.topology} incomplete data: neighbour port missing {topo_neighbour}')
                 continue
 
-            if not (neighbour:= self.get_host_from_neighbour(neighbour)):
+            if not (neighbour := self.adjust_raw_neighbour(raw_neighbour)):
                 continue
 
+            if neighbour_host := self.host_cache.get_host_from_neighbour(neighbour):
+                if neighbour_host not in self.hosts_done:
+                    self.hosts_to_go.add(neighbour_host)
+            else:
+                neighbour_host = neighbour
+
             # getting/checking interfaces
-            local_port = get_service_by_interface(host, raw_local_port, self.host_cache)
+            local_port = self.get_service_by_interface(host, raw_local_port)
             if not local_port:
                 local_port = raw_local_port
-                LOGGER.warning(msg=f'service not found: host: {host}, raw_local_port: {raw_local_port}')
+                LOGGER.warning(msg=f'{self.topology} service not found for local_port: {host}, {raw_local_port}')
             elif local_port != raw_local_port:
                 # local_port = raw_local_port  # don't reset local_port
                 LOGGER.info(
-                    msg=f'host: {host}, raw_local_port: {raw_local_port} -> local_port: {local_port}'
+                    msg=f'{self.topology} map raw_local_port -> local_port: {host}, {raw_local_port} -> {local_port}'
                 )
 
-            neighbour_port = get_service_by_interface(neighbour, raw_neighbour_port, self.host_cache)
+            neighbour_port = self.get_service_by_interface(neighbour_host, raw_neighbour_port)
             if not neighbour_port:
                 neighbour_port = raw_neighbour_port
                 LOGGER.warning(
-                    msg=f'service not found: neighbour: {neighbour}, '
-                        f'raw_neighbour_port: {raw_neighbour_port}'
+                    msg=f'{self.topology} service not found for neighbour_port: {neighbour_host}, {raw_neighbour_port}'
                 )
             elif neighbour_port != raw_neighbour_port:
                 # neighbour_port = raw_neighbour_port  # don't reset neighbour_port
                 LOGGER.info(
-                    msg=f'neighbour: {neighbour}, raw_neighbour_port {raw_neighbour_port} '
-                        f'-> neighbour_port {neighbour_port}'
+                    msg=f'{self.topology} map raw_neighbour_port -> neighbour_port: {neighbour_host}, {raw_neighbour_port} '
+                        f'-> {neighbour_port}'
                 )
 
             metadata = {
@@ -561,83 +708,79 @@ class TopologyL2(Topology):
                 'native_vlan': topo_neighbour.get('native_vlan'),
             }
 
-            self.nv_objects.add_host(host=host, host_cache=self.host_cache)
-            self.nv_objects.add_host(host=neighbour, host_cache=self.host_cache)
+            self.nv_objects.add_host(
+                host=host,
+                emblem=self.emblems.host_node,
+            )
+            self.nv_objects.add_host(
+                host=neighbour_host,
+                name=raw_neighbour if self.display_l2_neighbours else None,
+                emblem=self.emblems.host_node,
+            )
             self.nv_objects.add_interface(
                 host=str(host),
                 service=str(local_port),
-                host_cache=self.host_cache,
                 metadata=metadata,
                 name=str(raw_local_port),
-                item=str(local_port)
+                item=str(local_port),
+                emblem=self.emblems.service_node,
             )
             self.nv_objects.add_interface(
-                host=str(neighbour),
+                host=str(neighbour_host),
                 service=str(neighbour_port),
-                host_cache=self.host_cache,
                 name=str(raw_neighbour_port),
-                item=str(neighbour_port)
+                item=str(neighbour_port),
+                emblem=self.emblems.service_node,
             )
             self.nv_connections.add_connection(
                 left=str(host),
                 right=f'{local_port}@{host}',
             )
             self.nv_connections.add_connection(
-                left=str(neighbour),
-                right=f'{neighbour_port}@{neighbour}',
+                left=str(neighbour_host),
+                right=f'{neighbour_port}@{neighbour_host}',
             )
             self.nv_connections.add_connection(
                 left=f'{local_port}@{host}',
-                right=f'{neighbour_port}@{neighbour}',
+                right=f'{neighbour_port}@{neighbour_host}',
             )
 
-    def get_host_from_neighbour(self, neighbour: str) -> str | None:
+    def adjust_raw_neighbour(self, raw_neighbour: str) -> str | None:
+        """
+        Checks if neighbour should be dropped or adjusted via regex.
+        The next request for the same neighbour will be served from cache.
+        Args:
+            raw_neighbour: the neighbour name to check/adjust
+
+        Returns:
+            the adjusted neighbour name, or None if the neighbour is dropped
+        """
         try:
-            return self.neighbour_to_host[neighbour]
+            return self.neighbour_to_host[raw_neighbour]
         except KeyError:
             pass
 
-        if neighbour in self.l2_drop_hosts:
-            LOGGER.info(msg=f'drop neighbour: {neighbour}')
-            self.neighbour_to_host[neighbour] = None
+        if raw_neighbour in self.l2_drop_neighbours:
+            LOGGER.info(msg=f'{self.topology} drop in {TomlSections.L2_DROP_NEIGHBOURS}: {raw_neighbour}')
+            self.neighbour_to_host[raw_neighbour] = None
             return None
 
+        adjusted_neighbour = raw_neighbour
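+        # apply the configured replace regexes in order; an empty result drops the neighbour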
         if self.l2_neighbour_replace_regex:
             for re_str, replace_str in self.l2_neighbour_replace_regex:
-                re_neighbour = re_sub(re_str, replace_str, neighbour)
-                if re_neighbour != neighbour:
-                    LOGGER.info(f'regex changed Neighbor |{neighbour}| to |{re_neighbour}|')
-                    neighbour = re_neighbour
-                if not neighbour:
-                    LOGGER.info(f'Neighbour removed by regex (|{neighbour}|, |{re_str}|, |{replace_str}|)')
-                    break
-            if not neighbour:
-                self.neighbour_to_host[neighbour] = None
-                return None
-
-        if self.remove_domain:
-            neighbour = neighbour.split('.')[0]
-
-        # drop neighbour after domain split
-        if neighbour in self.l2_drop_hosts:
-            LOGGER.info(msg=f'drop neighbour: {neighbour}')
-            self.neighbour_to_host[neighbour] = None
-            return None
+                re_neighbour = re_sub(re_str, replace_str, adjusted_neighbour)
+                if not re_neighbour:
+                    LOGGER.info(f'{self.topology} removed by {TomlSections.L2_NEIGHBOUR_REPLACE_REGEX}: (|{adjusted_neighbour}|, |{re_str}|, |{replace_str}|)')
+                    self.neighbour_to_host[raw_neighbour] = None
+                    return None
 
-        if self.case == 'UPPER':
-            neighbour = neighbour.upper()
-            LOGGER.debug(f'Changed neighbour to upper case: {neighbour}')
-        elif self.case == 'LOWER':
-            neighbour = neighbour.lower()
-            LOGGER.debug(f'Changed neighbour to lower case: {neighbour}')
+                if re_neighbour != adjusted_neighbour:
+                    LOGGER.info(f'{self.topology} changed by {TomlSections.L2_NEIGHBOUR_REPLACE_REGEX} |{adjusted_neighbour}| to |{re_neighbour}|')
+                    adjusted_neighbour = re_neighbour
 
-        if self.prefix:
-            neighbour = f'{self.prefix}{neighbour}'
-        # rewrite neighbour if inventory neighbour and checkmk host don't match
-        if neighbour in self.l2_host_map.keys():
-            neighbour = self.l2_host_map[neighbour]
+        self.neighbour_to_host[raw_neighbour] = adjusted_neighbour
+        return adjusted_neighbour
 
-        return neighbour
 
 class TopologyL3(Topology):
     def __init__(
@@ -648,172 +791,209 @@ class TopologyL3(Topology):
             ignore_ips: Sequence[ip_network],
             ignore_wildcard: Sequence[Wildcard],
             include_hosts: bool,
+            include_loopback: bool,
             replace: Mapping[str, str],
+            skip_cidr_0: bool,
+            skip_cidr_32_128: bool,
             skip_if: bool,
             skip_ip: bool,
+            skip_public: bool,
             summarize: Sequence[ip_network],
             version: int
     ):
         super().__init__(
             emblems=emblems,
             host_cache=host_cache,
+            topology=f'[L3 IPv{version}]'
         )
         self.ignore_hosts: Sequence[str] = ignore_hosts
         self.ignore_ips: Sequence[ip_network] = ignore_ips
         self.ignore_wildcard: Sequence[Wildcard] = ignore_wildcard
         self.include_hosts: bool = include_hosts
         self.replace: Mapping[str, str] = replace
+        self.skip_cidr_0: bool = skip_cidr_0
+        self.skip_cidr_32_128: bool = skip_cidr_32_128
         self.skip_if: bool = skip_if
         self.skip_ip: bool = skip_ip
+        self.skip_public: bool = skip_public
+        self.show_loopback: bool = include_loopback
         self.summarize: Sequence[ip_network] = summarize
         self.version = version
 
     def create(self):
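+        # pick the hosts to work on via their L3 router (and optionally host) labels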
         match self.version:
             case IPVersion.IPv4:
-                host_list: Sequence[str] = self.host_cache.get_hosts_by_label(HOST_LABEL_L3V4_ROUTER)
+                host_list: Sequence[str] = self.host_cache.get_hosts_by_label(HostLabels.L3V4_ROUTER)
 
                 if self.include_hosts:
-                    host_list += self.host_cache.get_hosts_by_label(HOST_LABEL_L3V4_HOSTS)
+                    host_list += self.host_cache.get_hosts_by_label(HostLabels.L3V4_HOSTS)
 
             case IPVersion.IPv6:
-                host_list: Sequence[str] = self.host_cache.get_hosts_by_label(HOST_LABEL_L3V6_ROUTER)
+                host_list: Sequence[str] = self.host_cache.get_hosts_by_label(HostLabels.L3V6_ROUTER)
 
                 if self.include_hosts:
-                    host_list += self.host_cache.get_hosts_by_label(HOST_LABEL_L3V6_HOSTS)
+                    host_list += self.host_cache.get_hosts_by_label(HostLabels.L3V6_HOSTS)
 
             case _:
                 host_list = []
 
-        LOGGER.debug(f'host list: {host_list}')
+        LOGGER.debug(f'{self.topology} hosts to work on: {host_list}')
         if not host_list:
-            LOGGER.warning(
-                msg='No (routing capable) host found. Check if "inv_ip_addresses.mkp" '
+            LOGGER.error(
+                msg=f'{self.topology} no (routing capable) host found. Check if "inv_ip_addresses.mkp" is '
                     'added/enabled and inventory and host label discovery has run.'
             )
             return
 
-        LOGGER.debug(f'L3 ignore hosts: {self.ignore_hosts}')
+        LOGGER.debug(f'{self.topology} ignore hosts: {self.ignore_hosts}')
         for raw_host in host_list:
             host = raw_host
             if host in self.ignore_hosts:
-                LOGGER.info(f'L3 host {host} ignored')
+                LOGGER.info(f'{self.topology} host ignored in {TomlSections.L3_IGNORE_HOSTS}: {host}')
                 continue
             if not (inv_ip_addresses := self.host_cache.get_data(
-                    host=host, item=CacheItems.inventory, path=PATH_L3)
+                    host=host, item=CacheItems.inventory, path=InvPaths.L3)
             ):
-                LOGGER.warning(f'No IP address inventory found for host: {host}')
+                LOGGER.warning(f'{self.topology} no IP address inventory found for host: {host}')
                 continue
 
-            self.nv_objects.add_host(host=host, host_cache=self.host_cache)
+            self.nv_objects.add_host(host=host)
             for inv_ip_address in inv_ip_addresses:
                 emblem = self.emblems.ip_network
                 try:
-                    ip_info = IpInfo(
-                        address=inv_ip_address['address'],
-                        device=inv_ip_address['device'],
-                        broadcast=inv_ip_address['broadcast'],
-                        cidr=inv_ip_address['cidr'],
-                        netmask=inv_ip_address['netmask'],
-                        network=inv_ip_address['network'],
-                        type=inv_ip_address['type'],
-                        scope_id=inv_ip_address.get('scope_id'),  # this is an optional field
+                    device = inv_ip_address[L3InvColumns.DEVICE]
+                    interface_address: ip_interface = ip_interface(
+                        f'{inv_ip_address[L3InvColumns.ADDRESS]}/{inv_ip_address[L3InvColumns.CIDR]}'
                     )
-                except KeyError:
-                    LOGGER.warning(f'Drop IP address data for host: {host}, data: {inv_ip_address}')
+                except KeyError as e:
+                    LOGGER.warning(f'{self.topology} drop IP with missing data: {host}, {inv_ip_address}, {e}.')
+                    continue
+
+                # skip entries without prefix-length/netmask
+                if self.skip_cidr_0 and interface_address.network.prefixlen == 0:
+                    LOGGER.info(f'{self.topology} drop IP with CIDR "0": {host}, {interface_address.compressed}')
                     continue
 
-                interface_address: ip_interface = ip_interface(f'{ip_info.address}/{ip_info.cidr}')
                 if interface_address.version != self.version:
                     LOGGER.info(
-                        f'host: {host} dropped non IPv{self.version} address: {ip_info.address},'
-                        f' type: {ip_info.type}'
+                        f'{self.topology} drop non-IPv{self.version} address: {host}, {interface_address.compressed}'
                     )
                     continue
 
-                if interface_address.is_loopback:
-                    LOGGER.info(f'host: {host} dropped loopback address: {ip_info.address}')
+                if not self.show_loopback and interface_address.is_loopback:
+                    LOGGER.info(f'{self.topology} drop IP is loopback: {host}, {interface_address.ip.compressed}')
                     continue
 
-                if interface_address.is_link_local:
-                    LOGGER.info(f'host: {host} dropped link-local address: {ip_info.address}')
+                if self.skip_public and not interface_address.is_private:
+                    LOGGER.info(f'{self.topology} drop IP is public: {host}, {interface_address.ip.compressed}')
                     continue
 
-                # if interface_address.network.prefixlen == 32 or interface_address.network.prefixlen == 128: # drop host addresses
-                #     LOGGER.info(
-                #         f'host: {host} dropped host address: {ip_info.address}/{ip_info.cidr}'
-                #     )
-                #     continue
+                if interface_address.is_link_local and interface_address.version == 6:
+                    LOGGER.info(f'{self.topology} drop IP is link-local: {host}, {interface_address.ip.compressed}')
+                    continue
 
-                if is_ignore_ip(ip_info.address, self.ignore_ips):
-                    LOGGER.info(f'host: {host} dropped ignore address: {ip_info.address}')
+                # drop host addresses /32 or /128 -> one IP in the network
+                if self.skip_cidr_32_128 and interface_address.network.num_addresses == 1:
+                    LOGGER.info(f'{self.topology} drop IP with CIDR /32 or /128: {host}, {interface_address.compressed}')
                     continue
 
-                if is_ignore_wildcard(ip_info.address, self.ignore_wildcard):
-                    LOGGER.info(f'host: {host} dropped wildcard address: {ip_info.address}')
+                if self.is_ignore_ip(interface_address.ip.compressed):
+                    LOGGER.info(f'{self.topology} drop IP in {TomlSections.L3_IGNORE_IP}: {host}, {interface_address.compressed}')
                     continue
 
-                if network := get_network_summary(
-                        raw_ip_address=ip_info.address,
-                        summarize=self.summarize,
-                ):
+                if self.is_ignore_wildcard(interface_address.ip.compressed):
+                    LOGGER.info(f'{self.topology} drop IP in {TomlSections.L3V4_IGNORE_WILDCARD}: {host}, {interface_address.compressed}')
+                    continue
+
+                if network := self.get_network_summary(raw_ip_address=interface_address.ip.compressed):
                     emblem = self.emblems.l3_summarize
                     LOGGER.info(
-                        f'Network summarized: {ip_info.network}/{ip_info.cidr} -> {network}'
+                        f'{self.topology} Summarized IP in {TomlSections.L3_SUMMARIZE}: {interface_address.compressed} -> {network}'
                     )
                 else:
-                    network = f'{ip_info.network}/{ip_info.cidr}'
+                    network = f'{interface_address.network.compressed}'
 
                 if network in self.replace.keys():
-                    LOGGER.info(f'Replaced network {network} with {self.replace[network]}')
+                    LOGGER.info(f'{self.topology} Replaced network in {TomlSections.L3_REPLACE}: {network} -> {self.replace[network]}')
                     network = self.replace[network]
                     emblem = self.emblems.l3_replace
 
                 self.nv_objects.add_ip_network(network=network, emblem=emblem)
 
+                item = None
+                if not self.skip_if:
+                    item = self.get_service_by_interface(host=host, interface=device)
+
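+                # skip_if/skip_ip select how much detail is drawn between the host and the network node:
+                #   skip_if and skip_ip -> host -- network
+                #   skip_if only        -> host -- ip address -- network
+                #   skip_ip only        -> host -- interface -- network
+                #   neither             -> host -- interface -- ip address -- network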
                 if self.skip_if is True and self.skip_ip is True:
                     self.nv_connections.add_connection(left=host, right=network)
                 elif self.skip_if is True and self.skip_ip is False:
                     self.nv_objects.add_ip_address(
                         host=host,
                         interface=None,
-                        raw_ip_address=ip_info.address,
+                        raw_ip_address=interface_address.ip.compressed,
                         emblem=self.emblems.ip_address,
                     )
                     self.nv_objects.add_tooltip_quickinfo(
-                        '{ip_info.address}@{host}', 'Interface', ip_info.device
+                        f'{interface_address.ip.compressed}@{host}', 'Interface', device
                     )
-                    self.nv_connections.add_connection(left=f'{host}', right=f'{ip_info.address}@{host}')
-                    self.nv_connections.add_connection(left=network, right=f'{ip_info.address}@{host}')
+                    self.nv_connections.add_connection(left=f'{host}', right=f'{interface_address.ip.compressed}@{host}')
+                    self.nv_connections.add_connection(left=network, right=f'{interface_address.ip.compressed}@{host}')
                 elif self.skip_if is False and self.skip_ip is True:
-                    self.nv_objects.add_interface(
-                        host=host, service=ip_info.device, host_cache=self.host_cache
-                    )
+                    self.nv_objects.add_interface(host=host, service=device, item=item)
                     self.nv_objects.add_tooltip_quickinfo(
-                        f'{ip_info.device}@{host}', 'IP-address', ip_info.address
+                        f'{device}@{host}', 'IP-address', interface_address.ip.compressed
                     )
-                    self.nv_connections.add_connection(left=f'{host}', right=f'{ip_info.device}@{host}')
-                    self.nv_connections.add_connection(left=network, right=f'{ip_info.device}@{host}')
+                    self.nv_connections.add_connection(left=f'{host}', right=f'{device}@{host}')
+                    self.nv_connections.add_connection(left=network, right=f'{device}@{host}')
                 else:
                     self.nv_objects.add_ip_address(
                         host=host,
-                        interface=ip_info.device,
-                        raw_ip_address=ip_info.address,
+                        interface=device,
+                        raw_ip_address=interface_address.ip.compressed,
                         emblem=self.emblems.ip_address,
                     )
-                    self.nv_objects.add_interface(
-                        host=host, service=ip_info.device, host_cache=self.host_cache,
-                    )
+                    self.nv_objects.add_interface(host=host, service=device, item=item)
                     self.nv_connections.add_connection(
-                        left=host, right=f'{ip_info.device}@{host}')
+                        left=host, right=f'{device}@{host}')
                     self.nv_connections.add_connection(
-                        left=f'{ip_info.device}@{host}',
-                        right=f'{ip_info.address}@{ip_info.device}@{host}',
+                        left=f'{device}@{host}',
+                        right=f'{interface_address.ip.compressed}@{device}@{host}',
                     )
                     self.nv_connections.add_connection(
-                        left=network, right=f'{ip_info.address}@{ip_info.device}@{host}',
+                        left=network, right=f'{interface_address.ip.compressed}@{device}@{host}',
                     )
 
+    def get_network_summary(self, raw_ip_address: str) -> str | None:
+        for network in self.summarize:
+            try:
+                if ip_network(raw_ip_address).subnet_of(network):
+                    LOGGER.debug(f'{self.topology} IP address {raw_ip_address} is in subnet -> ({network})')
+                    return network.compressed
+            except TypeError:
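+                # subnet_of() raises TypeError on an IP version mismatch (IPv4 address vs. IPv6 network or vice versa)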
+                pass
+        return None
+
+    def is_ignore_ip(self, raw_ip_address: str) -> bool:
+        for ip in self.ignore_ips:
+            try:
+                if ip_network(raw_ip_address).subnet_of(ip):
+                    LOGGER.debug(f'{self.topology} IP address {raw_ip_address} is in ignore list -> ({ip})')
+                    return True
+            except TypeError:
+                continue
+        return False
+
+    def is_ignore_wildcard(self, raw_ip_address: str) -> bool:
+        int_ip_address = int(ip_address(raw_ip_address))
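+        # an address is ignored when ANDing it with the wildcard mask reproduces the stored bit pattern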
+        for wildcard in self.ignore_wildcard:
+            if int_ip_address & wildcard.int_wildcard == wildcard.bit_pattern:
+                LOGGER.debug(
+                    f'{self.topology} IP address {raw_ip_address} matches ignore wildcard '
+                    f'list ({wildcard.ip_address}/{wildcard.wildcard})'
+                )
+                return True
+        return False
+
 
 def map_speed_to_thickness(speed_to_map: int, speed_map: Sequence[Thickness]) -> int:
     thickness: int = 1  # use in case of empty MAP_SPEED_TO_THICKNESS
@@ -823,173 +1003,6 @@ def map_speed_to_thickness(speed_to_map: int, speed_map: Sequence[Thickness]) ->
     return thickness
 
 
-def get_operational_interface_data(
-    host: str,
-    item: str,
-    host_cache: HostCache,
-
-) -> Dict[str, str | int] | None:
-    unit_to_bits_per_second = {
-        'Bit/s': 1,
-        'kBit/s': 1000,
-        'Kbps': 1000,
-        'MBit/s': 1000000,
-        'Mbps': 1000000,
-        'GBit/s': 1000000000,
-        'Gbps': 1000000000,
-    }
-
-    # get dict of interfaces with the item as key
-    interface_data: Dict[str, any] | None = host_cache.get_data(
-        host=host, item=CacheItems.interfaces, path=CACHE_INTERFACES_DATA
-    )
-    try:
-        raw_opdata = interface_data[item]['long_plugin_output']
-    except (KeyError, TypeError):
-        return None
-
-    if raw_opdata:
-        opdata: Dict[str, str | int] = {}
-        for _entry in raw_opdata:
-            try:
-                key, value = _entry.split(': ', 1)  # split only at the first colon
-            except ValueError:
-                continue
-            value = value.strip(' ')
-            match key:
-                case 'MAC':
-                    if len(value) == 17:  # valid MAC: 6C:DD:30:DD:51:8B'
-                        opdata['mac'] = value
-                case 'Speed':
-                    try:           # *_ -> ignore rest of string, i.e: (expected: 1 GBit/s)WARN
-                        speed, unit, *_ = value.split(' ')
-                    except ValueError:
-                        pass
-                    else:
-                        opdata['op_sped_str'] = f'{speed} {unit}'
-                        opdata['op_speed_int'] = int(float(speed) * unit_to_bits_per_second[unit])
-
-        return opdata
-    return None
-
-
-def get_service_by_interface(host: str, interface: str, host_cache: HostCache) -> str | None:
-    """
-    Returns:
-        Tuple of interface item
-    """
-
-    _alternate_if_name = {
-        'ethernet': 'eth',
-        # 'fastethernet': 'Fa',
-        # 'gigabitethernet': 'gi',
-        # 'tengigabitethernet': 'te',
-        # 'fortygigabitethernet': 'Fo',
-        # 'hundredgigabitethernet': 'Hu',
-        # 'management': 'Ma',
-    }
-
-    def _get_short_if_name(interface_: str) -> str:
-        """
-        returns short interface name from long interface name
-        interface: is the long interface name
-        :type interface_: str
-        """
-        if not interface_:
-            return interface_
-        for interface_prefix in _alternate_if_name.keys():
-            if interface_.lower().startswith(interface_prefix.lower()):
-                interface_short = _alternate_if_name[interface_prefix]
-                return interface_.lower().replace(interface_prefix.lower(), interface_short, 1)
-        return interface_
-
-    # try to find the item for an interface
-    def _match_entry_with_item(_entry: Mapping[str, str], services: Sequence[str]) -> str | None:
-        values = [
-            _entry.get('name'.strip()),
-            _entry.get('description'.strip()),
-            _entry.get('alias').strip()
-        ]
-        for value in values:
-            if value in services:
-                return value
-
-        index = str(_entry.get('index'))
-
-        # try alias+index
-        alias_index = str(_entry.get('alias')).strip() + ' ' + index
-        if alias_index in services:
-            LOGGER.info(f'match found by alias-index|{_entry}|{alias_index}|')
-            return alias_index
-
-        # try descrption+index
-        description_index = str(_entry.get('description')).strip() + ' ' + index
-        if description_index in services:
-            LOGGER.info(f'match found by alias-index|{_entry}|{description_index}|')
-            return description_index
-
-        # for index try with padding
-        pad_services: List[str] = [x for x in services if x.isdigit()]
-        if pad_services:
-            max_pad = len(max(pad_services, key=len)) + 1
-            min_pad = len(min(pad_services, key=len))
-            for i in range(min_pad, max_pad):
-                index_padded = f'{index:0>{i}}'
-                if index_padded in pad_services:
-                    return index_padded
-                # still not found try values + index
-                for value in values:
-                    if f'{value} {index_padded}' in services:
-                        return f'{value} {index_padded}'
-
-        LOGGER.warning(f'no match found |{_entry}|{services}|')
-        return None
-
-    # empty host/neighbour should never happen here
-    if not host:
-        LOGGER.warning(f'no host name |{host}|')
-        return None
-
-    # get dict of interfaces with the item as key
-    interface_data: Mapping[str, Mapping[str, object]] = host_cache.get_data(
-        host=host, item=CacheItems.interfaces, path=CACHE_INTERFACES_DATA
-    )
-    if not interface_data:
-        LOGGER.warning(f'got no interface data for: {host}')
-        return None
-
-    # try to find the interface in the host interface inventory list
-    inventory = host_cache.get_data(
-        host=host, item=CacheItems.inventory, path=PATH_INTERFACES
-    )
-    if not inventory:
-        LOGGER.warning(f'no interface inventory for host: {host}')
-        return None
-
-    interface_items: Sequence[str] = list(interface_data.keys())
-
-    # the easy case
-    if interface in interface_items:
-        return interface
-
-    for _entry in inventory:
-        if interface in [
-            _entry.get('name'),
-            _entry.get('description'),
-            _entry.get('alias'),
-            str(_entry.get('index')),
-            _entry.get('phys_address'),
-        ]:
-            return _match_entry_with_item(_entry, interface_items)
-        elif f'1:{interface}' == _entry.get('name'):  # Extreme non stack:
-            return _match_entry_with_item(_entry, interface_items)
-        elif _entry.get('name') is not None and _get_short_if_name(
-                _entry.get('name')) == str(interface).lower():  # Cisco NXOS
-            return _match_entry_with_item(_entry, interface_items)
-
-    LOGGER.warning(msg=f'Device: {host}: service for interface |{interface}| not found')
-
-
 def add_tooltip_html(
         metadata: Dict,
         type_: str,
@@ -1004,61 +1017,61 @@ def add_tooltip_html(
     if metadata['tooltip'].get('html') is None:
         css_style = (
             '<style>'
-                'div.mismatch {'
-                    'background-color: rgba(70,70,70,.1);'
-                    'border-radius: 5px;'
-                    'padding: 5px;'
-                '}'
-                'p.mismatch {'
-                    'text-align: center;'
-                '}'
-                'table.mismatch {'
-                    'text-align: left;'
-                    'border-radius: 5px;'
-                '}'
-                'tr.mismatch {'
-                '}'
-                'tr.mismatch:nth-child(even) {'
-                    'background-color: rgba(100,100,100,.3);'
-
-                '}'
-                'tr.mismatch:nth-child(odd) {'
-                    'background-color: rgba(100,100,100,.2);'
-
-                '}'
-                'th.mismatch {'
-                    'background-color: rgba(120,120,120,.3);'
-                    'padding: 5px;'   # inside the element
-                    # 'margin: 5px;'  # outside of the lement
-                '}'
-                'td.mismatch {'
-                    'padding: 5px;'
-                '}'
+            'div.mismatch {'
+            'background-color: rgba(70,70,70,.1);'
+            'border-radius: 5px;'
+            'padding: 5px;'
+            '}'
+            'p.mismatch {'
+            'text-align: center;'
+            '}'
+            'table.mismatch {'
+            'text-align: left;'
+            'border-radius: 5px;'
+            '}'
+            'tr.mismatch {'
+            '}'
+            'tr.mismatch:nth-child(even) {'
+            'background-color: rgba(100,100,100,.3);'
+
+            '}'
+            'tr.mismatch:nth-child(odd) {'
+            'background-color: rgba(100,100,100,.2);'
+
+            '}'
+            'th.mismatch {'
+            'background-color: rgba(120,120,120,.3);'
+            'padding: 5px;'  # inside the element
+            # 'margin: 5px;'  # outside of the element
+            '}'
+            'td.mismatch {'
+            'padding: 5px;'
+            '}'
             '</style>'
         )
         header = (
             '<thead>'
-                '<tr>'
-                    f'<th {css_class}>Node</th>'
-                    f'<th {css_class}>{left}</th>'
-                    f'<th {css_class}>{right}</th>'
-                '</tr>'
+            '<tr>'
+            f'<th {css_class}>Node</th>'
+            f'<th {css_class}>{left}</th>'
+            f'<th {css_class}>{right}</th>'
+            '</tr>'
             '</thead>'
         )
         metadata['tooltip']['html'] = (
             f'{css_style}'
             f'<div {css_class}>'
-                f'<p {css_class}>WARNING: Mismatch found!</p>'
-                f'<table {css_class}>'
-                    f'{header}'
-                    '<tbody>'
+            f'<p {css_class}>WARNING: Mismatch found!</p>'
+            f'<table {css_class}>'
+            f'{header}'
+            '<tbody>'
         )
 
     metadata['tooltip']['html'] += (
         f'<tr {css_class}>'
-            f'<td {css_class}>{type_}</td>'
-            f'<td {css_class}>{left_value}</td>'
-            f'<td {css_class}>{right_value}</td>'
+        f'<td {css_class}>{type_}</td>'
+        f'<td {css_class}>{left_value}</td>'
+        f'<td {css_class}>{right_value}</td>'
         '</tr>'
     )
 
@@ -1066,45 +1079,6 @@ def add_tooltip_html(
 
 
 def close_tooltip_html(metadata: Dict) -> Dict:
-    metadata['tooltip']['html'] += '</table></div>'
+    if metadata.get('tooltip', {}).get('html'):
+        metadata['tooltip']['html'] += '</table></div>'
     return metadata
-
-
-def get_network_summary(raw_ip_address: str, summarize: Sequence[ip_network]) -> str | None:
-    for network in summarize:
-        try:
-            if ip_network(raw_ip_address).subnet_of(network):
-                return network.compressed
-        except TypeError:
-            pass
-    return None
-
-
-def is_ignore_ip(raw_ip_address: str, ignore_ips: Sequence[ip_network]) -> bool:
-    for ip in ignore_ips:
-        try:
-            if ip_network(raw_ip_address).subnet_of(ip):
-                LOGGER.info(f'IP address {raw_ip_address} is in ignore list -> ({ip})')
-                return True
-        except TypeError:
-            continue
-    return False
-
-
-def is_ignore_wildcard(raw_ip_address: str, ignore_wildcard: Sequence[Wildcard]) -> bool:
-    int_ip_address = int(ip_address(raw_ip_address))
-    for wildcard in ignore_wildcard:
-        if int_ip_address & wildcard.int_wildcard == wildcard.bit_pattern:
-            LOGGER.info(
-                f'IP address {raw_ip_address} matches ignore wildcard '
-                f'list ({wildcard.ip_address}/{wildcard.wildcard})'
-            )
-            return True
-    return False
-
-
-def get_list_of_devices(data: Mapping) -> List[str]:
-    devices: List[str] = []
-    for connection in data.values():
-        devices.append(connection[0])
-    return list(set(devices))
diff --git a/source/bin/nvdct/lib/utils.py b/source/bin/nvdct/lib/utils.py
index c5049225044bfd0d16c151777508ffcec9ff324f..9d1514931eba64b82f260820bb880e328fe33a8b 100755
--- a/source/bin/nvdct/lib/utils.py
+++ b/source/bin/nvdct/lib/utils.py
@@ -8,45 +8,34 @@
 # File  : nvdct/lib/utils.py
 
 from ast import literal_eval
-from collections.abc import Mapping, Sequence
-from dataclasses import dataclass
-from json import dumps
+from collections.abc import Mapping, MutableSequence, Sequence
+from json import dumps, loads
 from logging import disable as log_off, Formatter, getLogger, StreamHandler
 from logging.handlers import RotatingFileHandler
 from pathlib import Path
-from re import match as re_match
+from re import match as re_match, findall as re_findall, sub as re_sub
 from socket import socket, AF_UNIX, AF_INET, SOCK_STREAM, SHUT_WR
 from sys import stdout, exit as sys_exit
 from time import time as now_time
 from tomllib import loads as toml_loads, TOMLDecodeError
-from typing import List, Dict, TextIO
+from typing import Dict, List, TextIO
 
 from lib.constants import (
+    Backends,
     CMK_SITE_CONF,
+    Case,
     DATAPATH,
+    EmblemValues,
+    EmblemNames,
     ExitCodes,
     LOGGER,
+    LogLevels,
     OMD_ROOT,
+    TomlSections,
+    TomlSettings,
 )
 
 
-@dataclass(frozen=True)
-class IpInfo:
-    address: str
-    device: str
-    broadcast: str
-    cidr: int
-    netmask: str
-    network: str
-    type: str
-    scope_id: str | None
-
-@dataclass(frozen=True)
-class InventoryColumns:
-    neighbour: str
-    local_port: str
-    neighbour_port: str
-
 def get_local_cmk_version() -> str:
     return Path(f'{OMD_ROOT}/version').readlink().name
 
@@ -64,7 +53,7 @@ def get_data_form_live_status(query: str) -> Dict | List | None:
     sock.connect(address)
     sock.sendall(query.encode())
     sock.shutdown(SHUT_WR)
-    chunks: List = []
+    chunks: MutableSequence = []
     while len(chunks) == 0 or chunks[-1] != "":
         chunks.append(sock.recv(4096).decode())
     sock.close()
@@ -112,13 +101,14 @@ def remove_old_data(keep: int, min_age: int, raw_path: str, protected: Sequence[
     path: Path = Path(raw_path)
     default_topo = path.joinpath('default')
     directories = [str(directory) for directory in list(path.iterdir())]
-    # keep default top
+
+    # keep default topo
     if str(default_topo) in directories:
         directories.remove(str(default_topo))
         keep -= 1
         if default_topo.is_symlink():
             try:
-                directories.remove(str(default_topo.readlink()))
+                directories.remove(str(path.joinpath(str(default_topo.readlink()))))
             except ValueError:
                 pass
 
@@ -140,80 +130,56 @@ def remove_old_data(keep: int, min_age: int, raw_path: str, protected: Sequence[
         if Path(directory).is_dir():
             topo_by_age[Path(directory).stat().st_ctime] = directory
 
-    topo_age = list(topo_by_age.keys())
-    topo_age.sort()
+    topo_age: List = list(topo_by_age.keys())
+    topo_age.sort(reverse=True)
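+    # newest first, so pop() below always returns the oldest remaining topology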
 
     while len(topo_by_age) > keep:
-        if min_age * 86400 > now_time() - topo_age[0]:
+        entry = topo_age.pop()
+        if min_age * 86400 > now_time() - entry:
             LOGGER.info(
-                msg=f'Topology "{Path(topo_by_age[topo_age[0]]).name}'
+                msg=f'Topology "{Path(topo_by_age[entry]).name}'
                     f'" not older then {min_age} day(s). not deleted.'
             )
             return
-        LOGGER.info(f'delete old topology: {topo_by_age[topo_age[0]]}')
-        rm_tree(Path(topo_by_age[topo_age[0]]))
-        topo_by_age.pop(topo_age[0])
-        topo_age.pop(0)
+        LOGGER.info(f'delete old topology: {topo_by_age[entry]}')
+        rm_tree(Path(topo_by_age[entry]))
+        topo_by_age.pop(entry)
 
 
-def save_data_to_file(data: Mapping, path: str, file: str, make_default: bool) -> None:
+def save_data_to_file(
+        data: Mapping,
+        path: str,
+        file: str,
+        make_default: bool,
+        dont_compare: bool,
+) -> None:
     """
     Save the data as json file.
     Args:
         data: the topology data
-        path: the path were to save the dat
-        file: the file name to save  data in
+        path: the path under DATAPATH
+        file: the file name to save the data in
         make_default: if True, create the symlink "default" with path as target
-
+        dont_compare: if True, data will not be compared with default data
     Returns:
         None
     """
+    if not Path(f'{DATAPATH}/default').exists():
+        make_default = True
+    elif Path(f'{DATAPATH}/default/{file}').exists() and not dont_compare:
+        if is_equal_with_default(data, f'{DATAPATH}/default/{file}'):
+            LOGGER.warning('Data identical to default. Not saved! Use "--dont-compare".')
+            return
 
-    path_file = f'{path}/{file}'
+    path_file = f'{DATAPATH}/{path}/{file}'
     save_file = Path(f'{path_file}')
     save_file.parent.mkdir(exist_ok=True, parents=True)
     save_file.write_text(dumps(data))
 
-    parent_path = Path(f'{path}').parent
-    if not Path(f'{parent_path}/default').exists():
-        make_default = True
-    if make_default:
-        Path(f'{parent_path}/default').unlink(missing_ok=True)
-        Path(f'{parent_path}/default').symlink_to(target=Path(path), target_is_directory=True)
-
 
-# CMK version 2.2.x format
-def save_topology(
-        data: dict,
-        base_directory: str,
-        output_directory: str,
-        dont_compare: bool,
-        make_default: bool,
-        topology_file_name: str,
-) -> None:
-    path = f'{base_directory}/{output_directory}'
-
-    def _save():
-        save_data_to_file(
-            data=data,
-            path=path,
-            file=topology_file_name,
-            make_default=make_default,
-        )
-
-    if dont_compare:
-        _save()
-    else:
-        if not is_equal_with_default(
-                data=data,
-                file=f'{base_directory}/default/{topology_file_name}'
-        ):
-            _save()
-        else:
-            LOGGER.warning(
-                msg='Topology matches default topology, not saved! Use'
-                    '"--dont-compare" to save identical topologies.'
-            )
+    if make_default:
+        Path(f'{DATAPATH}/default').unlink(missing_ok=True)
+        Path(f'{DATAPATH}/default').symlink_to(target=Path(path), target_is_directory=True)
 
 
 def is_mac_address(mac_address: str) -> bool:
@@ -249,8 +215,11 @@ def is_list_of_str_equal(list1: List[str], list2: List[str]) -> bool:
     """
     tmp_list1 = list1.copy()
     tmp_list2 = list2.copy()
-    tmp_list1.sort()
-    tmp_list2.sort()
+    try:
+        tmp_list1.sort()
+        tmp_list2.sort()
+    except TypeError:  # a list of dicts can't be sorted
+        pass
     return tmp_list1 == tmp_list2
 
 
@@ -262,6 +231,7 @@ def is_valid_hostname(host: str) -> bool:
         LOGGER.error(f'Invalid hostname found: {host}')
         return False
 
+
 def is_valid_site_name(site: str) -> bool:
     re_host_pattern = r'^[0-9a-z-A-Z\.\-\_]{1,16}$'
     if re_match(re_host_pattern, site):
@@ -270,6 +240,7 @@ def is_valid_site_name(site: str) -> bool:
         LOGGER.error(f'Invalid site name found: {site}')
         return False
 
+
 def is_valid_customer_name(customer: str) -> bool:
     re_host_pattern = r'^[0-9a-z-A-Z\.\-\_]{1,16}$'
     if re_match(re_host_pattern, customer):
@@ -288,6 +259,7 @@ def is_valid_output_directory(directory: str) -> bool:
         LOGGER.error(f'Invalid output directory name found: {directory}')
         return False
 
+
 def is_valid_log_file(log_file: str) -> bool:
     if not log_file.startswith(f'{OMD_ROOT}/var/log/'):
         LOGGER.error(f'Logg file needs to be under "{OMD_ROOT}/var/log/"! Got {Path(log_file).absolute()}')
@@ -295,32 +267,6 @@ def is_valid_log_file(log_file: str) -> bool:
     return True
 
 
-# not used in cmk 2.3.x format
-def merge_topologies(topo_pri: Dict, topo_sec: Dict) -> Dict:
-    """
-    Merge dict_prim into dict_sec
-    Args:
-        topo_pri: data of dict_pri will overwrite the data in dict_sec
-        topo_sec: dict where the data of dict_pri will be merged to
-
-    Returns:
-        Dict: topo_sec that contains merged data from top_sec and top_pri
-    """
-    keys_pri = list(topo_pri.keys())
-
-    # first transfer all completely missing items from dict_prim to dict_sec
-    for key in keys_pri:
-        if key not in topo_sec.keys():
-            topo_sec[key] = topo_pri[key]
-        else:
-            topo_sec[key]['connections'].update(topo_pri[key].get('connections', {}))
-            topo_sec[key]['interfaces'] = list(
-                set((topo_sec[key]['interfaces'] + topo_pri[key].get('interfaces', [])))
-            )
-        topo_pri.pop(key)
-    return topo_sec
-
-
 def compare_dicts(dict1: Mapping, dict2: Mapping) -> bool:
     # check top level keys
     if not is_list_of_str_equal(list(dict1.keys()), list(dict2.keys())):
@@ -329,23 +275,35 @@ def compare_dicts(dict1: Mapping, dict2: Mapping) -> bool:
         LOGGER.debug(f'dict1: {list(dict2.keys())}')
         return False
 
+    LOGGER.debug('Top level matches')
     for key, value in dict1.items():
-        _type = type(value)
-        if _type == dict:
+        type_ = type(value)
+        if type_ == dict:
+            LOGGER.debug(f'compare dict: {key}')
             if not compare_dicts(value, dict2[key]):
                 return False
-        elif _type == list:
+        elif type_ == list:
             if not is_list_of_str_equal(value, dict2[key]):
                 LOGGER.debug(f'list1: {value}')
                 LOGGER.debug(f'list2: {dict2[key]}')
                 return False
-        elif _type == str:
+        elif type_ in [str, int]:
+            if not value == dict2[key]:
+                LOGGER.debug('value dont match')
+                LOGGER.debug(f'value1: {value}')
+                LOGGER.debug(f'value2: {dict2[key]}')
+                return False
+        elif value is None:
             if not value == dict2[key]:
                 LOGGER.debug('value dont match')
                 LOGGER.debug(f'value1: {value}')
-                LOGGER.debug(f'value2 {dict2[key]}')
+                LOGGER.debug(f'value2: {dict2[key]}')
                 return False
         else:
+            LOGGER.debug(f'Compare unknown type {type_}')
+            LOGGER.debug(f'key: {key}')
+            LOGGER.debug(f'value1: {value}')
+            LOGGER.debug(f'value2: {dict2[key]}')
             return False
 
     return True
@@ -354,10 +312,26 @@ def compare_dicts(dict1: Mapping, dict2: Mapping) -> bool:
 def is_equal_with_default(data: Mapping, file: str) -> bool:
     default_file = Path(file)
     if default_file.exists():
-        default_data = literal_eval(default_file.read_text())
+        LOGGER.info(f'compare data with {file}')
+        default_data = loads(default_file.read_text())
         return compare_dicts(data, default_data)
     return False
 
+
+def get_attributes_from_inventory(inventory: Dict[str, object], raw_path: str) -> Dict | None:
+    # print(inventory['Nodes']['networking']['Nodes']['lldp_cache']['Attributes']['Pairs'])
+    path: List[str] = ('Nodes,' + ',Nodes,'.join(raw_path.split(',')) + ',Attributes,Pairs').split(',')
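+    # e.g. raw_path 'networking,lldp_cache' expands to
+    # ['Nodes', 'networking', 'Nodes', 'lldp_cache', 'Attributes', 'Pairs']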
+    try:
+        table = inventory.copy()
+    except AttributeError:
+        return None
+    for m in path:
+        try:
+            table = table[m]
+        except KeyError:
+            LOGGER.info(msg=f'Inventory attributes for {path} not found')
+            return None
+    return dict(table)
+
 
 def get_table_from_inventory(inventory: Dict[str, object], raw_path: str) -> List | None:
     path: List[str] = ('Nodes,' + ',Nodes,'.join(raw_path.split(',')) + ',Table,Rows').split(',')
@@ -422,3 +396,66 @@ class StdoutQuiet:
 
     def flush(self):
         self._org_stdout.flush()
+
+
+def adjust_toml(toml_file: str):
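+    # Rewrite deprecated option names in the given TOML file to their current equivalents
+    # (fix_options) and report settings that are obsolete (old_options).
+    # The original file is kept as "<toml_file>.backup" before the fixed file is written.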
+    fix_options = {
+        'DROP_HOSTS': TomlSections.L2_DROP_NEIGHBOURS,
+        'HOST_MAP': TomlSections.L2_HOST_MAP,
+        'L2_DROP_HOSTS': TomlSections.L2_DROP_NEIGHBOURS,  # needs to be before DROP_HOST
+        'L3V4_IGNORE_HOSTS': TomlSections.L3_IGNORE_HOSTS,
+        'L3V4_IGNORE_IP': TomlSections.L3_IGNORE_IP,
+        'L3V4_IRNORE_WILDCARD': TomlSections.L3V4_IGNORE_WILDCARD,
+        'L3V4_REPLACE': TomlSections.L3_REPLACE,
+        'L3V3_REPLACE': TomlSections.L3_REPLACE,
+        'L3V4_SUMMARIZE': TomlSections.L3_SUMMARIZE,
+        'SEED_DEVICES': TomlSections.L2_SEED_DEVICES,
+        'icon_missinc': EmblemValues.ICON_ALERT_UNREACHABLE,
+        'icon_missing': EmblemValues.ICON_ALERT_UNREACHABLE,
+        'l3v4_replace': EmblemNames.L3_REPLACE,
+        'l3v4_summarize': EmblemNames.L3_SUMMARIZE,
+        'keep_domain = true': f'{TomlSettings.REMOVE_DOMAIN} = false',
+        'keep_domain = false': f'{TomlSettings.REMOVE_DOMAIN} = true',
+    }
+    old_options = {
+        'lowercase': f'{TomlSettings.CASE} = {Case.LOWER}',
+        'uppercase': f'{TomlSettings.CASE} = {Case.UPPER}',
+        'FILESYSTEM': f'{Backends.MULTISITE}',
+        'debug': f'{TomlSettings.LOG_LEVEL} = {LogLevels.DEBUG}',
+        'keep_domain': f'{TomlSettings.REMOVE_DOMAIN} = true/false'
+    }
+    changed: bool = False
+    org_file = Path(toml_file)
+    if org_file.exists():
+        print(f'Checking file.: {org_file.name}')
+        org_content: str = org_file.read_text()
+        content: str = org_content
+        for old, new in fix_options.items():
+            re_pattern = f'\\b{old}\\b'
+            count = len(re_findall(re_pattern, org_content))
+            if count > 0:
+                changed = True
+                content = re_sub(re_pattern, new, content)
+                print(f'Found value...: "{old}" {count} times, replaced by "{new}"')
+
+        for old, new in old_options.items():
+            re_pattern = f'\\b{old}\\b'
+            count = len(re_findall(re_pattern, org_content))
+            if count > 0:
+                print(f'Obsolete......: "{old}", use "{new}" instead')
+
+        if changed:
+            backup_file = Path(f'{toml_file}.backup')
+            if not backup_file.exists():
+                org_file.rename(backup_file)
+                print(f'Renamed TOML..: {backup_file.name}')
+                new_file = Path(toml_file)
+                new_file.open('w').write(content)
+                print(f'Written fixed.: {new_file.name}')
+            else:
+                print(
+                    f'Cannot create backup file {backup_file.name}, file exists. Aborting!\n'
+                    f'Nothing has changed.'
+                )
+        else:
+            print('Finished......: Nothing found to fix.')
diff --git a/source/bin/nvdct/nvdct.py b/source/bin/nvdct/nvdct.py
index ffd846826268393d412350be70890c5955070d1f..8d93b27f1fab784df07622d786bfaf99cf23915d 100755
--- a/source/bin/nvdct/nvdct.py
+++ b/source/bin/nvdct/nvdct.py
@@ -157,7 +157,23 @@
 #                 [EMBLEMS]
 #                 l3v4_replace         -> l3_replace
 #                 l3v4_summarize       -> l3_summarize
+# 2024-12-25: fixed "--dont-compare", data will only be saved if the are different from the default
+#             changed: is seed devices is not configured use all CDP/LLDP devices (by host label)
+# 2024-12-26: INCOMPATIBLE: renamed L2_DROP_HOSTS -> L2_DROP_NEIGHBOURS
+#             added option --display-l2-neighbours
+# 2024-12-27: added options
+#               --adjust-toml
+#               --include-l3-loopback
+#               --skip-l3-cidr-0
+#               --skip-l3-cidr-32-128
+#               --skip-l3-public
+#             fixed: keep default topology
+#             fixed: cleanup -> remove the oldest topologies not the newest
+#             INCOMPATIBLE: removed: CUSTOM_LAYERS
+#             refactoring constants
+#
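+# usage sketch (option name as listed above; the script path is an assumption):
+#   ~/local/bin/nvdct/nvdct.py --adjust-toml   # rewrites deprecated TOML option names, keeps a .backup copy
+#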
 
+#
 # creating topology data json from inventory data
 #
 # This script creates the topology data file needed for the Checkmk "network_visualization" plugin
@@ -254,7 +270,10 @@ __data = {
 """
 
 import sys
+
+from collections.abc import MutableSequence
 from time import strftime, time_ns
+
 from typing import List
 
 from lib.args import parse_arguments
@@ -265,14 +284,17 @@ from lib.backends import (
     HostCacheRestApi,
 )
 from lib.constants import (
+    Backends,
     DATAPATH,
-    HOME_URL,
+    URLs,
+    HostLabels,
     IPVersion,
-    LABEL_L3v4,
-    LAYERS,
+    InvPaths,
     LOGGER,
-    Layer,
+    Layers,
     NVDCT_VERSION,
+    TomlSections,
+    TomlSettings,
 )
 from lib.settings import Settings
 from lib.topologies import (
@@ -282,8 +304,8 @@ from lib.topologies import (
 )
 from lib.utils import (
     ExitCodes,
-    InventoryColumns,
     StdoutQuiet,
+    adjust_toml,
     configure_logger,
     remove_old_data,
 )
@@ -307,20 +329,29 @@ def main():
     print(
         f'Network Visualisation Data Creation Tool (NVDCT)\n'
         f'by thl-cmk[at]outlook[dot]com, version {NVDCT_VERSION}\n'
-        f'see {HOME_URL}'
+        f'see {URLs.NVDCT}'
     )
     print('')
     print(f'Start time....: {strftime(settings.time_format)}')
 
+    if settings.fix_toml:
+        adjust_toml(settings.user_data_file)
+        print(f'Time taken....: {(time_ns() - start_time) / 1e9}/s')
+        print(f'End time......: {strftime(settings.time_format)}')
+        print('')
+
+        LOGGER.critical('Data creation finished')
+        sys.exit()
+
     match settings.backend:
-        case 'RESTAPI':
+        case Backends.RESTAPI:
             host_cache: HostCache = HostCacheRestApi(
                 pre_fetch=settings.pre_fetch,
                 api_port=settings.api_port,
                 filter_sites=settings.filter_sites,
-                sites=settings.sites
+                sites=settings.sites,
             )
-        case 'MULTISITE':
+        case Backends.MULTISITE:
             host_cache: HostCache = HostCacheMultiSite(
                 pre_fetch=settings.pre_fetch,
                 filter_sites=settings.filter_sites,
@@ -328,42 +359,47 @@ def main():
                 filter_customers=settings.filter_customers,
                 customers=settings.customers,
             )
-        case 'LIVESTATUS':
+        case Backends.LIVESTATUS:
             host_cache: HostCache = HostCacheLiveStatus(
                 pre_fetch=settings.pre_fetch,
             )
         case _:
-            LOGGER.error(msg=f'Backend {settings.backend} not (yet) implemented')
+            LOGGER.error(msg=f'Backend {settings.backend} not implemented')
             host_cache: HostCache | None = None  # to keep linter happy
             sys.exit(ExitCodes.BACKEND_NOT_IMPLEMENTED)
 
-    jobs: List[Layer] = []
+    host_cache.init_neighbour_to_host(
+        case=settings.case,
+        l2_host_map=settings.l2_host_map,
+        prefix=settings.prefix,
+        remove_domain=settings.remove_domain,
+    )
+
+    jobs: MutableSequence = []
     pre_fetch_layers: List[str] = []
     pre_fetch_host_list: List[str] = []
 
     for layer in settings.layers:
         match layer:
-            case 'STATIC':
+            case Layers.STATIC:
+                jobs.append(layer)
+            case Layers.L3V4:
                 jobs.append(layer)
-            case 'L3v4':
+                host_cache.add_inventory_path(path=InvPaths.L3)
+                pre_fetch_layers.append(HostLabels.L3V4_ROUTER)
+            case Layers.CDP | Layers.LLDP:
                 jobs.append(layer)
-                host_cache.add_inventory_path(path=LAYERS[layer].path)
-                pre_fetch_layers.append(LAYERS[layer].host_label)
-            case 'CUSTOM':
-                for entry in settings.custom_layers:
-                    jobs.append(entry)
-                    host_cache.add_inventory_path(entry.path)
-            case 'CDP' | 'LLDP':
-                jobs.append(LAYERS[layer])
-                host_cache.add_inventory_path(path=LAYERS[layer].path)
-                pre_fetch_layers.append(LAYERS[layer].host_label)
+                host_cache.add_inventory_path(InvPaths.CDP if layer == Layers.CDP else InvPaths.LLDP)
+                pre_fetch_layers.append(HostLabels.CDP if layer == Layers.CDP else HostLabels.LLDP)
             case _:
                 LOGGER.warning(f'Unknown layer {layer} dropped.')
                 continue
 
     if not jobs:
-        message = ('No layer to work on. Please configura at least one layer (i.e. CLI option "-l CDP")\n'
-                   'See ~/local/bin/nvdct/conf/nvdct.toml -> SETTINGS -> layers')
+        message = (
+            f'No layer to work on. Please configure at least one layer (e.g. CLI option "-l {Layers.CDP}")\n'
+            f'See {settings.user_data_file} -> {TomlSections.SETTINGS} -> {TomlSettings.LAYERS}'
+        )
         LOGGER.warning(message)
         print(message)
         sys.exit(ExitCodes.NO_LAYER_CONFIGURED)
@@ -371,8 +407,8 @@ def main():
     if settings.pre_fetch:
         LOGGER.info('Pre fill cache...')
         for host_label in pre_fetch_layers:
-            if _host_list := host_cache.get_hosts_by_label(host_label):
-                pre_fetch_host_list = list(set(pre_fetch_host_list + _host_list))
+            if host_list := host_cache.get_hosts_by_label(host_label):
+                pre_fetch_host_list = list(set(pre_fetch_host_list + list(host_list)))
         LOGGER.info(f'Fetching data for {len(pre_fetch_host_list)} hosts start')
         print(f'Prefetch start: {strftime(settings.time_format)}')
         print(f'Prefetch hosts: {len(pre_fetch_host_list)} of {len(host_cache.cache)}')
@@ -382,16 +418,14 @@ def main():
 
     for job in jobs:
         match job:
-            case 'STATIC':
+            case Layers.STATIC:
                 label = job
                 topology = TopologyStatic(
                     connections=settings.static_connections,
                     emblems=settings.emblems,
                     host_cache=host_cache,
                 )
-                topology.create()
-
-            case 'L3v4':
+            case Layers.L3V4:
                 label = job
                 topology = TopologyL3(
                     emblems=settings.emblems,
@@ -401,37 +435,42 @@ def main():
                     ignore_wildcard=settings.l3v4_ignore_wildcard,
                     include_hosts=settings.include_l3_hosts,
                     replace=settings.l3_replace,
+                    skip_cidr_0=settings.skip_l3_cidr_0,
+                    skip_cidr_32_128=settings.skip_l3_cidr_32_128,
                     skip_if=settings.skip_l3_if,
                     skip_ip=settings.skip_l3_ip,
+                    skip_public=settings.skip_l3_public,
+                    include_loopback=settings.include_l3_loopback,
                     summarize=settings.l3_summarize,
-                    version=IPVersion.IPv4 if job == LABEL_L3v4 else IPVersion.IPv6
+                    version=IPVersion.IPv4 if job == Layers.L3V4 else IPVersion.IPv6
                 )
-                topology.create()
-
-            case _:
-                label = job.label.upper()
-                columns = job.columns.split(',')
+            case Layers.CDP | Layers.LLDP:
+                label = job
+                if job == Layers.CDP:
+                    host_label = HostLabels.CDP
+                    inv_path = InvPaths.CDP
+                else:
+                    host_label = HostLabels.LLDP
+                    inv_path = InvPaths.LLDP
+                if not (seed_devices := settings.l2_seed_devices):
+                    seed_devices = host_cache.get_hosts_by_label(host_label)
                 topology = TopologyL2(
-                    case=settings.case,
                     emblems=settings.emblems,
                     host_cache=host_cache,
-                    inv_columns=InventoryColumns(
-                        neighbour=columns[0],
-                        local_port=columns[1],
-                        neighbour_port=columns[2]
-                    ),
-                    l2_drop_hosts=settings.l2_drop_hosts,
-                    l2_host_map=settings.l2_host_map,
+                    l2_drop_neighbours=settings.l2_drop_neighbours,
                     l2_neighbour_replace_regex=settings.l2_neighbour_replace_regex,
                     label=label,
-                    path_in_inventory=job.path,
-                    prefix=settings.prefix,
-                    remove_domain=settings.remove_domain,
-                    seed_devices=settings.l2_seed_devices,
+                    path_in_inventory=inv_path,
+                    seed_devices=seed_devices,
+                    display_l2_neighbours=settings.display_l2_neighbours,
                 )
-                topology.create()
-
+            case _:
+                LOGGER.warning(f'Unknown layer {job}, ignoring.')
+                continue
+        pre_message = f'Layer {label:.<8s}: '
+        print(pre_message, end='', flush=True)
 
+        topology.create()
         topology.nv_connections.add_meta_data_to_connections(
             nv_objects=topology.nv_objects,
             speed_map=settings.map_speed_to_thickness,
@@ -440,14 +479,15 @@ def main():
         topology.save(
             label=label,
             output_directory=settings.output_directory,
-            make_default=settings.default
+            make_default=settings.default,
+            dont_compare=settings.dont_compare,
         )
 
         message = (
-            f'Layer {label:.<8s}: Devices/Objects/Connections added {topology.nv_objects.host_count}/'
+            f'Devices/Objects/Connections added {topology.nv_objects.host_count}/'
             f'{len(topology.nv_objects.nv_objects)}/{len(topology.nv_connections.nv_connections)}'
         )
-        LOGGER.info(msg=message)
+        LOGGER.info(msg=f'{pre_message}{message}')
         print(message)
 
     if settings.keep:
diff --git a/source/packages/nvdct b/source/packages/nvdct
index 329c316c4d67f1236001a95535f01b1a214b0213..32ad046946260c2eb190ea09a64f9b71d419de41 100644
--- a/source/packages/nvdct
+++ b/source/packages/nvdct
@@ -47,7 +47,7 @@
                    'htdocs/images/icons/location_80.png']},
  'name': 'nvdct',
  'title': 'Network Visualization Data Creation Tool (NVDCT)',
- 'version': '0.9.6-20241222',
+ 'version': '0.9.7-20241230',
  'version.min_required': '2.3.0b1',
  'version.packaged': 'cmk-mkp-tool 0.2.0',
  'version.usable_until': '2.4.0p1'}