def address(self):
    if not self.__read:
        from initat.icsw.service.instance import InstanceXML
        from initat.cluster.backbone.server_enums import icswServiceEnum
        _xml = InstanceXML(quiet=True)
        _port = _xml.get_port_dict(icswServiceEnum.memcached, command=True)
        self.__address = ["127.0.0.1:{:d}".format(_port)]
        self.__read = True
    return self.__address
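# Usage sketch (assumption: ``cache_cfg`` is a hypothetical instance of the
# class holding the method above). It shows the lazy-caching behaviour: the
# memcached command port is resolved from the instance XML exactly once, and
# every later call returns the cached address list.
#
#   targets = cache_cfg.address()   # e.g. ["127.0.0.1:<memcached-port>"]
#   targets is cache_cfg.address()  # True, no second XML lookup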
def _get_parser():
    _xml = InstanceXML(quiet=True)
    parser = argparse.ArgumentParser("set a passive check command")
    parser.add_argument(
        "-p", type=int,
        default=_xml.get_port_dict(icswServiceEnum.monitor_slave, command=True),
        dest="port", help="target port [%(default)d]")
    parser.add_argument("-H", type=str, default="localhost", dest="host",
                        help="target host [%(default)s]")
    parser.add_argument("--device", type=str, default="",
                        help="device [%(default)s]", required=True)
    parser.add_argument("--check", type=str, default="",
                        help="name of check [%(default)s]", required=True)
    parser.add_argument("--state", type=str, default="OK",
                        choices=["OK", "WARN", "CRITICAL"],
                        help="check state [%(default)s]")
    parser.add_argument("--output", type=str, default="",
                        help="check output [%(default)s]", required=True)
    parser.add_argument("-v", help="verbose mode [%(default)s]", default=False,
                        dest="verbose", action="store_true")
    parser.add_argument("-t", type=int, default=10, dest="timeout",
                        help="set timeout [%(default)d]")
    return parser
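# Usage sketch, a minimal driver for the parser above; the device, check and
# output values are illustrative only, not documented commands.
#
#   opts = _get_parser().parse_args(
#       ["--device", "node01", "--check", "load", "--state", "WARN",
#        "--output", "load is high"]
#   )
#   # opts.host / opts.port then address the monitor-slave command socket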
def _register_local_syncer(self):
    _inst_xml = InstanceXML(log_com=self.log)
    self.__local_syncer_uuid = "urn:uuid:{}:{}:".format(
        uuid_tools.get_uuid(),
        _inst_xml.get_uuid_postfix(icswServiceEnum.monitor_slave),
    )
    self.__local_syncer_addr = "tcp://127.0.0.1:{:d}".format(
        _inst_xml.get_port_dict(icswServiceEnum.monitor_slave, command=True)
    )
    self.log(
        "connecting to local syncer {} (uuid={})".format(
            self.__local_syncer_addr,
            self.__local_syncer_uuid,
        )
    )
    self.main_socket.connect(self.__local_syncer_addr)
def process_init(self):
    global_config.enable_pm(self)
    self.__log_template = logging_tools.get_logger(
        global_config["LOG_NAME"],
        global_config["LOG_DESTINATION"],
        context=self.zmq_context,
    )
    # db_tools.close_connection()
    self._instance = InstanceXML(log_com=self.log)
    self._init_network()
    self._init_capabilities()
    self.__last_user_scan = None
    self.__scan_running = False
    self.register_timer(self._update, 2 if global_config["DEBUG"] else 30, instant=True)
def SendCommandDefaults(**kwargs):
    from initat.icsw.service.instance import InstanceXML
    from initat.host_monitoring.client_enums import icswServiceEnum
    _def = argparse.Namespace(
        arguments=[],
        timeout=10,
        port=InstanceXML(quiet=True).get_port_dict(
            icswServiceEnum.host_monitoring, command=True),
        protocoll="tcp",
        host="localhost",
        verbose=False,
        identity_string="sc_default_{}_{:d}".format(os.uname()[1], os.getpid()),
        iterations=1,
        raw=False,
        kv=[],
        kva=[],
        kv_path="",
        split=False,
        only_send=False,
        quiet=True,
    )
    for key, value in kwargs.items():
        setattr(_def, key, value)
    return _def
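# Usage sketch: any keyword argument overrides the matching Namespace attribute
# set above (unknown keys are silently added, so callers should stick to the
# attribute names listed in the function); "node01" is an illustrative host.
#
#   opts = SendCommandDefaults(host="node01", timeout=30, verbose=True)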
def __init__(self):
    threading_tools.icswProcessPool.__init__(
        self,
        "main",
    )
    self.CC.init(icswServiceEnum.package_server, global_config)
    self.CC.check_config()
    self.__pc_port = InstanceXML(quiet=True).get_port_dict(
        icswServiceEnum.package_client, command=True
    )
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.register_exception("hup_error", self._hup_error)
    db_tools.close_connection()
    self.CC.log_config()
    self.CC.re_insert_config()
    self.EC.init(global_config)
    self._init_clients()
    self._init_network_sockets()
    self.add_process(RepoProcess("repo"), start=True)
    # close DB connection
    db_tools.close_connection()
    # not needed, 0MQ is smart enough to keep the connections alive
    # self.reconnect_to_clients()
    self.send_to_process("repo", "rescan_repos")
def __init__(self, log_com, build_proc=None, s_check=None, host=None,
             build_cache=None, parent_check=None, **kwargs):
    self.__log_com = log_com
    self.__hm_port = InstanceXML(quiet=True).get_port_dict(
        icswServiceEnum.host_monitoring, command=True)
    for key in dir(SpecialBase.Meta):
        if not key.startswith("__") and not hasattr(self.Meta, key):
            setattr(self.Meta, key, getattr(SpecialBase.Meta, key))
    _name = self.__class__.__name__
    if _name.count("_"):
        _name = _name.split("_", 1)[1]
    elif _name.startswith("Special"):
        _name = _name[7:]
    self.Meta.name = _name
    # set a meaningful, well-formatted database name
    self.Meta.database_name = inflection.underscore(self.Meta.name)
    self.ds_name = self.Meta.name
    # print("ds_name=", self.ds_name)
    self.build_process = build_proc
    self.s_check = s_check
    self.parent_check = parent_check
    self.host = host
    self.build_cache = build_cache
    self.__hints_loaded = False
    # init with default
    self.__call_idx = 0
def local_main():
    my_parser = argparse.ArgumentParser()
    my_parser.add_argument("-i", dest="IDENTITY_STRING", type=str,
                           default="collclient",
                           help="identity string [%(default)s]")
    my_parser.add_argument("--timeout", dest="TIMEOUT", default=10, type=int,
                           help="set timeout [%(default)d]")
    my_parser.add_argument("-p", dest="COMMAND_PORT",
                           default=InstanceXML(quiet=True).get_port_dict(
                               "host-monitoring", command=True),
                           type=int, help="set comport [%(default)d]")
    my_parser.add_argument("--host", dest="HOST", type=str, default="localhost",
                           help="set target host [%(default)s]")
    my_parser.add_argument(dest="ARGUMENTS", nargs="+",
                           help="additional arguments")
    sys.exit(main.main(options=my_parser.parse_args()))
def send_to_remote_server_ip(self, srv_addr, dev_uuid, srv_type_enum, send_obj):
    if self.__target_dict is None:
        from initat.icsw.service.instance import InstanceXML
        self.__target_dict = {}
        self.__strs_instance = InstanceXML(quiet=True)
    if srv_type_enum not in self.__target_dict:
        self.__target_dict[srv_type_enum] = RemoteServerAddressIP(self, srv_type_enum)
    _rsa = self.__target_dict[srv_type_enum]
    _rsa.check_for_address(self.__strs_instance, srv_addr, dev_uuid)
    return self.send_to_remote_server_int(_rsa, send_obj)
def _ensure_settings(self):
    from initat.icsw.service.instance import InstanceXML
    from initat.cluster.backbone.server_enums import icswServiceEnum
    if self._MC_SETTINGS is None:
        self._MC_SETTINGS = {
            "PORT": InstanceXML(quiet=True).get_port_dict(
                icswServiceEnum.memcached, command=True),
            "ADDRESS": "127.0.0.1",
        }
        self.log("set basic data")
def __init__(self, process):
    self.__process = process
    self.inst_xml = InstanceXML(self.log)
    self.__register_timer = False
    self.__build_in_progress, self.__build_version = (False, 0)
    # setup local master, always set (also on satellite nodes)
    self.__local_master = None
    # master config for distribution master (only set on distribution master)
    self.__master_config = None
    self.__process.register_timer(self._init_local_master, 60, first_timeout=2)
def parse_args(self, server_mode, arg_list=None):
    # set constants
    inst_xml = InstanceXML(quiet=True)
    self._populate_all(server_mode, inst_xml)
    # print(dir(self.sub_parser))
    # print(self.sub_parser._get_subactions)
    if arg_list is None:
        opt_ns = self._parser.parse_args()
    else:
        opt_ns = self._parser.parse_args(args=arg_list)
    if not hasattr(opt_ns, "execute"):
        self._parser.print_help()
        sys.exit(0)
    return opt_ns
def init(r_process, backlog_size, timeout, verbose, force_resolve):
    ZMQDiscovery.relayer_process = r_process
    ZMQDiscovery.backlog_size = backlog_size
    ZMQDiscovery.timeout = timeout
    ZMQDiscovery.verbose = verbose
    ZMQDiscovery.force_resolve = force_resolve
    ZMQDiscovery.pending = {}
    # last discovery try
    ZMQDiscovery.last_try = {}
    ZMQDiscovery.__cur_maps = set()
    ZMQDiscovery.vanished = set()
    ZMQDiscovery.hm_port = InstanceXML(quiet=True).get_port_dict(
        "host-monitoring", command=True)
    ZMQDiscovery.reload_mapping()
def __init__(self, discovery_process):
    self.discovery_process = discovery_process
    # runs per device
    self.__device_planned_runs = {}
    # lut for external connection commands (0MQ)
    self.__ext_con_lut = {}
    # lut for external background commands
    self.__ext_bg_lut = {}
    self.log("init Dispatcher")
    # quasi-static constants
    self.__hm_port = InstanceXML(quiet=True).get_port_dict(
        "host-monitoring", command=True)
    self.schedule_items = []
def __init__(self):
    _inst = InstanceXML(quiet=True)
    all_instances = sum(
        [
            _entry.xpath(".//config-enums/config-enum/text()")
            for _entry in _inst.get_all_instances()
        ],
        []
    )
    all_perms = [
        _perm.perm_name for _perm in csw_permission.objects.all()
    ] + ["$$CHECK_FOR_SUPERUSER"]
    # file() is Python-2 only, use open()
    _content = open(
        "{}/menu_relax.xml".format(os.path.join(settings.FILE_ROOT, "menu")),
        "r",
    ).read()
    _content = _content.replace(
        "<value>RIGHTSLIST</value>",
        "".join(["<value>{}</value>".format(_pn) for _pn in all_perms])
    ).replace(
        "<value>SERVICETYPESLIST</value>",
        "".join(["<value>{}</value>".format(_stn) for _stn in all_instances])
    )
    # sys.exit(0)
    self.ng = etree.RelaxNG(etree.fromstring(_content))
def __init__(self, process):
    self.__process = process
    threading_tools.PollerBase.__init__(self)
    # set flag
    self.debug_zmq = False
    # list of all sockets
    self.__sock_list = []
    # dict of send / recv sockets via server name
    self.__sock_lut = {}
    # dict id -> dc_action
    self.__pending_messages = {}
    self.__msg_id = 0
    self.__msg_prefix = "ipc_com_{:d}".format(os.getpid())
    self.__hm_port = InstanceXML(quiet=True).get_port_dict(
        icswServiceEnum.host_monitoring, command=True)
    self.__hbc_dict = {}
    self.log("init")
def _init_network(self):
    _v_conn_str = process_tools.get_zmq_ipc_name(
        "vector", s_name="collserver", connect_to_root_instance=True)
    vector_socket = self.zmq_context.socket(zmq.PUSH)
    vector_socket.setsockopt(zmq.LINGER, 0)
    vector_socket.connect(_v_conn_str)
    self.vector_socket = vector_socket
    c_port = InstanceXML(quiet=True).get_port_dict(
        icswServiceEnum.collectd_server,
        ptype="receive",
    )
    _c_conn_str = "tcp://127.0.0.1:{:d}".format(c_port)
    collectd_socket = self.zmq_context.socket(zmq.PUSH)
    collectd_socket.setsockopt(zmq.LINGER, 0)
    collectd_socket.setsockopt(zmq.IMMEDIATE, 1)
    collectd_socket.connect(_c_conn_str)
    self.collectd_socket = collectd_socket
def main():
    prog_name = global_config.name()
    if COLLCLIENT:
        prog_name = "collclient"
    global_config.add_config_entries([
        ("DEBUG", configfile.bool_c_var(
            False, help_string="enable debug mode [%(default)s]",
            short_options="d", only_commandline=True)),
        ("VERBOSE", configfile.int_c_var(
            0, help_string="set verbose level [%(default)d]",
            short_options="v", only_commandline=True)),
    ])
    if prog_name == "collclient":
        global_config.add_config_entries([
            ("IDENTITY_STRING", configfile.str_c_var(
                "collclient", help_string="identity string",
                short_options="i")),
            ("TIMEOUT", configfile.int_c_var(
                10, help_string="set timeout [%(default)d]",
                only_commandline=True)),
            ("COMMAND_PORT", configfile.int_c_var(
                InstanceXML(quiet=True).get_port_dict(
                    "host-monitoring", command=True),
                info="listening Port",
                help_string="port to communicate [%(default)d]",
                short_options="p")),
            ("HOST", configfile.str_c_var(
                "localhost", help_string="host to connect to")),
        ])
    options = global_config.handle_commandline(
        description="{}, version is {}".format(prog_name, VERSION_STRING),
        positional_arguments=prog_name in ["collclient"],
        partial=prog_name in ["collclient"],
    )
    ret_state = run_code(prog_name, global_config)
    return ret_state
def init(cls, r_process, backlog_size, timeout, verbose):
    ZMQMapping.init(cls)
    cls.db_map = MappingDB(
        os.path.join(r_process.state_directory, "mapping.sqlite"),
        r_process.log,
    )
    cls.relayer_process = r_process
    cls.backlog_size = backlog_size
    cls.timeout = timeout
    cls.verbose = verbose
    # requests pending
    cls._pending = {}
    # last discovery try
    cls.last_try = {}
    cls.__cur_maps = set()
    cls.vanished = set()
    cls.hm_port = InstanceXML(quiet=True).get_port_dict(
        "host-monitoring", command=True)
    cls.reload_mapping()
def __init__(self):
    threading_tools.process_pool.__init__(self, "main", zmq=True)
    self.register_exception("int_error", self._int_error)
    self.register_exception("term_error", self._int_error)
    self.CC.init(icswServiceEnum.config_server, global_config)
    self.CC.check_config()
    self.CC.read_config_from_db([
        ("TFTP_DIR", configfile.str_c_var("/tftpboot")),
        ("MONITORING_PORT", configfile.int_c_var(
            InstanceXML(quiet=True).get_port_dict(
                "host-monitoring", command=True))),
        ("LOCALHOST_IS_EXCLUSIVE", configfile.bool_c_var(True)),
        ("HOST_CACHE_TIME", configfile.int_c_var(10 * 60)),
        ("WRITE_REDHAT_HWADDR_ENTRY", configfile.bool_c_var(True)),
        ("ADD_NETDEVICE_LINKS", configfile.bool_c_var(False)),
    ])
    global_config.add_config_entries([
        ("CONFIG_DIR", configfile.str_c_var(
            os.path.join(global_config["TFTP_DIR"], "config"))),
        ("IMAGE_DIR", configfile.str_c_var(
            os.path.join(global_config["TFTP_DIR"], "images"))),
        ("KERNEL_DIR", configfile.str_c_var(
            os.path.join(global_config["TFTP_DIR"], "kernels"))),
    ])
    self.__pid_name = global_config["PID_NAME"]
    # close DB connection (daemonize)
    db_tools.close_connection()
    self.CC.re_insert_config()
    self._log_config()
    self._init_subsys()
    self._init_network_sockets()
    self.add_process(build_process("build"), start=True)
    db_tools.close_connection()
    self.register_func("client_update", self._client_update)
    self.register_func("complex_result", self._complex_result)
    self.__run_idx = 0
    self.__pending_commands = {}
def SendCommandDefaults():
    from initat.icsw.service.instance import InstanceXML
    _def = argparse.Namespace(
        arguments=[],
        timeout=10,
        port=InstanceXML(quiet=True).get_port_dict(
            "host-monitoring", command=True),
        protocoll="tcp",
        host="localhost",
        verbose=False,
        identity_string="sc_default_{}_{:d}".format(os.uname()[1], os.getpid()),
        iterations=1,
        raw=False,
        kv=[],
        kva=[],
        kv_path="",
        split=False,
        only_send=False,
        quiet=True,
    )
    return _def
def __init__(self, log_com, build_proc=None, s_check=None, host=None,
             global_config=None, build_cache=None, parent_check=None, **kwargs):
    self.__log_com = log_com
    self.__hm_port = InstanceXML(quiet=True).get_port_dict(
        "host-monitoring", command=True)
    for key in dir(SpecialBase.Meta):
        if not key.startswith("__") and not hasattr(self.Meta, key):
            setattr(self.Meta, key, getattr(SpecialBase.Meta, key))
    self.Meta.name = self.__class__.__name__.split("_", 1)[1]
    self.ds_name = self.Meta.name
    # print("ds_name=", self.ds_name)
    self.build_process = build_proc
    self.s_check = s_check
    self.parent_check = parent_check
    self.host = host
    self.build_cache = build_cache
else:
    if DEBUG:
        print("No valid database found")
        # print("*", DATABASES)

# build a cache key for accessing memcached
ICSW_CACHE_KEY_LONG = _c_key.hexdigest()
# short ICSW_CACHE_KEY
ICSW_CACHE_KEY = ICSW_CACHE_KEY_LONG[:4]

FILE_ROOT = os.path.normpath(os.path.dirname(__file__))

try:
    _mc_port = InstanceXML(quiet=True).get_port_dict(
        # need to use the name here because icswServiceEnums need settings.py
        "memcached",
        command=True,
    )
except KeyError:
    _mc_port = 0

CACHES = {
    "default": {
        "BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
        "LOCATION": "127.0.0.1:{:d}".format(_mc_port),
    }
}

TIME_ZONE = "Europe/Vienna"

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
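# Usage sketch: how the CACHES definition above is typically consumed via
# Django's standard cache API. Prefixing keys with ICSW_CACHE_KEY keeps entries
# of different installations apart in a shared memcached instance; the key name
# "routing" is illustrative only.
#
#   from django.core.cache import cache
#   cache.set("{}_routing".format(ICSW_CACHE_KEY), data, timeout=300)
#   data = cache.get("{}_routing".format(ICSW_CACHE_KEY))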
def handle(self, **options):
    print("creating fixtures...")
    # global settings
    # LogSource
    factories.LogSourceFactory(identifier="webfrontend", description="via Webfrontend", device=None)
    factories.LogSourceFactory(identifier="commandline", description="via CommandLine", device=None)
    # partition fs
    factories.PartitionFS(name="unknown", identifier="f", descr="Unknown Filesystem", need_hexid=False, hexid="")
    factories.PartitionFS(name="reiserfs", identifier="f", descr="ReiserFS Filesystem", hexid="83", kernel_module="reiserfs")
    factories.PartitionFS(name="ext2", identifier="f", descr="Extended 2 Filesystem", hexid="83", kernel_module="ext2")
    factories.PartitionFS(name="ext3", identifier="f", descr="Extended 3 Filesystem", hexid="83", kernel_module="ext3")
    factories.PartitionFS(name="ext4", identifier="f", descr="Extended 4 Filesystem", hexid="83", kernel_module="ext4")
    factories.PartitionFS(name="swap", identifier="s", descr="SwapSpace", hexid="82")
    factories.PartitionFS(name="ext", identifier="e", descr="Extended Partition", hexid="f")
    factories.PartitionFS(name="empty", identifier="d", descr="Empty Partition", hexid="0")
    factories.PartitionFS(name="lvm", identifier="l", descr="LVM Partition", hexid="8e", kernel_module="dm_map")
    factories.PartitionFS(name="xfs", identifier="f", descr="XFS Filesystem", hexid="83", kernel_module="xfs")
    factories.PartitionFS(name="btrfs", identifier="f", descr="BTRFS Filesystem", hexid="83", kernel_module="btrfs")
    factories.PartitionFS(name="ocfs2", identifier="f", descr="OCFS2 Filesystem", hexid="83", kernel_module="ocfs2")
    factories.PartitionFS(name="gpfs", identifier="f", descr="GPFS Filesystem", hexid="00", kernel_module="mmfs26", need_hexid=False)
    factories.PartitionFS(name="biosboot", identifier="e", descr="BIOS Boot Partition", hexid="ef02")
    factories.PartitionFS(name="ntfs", identifier="f", descr="NTFS", hexid="07")
    factories.PartitionFS(name="fat", identifier="f", descr="Windows FAT", need_hexid=False, hexid="")
    factories.PartitionFS(name="linux_raid_member", identifier="f", descr="Linux raid auto", hexid="fd")
    # LogLevel, see log.coffee lines 230ff
    factories.LogLevelFactory(identifier="c", level=logging_tools.LOG_LEVEL_CRITICAL, name="critical")
    factories.LogLevelFactory(identifier="e", level=logging_tools.LOG_LEVEL_ERROR, name="error")
    factories.LogLevelFactory(identifier="w", level=logging_tools.LOG_LEVEL_WARN, name="warning")
    factories.LogLevelFactory(identifier="o", level=logging_tools.LOG_LEVEL_OK, name="ok")
    # status
    factories.Status(status="memtest", memory_test=True)
    factories.Status(status="boot_local", boot_local=True)
    factories.Status(status="boot_iso", boot_iso=True)
    factories.Status(status="boot_clean", prod_link=True, is_clean=True)
    factories.Status(status="installation_clean", prod_link=True, do_install=True, is_clean=True)
    factories.Status(status="boot", prod_link=True)  # FIXME ?
    factories.Status(status="installation", prod_link=True, do_install=True)  # FIXME ?
    # network types
    factories.NetworkType(identifier="b", description="boot network")
    factories.NetworkType(identifier="p", description="production network")
    factories.NetworkType(identifier="s", description="slave network")
    factories.NetworkType(identifier="o", description="other network")
    factories.NetworkType(identifier="l", description="local network")
    # netdevice speed
    factories.NetDeviceSpeed(speed_bps=0, check_via_ethtool=False, full_duplex=True)
    factories.NetDeviceSpeed(speed_bps=10000000, check_via_ethtool=False, full_duplex=True)
    factories.NetDeviceSpeed(speed_bps=10000000, check_via_ethtool=True, full_duplex=True)
    factories.NetDeviceSpeed(speed_bps=100000000, check_via_ethtool=True, full_duplex=True)
    factories.NetDeviceSpeed(speed_bps=100000000, check_via_ethtool=True, full_duplex=False)
    # 1 GBit / sec
    factories.NetDeviceSpeed(speed_bps=1000000000, check_via_ethtool=False, full_duplex=True)
    factories.NetDeviceSpeed(speed_bps=1000000000, check_via_ethtool=True, full_duplex=True)
    # trunks with 2 and 4 GBit / sec
    factories.NetDeviceSpeed(speed_bps=2000000000, check_via_ethtool=True, full_duplex=True)
    factories.NetDeviceSpeed(speed_bps=4000000000, check_via_ethtool=True, full_duplex=True)
    factories.NetDeviceSpeed(speed_bps=10000000000, check_via_ethtool=True, full_duplex=True)
    factories.NetDeviceSpeed(speed_bps=40000000000, check_via_ethtool=True, full_duplex=True)
    factories.NetDeviceSpeed(speed_bps=56000000000, check_via_ethtool=True, full_duplex=True)
    # host check command
    factories.HostCheckCommand(name="check-host-alive", command_line="$USER2$ -m localhost ping $HOSTADDRESS$ 5 5.0")
    factories.HostCheckCommand(name="check-host-alive-2", command_line="$USER2$ -m $HOSTADDRESS$ version")
    factories.HostCheckCommand(name="check-host-ok", command_line="$USER1$/check_dummy 0 up")
    factories.HostCheckCommand(name="check-host-down", command_line="$USER1$/check_dummy 2 down")
    # virtual desktop protocols
    factories.VirtualDesktopProtocol(name="vnc", description="VNC", binary="vncserver")
    factories.VirtualDesktopProtocol(name="rdc", description="Remote Desktop Connection", binary="")
    factories.VirtualDesktopProtocol(name="spice", description="SPICE", binary="")
    # window managers
    factories.WindowManager(name="kde", description="KDE", binary="startkde")
    factories.WindowManager(name="gnome", description="GNOME", binary="gnome-session")
    factories.WindowManager(name="windowmaker", description="Window Maker", binary="wmaker")
    # SensorAction
    # delete old ones
    for _name in ["mail"]:
        try:
            SensorAction.objects.get(Q(name=_name)).delete()
        except SensorAction.DoesNotExist:
            pass
    factories.SensorActionFactory(
        name="nothing",
        description="do nothing (to record events)",
        action="none",
        hard_control=False,
    )
    factories.SensorActionFactory(
        name="halt (software)",
        description="tries to halt the devices via software",
        action="halt",
        hard_control=False,
    )
    factories.SensorActionFactory(
        name="poweroff (software)",
        description="tries to poweroff the devices via software",
        action="poweroff",
        hard_control=False,
    )
    factories.SensorActionFactory(
        name="reboot (software)",
        description="tries to reboot the devices via software",
        action="reboot",
        hard_control=False,
    )
    factories.SensorActionFactory(
        name="halt (hardware)",
        description="tries to halt the devices via IPMI or APCs",
        action="halt",
        hard_control=True,
    )
    factories.SensorActionFactory(
        name="reboot (hardware)",
        description="tries to reboot the devices via IPMI or APCs",
        action="reboot",
        hard_control=True,
    )
    factories.SensorActionFactory(
        name="poweron (hardware)",
        description="tries to turn on the devices via IPMI or APCs",
        action="poweron",
        hard_control=True,
    )
    # ComCapabilities
    factories.ComCapability(
        matchcode=ComCapability_Model.MatchCode.hm.name,
        name="host-monitoring",
        info="init.at host-monitoring software",
        port_spec="{:d}/tcp".format(
            InstanceXML(quiet=True).get_port_dict("host-monitoring", command=True)
        ),
    )
    factories.ComCapability(
        matchcode=ComCapability_Model.MatchCode.snmp.name,
        name="SNMP",
        info="Simple Network Management Protocol",
        port_spec="161/tcp, 161/udp",
    )
    factories.ComCapability(
        matchcode=ComCapability_Model.MatchCode.ipmi.name,
        name="IPMI",
        info="Intelligent Platform Management Interface",
        port_spec="623/udp",
    )
    factories.ComCapability(
        matchcode=ComCapability_Model.MatchCode.wmi.name,
        name="WMI",
        info="Windows Management Instrumentation",
        port_spec="135/tcp",
    )
    factories.ComCapability(
        matchcode=ComCapability_Model.MatchCode.nrpe.name,
        name="NRPE",
        info="Nagios Remote Plugin Executor",
        port_spec="5666/tcp",
    )
    # hints
    _server_cfg = factories.ConfigHint(
        config_name="server",
        valid_for_meta=False,
        config_description="server device",
        help_text_short="activate device as a server",
        help_text_html="""
<h2>Use this option to activate server functionality</h2>
""",
    )
    modules_cfg = factories.ConfigHint(
        config_name="modules_system",
        config_description="modules system (client part)",
        valid_for_meta=True,
        help_text_short="activate module system",
        help_text_html="""
<h2>Enable the module system</h2>
""",
    )
    factories.ConfigScriptHint(
        config_hint=modules_cfg,
        script_name="client_modules",
        help_text_short="configures module access for clients",
        help_text_html="""
<h3>Enables the module system on a client</h3>
May be relative to the NFS4 root export
""",
        ac_flag=True,
        ac_description="config script",
        ac_value="""
# add link
config.add_link_object("/opt/modulefiles", "/.opt/modulefiles")
""",
    )
    # modules export
    modules_export_cfg = factories.ConfigHint(
        config_name="modules_export",
        exact_match=False,
        config_description="export entry for the modules share (server)",
        valid_for_meta=True,
        help_text_short="export entry for the modules share",
        help_text_html="""
<h2>Configures an export entry for the modules system</h2>
Configures a cluster-wide filesystem share for the modules dir.
Attach to a device to create the corresponding automounter entries
""",
    )
    factories.ConfigVarHint(
        config_hint=modules_export_cfg,
        var_name="export",
        help_text_short="the directory to export",
        help_text_html="""
<h3>Define the directory to export</h3>
May be relative to the NFS4 root export
""",
        ac_flag=True,
        ac_type="str",
        ac_description="export path",
        ac_value="/opt/cluster/Modules/modulefiles",
    )
    factories.ConfigVarHint(
        config_hint=modules_export_cfg,
        var_name="import",
        help_text_short="the import path",
        help_text_html="""
<h3>Define the import path</h3>
Used for automounter maps
""",
        ac_flag=True,
        ac_type="str",
        ac_description="import path",
        ac_value="/.opt/modulefiles",
    )
    factories.ConfigVarHint(
        config_hint=modules_export_cfg,
        var_name="options",
        help_text_short="the mount options",
        help_text_html="""
<h3>Sets the mount options</h3>
Used for automounter maps
""",
        ac_flag=True,
        ac_type="str",
        ac_description="options",
        ac_value="-soft,tcp,soft,rsize=1048576,wsize=1048576,ac,vers=4,port=2049",
    )
    # export entries
    export_cfg = factories.ConfigHint(
        config_name="export",
        exact_match=False,
        config_description="export entry (share)",
        valid_for_meta=True,
        help_text_short="creates an export entry",
        help_text_html="""
<h2>Configures an export entry (for sharing)</h2>
Configures a cluster-wide filesystem share.
Attach to a device to create the corresponding automounter entries
""",
    )
    factories.ConfigVarHint(
        config_hint=export_cfg,
        var_name="export",
        help_text_short="the directory to export",
        help_text_html="""
<h3>Define the directory to export</h3>
May be relative to the NFS4 root export
""",
        ac_flag=True,
        ac_type="str",
        ac_description="export path",
        ac_value="/export",
    )
    factories.ConfigVarHint(
        config_hint=export_cfg,
        var_name="import",
        help_text_short="the import path",
        help_text_html="""
<h3>Define the import path</h3>
Used for automounter maps
""",
        ac_flag=True,
        ac_type="str",
        ac_description="import path",
        ac_value="/import",
    )
    factories.ConfigVarHint(
        config_hint=export_cfg,
        var_name="options",
        help_text_short="the mount options",
        help_text_html="""
<h3>Sets the mount options</h3>
Used for automounter maps
""",
        ac_flag=True,
        ac_type="str",
        ac_description="options",
        ac_value="-soft,tcp,soft,rsize=1048576,wsize=1048576,ac,vers=4,port=2049",
    )
    factories.ConfigVarHint(
        config_hint=export_cfg,
        var_name="create_automount_entries",
        help_text_short="create automount entries",
        help_text_html="""
<h3>Control creation of automounter entries in YP / LDAP</h3>
Used for automounter maps for normal exports
""",
        ac_flag=True,
        ac_type="bool",
        ac_description="autofs creation",
        ac_value=True,
    )
    # home export entries
    homedir_export_cfg = factories.ConfigHint(
        config_name="homedir_export",
        exact_match=False,
        config_description="export entry (share) for home",
        valid_for_meta=True,
        help_text_short="creates an export entry for home",
        help_text_html="""
<h2>Configures an export entry (for sharing)</h2>
Configures a cluster-wide filesystem share.
Attach to a device to create the corresponding automounter entries
""",
    )
    factories.ConfigVarHint(
        config_hint=homedir_export_cfg,
        var_name="homeexport",
        help_text_short="the directory to export",
        help_text_html="""
<h3>Define the directory to export</h3>
May be relative to the NFS4 root export
""",
        ac_flag=True,
        ac_type="str",
        ac_description="export path",
        ac_value="/export_change_me",
    )
    factories.ConfigVarHint(
        config_hint=homedir_export_cfg,
        var_name="createdir",
        help_text_short="where to create the homes",
        help_text_html="""
<h3>Define the creation path</h3>
Used by the clusterserver, can be different from export_path
(for example when NFSv4 is used)
""",
        ac_flag=True,
        ac_type="str",
        ac_description="create path",
        ac_value="/create_change_me",
    )
    factories.ConfigVarHint(
        config_hint=homedir_export_cfg,
        var_name="options",
        help_text_short="the mount options",
        help_text_html="""
<h3>Sets the mount options</h3>
Used for automounter maps
""",
        ac_flag=True,
        ac_type="str",
        ac_description="options",
        ac_value="-soft,tcp,soft,rsize=1048576,wsize=1048576,ac,vers=4,port=2049",
    )
    factories.ConfigVarHint(
        config_hint=homedir_export_cfg,
        var_name="create_automount_entries",
        help_text_short="create automount entries",
        help_text_html="""
<h3>Control creation of automounter entries in YP / LDAP</h3>
Used for automounter maps (for homedir exports)
""",
        ac_flag=True,
        ac_type="bool",
        ac_description="autofs creation",
        ac_value=True,
    )
    ldap_server_cfg = factories.ConfigHint(
        config_name="ldap_server",
        config_description="LDAP Server",
        valid_for_meta=False,
        help_text_short="device acts as an LDAP-server",
        help_text_html="""
<h2>Enable LDAP-server functionality</h2>
The following server commands are available:
<ul>
<li><tt>init_ldap_config</tt> Create basic LDAP entries</li>
<li><tt>sync_ldap_config</tt> Syncs the LDAP tree with the Cluster database</li>
</ul>
"""
    )
    factories.ConfigVarHint(
        config_hint=ldap_server_cfg,
        var_name="base_dn",
        help_text_short="define LDAP base DN",
        help_text_html="""
<h3>Define the base DN for the LDAP sync</h3>
""",
        ac_flag=True,
        ac_type="str",
        ac_description="Base DN",
        ac_value="dc=test,dc=ac,dc=at",
    )
    factories.ConfigVarHint(
        config_hint=ldap_server_cfg,
        var_name="admin_cn",
        help_text_short="CN of the admin user",
        help_text_html="""
<h3>CN of the admin user</h3>
Enter without 'cn=', in most cases admin is enough
""",
        ac_flag=True,
        ac_type="str",
        ac_description="admin CN (relative to base DN without 'cn=')",
        ac_value="admin",
    )
    factories.ConfigVarHint(
        config_hint=ldap_server_cfg,
        var_name="root_passwd",
        help_text_short="password of the admin user",
        help_text_html="""
<h3>Password of the admin user</h3>
Stored as cleartext password, handle with care.
""",
        ac_flag=True,
        ac_type="str",
        ac_description="LDAP admin password",
        ac_value="changeme",
    )
    factories.ConfigVarHint(
        config_hint=ldap_server_cfg,
        var_name="user_object_classes",
        help_text_short="object classes for user objects",
        help_text_html="""
<h3>Object Classes to use for user objects</h3>
A space (or comma) separated list of object classes to use for user objects.
Can contain one or more of
<ul>
<li>account</li>
<li>posixAccount</li>
<li>shadowAccount</li>
<li>top</li>
</ul>
"""
    )
    factories.ConfigVarHint(
        config_hint=ldap_server_cfg,
        var_name="group_object_classes",
        help_text_short="object classes for group objects",
        help_text_html="""
<h3>Object Classes to use for group objects</h3>
A space (or comma) separated list of object classes to use for group objects.
Can contain one or more of
<ul>
<li>posixGroup</li>
<li>top</li>
<li>namedObject</li>
</ul>
"""
    )
    factories.ConfigVarHint(
        config_hint=ldap_server_cfg,
        var_name="group_dn_template",
        help_text_short="template to create group dn",
        help_text_html="""
<h3>Template to specify group DN (distinguished name)</h3>
The template to create the group DN. Defaults to<br>
cn={GROUPNAME}<br>
where GROUPNAME expands to the name of the group.
"""
    )
    factories.ConfigVarHint(
        config_hint=ldap_server_cfg,
        var_name="user_dn_template",
        help_text_short="template to create user dn",
        help_text_html="""
<h3>Template to specify user DN (distinguished name)</h3>
The template to create the user DN. Defaults to<br>
uid={USERNAME}<br>
where USERNAME expands to the login name of the user.
"""
    )
    factories.ConfigVarHint(
        config_hint=ldap_server_cfg,
        var_name="group_base_template",
        help_text_short="template to create the group base dn",
        help_text_html="""
<h3>Template to specify the group base DN</h3>
This template defines the DN for groups. A full group DN consists of
the group_dn_template plus the group_base_template:<br>
GROUP_DN={GROUP_DN_TEMPLATE},{GROUP_BASE_TEMPLATE}
"""
    )
    factories.ConfigVarHint(
        config_hint=ldap_server_cfg,
        var_name="user_base_template",
        help_text_short="template to create the user base dn",
        help_text_html="""
<h3>Template to specify the user base DN</h3>
This template defines the DN for users. A full user DN consists of
the user_dn_template plus the user_base_template:<br>
USER_DN={USER_DN_TEMPLATE},{USER_BASE_TEMPLATE}
"""
    )
    _add_snmp_fixtures()
    add_fixtures(**options)
try:
    from initat.tools import sge_tools
except ImportError:
    sge_tools = None

RMS_ADDON_KEYS = [
    key for key in sys.modules.keys()
    if key.startswith("initat.cluster.frontend.rms_addons.") and sys.modules[key]
]
RMS_ADDONS = [
    sys.modules[key].modify_rms() for key in RMS_ADDON_KEYS
    if key.split(".")[-1] not in ["base"]
]
# memcached port and address
MC_PORT = InstanceXML(quiet=True).get_port_dict("memcached", command=True)
MC_ADDRESS = "127.0.0.1"

logger = logging.getLogger("cluster.rms")

if sge_tools:
    class ThreadLockedSGEInfo(sge_tools.SGEInfo):
        # sge_info object with thread lock layer
        def __init__(self):
            self._init = False

        def ensure_init(self):
            if not self._init:
                self._init = True
                _srv_type = "rms-server"
from lxml import etree

from initat.cluster.backbone.models import device
from initat.icsw.service.instance import InstanceXML
from initat.tools import uuid_tools, logging_tools, server_command
from initat.tools.config_tools import server_check, device_with_config, RouterObject
from initat.cluster.backbone.server_enums import icswServiceEnum

logger = logging.getLogger("cluster.routing")


def _log(what, log_level):
    logger.log(log_level, what)


_INSTANCE = InstanceXML(_log)


def get_type_from_config(c_name):
    _REVERSE_MAP = {
        "package_server": "package",
        "package-server": "package",
        "config-server": "config",
        "config_server": "config",
    }
    return _REVERSE_MAP.get(c_name, None)


def get_server_uuid(srv_type=None, uuid=None):
    if uuid is None:
        uuid = uuid_tools.get_uuid().get_urn()
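# Behaviour sketch for get_type_from_config() above: it accepts both dash and
# underscore spellings of a config name and falls back to None for anything
# outside the reverse map ("some-other-config" is an illustrative unknown name).
#
#   get_type_from_config("package-server")    # -> "package"
#   get_type_from_config("config_server")     # -> "config"
#   get_type_from_config("some-other-config") # -> None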
def __init__(self, log_com, full_build, routing_fingerprint=None, router_obj=None):
    tm = logging_tools.MeasureTime(log_com=self.log)
    self.log_com = log_com
    self.router = routing.SrvTypeRouting(log_com=self.log_com)
    self.instance_xml = InstanceXML(log_com=self.log, quiet=True)
    # build cache to speed up config generation
    # stores various cached objects
    # routing handling
    if router_obj is None:
        # slave, no consumer
        self.consumer = None
        self.routing_fingerprint = routing_fingerprint
        # must exist
        self.__trace_gen = MonHostTraceGeneration.objects.get(
            Q(fingerprint=self.routing_fingerprint))
    else:
        # master, install the egg consumer
        self.consumer = server_mixins.EggConsumeObject(self)
        self.consumer.init(
            {"SERVICE_ENUM_NAME": icswServiceEnum.monitor_server.name})
        self.routing_fingerprint = router_obj.fingerprint
        # get generation
        try:
            self.__trace_gen = MonHostTraceGeneration.objects.get(
                Q(fingerprint=self.routing_fingerprint))
        except MonHostTraceGeneration.DoesNotExist:
            self.log("creating new trace generation")
            self.__trace_gen = router_obj.create_trace_generation()
        # delete old ones
        MonHostTrace.objects.exclude(
            Q(generation=self.__trace_gen)).delete()
    # global luts
    # print("i0")
    self.mcc_lut_3 = {
        _check.pk: _check for _check in mon_check_command.objects.all()
    }
    # add dummy entries
    for _value in self.mcc_lut_3.values():
        # why ? FIXME
        # _value.mccs_id = None
        # _value.check_command_pk = _value.pk
        pass
    self.mcc_lut = {
        key: (v0, v1, v2)
        for key, v0, v1, v2 in mon_check_command.objects.all().values_list(
            "pk", "name", "description", "config_rel__name")
    }
    # lookup table for config -> mon_check_commands
    self.mcc_lut_2 = {}
    for v_list in mon_check_command.objects.all().values_list(
            "name", "config_rel__name"):
        self.mcc_lut_2.setdefault(v_list[1], []).append(v_list[0])
    # print("i1")
    # import pprint
    # pprint.pprint(self.mcc_lut)
    # host list, set from caller
    self.host_list = []
    self.dev_templates = None
    self.serv_templates = None
    self.single_build = False
    self.debug = False
    self.__var_cache = VarCache(
        prefill=full_build,
        def_dict={
            "SNMP_VERSION": 2,
            "SNMP_READ_COMMUNITY": "public",
            "SNMP_WRITE_COMMUNITY": "private",
        })
    self.join_char = "_" if global_config["SAFE_NAMES"] else " "
    # device_group user access
    self.dg_user_access = {}
    mon_user_pks = list(
        user.objects.filter(Q(mon_contact__pk__gt=0)).values_list(
            "pk", flat=True))
    for _dg in device_group.objects.all().prefetch_related("user_set"):
        self.dg_user_access[_dg.pk] = list([
            _user for _user in _dg.user_set.all()
            if _user.pk in mon_user_pks
        ])
    # all hosts dict
    self.all_hosts_dict = {
        cur_dev.pk: cur_dev
        for cur_dev in device.objects.filter(
            Q(device_group__enabled=True) & Q(enabled=True)
        ).select_related(
            "domain_tree_node", "device_group"
        ).prefetch_related("monhosttrace_set")
    }
    for _host in self.all_hosts_dict.values():
        _host.reachable = True
    # print(_res)
    # traces in database
    self.log("traces found in database: {:d}".format(
        MonHostTrace.objects.all().count()))
    # read traces
    self.__host_traces = {}
    for _trace in MonHostTrace.objects.filter(
            Q(generation=self.__trace_gen)):
        self.__host_traces.setdefault(_trace.device_id, []).append(_trace)
    # import pprint
    # pprint.pprint(self.__host_traces)
    # host / service clusters
    clusters = {}
    for _obj, _name in [
        (mon_host_cluster, SpecialTypesEnum.mon_host_cluster),
        (mon_service_cluster, SpecialTypesEnum.mon_service_cluster)
    ]:
        _lut = {}
        _query = _obj.objects.all()
        if _name == SpecialTypesEnum.mon_service_cluster:
            _query = _query.select_related("mon_check_command")
        for _co in _query:
            _lut[_co.pk] = _co.main_device_id
            _co.devices_list = []
            clusters.setdefault(_name, {}).setdefault(
                _co.main_device_id, []).append(_co)
        for _entry in _obj.devices.through.objects.all():
            if _name == SpecialTypesEnum.mon_host_cluster:
                _pk = _entry.mon_host_cluster_id
            else:
                _pk = _entry.mon_service_cluster_id
            _tco = [
                _co for _co in clusters[_name][_lut[_pk]] if _co.pk == _pk
            ][0]
            _tco.devices_list.append(_entry.device_id)
            # clusters[_name][_entry.]
    self.__clusters = clusters
    # host / service dependencies
    deps = {}
    for _obj, _name in [
        (mon_host_dependency, SpecialTypesEnum.mon_host_dependency),
        (mon_service_dependency, SpecialTypesEnum.mon_service_dependency)
    ]:
        _lut = {}
        _query = _obj.objects.all().prefetch_related(
            "devices", "dependent_devices")
        if _name == SpecialTypesEnum.mon_host_dependency:
            _query = _query.select_related(
                "mon_host_dependency_templ",
                "mon_host_dependency_templ__dependency_period",
            )
        else:
            _query = _query.select_related(
                "mon_service_cluster",
                "mon_check_command",
                "dependent_mon_check_command",
                "mon_service_dependency_templ",
                "mon_service_dependency_templ__dependency_period",
            )
        for _do in _query:
            # == slaves
            _do.devices_list = []
            # == dependent devices
            _do.master_list = []
            _lut[_do.pk] = []
            for _dd in _do.dependent_devices.all():
                _lut[_do.pk].append(_dd.pk)
                deps.setdefault(_name, {}).setdefault(_dd.pk, []).append(_do)
        for _entry in _obj.devices.through.objects.all():
            if _name == SpecialTypesEnum.mon_host_dependency:
                _pk = _entry.mon_host_dependency_id
            else:
                _pk = _entry.mon_service_dependency_id
            for _devpk in _lut[_pk]:
                _tdo = [
                    _do for _do in deps[_name][_devpk] if _do.pk == _pk
                ][0]
                _tdo.devices_list.append(_entry.device_id)
        for _entry in _obj.dependent_devices.through.objects.all():
            if _name == SpecialTypesEnum.mon_host_dependency:
                _pk = _entry.mon_host_dependency_id
            else:
                _pk = _entry.mon_service_dependency_id
            for _devpk in _lut[_pk]:
                _tdo = [
                    _do for _do in deps[_name][_devpk] if _do.pk == _pk
                ][0]
                _tdo.master_list.append(_entry.device_id)
    self.__dependencies = deps
    # init snmp sink
    self.snmp_sink = SNMPSink(log_com)
    tm.step("init build_cache")
def __init__(self):
    argparse.ArgumentParser.__init__(
        self, "send command to servers of the init.at Clustersoftware")
    self.inst_xml = InstanceXML(quiet=True)
    inst_list = []
    for inst in self.inst_xml.get_all_instances():
        if len(inst.xpath(".//network/ports/port[@type='command']")):
            inst_list.append(inst.get("name"))
    self.add_argument("arguments", nargs="+",
                      help="additional arguments, first one is command")
    self.add_argument("-t", help="set timeout [%(default)d]", default=10,
                      type=int, dest="timeout")
    self.add_argument("-p", help="port or instance/service [%(default)s]",
                      default="{:d}".format(
                          self.inst_xml.get_port_dict(
                              icswServiceEnum.host_monitoring, command=True)),
                      dest="port", type=str)
    self.add_argument("-P", help="protocoll [%(default)s]", type=str,
                      default="tcp", choices=["tcp", "ipc"], dest="protocoll")
    self.add_argument("-S", help="servername [%(default)s]", type=str,
                      default="collrelay", dest="server_name")
    self.add_argument("-H", "--host", help="host [%(default)s] or server",
                      default="localhost", dest="host")
    self.add_argument("-v", help="verbose mode [%(default)s]", default=False,
                      dest="verbose", action="store_true")
    self.add_argument("-i", help="set identity substring [%(default)s]",
                      type=str, default="sc", dest="identity_substring")
    self.add_argument(
        "-I", help="set identity string [%(default)s], has precedence over -i",
        type=str, default="", dest="identity_string")
    self.add_argument("-n", help="set number of iterations [%(default)d]",
                      type=int, default=1, dest="iterations")
    self.add_argument("-q", help="be quiet [%(default)s], overrides verbose",
                      default=False, action="store_true", dest="quiet")
    self.add_argument("--raw", help="do not convert to server_command",
                      default=False, action="store_true")
    self.add_argument("--root", help="connect to root-socket [%(default)s]",
                      default=False, action="store_true")
    self.add_argument("--kv", help="key-value pair, colon-separated [key:value]",
                      action="append")
    self.add_argument(
        "--kva", help="key-attribute pair, colon-separated [key:attribute:value]",
        action="append")
    self.add_argument("--kv-path", help="path to store key-value pairs under",
                      type=str, default="")
    self.add_argument(
        "--split", help="set read socket (for split-socket command), [%(default)s]",
        type=str, default="")
    self.add_argument("--only-send", help="only send command, [%(default)s]",
                      default=False, action="store_true")
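# Usage sketch: since the class above subclasses argparse.ArgumentParser, it is
# driven via parse_args(). SendCommandParser is a placeholder for the actual
# class name (only __init__ is shown), and "status" / "node01" are
# illustrative values, not documented commands.
#
#   opts = SendCommandParser().parse_args(["status", "-H", "node01", "-n", "3"])
#   # opts.arguments == ["status"], opts.host == "node01", opts.iterations == 3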
def handle(self, **options):
    if options["parse_file"]:
        f = open("disk_output_data")
        lines = f.read()
        lines = lines.split("\n")
        f = open(ASSET_MANAGEMENT_TEST_LOCATION, "rb")
        data = pickle.load(f)
        f.close()
        new_device_for_next_line = False
        current_device = None
        parse_partition = False
        parse_partition_cnt = 0
        parse_disk = False
        parse_disk_cnt = 0
        parse_disk_device_name = ""
        parse_disk_size = ""
        parse_partition_mountpoint = ""
        parse_partition_size = ""
        parse_logical = False
        parse_logical_cnt = 0
        parse_logical_device_name = ""
        parse_logical_size = ""
        parse_logical_free = ""
        parse_logical_filesystem = ""
        for line in lines:
            if line == "------":
                new_device_for_next_line = True
                continue
            if new_device_for_next_line:
                new_device_for_next_line = False
                for device in data:
                    if device.identifier == line:
                        current_device = device
                        current_device.expected_hdds = []
                        current_device.expected_partitions = []
                        current_device.expected_logical_volumes = []
            if line == "--DISK--":
                parse_partition = False
                parse_partition_cnt = 0
                parse_disk = True
                parse_disk_cnt = 0
                parse_logical = False
                parse_logical_cnt = 0
                continue
            if parse_disk:
                if parse_disk_cnt == 0:
                    parse_disk_device_name = line
                    parse_disk_cnt += 1
                elif parse_disk_cnt == 1:
                    parse_disk_size = int(line)
                    parse_disk_cnt += 1
                elif parse_disk_cnt == 2:
                    parse_disk_serial = line
                    parse_disk = False
                    parse_disk_cnt = 0
                    new_hdd = ExpectedHdd(parse_disk_device_name,
                                          parse_disk_serial, parse_disk_size)
                    current_device.expected_hdds.append(new_hdd)
            if line == "--PARTITION--":
                parse_partition = True
                parse_partition_cnt = 0
                parse_disk = False
                parse_disk_cnt = 0
                parse_logical = False
                parse_logical_cnt = 0
                continue
            if parse_partition:
                if parse_partition_cnt == 0:
                    parse_partition_mountpoint = line
                    parse_partition_cnt += 1
                elif parse_partition_cnt == 1:
                    parse_partition_size = int(line)
                    parse_partition_cnt += 1
                elif parse_partition_cnt == 2:
                    parse_partition = False
                    parse_partition_cnt = 0
                    _partition = ExpectedPartition(
                        parse_disk_device_name, parse_partition_mountpoint,
                        parse_partition_size, line)
                    current_device.expected_partitions.append(_partition)
            if line == "--LOGICAL--":
                parse_partition = False
                parse_partition_cnt = 0
                parse_disk = False
                parse_disk_cnt = 0
                parse_logical = True
                parse_logical_cnt = 0
                continue
            if parse_logical:
                if parse_logical_cnt == 0:
                    parse_logical_device_name = line
                    parse_logical_cnt += 1
                elif parse_logical_cnt == 1:
                    parse_logical_size = None
                    if line != "None":
                        parse_logical_size = int(line)
                    parse_logical_cnt += 1
                elif parse_logical_cnt == 2:
                    parse_logical_free = None
                    if line != "None":
                        parse_logical_free = int(line)
                    parse_logical_cnt += 1
                elif parse_logical_cnt == 3:
                    parse_logical_filesystem = line
                    parse_logical_cnt += 1
                elif parse_logical_cnt == 4:
                    parse_logical = False
                    parse_logical_cnt = 0
                    elv = ExpectedLogicalVolume(
                        parse_logical_device_name, parse_logical_size,
                        parse_logical_free, parse_logical_filesystem, line)
                    current_device.expected_logical_volumes.append(elv)
        f = open(ASSET_MANAGEMENT_TEST_LOCATION, "wb")
        pickle.dump(data, f)
        f.close()
        return
    if options['delete_index'] is not None:
        self.handle_delete(options['delete_index'])
        return
    if options['list']:
        self.handle_list()
        return
    if options['expected_hdd'] is not None:
        self.handle_add_expected_hdd(options['expected_hdd'])
        return
    if options['expected_partition'] is not None:
        self.handle_add_expected_partition(options['expected_partition'])
        return
    if options['expected_logical_volume'] is not None:
        self.handle_add_expected_logical_volume(
            options['expected_logical_volume'])
        return
    for _property in options['ignore_tests']:
        if _property not in self.ignorable_properties:
            print("Invalid property: {}".format(_property))
            return
    if options['scan_type'] is None:
        print("Scan Type missing")
        return
    if options['scan_type'] not in ['HM', 'NRPE']:
        print("Invalid Scan Type: {}".format(options['scan_type']))
        # bail out on invalid scan type (the original fell through here)
        return
    if options['ip'] is None:
        print("IP/Hostname missing")
        return
    if options['identifier'] is None:
        print("Identifier for this entry missing")
        return
    result_dict = {}
    if options['scan_type'] == "HM":
        scan_type = ScanType.HM
        hm_port = InstanceXML(quiet=True).get_port_dict(
            "host-monitoring", command=True)
        conn_str = "tcp://{}:{:d}".format(options['ip'], hm_port)
        for asset_type, hm_command in list(ASSETTYPE_HM_COMMAND_MAP.items()):
            result_dict[asset_type] = None
            print("Running command [{}] on {}".format(hm_command, conn_str))
            srv_com = server_command.srv_command(command=hm_command)
            new_con = net_tools.ZMQConnection(hm_command, timeout=30)
            new_con.add_connection(conn_str, srv_com)
            result = new_con.loop()
            if result:
                result = result[0]
                if result:
                    (status_string, server_result_code) = result.get_result()
                    if server_result_code == server_command.SRV_REPLY_STATE_OK:
                        result_dict[asset_type] = etree.tostring(result.tree)
        valid = all([
            result_dict[asset_type] is not None
            for asset_type in ASSETTYPE_HM_COMMAND_MAP
        ])
    else:
        scan_type = ScanType.NRPE
        for asset_type, nrpe_command in list(ASSETTYPE_NRPE_COMMAND_MAP.items()):
            result_dict[asset_type] = None
            _com = "/opt/cluster/sbin/check_nrpe -H{} -2 -P1048576 -p{} -n -c{} -t{}".format(
                options['ip'],
                DEFAULT_NRPE_PORT,
                nrpe_command,
                1000,
            )
            output = subprocess.check_output(_com.split(" "))
            if output and len(output) > 0:
                result_dict[asset_type] = output
        valid = all([
            result_dict[asset_type] is not None
            for asset_type in ASSETTYPE_NRPE_COMMAND_MAP
        ])
    if valid:
        try:
            f = open(ASSET_MANAGEMENT_TEST_LOCATION, "rb")
            data = pickle.load(f)
            f.close()
        except IOError:
            data = []
        data.append(
            ResultObject(options['identifier'], options['ignore_tests'],
                         result_dict, scan_type))
        f = open(ASSET_MANAGEMENT_TEST_LOCATION, "wb")
        pickle.dump(data, f)
        f.close()
        print("New entry added")
    else:
        print("Failed to generate new entry")
        missing_types = []
        for asset_type, result in list(result_dict.items()):
            if result is None:
                missing_types.append(asset_type)
        print("No result for: {}".format(missing_types))
def ClientCode(global_config):
    from initat.host_monitoring import modules
    if global_config["VERBOSE"] > 1:
        print("{:d} import errors:".format(len(modules.IMPORT_ERRORS)))
        for mod, com, _str in modules.IMPORT_ERRORS:
            print("{:<30s} {:<20s} {}".format(com, mod.split(".")[-1], _str))
    conn_str = "tcp://{}:{:d}".format(
        global_config["HOST"], global_config["COMMAND_PORT"])
    arg_stuff = global_config.get_argument_stuff()
    arg_list = arg_stuff["arg_list"]
    com_name = arg_list.pop(0)
    if com_name in modules.command_dict:
        srv_com = server_command.srv_command(command=com_name)
        for src_key, dst_key in [("HOST", "host"), ("COMMAND_PORT", "port")]:
            srv_com[dst_key] = global_config[src_key]
        com_struct = modules.command_dict[com_name]
        try:
            cur_ns, rest = com_struct.handle_commandline(arg_list)
        except ValueError as what:
            # Python-3 exception syntax; args[1] carries the parse message
            ret = ExtReturn(
                limits.mon_STATE_CRITICAL,
                "error parsing: {}".format(what.args[1]))
        else:
            # see also struct.py in collrelay
            if hasattr(cur_ns, "arguments"):
                for arg_index, arg in enumerate(cur_ns.arguments):
                    srv_com["arguments:arg{:d}".format(arg_index)] = arg
                srv_com["arguments:rest"] = " ".join(rest)
            for key, value in vars(cur_ns).items():
                srv_com["namespace:{}".format(key)] = value
            result = net_tools.ZMQConnection(
                "{}:{:d}".format(global_config["IDENTITY_STRING"], os.getpid()),
                timeout=global_config["TIMEOUT"],
            ).add_connection(
                conn_str,
                srv_com,
                immediate=True,
            )
            if result:
                if global_config["COMMAND_PORT"] == InstanceXML(
                        quiet=True).get_port_dict("host-monitoring", command=True):
                    error_result = result.xpath(
                        ".//ns:result[@state != '0']", smart_strings=False)
                    if error_result:
                        error_result = error_result[0]
                        ret = ExtReturn(
                            int(error_result.attrib["state"]),
                            error_result.attrib["reply"])
                    else:
                        if hasattr(com_struct, "interpret"):
                            ret = ExtReturn.get_ext_return(
                                com_struct.interpret(result, cur_ns))
                        else:
                            _result = result.xpath(
                                ".//ns:result", smart_strings=False)[0]
                            ret = ExtReturn(
                                server_command.srv_reply_to_nag_state(
                                    int(_result.attrib["state"])),
                                _result.attrib["reply"])
                else:
                    ret_str, ret_state = result.get_log_tuple()
                    ret = ExtReturn(
                        server_command.srv_reply_to_nag_state(ret_state),
                        ret_str)
            else:
                ret = ExtReturn(limits.mon_STATE_CRITICAL, "timeout")