def initialize(self, conf_reader, msgQlist, products):
    """Initialize configuration reader and internal msg queues."""
    # Base-class setup: scheduled monitor thread, then internal queues.
    super(IEMSensor, self).initialize(conf_reader)
    super(IEMSensor, self).initialize_msgQ(msgQlist)

    # Read configurations, hoisting the repeated section/key lookups.
    read_conf = self._conf_reader._get_value_with_default
    sensor_section = self.SENSOR_NAME.upper()
    sysinfo_section = self.SYSTEM_INFORMATION.upper()
    sysinfo_keys = COMMON_CONFIGS.get(sysinfo_section)

    self._log_file_path = read_conf(
        sensor_section, self.LOG_FILE_PATH_KEY, self.DEFAULT_LOG_FILE_PATH)
    self._timestamp_file_path = read_conf(
        sensor_section, self.TIMESTAMP_FILE_PATH_KEY,
        self.DEFAULT_TIMESTAMP_FILE_PATH)

    # Node identity from the system-information section.
    self._site_id = read_conf(
        sysinfo_section, sysinfo_keys.get(self.SITE_ID_KEY),
        self.DEFAULT_SITE_ID)
    self._rack_id = read_conf(
        sysinfo_section, sysinfo_keys.get(self.RACK_ID_KEY),
        self.DEFAULT_RACK_ID)
    self._node_id = read_conf(
        sysinfo_section, sysinfo_keys.get(self.NODE_ID_KEY),
        self.DEFAULT_NODE_ID)
    self._cluster_id = read_conf(
        sysinfo_section, sysinfo_keys.get(self.CLUSTER_ID_KEY),
        self.DEFAULT_CLUSTER_ID)
    return True
def initialize(self, conf_reader, msgQlist, product):
    """Initialize configuration reader and internal msg queues."""
    # Base-class setup: scheduled monitor thread, then internal queues.
    super(RAIDIntegritySensor, self).initialize(conf_reader)
    super(RAIDIntegritySensor, self).initialize_msgQ(msgQlist)

    # Alert bookkeeping defaults.
    self._alert_msg = None
    self._alert_resolved = True
    self._suspended = False

    read_conf = self._conf_reader._get_value_with_default
    sysinfo_keys = COMMON_CONFIGS.get(self.SYSTEM_INFORMATION)

    # Node identity, each falling back to '001' when unconfigured.
    self._site_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.SITE_ID), '001')
    self._cluster_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.CLUSTER_ID), '001')
    self._rack_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.RACK_ID), '001')
    self._node_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.NODE_ID), '001')

    # Sensor-specific settings. NOTE(review): `self.RAIDIntegritySensor`
    # is used as the config section name — presumably a class attribute
    # holding the section string; confirm it is not a typo.
    self._timestamp_file_path = read_conf(
        self.RAIDIntegritySensor, self.TIMESTAMP_FILE_PATH_KEY,
        self.DEFAULT_TIMESTAMP_FILE_PATH)
    self._polling_interval = int(read_conf(
        self.RAIDIntegritySensor, self.POLLING_INTERVAL,
        self.DEFAULT_POLLING_INTERVAL))
    return True
def initialize(self, conf_reader, msgQlist, product):
    """Initialize configuration reader and internal msg queues."""
    # Base-class setup: scheduled monitor thread, then internal queues.
    super(MemFaultSensor, self).initialize(conf_reader)
    super(MemFaultSensor, self).initialize_msgQ(msgQlist)

    read_conf = self._conf_reader._get_value_with_default
    sysinfo_keys = COMMON_CONFIGS.get(self.SYSTEM_INFORMATION_KEY)

    # Node identity, defaulting to '001' when unconfigured.
    self._site_id = read_conf(
        self.SYSTEM_INFORMATION_KEY, sysinfo_keys.get(self.SITE_ID_KEY),
        '001')
    self._cluster_id = read_conf(
        self.SYSTEM_INFORMATION_KEY, sysinfo_keys.get(self.CLUSTER_ID_KEY),
        '001')
    self._rack_id = read_conf(
        self.SYSTEM_INFORMATION_KEY, sysinfo_keys.get(self.RACK_ID_KEY),
        '001')
    self._node_id = read_conf(
        self.SYSTEM_INFORMATION_KEY, sysinfo_keys.get(self.NODE_ID_KEY),
        '001')

    # Which probe backend implements memory fault detection.
    mem_fault_utility = read_conf(
        self.name().capitalize(), self.PROBE, "procfs")

    self.polling_interval = int(read_conf(
        self.SENSOR_NAME.upper(), self.POLLING_INTERVAL_KEY,
        self.DEFAULT_POLLING_INTERVAL))

    # Resolve the utility through the tool factory; a lookup failure is
    # fatal for this sensor.
    self.tool_factory = ToolFactory()
    try:
        self._utility_instance = self._utility_instance or \
            self.tool_factory.get_instance(mem_fault_utility)
    except KeyError as key_error:
        logger.error(
            "Unable to get the instance of {} \
            Utility. Hence shutting down the sensor {}"
            .format(mem_fault_utility, MemFaultSensor.SENSOR_NAME))
        self.shutdown()

    # Persistent cache file for previously detected faults.
    cache_dir_path = os.path.join(DATA_PATH, self.CACHE_DIR_NAME)
    self.MEM_FAULT_SENSOR_DATA = os.path.join(
        cache_dir_path, f'MEM_FAULT_SENSOR_DATA_{self._node_id}')
    return True
def _configure_exchange(self, retry=False):
    """Configure the RabbitMQ exchange with defaults available"""
    conf = self._conf_reader
    try:
        # Connection / routing parameters for the ingress processor.
        self._virtual_host = conf._get_value_with_default(
            self.RABBITMQPROCESSOR, self.VIRT_HOST, 'SSPL')
        self._primary_rabbitmq_host = conf._get_value_with_default(
            self.RABBITMQPROCESSOR, self.PRIMARY_RABBITMQ_HOST, 'localhost')
        self._exchange_name = conf._get_value_with_default(
            self.RABBITMQPROCESSOR, self.EXCHANGE_NAME, 'sspl-in')
        self._queue_name = conf._get_value_with_default(
            self.RABBITMQPROCESSOR, self.QUEUE_NAME, 'actuator-req-queue')
        self._routing_key = conf._get_value_with_default(
            self.RABBITMQPROCESSOR, self.ROUTING_KEY, 'actuator-req-key')
        self._username = conf._get_value_with_default(
            self.RABBITMQPROCESSOR, self.USER_NAME, 'sspluser')
        self._password = conf._get_value_with_default(
            self.RABBITMQPROCESSOR, self.PASSWORD, 'sspl4ever')

        cluster_id = conf._get_value_with_default(
            self.SYSTEM_INFORMATION_KEY,
            COMMON_CONFIGS.get(self.SYSTEM_INFORMATION_KEY).get(
                self.CLUSTER_ID_KEY), '')
        node_id = conf._get_value_with_default(
            self.SYSTEM_INFORMATION_KEY,
            COMMON_CONFIGS.get(self.SYSTEM_INFORMATION_KEY).get(
                self.NODE_ID_KEY), '')

        # Stored password is encrypted; decrypt with a key derived from
        # the cluster id.
        decryption_key = encryptor.gen_key(cluster_id,
                                           ServiceTypes.RABBITMQ.value)
        self._password = encryptor.decrypt(decryption_key,
                                           self._password.encode('ascii'),
                                           "RabbitMQingressProcessor")

        # Make the routing key unique to this node instance.
        unique_routing_key = f'{self._routing_key}_node{node_id}'
        logger.info(f"Connecting using routing key: {unique_routing_key}")
        self._connection = RabbitMQSafeConnection(
            self._username, self._password, self._virtual_host,
            self._exchange_name, unique_routing_key, self._queue_name)
    except Exception as ex:
        logger.error("RabbitMQingressProcessor, _configure_exchange: %r" % ex)
def initialize(self, conf_reader, msgQlist, product):
    """Initialize configuration reader and internal msg queues."""
    # Base-class setup: scheduled monitor thread, then internal queues.
    super(RAIDsensor, self).initialize(conf_reader)
    super(RAIDsensor, self).initialize_msgQ(msgQlist)

    self._RAID_status_file = self._get_RAID_status_file()
    logger.info(f"Monitoring RAID status file: {self._RAID_status_file}")

    # Raw contents of the status file and the parsed per-mdX state.
    self._RAID_status_contents = "N/A"
    self._RAID_status = {}

    # Bookkeeping for drives/devices seen across scans.
    self._faulty_drive_list = {}
    self._faulty_device_list = {}
    self._drives = {}
    self._total_drives = {}
    self._devices = []
    self._missing_drv = {}
    self._prev_drive_dict = {}

    read_conf = self._conf_reader._get_value_with_default
    sysinfo_keys = COMMON_CONFIGS.get(self.SYSTEM_INFORMATION)

    # Node identity, defaulting to '001' when unconfigured.
    self._site_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.SITE_ID), '001')
    self._cluster_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.CLUSTER_ID), '001')
    self._rack_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.RACK_ID), '001')
    self._node_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.NODE_ID), '001')

    # Allow systemd to process all the drives so we can map device name
    # to serial numbers
    #time.sleep(120)
    return True
def initialize(self, conf_reader, msgQlist, product):
    """Initialize configuration reader and internal msg queues."""
    # Base-class setup: scheduled monitor thread, then internal queues.
    super(NodeControllerMsgHandler, self).initialize(conf_reader)
    super(NodeControllerMsgHandler, self).initialize_msgQ(msgQlist)

    # Use the FQDN as a meaningful host identifier.
    self.host_id = socket.getfqdn()

    # Actuator handles are created lazily; start them all as None.
    for attr in ('_HPI_actuator', '_GEM_actuator', '_PDU_actuator',
                 '_RAID_actuator', '_IPMI_actuator', '_hdparm_actuator',
                 '_smartctl_actuator', '_command_line_actuator',
                 '_NodeHW_actuator'):
        setattr(self, attr, None)

    self._import_products(product)

    self.setup = self._conf_reader._get_value_with_default(
        self.SYS_INFORMATION,
        COMMON_CONFIGS.get(self.SYS_INFORMATION).get(self.SETUP),
        "ssu")
    self.ipmi_client_name = None
def _configure_exchange(self, retry=False):
    """Configure the RabbitMQ exchange with defaults available"""
    try:
        read_conf = self._conf_reader._get_value_with_default

        # Connection / routing parameters for the logging processor.
        self._virtual_host = read_conf(
            self.LOGGINGPROCESSOR, self.VIRT_HOST, 'SSPL')
        self._exchange_name = read_conf(
            self.LOGGINGPROCESSOR, self.EXCHANGE_NAME, 'sspl-in')
        self._queue_name = read_conf(
            self.LOGGINGPROCESSOR, self.QUEUE_NAME, 'iem-queue')
        self._routing_key = read_conf(
            self.LOGGINGPROCESSOR, self.ROUTING_KEY, 'iem-key')
        self._username = read_conf(
            self.LOGGINGPROCESSOR, self.USER_NAME, 'sspluser')
        self._password = read_conf(
            self.LOGGINGPROCESSOR, self.PASSWORD, 'sspl4ever')

        cluster_id = read_conf(
            self.SYSTEM_INFORMATION_KEY,
            COMMON_CONFIGS.get(self.SYSTEM_INFORMATION_KEY).get(
                self.CLUSTER_ID_KEY), '')

        # Stored password is encrypted; decrypt with a key derived from
        # the cluster id.
        decryption_key = encryptor.gen_key(cluster_id,
                                           ServiceTypes.RABBITMQ.value)
        self._password = encryptor.decrypt(decryption_key,
                                           self._password.encode('ascii'),
                                           "LoggingProcessor")

        self._connection = RabbitMQSafeConnection(
            self._username, self._password, self._virtual_host,
            self._exchange_name, self._routing_key, self._queue_name)
    except Exception as ex:
        logger.error("_configure_exchange: %s" % ex)
def initialize(self, conf_reader, msgQlist, product):
    """Initialize configuration reader and internal msg queues."""
    # Base-class setup: scheduled monitor thread, then internal queues.
    super(CPUFaultSensor, self).initialize(conf_reader)
    super(CPUFaultSensor, self).initialize_msgQ(msgQlist)

    read_conf = self._conf_reader._get_value_with_default
    sysinfo_keys = COMMON_CONFIGS.get(self.SYSTEM_INFORMATION_KEY)

    # Node identity, defaulting to '001' when unconfigured.
    self._site_id = read_conf(
        self.SYSTEM_INFORMATION_KEY, sysinfo_keys.get(self.SITE_ID_KEY),
        '001')
    self._cluster_id = read_conf(
        self.SYSTEM_INFORMATION_KEY, sysinfo_keys.get(self.CLUSTER_ID_KEY),
        '001')
    self._rack_id = read_conf(
        self.SYSTEM_INFORMATION_KEY, sysinfo_keys.get(self.RACK_ID_KEY),
        '001')
    self._node_id = read_conf(
        self.SYSTEM_INFORMATION_KEY, sysinfo_keys.get(self.NODE_ID_KEY),
        '001')

    # Which probe backend implements CPU fault detection.
    cpu_fault_utility = read_conf(
        self.name().capitalize(), self.PROBE, 'sysfs')

    # Resolve the utility through the tool factory; any failure here is
    # fatal for this sensor.
    self.tool_factory = ToolFactory()
    try:
        self._utility_instance = self._utility_instance or \
            self.tool_factory.get_instance(cpu_fault_utility)
    except Exception as e:
        logger.error(
            f"Error while initializing, shutting down CPUFaultSensor : {e}"
        )
        self.shutdown()

    # Persistent cache file for previously detected faults.
    cache_dir_path = os.path.join(DATA_PATH, self.CACHE_DIR_NAME)
    self.CPU_FAULT_SENSOR_DATA = os.path.join(
        cache_dir_path, f'CPU_FAULT_SENSOR_DATA_{self._node_id}')
    return True
def _read_config(self):
    """Configure the RabbitMQ exchange with defaults available"""
    try:
        read_conf = self._conf_reader._get_value_with_default

        self._virtual_host = read_conf(
            self.RABBITMQPROCESSOR, self.VIRT_HOST, 'SSPL')

        # Common RabbitMQ connection settings.
        self._primary_rabbitmq_host = read_conf(
            self.RABBITMQPROCESSOR, self.PRIMARY_RABBITMQ_HOST, 'localhost')

        # Sensor-message routing.
        self._queue_name = read_conf(
            self.RABBITMQPROCESSOR, self.QUEUE_NAME, 'sensor-queue')
        self._exchange_name = read_conf(
            self.RABBITMQPROCESSOR, self.EXCHANGE_NAME, 'sspl-out')
        self._routing_key = read_conf(
            self.RABBITMQPROCESSOR, self.ROUTING_KEY, 'sensor-key')

        # Ack-message routing.
        self._ack_queue_name = read_conf(
            self.RABBITMQPROCESSOR, self.ACK_QUEUE_NAME, 'sensor-queue')
        self._ack_routing_key = read_conf(
            self.RABBITMQPROCESSOR, self.ACK_ROUTING_KEY, 'sensor-key')

        # Credentials and message-signing parameters.
        self._username = read_conf(
            self.RABBITMQPROCESSOR, self.USER_NAME, 'sspluser')
        self._password = read_conf(
            self.RABBITMQPROCESSOR, self.PASSWORD, '')
        self._signature_user = read_conf(
            self.RABBITMQPROCESSOR, self.SIGNATURE_USERNAME, 'sspl-ll')
        self._signature_token = read_conf(
            self.RABBITMQPROCESSOR, self.SIGNATURE_TOKEN, 'FAKETOKEN1234')
        self._signature_expires = read_conf(
            self.RABBITMQPROCESSOR, self.SIGNATURE_EXPIRES, "3600")

        # Optional IEM rerouting.
        self._iem_route_addr = read_conf(
            self.RABBITMQPROCESSOR, self.IEM_ROUTE_ADDR, '')
        self._iem_route_exchange_name = read_conf(
            self.RABBITMQPROCESSOR, self.IEM_ROUTE_EXCHANGE_NAME, 'sspl-in')

        cluster_id = read_conf(
            self.SYSTEM_INFORMATION_KEY,
            COMMON_CONFIGS.get(self.SYSTEM_INFORMATION_KEY).get(
                self.CLUSTER_ID_KEY), '')

        # Stored password is encrypted; decrypt with a key derived from
        # the cluster id.
        decryption_key = encryptor.gen_key(cluster_id,
                                           ServiceTypes.RABBITMQ.value)
        self._password = encryptor.decrypt(decryption_key,
                                           self._password.encode('ascii'),
                                           "RabbitMQegressProcessor")

        if self._iem_route_addr != "":
            logger.info(" Routing IEMs to host: %s" % self._iem_route_addr)
            logger.info(" Using IEM exchange: %s"
                        % self._iem_route_exchange_name)
    except Exception as ex:
        logger.error("RabbitMQegressProcessor, _read_config: %r" % ex)
def __init__(self, executor, conf_reader):
    """Store the executor and read node identity from configuration."""
    super(NodeHWactuator, self).__init__()

    read_conf = conf_reader._get_value_with_default
    sysinfo_keys = COMMON_CONFIGS.get(self.SYSTEM_INFORMATION)

    # Node identity, defaulting to '001' when unconfigured.
    self._site_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.SITE_ID), '001')
    self._rack_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.RACK_ID), '001')
    self._node_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.NODE_ID), '001')

    # Use the FQDN as a meaningful host identifier.
    self.host_id = socket.getfqdn()

    self.sensor_id_map = None
    self._executor = executor
    self.fru_specific_info = {}
    self._resource_id = ""
    self._sensor_type = ""
def initialize(self, conf_reader, msgQlist, product):
    """Initialize configuration reader and internal msg queues."""
    # Base-class setup: scheduled monitor thread, then internal queues.
    super(RealStorActuatorMsgHandler, self).initialize(conf_reader)
    super(RealStorActuatorMsgHandler, self).initialize_msgQ(msgQlist)

    # The actuator is created lazily on first request.
    self._real_stor_actuator = None

    self._import_products(product)

    sys_info = self.SYS_INFORMATION
    self.setup = self._conf_reader._get_value_with_default(
        sys_info, COMMON_CONFIGS.get(sys_info).get(self.SETUP), "ssu")
def __init__(self):
    # Validate configuration file for required valid values.
    # NOTE(review): if ConfigReader() raises, the error is only logged and
    # self.conf_reader stays unset, so the read below will fail with
    # AttributeError — confirm whether construction should abort instead.
    try:
        self.conf_reader = ConfigReader()
    except (IOError, ConfigReader.Error) as err:
        logger.error("[ Error ] when validating the config file {0} - {1}"
                     .format(self.CONF_FILE, err))

    # Cache directories rooted at the configured RAS volume.
    self.vol_ras = self.conf_reader._get_value_with_default(
        self.SYSINFO,
        COMMON_CONFIGS.get(self.SYSINFO).get("data_path"),
        self.DEFAULT_RAS_VOL)
    self.encl_cache = self.vol_ras + "encl/"
    self.frus = self.encl_cache + "frus/"

    # Expose the in-memory caches through the enclosure map.
    self.encl.update({"frus": self.memcache_frus,
                      "system": self.memcache_system})

    self._check_ras_vol()
def initialize(self, conf_reader, msgQlist, product):
    """Initialize configuration reader and internal msg queues.

    Reads node identity and sensor settings from configuration, resolves
    the SAS-port utility via the tool factory, builds the
    phy -> (negotiated_link_rate, 'up'/'fault') mapping and checks the
    stored alert state from the previous run.

    Returns True; on fatal errors the sensor is shut down instead.
    """
    super(SASPortSensor, self).initialize(conf_reader)
    super(SASPortSensor, self).initialize_msgQ(msgQlist)

    read_conf = self._conf_reader._get_value_with_default
    sysinfo_keys = COMMON_CONFIGS.get(self.SYSTEM_INFORMATION)

    # Node identity, defaulting to '001' when unconfigured.
    self._site_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.SITE_ID), '001')
    self._cluster_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.CLUSTER_ID), '001')
    self._rack_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.RACK_ID), '001')
    self._node_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.NODE_ID), '001')

    # Which backend implements the SAS port probing.
    sas_port_utility = read_conf(
        self.name().capitalize(), self.PROBE, "sysfs")

    self.polling_interval = int(read_conf(
        self.SENSOR_NAME.upper(), self.POLLING_INTERVAL,
        self.DEFAULT_POLLING_INTERVAL))

    self.tool_factory = ToolFactory()

    # Persistent cache file holding the previously raised alert.
    cache_dir_path = os.path.join(DATA_PATH, self.CACHE_DIR_NAME)
    self.SAS_PORT_SENSOR_DATA = os.path.join(
        cache_dir_path, f'SAS_PORT_SENSOR_DATA_{self._node_id}')

    try:
        # Get the instance of the utility using ToolFactory.
        self._utility_instance = self._utility_instance or \
            self.tool_factory.get_instance(sas_port_utility)
        self._utility_instance.initialize()

        # Map each phy to its negotiated link rate, e.g.
        # {"phy-0:0": "<12.0, Unknown>"}
        self.phy_dir_to_linkrate_mapping = \
            self._utility_instance.get_phy_negotiated_link_rate()

        # Restructure: a phy reporting a Gbit rate is UP, else fault.
        # {"phy-0:0": (<link_rate>, "up"/"fault")}
        for phy, rate in self.phy_dir_to_linkrate_mapping.items():
            if 'Gbit'.lower() in rate.strip().lower():
                phy_status = 'up'
                # Increment global phy_link count for UP status.
                self.phy_link_count += 1
            else:
                phy_status = 'fault'
            self.phy_dir_to_linkrate_mapping[phy] = (rate, phy_status)

        # Compare against the stored alert from the previous run.
        self.sas_phy_stored_alert = store.get(self.SAS_PORT_SENSOR_DATA)
        self.check_and_send_alert()
    except KeyError:
        logger.error(
            "Unable to get the instance of {} Utility. "
            "Hence shutting down the sensor".format(sas_port_utility))
        self.shutdown()
    except Exception as e:
        # BUG FIX: the original compared the exception object itself with
        # errno constants (`e == errno.ENOENT`), which can never be true,
        # so the specific branches were unreachable. Inspect e.errno
        # (present on OSError subclasses) instead.
        err = getattr(e, 'errno', None)
        if err == errno.ENOENT:
            logger.error(
                "Problem occured while reading from sas_phy directory. "
                "Directory path doesn't exist. Hence shutting down "
                "the sensor")
        elif err == errno.EACCES:
            logger.error(
                "Problem occured while reading from sas_phy directory. "
                "Not enough permission to read from the directory. "
                "Hence shutting down the sensor")
        else:
            logger.error(
                "Problem occured while reading from sas_phy directory. "
                "{0}. Hence shutting down the sensor".format(e))
        self.shutdown()
    return True
def __init__(self):
    """Read enclosure configuration, decrypt credentials and log in."""
    super(RealStorEnclosure, self).__init__()

    # Web-services helper and common request headers (filled on login).
    self.ws = WebServices()
    self.common_reqheaders = {}

    self.encl_conf = self.CONF_SECTION_MC

    self.system_persistent_cache = self.encl_cache + "system/"
    self.faults_persistent_cache = self.system_persistent_cache + "faults.json"

    read_conf = self.conf_reader._get_value_with_default
    encl_keys = COMMON_CONFIGS.get(self.encl_conf)

    # Management-controller endpoints from configuration.
    self.mc1 = read_conf(
        self.encl_conf, encl_keys.get("primary_controller_ip"),
        self.DEFAULT_MC_IP)
    self.mc1_wsport = read_conf(
        self.encl_conf, encl_keys.get("primary_controller_port"), '')
    self.mc2 = read_conf(
        self.encl_conf, encl_keys.get("secondary_controller_ip"),
        self.DEFAULT_MC_IP)
    self.mc2_wsport = read_conf(
        self.encl_conf, encl_keys.get("secondary_controller_port"), '')

    # Start out talking to the primary controller.
    self.active_ip = self.mc1
    self.active_wsport = self.mc1_wsport

    self.user = read_conf(
        self.encl_conf, encl_keys.get("user"), self.DEFAULT_USER)
    self.passwd = read_conf(
        self.encl_conf, encl_keys.get("password"), self.DEFAULT_PASSWD)
    self.mc_interface = read_conf(
        self.encl_conf, encl_keys.get("mgmt_interface"), "cliapi")

    self.pollfreq = int(read_conf(
        self.CONF_REALSTORSENSORS, "polling_frequency", self.DEFAULT_POLL))

    sysinfo_keys = COMMON_CONFIGS.get(self.SYSTEM_INFORMATION)
    self.site_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.SITE_ID), '001')
    self.rack_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.RACK_ID), '001')
    self.node_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.NODE_ID), '001')
    # Keep cluster_id as a string: it seeds the decryption key below.
    self.cluster_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.CLUSTER_ID), '001')

    # Decrypt the MC password with a key derived from the cluster id.
    decryption_key = encryptor.gen_key(
        self.cluster_id, ServiceTypes.STORAGE_ENCLOSURE.value)
    self.passwd = encryptor.decrypt(
        decryption_key, self.passwd.encode('ascii'), "RealStoreEncl")

    if self.mc_interface not in self.realstor_supported_interfaces:
        logger.error("Unspported Realstor interface configured,"
                     " monitoring and alerts generation may hamper")
        return

    # Log in to the MC to obtain a session key, required for querying
    # resources periodically.
    self.login()
def initialize(self, conf_reader, msgQlist, product):
    """Initialize configuration reader and internal msg queues."""
    # Base-class setup: scheduled monitor thread, then internal queues.
    super(NodeDataMsgHandler, self).initialize(conf_reader)
    super(NodeDataMsgHandler, self).initialize_msgQ(msgQlist)

    read_conf = self._conf_reader._get_value_with_default

    # Transmission cadence and reporting units.
    self._transmit_interval = int(read_conf(
        self.NODEDATAMSGHANDLER, self.TRANSMIT_INTERVAL, 60))
    self._units = read_conf(self.NODEDATAMSGHANDLER, self.UNITS, "MB")

    # Usage thresholds for alerting.
    self._disk_usage_threshold = read_conf(
        self.NODEDATAMSGHANDLER, self.DISK_USAGE_THRESHOLD,
        self.DEFAULT_DISK_USAGE_THRESHOLD)
    self._cpu_usage_threshold = read_conf(
        self.NODEDATAMSGHANDLER, self.CPU_USAGE_THRESHOLD,
        self.DEFAULT_CPU_USAGE_THRESHOLD)
    self._host_memory_usage_threshold = read_conf(
        self.NODEDATAMSGHANDLER, self.HOST_MEMORY_USAGE_THRESHOLD,
        self.DEFAULT_HOST_MEMORY_USAGE_THRESHOLD)

    # Node identity, defaulting to '001' when unconfigured.
    sysinfo_keys = COMMON_CONFIGS.get(self.SYSTEM_INFORMATION)
    self.site_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.SITE_ID), '001')
    self.rack_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.RACK_ID), '001')
    self.node_id = read_conf(
        self.SYSTEM_INFORMATION, sysinfo_keys.get(self.NODE_ID), '001')
    # NOTE(review): unlike the ids above, cluster_id is read with the raw
    # key (no COMMON_CONFIGS lookup) and default '0' — confirm intended.
    self.cluster_id = read_conf(
        self.SYSTEM_INFORMATION, self.CLUSTER_ID, '0')

    # Network status tracking.
    self.prev_nw_status = {}
    self.bmcNwStatus = None
    self.severity_reader = SeverityReader()
    self.prev_cable_cnxns = {}

    # Sensor handles and latest per-sensor payloads (filled as msgs arrive).
    self._node_sensor = None
    self._login_actuator = None
    self.disk_sensor_data = None
    self.host_sensor_data = None
    self.if_sensor_data = None
    self.cpu_sensor_data = None
    self.raid_sensor_data = None
    self.sensor_type = None

    self._epoch_time = str(int(time.time()))

    self._raid_drives = []
    self._raid_device = "N/A"

    self.os_sensor_type = {
        "disk_space": self.disk_sensor_data,
        "system": self.host_sensor_data,
        "nw": self.if_sensor_data,
        "cpu": self.cpu_sensor_data,
        "raid_data": self.raid_sensor_data
    }

    # Dir to maintain fault detected state for interface in case of
    # cable fault detection.
    self.interface_fault_state = {}

    # UUID used in json msgs.
    self._uuid = None

    # Dict of drives by device name from systemd.
    self._drive_by_device_name = {}
    # Dict of drive path by-ids by serial number from systemd.
    self._drive_byid_by_serial_number = {}

    self._import_products(product)