def initialize(self, conf_reader, msgQlist, products):
    """Initialize configuration reader and internal msg queues."""
    # Initialize ScheduledMonitorThread
    super(EgressAccumulatedMsgsProcessor, self).initialize(conf_reader)
    # Initialize internal message queues for this module
    super(EgressAccumulatedMsgsProcessor, self).initialize_msgQ(msgQlist)
    self.store_queue = StoreQueue()
    self._read_config()
    producer_initialized.wait()
    self.create_MsgProducer_obj()
def initialize(self, conf_reader, msgQlist, product):
    """Initialize configuration reader and internal msg queues."""
    # Initialize ScheduledMonitorThread
    super(EgressProcessor, self).initialize(conf_reader)
    # Initialize internal message queues for this module
    super(EgressProcessor, self).initialize_msgQ(msgQlist)
    self.store_queue = StoreQueue()
    # Flag denoting that a shutdown message has been placed
    # into our message queue from the main sspl_ll_d handler
    self._request_shutdown = False
    self._read_config()
    self.create_MsgProducer_obj()
    producer_initialized.set()
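# --- Illustrative sketch (not from the source) ---
# The producer_initialized object used above pairs wait() with set(), which
# matches a threading.Event: EgressProcessor.initialize() signals once its
# producer exists, and the accumulated-messages processor blocks on that
# signal before building its own producer. A minimal sketch of the handshake,
# assuming producer_initialized is a threading.Event; the function names
# below are hypothetical.
import threading

producer_initialized = threading.Event()

def egress_initialize():
    # ... create the MessageProducer for live alerts ...
    producer_initialized.set()    # signal: the producer side is ready

def accumulated_initialize():
    producer_initialized.wait()   # block until egress_initialize() has run
    # ... safe to create this module's producer now ...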
class RabbitMQegressProcessor(ScheduledModuleThread, InternalMsgQ):
    """Handles outgoing messages via rabbitMQ over localhost"""

    MODULE_NAME = "RabbitMQegressProcessor"
    PRIORITY = 1

    # Section and keys in configuration file
    RABBITMQPROCESSOR = MODULE_NAME.upper()
    SIGNATURE_USERNAME = 'message_signature_username'
    SIGNATURE_TOKEN = 'message_signature_token'
    SIGNATURE_EXPIRES = 'message_signature_expires'
    IEM_ROUTE_ADDR = 'iem_route_addr'
    PRODUCER_ID = 'producer_id'
    MESSAGE_TYPE = 'message_type'
    METHOD = 'method'

    @staticmethod
    def name():
        """@return: name of the module."""
        return RabbitMQegressProcessor.MODULE_NAME

    def __init__(self):
        super(RabbitMQegressProcessor, self).__init__(self.MODULE_NAME,
                                                      self.PRIORITY)

    def initialize(self, conf_reader, msgQlist, product):
        """Initialize configuration reader and internal msg queues."""
        # Initialize ScheduledMonitorThread
        super(RabbitMQegressProcessor, self).initialize(conf_reader)

        # Initialize internal message queues for this module
        super(RabbitMQegressProcessor, self).initialize_msgQ(msgQlist)

        self.store_queue = StoreQueue()

        # Flag denoting that a shutdown message has been placed
        # into our message queue from the main sspl_ll_d handler
        self._request_shutdown = False

        self._read_config()

        self._producer = MessageProducer(message_bus,
                                         producer_id=self._producer_id,
                                         message_type=self._message_type,
                                         method=self._method)
        producer_initialized.set()

    def run(self):
        """Run the module periodically on its own thread."""
        self._log_debug("Start accepting requests")
        # self._set_debug(True)
        # self._set_debug_persist(True)
        try:
            # Loop through all messages in the queue and transmit them
            while not self._is_my_msgQ_empty():
                self._jsonMsg, self._event = self._read_my_msgQ()
                if self._jsonMsg is not None:
                    self._transmit_msg_on_exchange()
        except Exception:
            # Log it and restart the whole process when a failure occurs
            logger.error("RabbitMQegressProcessor restarting")

        self._log_debug("Finished processing successfully")

        # Shutdown is requested by the sspl_ll_d shutdown handler
        # placing a 'shutdown' msg into our queue which allows us to
        # finish processing any other queued up messages.
        if self._request_shutdown is True:
            self.shutdown()
        else:
            self._scheduler.enter(1, self._priority, self.run, ())

    def _read_config(self):
        """Configure the RabbitMQ exchange with defaults available"""
        try:
            self._signature_user = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.SIGNATURE_USERNAME}",
                'sspl-ll')
            self._signature_token = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.SIGNATURE_TOKEN}",
                'FAKETOKEN1234')
            self._signature_expires = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.SIGNATURE_EXPIRES}",
                "3600")
            self._producer_id = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.PRODUCER_ID}",
                "sspl-sensor")
            self._message_type = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.MESSAGE_TYPE}",
                "alerts")
            self._method = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.METHOD}",
                "sync")
        except Exception as ex:
            logger.error("RabbitMQegressProcessor, _read_config: %r" % ex)

    def _add_signature(self):
        """Add the authentication signature to the message."""
        self._log_debug("_add_signature, jsonMsg: %s" % self._jsonMsg)
        self._jsonMsg["username"] = self._signature_user
        self._jsonMsg["expires"] = int(self._signature_expires)
        self._jsonMsg["time"] = str(int(time.time()))

        if use_security_lib:
            authn_token_len = len(self._signature_token) + 1
            session_length = int(self._signature_expires)
            token = ctypes.create_string_buffer(
                SSPL_SEC.sspl_get_token_length())
            SSPL_SEC.sspl_generate_session_token(self._signature_user,
                                                 authn_token_len,
                                                 self._signature_token,
                                                 session_length, token)
            # Generate the signature
            msg_len = len(self._jsonMsg) + 1
            sig = ctypes.create_string_buffer(SSPL_SEC.sspl_get_sig_length())
            SSPL_SEC.sspl_sign_message(msg_len, str(self._jsonMsg),
                                       self._signature_user, token, sig)
            self._jsonMsg["signature"] = str(sig.raw, encoding='utf-8')
        else:
            self._jsonMsg["signature"] = "SecurityLibNotInstalled"

    def _transmit_msg_on_exchange(self):
        """Transmit json message onto RabbitMQ exchange"""
        self._log_debug("_transmit_msg_on_exchange, jsonMsg: %s"
                        % self._jsonMsg)

        try:
            # Check for a shutdown message from sspl_ll_d and set a flag
            # to shut down once our message queue is empty
            if self._jsonMsg.get("message").get(
                    "actuator_response_type") is not None and \
               self._jsonMsg.get("message").get(
                    "actuator_response_type").get(
                    "thread_controller") is not None and \
               self._jsonMsg.get("message").get(
                    "actuator_response_type").get(
                    "thread_controller").get("thread_response") == \
                    "SSPL-LL is shutting down":
                logger.info("RabbitMQegressProcessor, "
                            "_transmit_msg_on_exchange, received "
                            "global shutdown message from sspl_ll_d")
                self._request_shutdown = True

            # Publish json message to the correct channel
            # NOTE: We need to route ThreadController messages to the ACK
            # channel. We can't modify the schema as it would affect other
            # modules too. As a temporary solution we have added an extra
            # check to see if actuator_response_type is "thread_controller".
            # TODO: Find a proper way to solve this issue. Avoid changing
            # core egress processor code.
            if self._jsonMsg.get("message").get(
                    "actuator_response_type") is not None and \
               (self._jsonMsg.get("message").get(
                    "actuator_response_type").get("ack") is not None or
                self._jsonMsg.get("message").get(
                    "actuator_response_type").get(
                    "thread_controller") is not None):
                self._add_signature()
                self._producer.send([json.dumps(self._jsonMsg)])
                logger.debug("_transmit_msg_on_exchange, Successfully Sent: "
                             "%s" % self._jsonMsg)

            # Route requests for IEM msgs sent from the LoggingMsgHandler
            elif self._jsonMsg.get("message").get("IEM_routing") is not None:
                log_msg = self._jsonMsg.get("message").get(
                    "IEM_routing").get("log_msg")
                if self._iem_route_addr != "":
                    self._producer.send([json.dumps(self._jsonMsg)])
                else:
                    logger.warn("RabbitMQegressProcessor, Attempted to route "
                                "IEM without a valid 'iem_route_addr' set.")
                logger.debug("_transmit_msg_on_exchange, Successfully Sent: "
                             "%s" % log_msg)
            else:
                self._add_signature()
                jsonMsg = json.dumps(self._jsonMsg)
                try:
                    if self.store_queue.is_empty():
                        self._producer.send([jsonMsg])
                        logger.info(f"Published Alert: {jsonMsg}")
                    else:
                        logger.info("'Accumulated msg queue' is not empty. "
                                    "Adding the msg to the end of the queue")
                        self.store_queue.put(jsonMsg)
                except MessageBusError as e:
                    logger.error(
                        f"RabbitMQegressProcessor, _transmit_msg_on_exchange,"
                        f" error {e} in producing message, adding message to"
                        f" consul {self._jsonMsg}")
                    self.store_queue.put(jsonMsg)
                except Exception as err:
                    logger.error(
                        f"RabbitMQegressProcessor, _transmit_msg_on_exchange,"
                        f" unknown error {err} while publishing the message,"
                        f" adding to persistent store {self._jsonMsg}")
                    self.store_queue.put(jsonMsg)

            # If an event was attached by a sensor, set it
            if self._event:
                self._event.set()

        except Exception as ex:
            logger.error(
                f"RabbitMQegressProcessor, _transmit_msg_on_exchange, problem"
                f" while publishing the message: {ex}, adding message to"
                f" consul: {self._jsonMsg}")

    def shutdown(self):
        """Clean up scheduler queue and gracefully shutdown thread"""
        super(RabbitMQegressProcessor, self).shutdown()
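# --- Illustrative sketch (not from the source) ---
# Every processor in this file falls back to a StoreQueue when publishing
# fails. The real StoreQueue persists messages (the log messages refer to
# Consul); this in-memory stand-in only mirrors the is_empty()/put()/get()
# interface the processors rely on. StoreQueueSketch and its internals are
# hypothetical.
from collections import deque

class StoreQueueSketch:
    """FIFO buffer for alerts that could not be published."""

    def __init__(self):
        self._queue = deque()

    def is_empty(self):
        """Return True when no unsent messages are buffered."""
        return len(self._queue) == 0

    def put(self, message):
        """Append an unsent message at the tail of the queue."""
        self._queue.append(message)

    def get(self):
        """Pop and return the oldest buffered message, or None if empty."""
        return self._queue.popleft() if self._queue else None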
class RabbitMQegressProcessor(ScheduledModuleThread, InternalMsgQ):
    """Handles outgoing messages via rabbitMQ over localhost"""

    MODULE_NAME = "RabbitMQegressProcessor"
    PRIORITY = 1

    # Section and keys in configuration file
    RABBITMQPROCESSOR = MODULE_NAME.upper()
    VIRT_HOST = 'virtual_host'
    PRIMARY_RABBITMQ_HOST = 'primary_rabbitmq_host'
    EXCHANGE_NAME = 'exchange_name'
    QUEUE_NAME = 'queue_name'
    ROUTING_KEY = 'routing_key'
    ACK_QUEUE_NAME = 'ack_queue_name'
    ACK_ROUTING_KEY = 'ack_routing_key'
    USER_NAME = 'username'
    PASSWORD = 'password'
    SIGNATURE_USERNAME = 'message_signature_username'
    SIGNATURE_TOKEN = 'message_signature_token'
    SIGNATURE_EXPIRES = 'message_signature_expires'
    IEM_ROUTE_ADDR = 'iem_route_addr'
    IEM_ROUTE_EXCHANGE_NAME = 'iem_route_exchange_name'
    SYSTEM_INFORMATION_KEY = 'SYSTEM_INFORMATION'
    CLUSTER_ID_KEY = 'cluster_id'
    NODE_ID_KEY = 'node_id'

    @staticmethod
    def name():
        """@return: name of the module."""
        return RabbitMQegressProcessor.MODULE_NAME

    def __init__(self):
        super(RabbitMQegressProcessor, self).__init__(self.MODULE_NAME,
                                                      self.PRIORITY)

    def initialize(self, conf_reader, msgQlist, product):
        """Initialize configuration reader and internal msg queues."""
        # Initialize ScheduledMonitorThread
        super(RabbitMQegressProcessor, self).initialize(conf_reader)

        # Initialize internal message queues for this module
        super(RabbitMQegressProcessor, self).initialize_msgQ(msgQlist)

        self.store_queue = StoreQueue()

        # Flag denoting that a shutdown message has been placed
        # into our message queue from the main sspl_ll_d handler
        self._request_shutdown = False

        self._product = product

        # Configure RabbitMQ Exchange to transmit messages
        self._connection = None
        self._read_config()
        self._connection = RabbitMQSafeConnection(
            self._username, self._password, self._virtual_host,
            self._exchange_name, self._routing_key, self._queue_name)
        self._ack_connection = RabbitMQSafeConnection(
            self._username, self._password, self._virtual_host,
            self._exchange_name, self._ack_routing_key, self._ack_queue_name)
        self._iem_connection = RabbitMQSafeConnection(
            self._username, self._password, self._virtual_host,
            self._iem_route_exchange_name, self._routing_key,
            self._queue_name)

        # Display values used to configure pika from the config file
        self._log_debug("RabbitMQ user: %s" % self._username)
        self._log_debug("RabbitMQ exchange: %s, routing_key: %s, vhost: %s"
                        % (self._exchange_name, self._routing_key,
                           self._virtual_host))

    def run(self):
        """Run the module periodically on its own thread."""
        self._log_debug("Start accepting requests")
        # self._set_debug(True)
        # self._set_debug_persist(True)
        try:
            # Loop through all messages in the queue and transmit them
            while not self._is_my_msgQ_empty():
                self._jsonMsg, self._event = self._read_my_msgQ()
                if self._jsonMsg is not None:
                    self._transmit_msg_on_exchange()
        except Exception:
            # Log it and restart the whole process when a failure occurs
            logger.error("RabbitMQegressProcessor restarting")

        self._log_debug("Finished processing successfully")

        # Shutdown is requested by the sspl_ll_d shutdown handler
        # placing a 'shutdown' msg into our queue which allows us to
        # finish processing any other queued up messages.
        if self._request_shutdown is True:
            self.shutdown()
        else:
            self._scheduler.enter(1, self._priority, self.run, ())

    def _read_config(self):
        """Configure the RabbitMQ exchange with defaults available"""
        try:
            self._virtual_host = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.VIRT_HOST}",
                'SSPL')

            # Read common RabbitMQ configuration
            self._primary_rabbitmq_host = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.PRIMARY_RABBITMQ_HOST}",
                'localhost')

            # Read RabbitMQ configuration for sensor messages
            self._queue_name = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.QUEUE_NAME}",
                'sensor-queue')
            self._exchange_name = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.EXCHANGE_NAME}",
                'sspl-out')
            self._routing_key = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.ROUTING_KEY}",
                'sensor-key')

            # Read RabbitMQ configuration for Ack messages
            self._ack_queue_name = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.ACK_QUEUE_NAME}",
                'sensor-queue')
            self._ack_routing_key = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.ACK_ROUTING_KEY}",
                'sensor-key')

            self._username = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.USER_NAME}",
                'sspluser')
            self._password = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.PASSWORD}", '')
            self._signature_user = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.SIGNATURE_USERNAME}",
                'sspl-ll')
            self._signature_token = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.SIGNATURE_TOKEN}",
                'FAKETOKEN1234')
            self._signature_expires = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.SIGNATURE_EXPIRES}",
                "3600")
            self._iem_route_addr = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.IEM_ROUTE_ADDR}",
                '')
            self._iem_route_exchange_name = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.IEM_ROUTE_EXCHANGE_NAME}",
                'sspl-in')

            cluster_id = Conf.get(
                GLOBAL_CONF, f"{CLUSTER}>{self.CLUSTER_ID_KEY}", 'CC01')

            # Decrypt RabbitMQ Password
            decryption_key = encryptor.gen_key(cluster_id,
                                               ServiceTypes.RABBITMQ.value)
            self._password = encryptor.decrypt(
                decryption_key, self._password.encode('ascii'),
                "RabbitMQegressProcessor")

            if self._iem_route_addr != "":
                logger.info(" Routing IEMs to host: %s"
                            % self._iem_route_addr)
                logger.info(" Using IEM exchange: %s"
                            % self._iem_route_exchange_name)
        except Exception as ex:
            logger.error("RabbitMQegressProcessor, _read_config: %r" % ex)

    def _add_signature(self):
        """Add the authentication signature to the message."""
        self._log_debug("_add_signature, jsonMsg: %s" % self._jsonMsg)
        self._jsonMsg["username"] = self._signature_user
        self._jsonMsg["expires"] = int(self._signature_expires)
        self._jsonMsg["time"] = str(int(time.time()))

        if use_security_lib:
            authn_token_len = len(self._signature_token) + 1
            session_length = int(self._signature_expires)
            token = ctypes.create_string_buffer(
                SSPL_SEC.sspl_get_token_length())
            SSPL_SEC.sspl_generate_session_token(self._signature_user,
                                                 authn_token_len,
                                                 self._signature_token,
                                                 session_length, token)
            # Generate the signature
            msg_len = len(self._jsonMsg) + 1
            sig = ctypes.create_string_buffer(SSPL_SEC.sspl_get_sig_length())
            SSPL_SEC.sspl_sign_message(msg_len, str(self._jsonMsg),
                                       self._signature_user, token, sig)
            self._jsonMsg["signature"] = str(sig.raw, encoding='utf-8')
        else:
            self._jsonMsg["signature"] = "SecurityLibNotInstalled"

    def _transmit_msg_on_exchange(self):
        """Transmit json message onto RabbitMQ exchange"""
        self._log_debug("_transmit_msg_on_exchange, jsonMsg: %s"
                        % self._jsonMsg)

        try:
            # Check for a shutdown message from sspl_ll_d and set a flag
            # to shut down once our message queue is empty
            if self._jsonMsg.get("message").get(
                    "actuator_response_type") is not None and \
               self._jsonMsg.get("message").get(
                    "actuator_response_type").get(
                    "thread_controller") is not None and \
               self._jsonMsg.get("message").get(
                    "actuator_response_type").get(
                    "thread_controller").get("thread_response") == \
                    "SSPL-LL is shutting down":
                logger.info("RabbitMQegressProcessor, "
                            "_transmit_msg_on_exchange, received "
                            "global shutdown message from sspl_ll_d")
                self._request_shutdown = True

            msg_props = pika.BasicProperties()
            msg_props.content_type = "text/plain"

            # Publish json message to the correct channel
            # NOTE: We need to route ThreadController messages to the ACK
            # channel. We can't modify the schema as it would affect other
            # modules too. As a temporary solution we have added an extra
            # check to see if actuator_response_type is "thread_controller".
            # TODO: Find a proper way to solve this issue. Avoid changing
            # core egress processor code.
            if self._jsonMsg.get("message").get(
                    "actuator_response_type") is not None and \
               (self._jsonMsg.get("message").get(
                    "actuator_response_type").get("ack") is not None or
                self._jsonMsg.get("message").get(
                    "actuator_response_type").get(
                    "thread_controller") is not None):
                self._add_signature()
                jsonMsg = json.dumps(self._jsonMsg).encode('utf8')
                self._ack_connection.publish(
                    exchange=self._exchange_name,
                    routing_key=self._ack_routing_key,
                    properties=msg_props,
                    body=jsonMsg)
                logger.debug("_transmit_msg_on_exchange, Successfully Sent: "
                             "%s" % jsonMsg)

            # Route requests for IEM msgs sent from the LoggingMsgHandler
            elif self._jsonMsg.get("message").get("IEM_routing") is not None:
                log_msg = self._jsonMsg.get("message").get(
                    "IEM_routing").get("log_msg")
                if self._iem_route_addr != "":
                    self._iem_connection.publish(
                        exchange=self._iem_route_exchange_name,
                        routing_key=self._routing_key,
                        properties=msg_props,
                        body=str(log_msg))
                else:
                    logger.warn("RabbitMQegressProcessor, Attempted to route "
                                "IEM without a valid 'iem_route_addr' set.")
                logger.debug("_transmit_msg_on_exchange, Successfully Sent: "
                             "%s" % log_msg)
            else:
                self._add_signature()
                jsonMsg = json.dumps(self._jsonMsg).encode('utf8')
                try:
                    if self.store_queue.is_empty():
                        self._connection.publish(
                            exchange=self._exchange_name,
                            routing_key=self._routing_key,
                            properties=msg_props,
                            body=jsonMsg)
                        logger.info(f"Published Alert: {jsonMsg}")
                    else:
                        self.store_queue.put(jsonMsg)
                        logger.info("'Accumulated msg queue' is not empty. "
                                    "Adding the msg to the end of the queue")
                except connection_exceptions:
                    logger.error("RabbitMQegressProcessor, "
                                 "_transmit_msg_on_exchange, rabbitmq "
                                 "connectivity lost, adding message to "
                                 "consul %s" % self._jsonMsg)
                    self.store_queue.put(jsonMsg)
                except Exception as err:
                    logger.error(
                        f"RabbitMQegressProcessor, _transmit_msg_on_exchange,"
                        f" unknown error {err} while publishing the message,"
                        f" adding to persistent store {self._jsonMsg}")
                    self.store_queue.put(jsonMsg)

            # If an event was attached by a sensor, set it
            if self._event:
                self._event.set()

        except Exception as ex:
            logger.error(
                f"RabbitMQegressProcessor, _transmit_msg_on_exchange, problem"
                f" while publishing the message: {ex}, adding message to"
                f" consul: {self._jsonMsg}")

    def shutdown(self):
        """Clean up scheduler queue and gracefully shutdown thread"""
        super(RabbitMQegressProcessor, self).shutdown()
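# --- Illustrative sketch (not from the source) ---
# Both egress processors re-arm themselves with self._scheduler.enter(...),
# the standard-library sched pattern for periodic work on a dedicated
# thread. A minimal, self-contained version of that loop; PeriodicModule
# and its members are hypothetical stand-ins for ScheduledModuleThread.
import sched
import time

class PeriodicModule:
    def __init__(self, interval=30, priority=1):
        self._scheduler = sched.scheduler(time.time, time.sleep)
        self._interval = interval
        self._priority = priority

    def run(self):
        # ... drain the message queue / replay accumulated messages here ...
        # Re-arm: schedule the next run() unless a shutdown was requested
        self._scheduler.enter(self._interval, self._priority, self.run, ())

    def start(self):
        self.run()
        self._scheduler.run()   # blocks, dispatching each scheduled run()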
class Iem:
    # event_name is added to this list when a fault iem is raised; before
    # raising a fault_resolved iem we check whether the event is present
    # in this list.
    fault_iems = []

    Severity = {"INFO": "I", "WARN": "W", "ERROR": "E", "CRITICAL": "C"}

    # EVENT_CODE = { event_name : [event_code, event] }
    EVENT_CODE = {
        "IPMITOOL_ERROR": ["0050010001", "ipmitool"],
        "IPMITOOL_AVAILABLE": ["0050010002", "ipmitool"],
        "HDPARM_ERROR": ["0050010003", "hdparm"],
        "HDPARM_AVAILABLE": ["0050010004", "hdparm"],
        "SMARTCTL_ERROR": ["0050010005", "smartctl"],
        "SMARTCTL_AVAILABLE": ["0050010006", "smartctl"],
        "UDISKS2_UNAVAILABLE": ["0050010007", "udisks2"],
        "UDISKS2_AVAILABLE": ["0050010008", "udisks2"],
        "KAFKA_NOT_ACTIVE": ["0050020001", "kafka"],
        "KAFKA_ACTIVE": ["0050020002", "kafka"]
    }

    # EVENT_STRING = { event_code : [description, impact, recommendation] }
    EVENT_STRING = {
        "0050010001": [
            "ipmitool command execution error.",
            "Server resource monitoring through IPMI halted.",
            "Reinstall/reconfigure ipmitool package."
        ],
        "0050010002": [
            "ipmitool command execution success again.",
            "Server resource monitoring through IPMI enabled again.",
            ""
        ],
        "0050010003": [
            "hdparm command execution error.",
            "Server local drives monitoring through hdparm halted.",
            "Reinstall/reconfigure hdparm package."
        ],
        "0050010004": [
            "hdparm command execution success again.",
            "Server local drives monitoring through hdparm enabled again.",
            ""
        ],
        "0050010005": [
            "smartctl command execution error.",
            "Unable to fetch server drive SMART test results and related "
            "health info.",
            "Reinstall/reconfigure smartmontools package."
        ],
        "0050010006": [
            "smartctl command execution success again.",
            "Enabled again to fetch server drive SMART test results and "
            "related health info.",
            ""
        ],
        "0050010007": [
            "udisks2 is not installed.",
            "Unable to fetch server drive info using systemd dbus interface.",
            "Reinstall/reconfigure udisks2 package."
        ],
        "0050010008": [
            "udisks2 is available.",
            "Enabled again to fetch server drive info using systemd dbus "
            "interface.",
            ""
        ],
        "0050020001": [
            "Kafka service is not in active state.",
            "Cortx health alerts may not be delivered to consumers like CSM.",
            "Reconfigure/start kafka service."
        ],
        "0050020002": [
            "Kafka service is back in active state.",
            "Cortx health alerts will get delivered to consumers like CSM.",
            ""
        ]
    }

    iem_store_queue = StoreQueue()

    def check_existing_iem_event(self, event_name, event_code):
        """Before logging an iem, check whether it is already present."""
        previous_iem_event = None
        iem_exist = False
        if not os.path.exists(IEM_DATA_PATH):
            os.makedirs(IEM_DATA_PATH)
        iem_event_path = f'{IEM_DATA_PATH}/iem_{event_name}'
        if not os.path.exists(iem_event_path):
            with open(iem_event_path, 'w') as f:
                f.write(event_code)
        else:
            with open(iem_event_path, 'r') as f:
                previous_iem_event = f.read().strip()
            if previous_iem_event != event_code:
                with open(iem_event_path, 'w') as f:
                    f.write(event_code)
            else:
                logger.info("%s - IEM already created." % event_code)
                iem_exist = True
        return iem_exist

    def check_fault_event(self, event_name, *events):
        """Before logging a fault_resolved iem event, check whether a fault
        iem event is present for that particular event."""
        fault_iem = False
        iem_event_path = f'{IEM_DATA_PATH}/iem_{event_name}'
        if os.path.exists(iem_event_path):
            with open(iem_event_path, 'r') as f:
                previous_iem_event = f.read().strip()
            if previous_iem_event in events:
                fault_iem = True
        return fault_iem

    def create_iem_fields(self, event, severity, event_type=None):
        event_code = event[0]
        event_name = event[1]
        description = self.EVENT_STRING[event_code][0]
        if event_type == "fault_resolved" and event_name in self.fault_iems:
            iem_event_path = f'{IEM_DATA_PATH}/iem_{event_name}'
            if os.path.exists(iem_event_path):
                os.remove(iem_event_path)
            self.generate_iem(event_name, event_code, severity, description)
        else:
            previous_iem = self.check_existing_iem_event(event_name,
                                                         event_code)
            if not previous_iem:
                self.generate_iem(event_name, event_code, severity,
                                  description)

    def iem_fault(self, event):
        event = self.EVENT_CODE[event]
        severity = self.Severity["ERROR"]
        self.create_iem_fields(event, severity)

    def iem_fault_resolved(self, fault_res_event):
        severity = self.Severity["INFO"]
        event = self.EVENT_CODE[fault_res_event]
        event_type = "fault_resolved"
        self.create_iem_fields(event, severity, event_type)

    def check_existing_fault_iems(self):
        """In case of an sspl restart or node reboot, check whether previous
        fault iems are present."""
        fault_events = ["IPMITOOL_ERROR", "HDPARM_ERROR",
                        "UDISKS2_UNAVAILABLE", "SMARTCTL_ERROR",
                        "KAFKA_NOT_ACTIVE"]
        for event in fault_events:
            event_data = self.EVENT_CODE[event]
            event_name = event_data[1]
            prev_fault_iem_event = self.check_fault_event(event_name,
                                                          event_data[0])
            if prev_fault_iem_event:
                self.fault_iems.append(event_name)

    @staticmethod
    def generate_iem(module, event_code, severity, description):
        """Generate an iem and send it to the message broker."""
        IEM_msg = json.dumps(
            {"iem": {"module": module, "event_code": event_code,
                     "severity": severity, "description": description}})
        try:
            if Iem.iem_store_queue.is_empty():
                logger.info(f"Sending IEM alert for module:{module}"
                            f" and event_code:{event_code}")
                Iem.raise_iem_event(module, event_code, severity,
                                    description)
            else:
                logger.info("'Accumulated iem queue' is not empty."
                            " Adding IEM to the end of the queue")
                Iem.iem_store_queue.put(IEM_msg)
        except (EventMessageError, Exception) as e:
            logger.error(f"Failed to send IEM alert. Error:{e}."
                         f" Adding IEM to the accumulated queue. {IEM_msg}")
            Iem.iem_store_queue.put(IEM_msg)

    @staticmethod
    def raise_iem_event(module, event_code, severity, description):
        """Send an IEM message."""
        # Check whether the IEM framework is initialized;
        # if not, retry initializing the IEM framework
        if os.path.exists(IEM_INIT_FAILED):
            with open(IEM_INIT_FAILED, 'r') as f:
                sspl_pid = f.read()
            if sspl_pid and psutil.pid_exists(int(sspl_pid)):
                EventMessage.init(component='sspl', source='S')
                logger.info("IEM framework initialization completed!!")
                os.remove(IEM_INIT_FAILED)
        EventMessage.send(module=module, event_id=event_code,
                          severity=severity, message_blob=description)
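# --- Illustrative sketch (not from the source) ---
# Typical use of the Iem class above: on startup, repopulate fault_iems from
# the markers persisted under IEM_DATA_PATH, then raise fault/fault_resolved
# pairs as tools disappear and recover. Assumes the IEM framework
# (EventMessage) was initialized elsewhere.
iem = Iem()
iem.check_existing_fault_iems()

# ipmitool starts failing: logs event 0050010001 at severity "E" and
# persists a marker file so the fault is not re-raised on every check.
iem.iem_fault("IPMITOOL_ERROR")

# ipmitool works again: logs 0050010002 at severity "I"; if "ipmitool" was
# registered in fault_iems at startup the persisted marker is removed,
# otherwise it is overwritten with the resolved event code.
iem.iem_fault_resolved("IPMITOOL_AVAILABLE")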
class EgressAccumulatedMsgsProcessor(ScheduledModuleThread, InternalMsgQ):
    """Send any unsent message to the message bus."""

    SENSOR_NAME = "EgressAccumulatedMsgsProcessor"
    PRIORITY = 1

    # TODO: read egress config from a common place
    # Section and keys in configuration file
    PROCESSOR = 'EgressProcessor'
    SIGNATURE_USERNAME = 'message_signature_username'
    SIGNATURE_TOKEN = 'message_signature_token'
    SIGNATURE_EXPIRES = 'message_signature_expires'
    IEM_ROUTE_ADDR = 'iem_route_addr'
    PRODUCER_ID = 'producer_id'
    MESSAGE_TYPE = 'message_type'
    METHOD = 'method'

    # 300 seconds for 5 mins
    MSG_TIMEOUT = 300

    @staticmethod
    def name():
        """@return: name of the monitoring module."""
        return EgressAccumulatedMsgsProcessor.SENSOR_NAME

    def __init__(self):
        super(EgressAccumulatedMsgsProcessor, self).__init__(
            self.SENSOR_NAME, self.PRIORITY)

    def initialize(self, conf_reader, msgQlist, products):
        """Initialize configuration reader and internal msg queues."""
        # Initialize ScheduledMonitorThread
        super(EgressAccumulatedMsgsProcessor, self).initialize(conf_reader)
        # Initialize internal message queues for this module
        super(EgressAccumulatedMsgsProcessor, self).initialize_msgQ(msgQlist)
        self.store_queue = StoreQueue()
        self._read_config()
        # Wait until the egress processor has created its producer
        producer_initialized.wait()
        self._producer = MessageProducer(
            producer_id="accumulated processor",
            message_type=self._message_type,
            method=self._method)

    def read_data(self):
        """This method is part of the interface. Currently it is not in
        use."""
        return {}

    def run(self):
        """Run the sensor on its own thread"""
        logger.debug("Consul accumulated messages processing started")
        if not self._is_my_msgQ_empty():
            # Check for a shutdown message from sspl_ll_d and set a flag
            # to shut down once our message queue is empty
            self._jsonMsg, _ = self._read_my_msgQ()
            if self._jsonMsg.get("message").get(
                    "actuator_response_type") is not None and \
               self._jsonMsg.get("message").get(
                    "actuator_response_type").get(
                    "thread_controller") is not None and \
               self._jsonMsg.get("message").get(
                    "actuator_response_type").get(
                    "thread_controller").get("thread_response") == \
                    "SSPL-LL is shutting down":
                logger.info("EgressAccumulatedMsgsProcessor, run, received "
                            "global shutdown message from sspl_ll_d")
                self.shutdown()
        try:
            # TODO: Fix the accumulated message processor to error out on
            # failure once message bus changes are available (EOS-17626)
            if not self.store_queue.is_empty():
                logger.debug("Found accumulated messages, trying to send "
                             "again")
                while not self.store_queue.is_empty():
                    message = self.store_queue.get()
                    dict_msg = json.loads(message)
                    if "actuator_response_type" in dict_msg["message"]:
                        event_time = dict_msg["message"][
                            "actuator_response_type"]["info"]["event_time"]
                        time_diff = int(time.time()) - int(event_time)
                        # Drop actuator responses older than MSG_TIMEOUT
                        if time_diff > self.MSG_TIMEOUT:
                            continue
                    if "sensor_response_type" in dict_msg["message"]:
                        logger.info(f"Publishing Accumulated Alert: "
                                    f"{message}")
                    self._producer.send([message])
        except MessageBusError as e:
            logger.error("EgressAccumulatedMsgsProcessor, run, %r" % e)
        except Exception as e:
            logger.error(e)
        finally:
            logger.debug("Consul accumulated processing ended")
            self._scheduler.enter(30, self._priority, self.run, ())

    def _read_config(self):
        """Read config for the message bus."""
        try:
            self._signature_user = Conf.get(
                SSPL_CONF, f"{self.PROCESSOR}>{self.SIGNATURE_USERNAME}",
                'sspl-ll')
            self._signature_token = Conf.get(
                SSPL_CONF, f"{self.PROCESSOR}>{self.SIGNATURE_TOKEN}",
                'FAKETOKEN1234')
            self._signature_expires = Conf.get(
                SSPL_CONF, f"{self.PROCESSOR}>{self.SIGNATURE_EXPIRES}",
                "3600")
            self._producer_id = Conf.get(
                SSPL_CONF, f"{self.PROCESSOR}>{self.PRODUCER_ID}",
                "sspl-sensor")
            self._message_type = Conf.get(
                SSPL_CONF, f"{self.PROCESSOR}>{self.MESSAGE_TYPE}",
                "alerts")
            self._method = Conf.get(
                SSPL_CONF, f"{self.PROCESSOR}>{self.METHOD}", "sync")
        except Exception as ex:
            logger.error("EgressProcessor, _read_config: %r" % ex)

    def shutdown(self):
        """Clean up scheduler queue and gracefully shutdown thread"""
        # NOTE: this class never creates a RabbitMQ connection, so the
        # leftover self._connection.cleanup() call was removed here.
        super(EgressAccumulatedMsgsProcessor, self).shutdown()
class RabbitMQEgressAccumulatedMsgsProcessor(ScheduledModuleThread,
                                             InternalMsgQ):
    """Send any unsent message to rabbitmq"""

    SENSOR_NAME = "RabbitMQEgressAccumulatedMsgsProcessor"
    PRIORITY = 1

    # TODO: read egress config from a common place
    # Section and keys in configuration file
    RABBITMQPROCESSOR = 'RABBITMQEGRESSPROCESSOR'
    VIRT_HOST = 'virtual_host'
    PRIMARY_RABBITMQ_HOST = 'primary_rabbitmq_host'
    EXCHANGE_NAME = 'exchange_name'
    QUEUE_NAME = 'queue_name'
    ROUTING_KEY = 'routing_key'
    ACK_QUEUE_NAME = 'ack_queue_name'
    ACK_ROUTING_KEY = 'ack_routing_key'
    USER_NAME = 'username'
    PASSWORD = 'password'
    SIGNATURE_USERNAME = 'message_signature_username'
    SIGNATURE_TOKEN = 'message_signature_token'
    SIGNATURE_EXPIRES = 'message_signature_expires'
    IEM_ROUTE_ADDR = 'iem_route_addr'
    IEM_ROUTE_EXCHANGE_NAME = 'iem_route_exchange_name'
    SYSTEM_INFORMATION_KEY = 'SYSTEM_INFORMATION'
    CLUSTER_ID_KEY = 'cluster_id'
    NODE_ID_KEY = 'node_id'

    # 300 seconds for 5 mins
    MSG_TIMEOUT = 300

    @staticmethod
    def name():
        """@return: name of the monitoring module."""
        return RabbitMQEgressAccumulatedMsgsProcessor.SENSOR_NAME

    def __init__(self):
        super(RabbitMQEgressAccumulatedMsgsProcessor, self).__init__(
            self.SENSOR_NAME, self.PRIORITY)

    def initialize(self, conf_reader, msgQlist, products):
        """Initialize configuration reader and internal msg queues."""
        # Initialize ScheduledMonitorThread
        super(RabbitMQEgressAccumulatedMsgsProcessor, self).initialize(
            conf_reader)
        # Initialize internal message queues for this module
        super(RabbitMQEgressAccumulatedMsgsProcessor, self).initialize_msgQ(
            msgQlist)
        self.store_queue = StoreQueue()
        self._read_config()
        self._connection = RabbitMQSafeConnection(
            self._username, self._password, self._virtual_host,
            self._exchange_name, self._routing_key, self._queue_name)

    def read_data(self):
        """This method is part of the interface. Currently it is not in
        use."""
        return {}

    def run(self):
        """Run the sensor on its own thread"""
        logger.debug("Consul accumulated messages processing started")
        if not self._is_my_msgQ_empty():
            # Check for a shutdown message from sspl_ll_d and set a flag
            # to shut down once our message queue is empty
            self._jsonMsg, _ = self._read_my_msgQ()
            if self._jsonMsg.get("message").get(
                    "actuator_response_type") is not None and \
               self._jsonMsg.get("message").get(
                    "actuator_response_type").get(
                    "thread_controller") is not None and \
               self._jsonMsg.get("message").get(
                    "actuator_response_type").get(
                    "thread_controller").get("thread_response") == \
                    "SSPL-LL is shutting down":
                logger.info("RabbitMQEgressAccumulatedMsgsProcessor, run, "
                            "received global shutdown message from sspl_ll_d")
                self.shutdown()
        try:
            if not self.store_queue.is_empty():
                logger.debug("Found accumulated messages, trying to send "
                             "again")
                self._connection._establish_connection()
                msg_props = pika.BasicProperties()
                msg_props.content_type = "text/plain"
                while not self.store_queue.is_empty():
                    message = self.store_queue.get()
                    dict_msg = json.loads(message)
                    if "actuator_response_type" in dict_msg["message"]:
                        event_time = dict_msg["message"][
                            "actuator_response_type"]["info"]["event_time"]
                        time_diff = int(time.time()) - int(event_time)
                        # Drop actuator responses older than MSG_TIMEOUT
                        if time_diff > self.MSG_TIMEOUT:
                            continue
                    self._connection.publish(exchange=self._exchange_name,
                                             routing_key=self._routing_key,
                                             properties=msg_props,
                                             body=message)
                    if "sensor_response_type" in dict_msg["message"]:
                        logger.info(f"Publishing Accumulated Alert: "
                                    f"{message}")
                self._connection.cleanup()
        except connection_exceptions as e:
            logger.error(connection_error_msg.format(e))
        except Exception as e:
            logger.error(e)
        finally:
            logger.debug("Consul accumulated processing ended")
            self._scheduler.enter(30, self._priority, self.run, ())

    def _read_config(self):
        """Configure the RabbitMQ exchange with defaults available"""
        try:
            self._virtual_host = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.VIRT_HOST}",
                'SSPL')

            # Read common RabbitMQ configuration
            self._primary_rabbitmq_host = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.PRIMARY_RABBITMQ_HOST}",
                'localhost')

            # Read RabbitMQ configuration for sensor messages
            self._queue_name = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.QUEUE_NAME}",
                'sensor-queue')
            self._exchange_name = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.EXCHANGE_NAME}",
                'sspl-out')
            self._routing_key = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.ROUTING_KEY}",
                'sensor-key')

            # Read RabbitMQ configuration for Ack messages
            self._ack_queue_name = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.ACK_QUEUE_NAME}",
                'sensor-queue')
            self._ack_routing_key = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.ACK_ROUTING_KEY}",
                'sensor-key')

            self._username = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.USER_NAME}",
                'sspluser')
            self._password = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.PASSWORD}", '')
            self._signature_user = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.SIGNATURE_USERNAME}",
                'sspl-ll')
            self._signature_token = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.SIGNATURE_TOKEN}",
                'FAKETOKEN1234')
            self._signature_expires = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.SIGNATURE_EXPIRES}",
                "3600")
            self._iem_route_addr = Conf.get(
                SSPL_CONF, f"{self.RABBITMQPROCESSOR}>{self.IEM_ROUTE_ADDR}",
                '')
            self._iem_route_exchange_name = Conf.get(
                SSPL_CONF,
                f"{self.RABBITMQPROCESSOR}>{self.IEM_ROUTE_EXCHANGE_NAME}",
                'sspl-in')

            cluster_id = Conf.get(
                GLOBAL_CONF, f"{CLUSTER}>{self.CLUSTER_ID_KEY}", 'CC01')

            # Decrypt RabbitMQ Password
            decryption_key = encryptor.gen_key(cluster_id,
                                               ServiceTypes.RABBITMQ.value)
            self._password = encryptor.decrypt(
                decryption_key, self._password.encode('ascii'),
                "RabbitMQEgressAccumulatedMsgsProcessor")

            if self._iem_route_addr != "":
                logger.info(" Routing IEMs to host: %s"
                            % self._iem_route_addr)
                logger.info(" Using IEM exchange: %s"
                            % self._iem_route_exchange_name)
        except Exception as ex:
            logger.error("RabbitMQEgressAccumulatedMsgsProcessor, "
                         "_read_config: %r" % ex)

    def shutdown(self):
        """Clean up scheduler queue and gracefully shutdown thread"""
        super(RabbitMQEgressAccumulatedMsgsProcessor, self).shutdown()
        self._connection.cleanup()