Example #1
def encl_sensor_message_request(resource_type):
    egressMsg = {
        "title": "SSPL Actuator Request",
        "description": "Seagate Storage Platform Library - Actuator Request",
        "username": "******",
        "signature": "None",
        "time": "1576148751",
        "expires": 500,
        "message": {
            "sspl_ll_msg_header": {
                "schema_version": "1.0.0",
                "sspl_version": "1.0.0",
                "msg_version": "1.0.0"
            },
            "sspl_ll_debug": {
                "debug_component": "sensor",
                "debug_enabled": True
            },
            "sensor_request_type": {
                "enclosure_alert": {
                    "info": {
                        "resource_type": resource_type
                    }
                }
            }
        }
    }
    world.sspl_modules[RabbitMQegressProcessor.name()]._write_internal_msgQ(
        RabbitMQegressProcessor.name(), egressMsg)
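The request builders in these examples repeat the same SSPL envelope fields (title, description, username, signature, time, expires) and the sspl_ll_msg_header / sspl_ll_debug blocks around a varying request body. As a minimal standalone sketch only, not part of the original test code, a shared helper could build that envelope once; the helper name and the use of the current epoch time below are assumptions for illustration:

import json
import time


def _sspl_request_envelope(message_body, expires=500):
    # Hypothetical helper: wraps a request body in the common envelope used
    # by the request builders above (field names copied from those examples).
    return {
        "title": "SSPL Actuator Request",
        "description": "Seagate Storage Platform Library - Actuator Request",
        "username": "******",
        "signature": "None",
        "time": str(int(time.time())),
        "expires": expires,
        "message": {
            "sspl_ll_msg_header": {
                "schema_version": "1.0.0",
                "sspl_version": "1.0.0",
                "msg_version": "1.0.0"
            },
            "sspl_ll_debug": {
                "debug_component": "sensor",
                "debug_enabled": True
            },
            **message_body
        }
    }


if __name__ == "__main__":
    msg = _sspl_request_envelope(
        {"sensor_request_type": {"enclosure_alert": {"info": {"resource_type": "enclosure"}}}})
    print(json.dumps(msg, indent=2))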
def node_data_sensor_message_request(sensor_type):
    egressMsg = {
        "title": "SSPL Actuator Request",
        "description": "Seagate Storage Platform Library - Actuator Request",
        "username": "******",
        "signature": "None",
        "time": "2015-05-29 14:28:30.974749",
        "expires": 500,
        "message": {
            "sspl_ll_msg_header": {
                "schema_version": "1.0.0",
                "sspl_version": "1.0.0",
                "msg_version": "1.0.0"
            },
            "sspl_ll_debug": {
                "debug_component": "sensor",
                "debug_enabled": True
            },
            "sensor_request_type": {
                "node_data": {
                    "sensor_type": sensor_type
                }
            }
        }
    }

    world.sspl_modules[RabbitMQegressProcessor.name()]._write_internal_msgQ(
        RabbitMQegressProcessor.name(), egressMsg)
def service_actuator_request(service_name, action):
    egressMsg = {
        "title": "SSPL-LL Actuator Request",
        "description": "Seagate Storage Platform Library - Actuator Request",
        "username": "******",
        "expires": 3600,
        "signature": "None",
        "time": "2020-03-06 04:08:04.071170",
        "message": {
            "sspl_ll_debug": {
                "debug_component": "sensor",
                "debug_enabled": True
            },
            "sspl_ll_msg_header": {
                "msg_version": "1.0.0",
                "uuid": "9e6b8e53-10f7-4de0-a9aa-b7895bab7774",
                "schema_version": "1.0.0",
                "sspl_version": "2.0.0"
            },
            "request_path": {
                "site_id": "1",
                "rack_id": "1",
                "node_id": "1"
            },
            "response_dest": {},
            "actuator_request_type": {
                "service_controller": {
                    "service_request": action,
                    "service_name": service_name
                }
            }
        }
    }
    world.sspl_modules[RabbitMQegressProcessor.name()]._write_internal_msgQ(
        RabbitMQegressProcessor.name(), egressMsg)
def fan_actuator_message_request(resource_type, resource_id):
    egressMsg = {
        "title": "SSPL Actuator Request",
        "description": "Seagate Storage Platform Library - Actuator Request",
        "username": "******",
        "signature": "None",
        "time": "2015-05-29 14:28:30.974749",
        "expires": 500,
        "message": {
            "sspl_ll_msg_header": {
                "schema_version": "1.0.0",
                "sspl_version": "1.0.0",
                "msg_version": "1.0.0"
            },
            "sspl_ll_debug": {
                "debug_component": "sensor",
                "debug_enabled": True
            },
            "request_path": {
                "site_id": "1",
                "rack_id": "1",
                "cluster_id": "1",
                "node_id": "1"
            },
            "response_dest": {},
            "actuator_request_type": {
                "storage_enclosure": {
                    "enclosure_request": resource_type,
                    "resource": resource_id
                }
            }
        }
    }
    world.sspl_modules[RabbitMQegressProcessor.name()]._write_internal_msgQ(
        RabbitMQegressProcessor.name(), egressMsg)
Example #5
def disk_actuator_message_request(resource_type):
    egressMsg = {
        "username": "******",
        "expires": 3600,
        "description":
        "Seagate Storage Platform Library - Low Level - Actuator Request",
        "title": "SSPL-LL Actuator Request",
        "signature": "None",
        "time": "2018-07-31 04:08:04.071170",
        "message": {
            "sspl_ll_debug": {
                "debug_component": "sensor",
                "debug_enabled": True
            },
            "sspl_ll_msg_header": {
                "msg_version": "1.0.0",
                "uuid": "9e6b8e53-10f7-4de0-a9aa-b7895bab7774",
                "schema_version": "1.0.0",
                "sspl_version": "1.0.0"
            },
            "actuator_request_type": {
                "node_controller": {
                    "node_request": resource_type,
                    "resource": "*"
                }
            }
        }
    }
    world.sspl_modules[RabbitMQegressProcessor.name()]._write_internal_msgQ(
        RabbitMQegressProcessor.name(), egressMsg)
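Each builder above hands the finished dictionary to the egress processor via world.sspl_modules[RabbitMQegressProcessor.name()]._write_internal_msgQ(...), where world and RabbitMQegressProcessor come from the SSPL test framework and are not shown here. The following standalone sketch mimics that dispatch with stand-in stubs (FakeEgressProcessor and FakeWorld are illustrative names, not framework classes):

import json


class FakeEgressProcessor:
    # Stand-in for RabbitMQegressProcessor: records messages instead of
    # publishing them to RabbitMQ.
    @staticmethod
    def name():
        return "RabbitMQegressProcessor"

    def __init__(self):
        self.sent = []

    def _write_internal_msgQ(self, queue_name, msg):
        self.sent.append((queue_name, msg))


class FakeWorld:
    # Stand-in for the test framework's world object holding running modules.
    def __init__(self):
        self.sspl_modules = {FakeEgressProcessor.name(): FakeEgressProcessor()}


world = FakeWorld()
egressMsg = {"message": {"sensor_request_type": {"node_data": {"sensor_type": "cpu"}}}}
world.sspl_modules[FakeEgressProcessor.name()]._write_internal_msgQ(
    FakeEgressProcessor.name(), egressMsg)
print(json.dumps(world.sspl_modules[FakeEgressProcessor.name()].sent, indent=2))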
    def _generate_local_mount_data(self):
        """Create & transmit a local_mount_data message as defined
            by the sensor response json schema"""

        # Notify the node sensor to update its data required for the local_mount_data message
        successful = self._node_sensor.read_data("local_mount_data",
                                                 self._get_debug(),
                                                 self._units)
        if not successful:
            logger.error(
                "NodeDataMsgHandler, _generate_local_mount_data was NOT successful."
            )

        # Create the local mount data message and hand it over to the egress processor to transmit
        localMountDataMsg = LocalMountDataMsg(
            self._node_sensor.host_id, self._epoch_time,
            self._node_sensor.free_space, self._node_sensor.free_inodes,
            self._node_sensor.free_swap, self._node_sensor.total_space,
            self._node_sensor.total_swap, self._units)

        # Add in uuid if it was present in the json request
        if self._uuid is not None:
            localMountDataMsg.set_uuid(self._uuid)
        jsonMsg = localMountDataMsg.getJson()

        # Transmit it out over rabbitMQ channel
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), jsonMsg)
Example #7
def _run_thread_capture_errors(curr_module, msgQlist, conf_reader, product):
    """Run the given thread and log any errors that happen on it.
    Will stop all sspl_modules if one of them fails."""
    try:
        # Each module is passed a reference list to message queues so it can transmit
        #  internal messages to other modules as desired
        curr_module.initialize(conf_reader, msgQlist, product)
        curr_module.start()

    except BaseException as ex:
        logger.critical(
            "SSPL-Tests encountered a fatal error, terminating service. Error: %s"
            % ex)
        logger.exception(ex)

        # Populate an actuator response message and transmit back to HAlon
        error_msg = "SSPL-Tests encountered an error, terminating service Error: " + \
                    ", Exception: " + logger.exception(ex)
        jsonMsg = ThreadControllerMsg(curr_module, error_msg).getJson()
        curr_module._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                         jsonMsg)

        # Shut it down, error is non-recoverable
        for name, other_module in list(world.sspl_modules.items()):
            if other_module is not curr_module:
                other_module.shutdown()
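_run_thread_capture_errors wraps a module's startup in a broad except BaseException and, on failure, reports the error and shuts down every other registered module. A self-contained sketch of that fail-fast pattern, using trivial stand-in module objects (StubModule and run_capture_errors are illustrative names, not SSPL code):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("sspl-tests-sketch")


class StubModule:
    # Minimal stand-in for an SSPL module with start/shutdown hooks.
    def __init__(self, name, fail=False):
        self.name = name
        self.fail = fail
        self.running = True

    def start(self):
        if self.fail:
            raise RuntimeError(f"{self.name} failed to start")

    def shutdown(self):
        self.running = False


def run_capture_errors(curr_module, all_modules):
    try:
        curr_module.start()
    except BaseException as ex:
        logger.critical("Fatal error in %s, stopping all modules: %s",
                        curr_module.name, ex)
        # Shut everything else down; the error is treated as non-recoverable
        for other in all_modules.values():
            if other is not curr_module:
                other.shutdown()


modules = {"good": StubModule("good"), "bad": StubModule("bad", fail=True)}
run_capture_errors(modules["bad"], modules)
print({name: m.running for name, m in modules.items()})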
    def _send_ifdata_json_msg(self,
                              sensor_type,
                              resource_id,
                              resource_type,
                              state,
                              severity,
                              event=""):
        """A resuable method for transmitting IFDataMsg to RMQ and IEM logging"""
        ifDataMsg = IFdataMsg(self._node_sensor.host_id,
                              self._node_sensor.local_time,
                              self._node_sensor.if_data, resource_id,
                              resource_type, self.site_id, self.node_id,
                              self.cluster_id, self.rack_id, state, severity,
                              event)
        # Add in uuid if it was present in the json request
        if self._uuid is not None:
            ifDataMsg.set_uuid(self._uuid)
        jsonMsg = ifDataMsg.getJson()
        self.if_sensor_data = jsonMsg
        self.os_sensor_type[sensor_type] = self.if_sensor_data

        # Send the event to logging msg handler to send IEM message to journald
        #internal_json_msg=json.dumps({
        #                        'actuator_request_type': {
        #                            'logging': {
        #                                'log_level': 'LOG_WARNING',
        #                                'log_type': 'IEM',
        #                                'log_msg': '{}'.format(jsonMsg)}}})
        #self._write_internal_msgQ(LoggingMsgHandler.name(), internal_json_msg)

        # Transmit it out over rabbitMQ channel
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), jsonMsg)
    def _generate_node_fru_data(self, jsonMsg):
        """Create & transmit a FRU IPMI data message as defined
            by the sensor response json schema"""

        if self._node_sensor.host_id is None:
            successful = self._node_sensor.read_data("None", self._get_debug(), self._units)
            if not successful:
                logger.error("NodeDataMsgHandler, updating host information was NOT successful.")

        if jsonMsg.get("sensor_request_type").get("node_data") is not None:
            self._fru_info = jsonMsg.get("sensor_request_type").get("node_data")
            node_ipmi_data_msg = NodeIPMIDataMsg(self._fru_info)

        if self._uuid is not None:
            node_ipmi_data_msg.set_uuid(self._uuid)
        jsonMsg = node_ipmi_data_msg.getJson()
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), jsonMsg)
    def _generate_psu_alert(self, json_msg, host_name, alert_type, alert_id,
                            severity, info, specific_info, sensor_type):
        """Parses the json message, also validates it and then send it to the
           RabbitMQ egress processor"""

        self._log_debug(f"RealStorEnclMsgHandler, _generate_psu_alert,\
            json_msg {json_msg}")

        real_stor_psu_data_msg = \
            RealStorPSUDataMsg(host_name, alert_type, alert_id, severity, info, specific_info)
        json_msg = real_stor_psu_data_msg.getJson()

        # Saves the json message in memory to serve sspl CLI sensor request
        self._psu_sensor_message = json_msg
        self._fru_type[sensor_type] = self._psu_sensor_message
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg,
                                  self._event)
    def _generate_enclosure_alert(self, json_msg, host_name, alert_type,
                                  alert_id, severity, info, specific_info,
                                  sensor_type):
        """Parses the json message, also validates it and then send it to the
            RabbitMQ egress processor"""

        self._log_debug(f"RealStorEnclMsgHandler, _generate_enclosure_alert,\
            json_msg {json_msg}")

        real_stor_encl_msg = RealStorEnclDataMsg(host_name, alert_type,
                                                 alert_id, severity, info,
                                                 specific_info)
        json_msg = real_stor_encl_msg.getJson()
        self._enclosure_message = json_msg
        self._fru_type[sensor_type] = self._enclosure_message
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg,
                                  self._event)
    def _execute_raid_request(self, node_request, actuator_instance, json_msg, uuid):
        """Performs a RAID request by calling perform_request method of a RAID
           actuator.
        """
        # Perform the RAID request on the node and get the response
        raid_response = actuator_instance.perform_request(json_msg).strip()
        self._log_debug(f"_process_msg, raid_response: {raid_response}")

        ack_msg = AckResponseMsg(node_request, raid_response, uuid).getJson()
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), ack_msg)

        # Restart openhpid to update HPI data only if it is a H/W environment
        if self.setup in ["hw", "ssu"]:
            self._log_debug("restarting openhpid service to update HPI data")
            if "assemble" in node_request.lower():
                internal_json_msg = json.dumps(
                                    {"actuator_request_type": {
                                    "service_controller": {
                                        "service_name" : "openhpid.service",
                                        "service_request": "restart"
                                    }}})
                self._write_internal_msgQ(ServiceMsgHandler.name(), internal_json_msg)
    def _process_msg(self, json_msg):
        """Parses the incoming message and generate the desired data message"""
        self._log_debug(
            f"RealStorEnclMsgHandler, _process_msg, json_msg: {json_msg}")

        if json_msg.get("sensor_request_type").get(
                "enclosure_alert") is not None:
            internal_sensor_request = json_msg.get("sensor_request_type").\
                                        get("enclosure_alert").get("status")
            if internal_sensor_request:
                resource_type = json_msg.get("sensor_request_type").\
                                get("enclosure_alert").get("info").get("resource_type")
                if ":" in resource_type:
                    sensor_type = resource_type.split(":")[2]
                else:
                    sensor_type = resource_type
                self._propagate_alert(json_msg, sensor_type)
            else:
                # serves the request coming from sspl CLI
                sensor_type = json_msg.get("sensor_request_type").\
                                get("enclosure_alert").get("info").\
                                    get("resource_type")
                if ":" in sensor_type:
                    sensor_type = sensor_type.split(":")[2]
                else:
                    sensor_type = sensor_type
                sensor_message_type = self._fru_type.get(sensor_type, "")

                # get the previously saved json message for the sensor type
                # and send the RabbitMQ Message
                if sensor_message_type:
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                              sensor_message_type, self._event)
                else:
                    self._log_debug(f"RealStorEnclMsgHandler, _process_msg, \
                        No past data found for {sensor_type} sensor type")
        else:
            logger.exception("RealStorEnclMsgHandler, _process_msg,\
                Not a valid sensor request format")
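The handler above reduces a colon-separated resource_type such as enclosure:fru:psu to its third token before looking up cached sensor data. A standalone sketch of that parsing step, with a length guard added for values that contain fewer than three tokens (sample values are illustrative):

def to_sensor_type(resource_type):
    # "enclosure:fru:psu" -> "psu"; values without three tokens pass through.
    parts = resource_type.split(":")
    return parts[2] if len(parts) > 2 else resource_type


for value in ("enclosure:fru:psu", "enclosure:fru:fan", "enclosure"):
    print(value, "->", to_sensor_type(value))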
    def _process_msg(self, jsonMsg):
        """Parses the incoming message and handles appropriately"""
        self._log_debug(f"RealStorActuatorMsgHandler, _process_msg, jsonMsg: {jsonMsg}")

        if isinstance(jsonMsg, dict) is False:
            jsonMsg = json.loads(jsonMsg)

        # Parse out the uuid so that it can be sent back in Ack message
        uuid = None
        if jsonMsg.get("sspl_ll_msg_header").get("uuid") is not None:
            uuid = jsonMsg.get("sspl_ll_msg_header").get("uuid")
            self._log_debug(f"_processMsg, uuid: {uuid}")

        logger.debug(f"RealStorActuatorMsgHandler: _process_msg: jsonMsg: {jsonMsg}")
        if jsonMsg.get("actuator_request_type").get("storage_enclosure").get("enclosure_request") is not None:
            enclosure_request = jsonMsg.get("actuator_request_type").get("storage_enclosure").get("enclosure_request")
            self._log_debug(f"_processMsg, enclosure_request: {enclosure_request}")
            logger.debug(f"RealStorActuatorMsgHandler: _process_msg: INSIDE: jsonMsg: {jsonMsg}")

            # Parse out the request field in the enclosure_request
            (request, fru) = enclosure_request.split(":", 1)
            request = request.strip()
            fru = fru.strip()

            if self._real_stor_actuator is None:
                try:
                    from actuators.impl.generic.realstor_encl import RealStorActuator
                    self._real_stor_actuator = RealStorActuator()
                except ImportError as e:
                    logger.warn("RealStor Actuator not loaded")
                    return

            # Perform the request and get the response
            real_stor_response = self._real_stor_actuator.perform_request(jsonMsg)
            self._log_debug(f"_process_msg, RealStor response: {real_stor_response}")

            json_msg = RealStorActuatorMsg(real_stor_response, uuid).getJson()
            self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
    def _process_msg(self, jsonMsg):
        """Parses the incoming message and generate the desired data message"""
        self._log_debug("_process_msg, jsonMsg: %s" % jsonMsg)

        if isinstance(jsonMsg, dict) is False:
            jsonMsg = json.loads(jsonMsg)

        # Parse out the uuid so that it can be sent back in response message
        self._uuid = None
        if jsonMsg.get("sspl_ll_msg_header") is not None and \
           jsonMsg.get("sspl_ll_msg_header").get("uuid") is not None:
            self._uuid = jsonMsg.get("sspl_ll_msg_header").get("uuid")
            self._log_debug("_processMsg, uuid: %s" % self._uuid)

        if jsonMsg.get("sensor_request_type") is not None and \
           jsonMsg.get("sensor_request_type").get("node_data") is not None and \
           jsonMsg.get("sensor_request_type").get("node_data").get("sensor_type") is not None:
            self.sensor_type = jsonMsg.get("sensor_request_type").get(
                "node_data").get("sensor_type").split(":")[2]
            self._log_debug("_processMsg, sensor_type: %s" % self.sensor_type)

            if self.sensor_type == "system":
                self._generate_host_update()
                sensor_message_type = self.os_sensor_type.get(
                    self.sensor_type, "")
                if sensor_message_type:
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                              sensor_message_type)
                else:
                    self._log_debug(f"NodeDataMsgHandler, _process_msg, \
                        No past data found for {self.sensor_type} sensor type")

            elif self.sensor_type == "cpu":
                self._generate_cpu_data()
                sensor_message_type = self.os_sensor_type.get(
                    self.sensor_type, "")
                if sensor_message_type:
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                              sensor_message_type)
                else:
                    self._log_debug(f"NodeDataMsgHandler, _process_msg, \
                        No past data found for {self.sensor_type} sensor type")

            elif self.sensor_type == "nw":
                self._generate_if_data()
                sensor_message_type = self.os_sensor_type.get(
                    self.sensor_type, "")
                if sensor_message_type:
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                              sensor_message_type)
                else:
                    self._log_debug(f"NodeDataMsgHandler, _process_msg, \
                        No past data found for {self.sensor_type} sensor type")

            elif self.sensor_type == "disk_space":
                self._generate_disk_space_alert()
                sensor_message_type = self.os_sensor_type.get(
                    self.sensor_type, "")
                if sensor_message_type:
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                              sensor_message_type)
                else:
                    self._log_debug(f"NodeDataMsgHandler, _process_msg, \
                        No past data found for {self.sensor_type} sensor type")

            elif self.sensor_type == "raid_data":
                self._generate_raid_data(jsonMsg)
                sensor_message_type = self.os_sensor_type.get(
                    self.sensor_type, "")
                if sensor_message_type:
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                              sensor_message_type)
                else:
                    self._log_debug(
                        "NodeDataMsgHandler, _process_msg " +
                        f"No past data found for {self.sensor_type} sensor type"
                    )

            elif self.sensor_type == "raid_integrity":
                self._generate_raid_integrity_data(jsonMsg)
                sensor_message_type = self.os_sensor_type.get(
                    self.sensor_type, "")
                if sensor_message_type:
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                              sensor_message_type)
                else:
                    self._log_debug(
                        "NodeDataMsgHandler, _process_msg " +
                        f"No past data found for {self.sensor_type} sensor type"
                    )

        # Update mapping of device names to serial numbers for global use
        elif jsonMsg.get("sensor_response_type") is not None:
            if jsonMsg.get(
                    "sensor_response_type") == "devicename_serialnumber":
                self._update_devicename_sn_dict(jsonMsg)
        elif jsonMsg.get("sensor_request_type") is not None and \
            jsonMsg.get("sensor_request_type").get("node_data") is not None and \
            jsonMsg.get("sensor_request_type").get("node_data").get("info") is not None and \
            jsonMsg.get("sensor_request_type").get("node_data").get("info").get("resource_type") is not None:
            self._generate_node_fru_data(jsonMsg)
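The parsing above relies on long jsonMsg.get(...).get(...) chains, which raise AttributeError as soon as an intermediate key is missing. As an illustration only, a small helper can walk such paths defensively; get_nested and the sample message below are not part of NodeDataMsgHandler:

def get_nested(msg, *keys, default=None):
    # Walk a nested dict safely; return default if any level is missing.
    current = msg
    for key in keys:
        if not isinstance(current, dict):
            return default
        current = current.get(key)
        if current is None:
            return default
    return current


sample = {"sensor_request_type": {"node_data": {"sensor_type": "node:os:cpu"}}}
print(get_nested(sample, "sensor_request_type", "node_data", "sensor_type"))
print(get_nested(sample, "sensor_request_type", "enclosure_alert", "info"))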
    def _process_msg(self, jsonMsg):
        """Parses the incoming message and handles appropriately"""
        self._log_debug(f"_process_msg, jsonMsg: {jsonMsg}")

        if isinstance(jsonMsg, dict) is False:
            jsonMsg = json.loads(jsonMsg)

        # Parse out the uuid so that it can be sent back in Ack message
        uuid = None
        if jsonMsg.get("sspl_ll_msg_header").get("uuid") is not None:
            uuid = jsonMsg.get("sspl_ll_msg_header").get("uuid")
            self._log_debug(f"_processMsg, uuid: {uuid}")

        if jsonMsg.get("actuator_request_type").get("node_controller").get("node_request") is not None:
            node_request = jsonMsg.get("actuator_request_type").get("node_controller").get("node_request")
            self._log_debug(f"_processMsg, node_request: {node_request}")

            # Parse out the component field in the node_request
            component = node_request[0:4]

            # Handle generic command line requests
            if component == 'SSPL':
                # Query the Zope GlobalSiteManager for an object implementing the command line actuator
                if self._command_line_actuator is None:
                    from actuators.Icommand_line import ICommandLine

                    command_line_actuator_class = self._queryUtility(ICommandLine)
                    # Instantiate CommandLine Actuator only if class is loaded
                    if command_line_actuator_class:
                        self._command_line_actuator = command_line_actuator_class(self._conf_reader)
                    else:
                        logger.warn("CommandLine Actuator not loaded")
                        json_msg = AckResponseMsg(node_request, NodeControllerMsgHandler.UNSUPPORTED_REQUEST, uuid).getJson()
                        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                        return

                # Perform the request and get the response
                command_line_response = self._command_line_actuator.perform_request(jsonMsg).strip()
                self._log_debug(f"_process_msg, command line response: {command_line_response}")

                json_msg = AckResponseMsg(node_request, command_line_response, uuid).getJson()
                self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)

            # Handle LED effects using the HPI actuator
            elif component == "LED:":
                # HPI related operations are not supported in VM environment.
                if self._is_env_vm():
                    logger.warn("HPI operations are not supported in current environment")
                    return
                # Query the Zope GlobalSiteManager for an object implementing the IHPI actuator
                if self._HPI_actuator is None:
                    from actuators.Ihpi import IHPI
                    # Load HPIActuator class
                    HPI_actuator_class = self._queryUtility(IHPI)
                    # Instantiate HPIActuator only if class is loaded
                    if HPI_actuator_class:
                        self._HPI_actuator = HPI_actuator_class(self._conf_reader)
                    else:
                        logger.warn("HPIActuator not loaded")
                        if self._product.lower() in [x.lower() for x in enabled_products]:
                            json_msg = AckResponseMsg(node_request, NodeControllerMsgHandler.UNSUPPORTED_REQUEST, uuid).getJson()
                            self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                        return

                    self._log_debug(f"_process_msg, _HPI_actuator name: {self._HPI_actuator.name()}")

                    # Perform the request using HPI and get the response
                    hpi_response = self._HPI_actuator.perform_request(jsonMsg).strip()
                    self._log_debug(f"_process_msg, hpi_response: {hpi_response}")

                    json_msg = AckResponseMsg(node_request, hpi_response, uuid).getJson()
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)

            # Set the Bezel LED color using the GEM interface
            elif component == "BEZE":
                # Query the Zope GlobalSiteManager for an object implementing the IGEM actuator
                if self._GEM_actuator is None:
                    self._GEM_actuator = self._queryUtility(IGEM)(self._conf_reader)
                    self._log_debug(f"_process_msg, _GEM_actuator name: {self._GEM_actuator.name()}")

                # Perform the request using GEM and get the response
                gem_response = self._GEM_actuator.perform_request(jsonMsg).strip()
                self._log_debug(f"_process_msg, gem_response: {gem_response}")

                json_msg = AckResponseMsg(node_request, gem_response, uuid).getJson()
                self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)

            elif component == "PDU:":
                # Query the Zope GlobalSiteManager for an object implementing the IPDU actuator
                if self._PDU_actuator is None:
                    from actuators.Ipdu import IPDU

                    PDU_actuator_class = self._queryUtility(IPDU)
                    # Instantiate RaritanPDU Actuator only if class is loaded
                    if PDU_actuator_class:
                        self._PDU_actuator = PDU_actuator_class(self._conf_reader)
                    else:
                        logger.warn("RaritanPDU Actuator not loaded")
                        json_msg = AckResponseMsg(node_request, NodeControllerMsgHandler.UNSUPPORTED_REQUEST, uuid).getJson()
                        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                        return

                # Perform the request on the PDU and get the response
                pdu_response = self._PDU_actuator.perform_request(jsonMsg).strip()
                self._log_debug(f"_process_msg, pdu_response: {pdu_response}")

                json_msg = AckResponseMsg(node_request, pdu_response, uuid).getJson()
                self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)

            elif component == "RAID":
                # If the state is INITIALIZED, we can assume that the actuator
                # is ready to perform the operation.
                if actuator_state_manager.is_initialized("RAIDactuator"):
                    self._log_debug(f"_process_msg, _RAID_actuator name: {self._RAID_actuator.name()}")
                    self._execute_raid_request(
                        node_request, self._RAID_actuator, jsonMsg, uuid)

                # If the state is INITIALIZING, need to send message
                elif actuator_state_manager.is_initializing("RAIDactuator"):
                    # This state will not be reached. Kept here for consistency.
                    logger.info("RAID actuator is initializing")
                    busy_json_msg = AckResponseMsg(
                        node_request, "BUSY", uuid, error_no=errno.EBUSY).getJson()
                    self._write_internal_msgQ(
                        "RabbitMQegressProcessor", busy_json_msg)

                elif actuator_state_manager.is_imported("RAIDactuator"):
                    # This case applies to the first request only. Subsequent
                    # requests will go to the INITIALIZED state case.
                    logger.info("RAID actuator is imported and initializing")

                    from actuators.Iraid import IRAIDactuator
                    actuator_state_manager.set_state(
                            "RAIDactuator", actuator_state_manager.INITIALIZING)
                    # Query the Zope GlobalSiteManager for an object implementing the IRAIDactuator
                    raid_actuator_class = self._queryUtility(IRAIDactuator)
                    if raid_actuator_class:
                        # NOTE: Instantiation should not be time consuming,
                        # otherwise NodeControllerMsgHandler will get blocked
                        # and will not be able to serve any subsequent requests.
                        # This applies to the instantiation of every actuator.
                        self._RAID_actuator = raid_actuator_class()
                        logger.info(f"_process_msg, _RAID_actuator name: {self._RAID_actuator.name()}")
                        self._execute_raid_request(
                            node_request, self._RAID_actuator, jsonMsg, uuid)
                        actuator_state_manager.set_state(
                            "RAIDactuator", actuator_state_manager.INITIALIZED)
                    else:
                        logger.warn("RAID actuator is not instantiated")

                # If there is no entry for the actuator in the table, we can
                # assume that it is not loaded for some reason.
                else:
                    logger.warn("RAID actuator is not loaded or not supported")

            elif component == "IPMI":
                # Query the Zope GlobalSiteManager for an object implementing the IPMI actuator
                if self._IPMI_actuator is None:
                    from actuators.Iipmi import Iipmi

                    IPMI_actuator_class = self._queryUtility(Iipmi)
                    # Instantiate IPMI Actuator only if class is loaded
                    if IPMI_actuator_class:
                        self._IPMI_actuator = IPMI_actuator_class(self._conf_reader)
                    else:
                        logger.warn("IPMI Actuator not loaded")
                        json_msg = AckResponseMsg(node_request, NodeControllerMsgHandler.UNSUPPORTED_REQUEST, uuid).getJson()
                        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                        return

                # Perform the IPMI request on the node and get the response
                ipmi_response = self._IPMI_actuator.perform_request(jsonMsg).strip()
                self._log_debug(f"_process_msg, ipmi_response: {ipmi_response}")

                json_msg = AckResponseMsg(node_request, ipmi_response, uuid).getJson()
                self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)

            elif component == "STOP":
                # HPI related operations are not supported in VM environment.
                if self._is_env_vm():
                    logger.warn("HPI operations are not supported in current environment")
                    return
                # Query the Zope GlobalSiteManager for an object implementing the IHPI actuator
                if self._HPI_actuator is None:
                    from actuators.Ihpi import IHPI
                    # Load HPIActuator class
                    HPI_actuator_class = self._queryUtility(IHPI)
                    # Instantiate HPIActuator only if class is loaded
                    if HPI_actuator_class:
                        self._HPI_actuator = HPI_actuator_class(self._conf_reader)
                    else:
                        logger.warn("HPIActuator not loaded")
                        if self._product.lower() in [x.lower() for x in enabled_products]:
                            json_msg = AckResponseMsg(node_request, NodeControllerMsgHandler.UNSUPPORTED_REQUEST, uuid).getJson()
                            self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                        return

                    self._log_debug(f"_process_msg, _HPI_actuator name: {self._HPI_actuator.name()}")

                    # Parse out the drive to stop
                    drive_request = node_request[12:].strip()
                    self._log_debug(f"perform_request, drive to stop: {drive_request}")

                    # Append POWER_OFF to notify HPI actuator of desired state
                    jsonMsg["actuator_request_type"]["node_controller"]["node_request"] = \
                            f"DISK: set {drive_request} POWER_OFF"
                    self._log_debug(f"_process_msg, jsonMsg: {jsonMsg}")

                    # Perform the request using HPI and get the response
                    hpi_response = self._HPI_actuator.perform_request(jsonMsg).strip()
                    self._log_debug(f"_process_msg, hpi_response: {hpi_response}")

                    # Simplify success message as external apps don't care about details
                    if "Success" in hpi_response:
                        hpi_response = "Successful"

                    json_msg = AckResponseMsg(node_request, hpi_response, uuid).getJson()
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)

            elif component == "STAR":
                # HPI related operations are not supported in VM environment.
                if self._is_env_vm():
                    logger.warn("HPI operations are not supported in current environment")
                    return
                # Query the Zope GlobalSiteManager for an object implementing the IHPI actuator
                if self._HPI_actuator is None:
                    from actuators.Ihpi import IHPI
                    # Load HPIActuator class
                    HPI_actuator_class = self._queryUtility(IHPI)
                    # Instantiate HPIActuator only if class is loaded
                    if HPI_actuator_class:
                        self._HPI_actuator = HPI_actuator_class(self._conf_reader)
                    else:
                        logger.warn("HPIActuator not loaded")
                        if self._product.lower() in [x.lower() for x in enabled_products]:
                            json_msg = AckResponseMsg(node_request, NodeControllerMsgHandler.UNSUPPORTED_REQUEST, uuid).getJson()
                            self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                        return

                    self._log_debug(f"_process_msg, _HPI_actuator name: {self._HPI_actuator.name()}")

                    # Parse out the drive to start
                    drive_request = node_request[13:].strip()
                    self._log_debug(f"perform_request, drive to start: {drive_request}")

                    # Append POWER_ON to notify HPI actuator of desired state
                    jsonMsg["actuator_request_type"]["node_controller"]["node_request"] = \
                            f"DISK: set {drive_request} POWER_ON"
                    self._log_debug(f"_process_msg, jsonMsg: {jsonMsg}")

                    # Perform the request using HPI and get the response
                    hpi_response = self._HPI_actuator.perform_request(jsonMsg).strip()
                    self._log_debug(f"_process_msg, hpi_response: {hpi_response}")

                    # Simplify success message as external apps don't care about details
                    if "Success" in hpi_response:
                        hpi_response = "Successful"

                    json_msg = AckResponseMsg(node_request, hpi_response, uuid).getJson()
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)


            elif component == "RESE":
                # HPI related operations are not supported in VM environment.
                if self._is_env_vm():
                    logger.warn("HPI operations are not supported in current environment")
                    return
                # Query the Zope GlobalSiteManager for an object implementing the IHPI actuator
                if self._HPI_actuator is None:
                    from actuators.Ihpi import IHPI
                    # Load HPIActuator class
                    HPI_actuator_class = self._queryUtility(IHPI)
                    # Instantiate HPIActuator only if class is loaded
                    if HPI_actuator_class:
                        self._HPI_actuator = HPI_actuator_class(self._conf_reader)
                    else:
                        logger.warn("HPIActuator not loaded")
                        if self._product.lower() in [x.lower() for x in enabled_products]:
                            json_msg = AckResponseMsg(node_request, NodeControllerMsgHandler.UNSUPPORTED_REQUEST, uuid).getJson()
                            self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                        return

                    self._log_debug(f"_process_msg, _HPI_actuator name: {self._HPI_actuator.name()}")

                    # Parse out the drive to power cycle
                    drive_request = node_request[13:].strip()
                    self._log_debug(f"perform_request, drive to power cycle: {drive_request}")

                    # Append POWER_OFF and then POWER_ON to notify HPI actuator of desired state
                    jsonMsg["actuator_request_type"]["node_controller"]["node_request"] = \
                            f"DISK: set {drive_request} POWER_OFF"
                    self._log_debug(f"_process_msg, jsonMsg: {jsonMsg}")

                    # Perform the request using HPI and get the response
                    hpi_response = self._HPI_actuator.perform_request(jsonMsg).strip()
                    self._log_debug(f"_process_msg, hpi_response: {hpi_response}")

                    # Check for success and power the disk back on
                    if "Success" in hpi_response:
                        # Append POWER_ON to notify HPI actuator of desired state
                        jsonMsg["actuator_request_type"]["node_controller"]["node_request"] = \
                                   f"DISK: set {drive_request} POWER_ON"
                        self._log_debug(f"_process_msg, jsonMsg: {jsonMsg}")

                        # Perform the request using HPI and get the response
                        hpi_response = self._HPI_actuator.perform_request(jsonMsg).strip()
                        self._log_debug(f"_process_msg, hpi_response: {hpi_response}")

                            # Simplify success message as external apps don't care about details
                        if "Success" in hpi_response:
                            hpi_response = "Successful"

                    json_msg = AckResponseMsg(node_request, hpi_response, uuid).getJson()
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)

            elif component == "HDPA":
                # If the state is INITIALIZED, we can assume that the actuator
                # is ready to perform the operation.
                if actuator_state_manager.is_initialized("Hdparm"):
                    logger.info(f"_process_msg, Hdparm_actuator name: {self._hdparm_actuator.name()}")
                    # Perform the hdparm request on the node and get the response
                    hdparm_response = self._hdparm_actuator.perform_request(jsonMsg).strip()
                    self._log_debug(f"_process_msg, hdparm_response: {hdparm_response}")

                    json_msg = AckResponseMsg(node_request, hdparm_response, uuid).getJson()
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)

                # If the state is INITIALIZING, need to send message
                elif actuator_state_manager.is_initializing("Hdparm"):
                    # This state will not be reached. Kept here for consistency.
                    logger.info("Hdparm actuator is initializing")
                    busy_json_msg = AckResponseMsg(
                        node_request, "BUSY", uuid, error_no=errno.EBUSY).getJson()
                    self._write_internal_msgQ(
                        "RabbitMQegressProcessor", busy_json_msg)

                elif actuator_state_manager.is_imported("Hdparm"):
                    # This case applies to the first request only. Subsequent
                    # requests will go to the INITIALIZED state case.
                    logger.info("Hdparm actuator is imported and initializing")
                    # Query the Zope GlobalSiteManager for an object
                    # implementing the hdparm actuator.
                    from actuators.Ihdparm import IHdparm
                    actuator_state_manager.set_state(
                            "Hdparm", actuator_state_manager.INITIALIZING)
                    hdparm_actuator_class = self._queryUtility(IHdparm)
                    if hdparm_actuator_class:
                        # NOTE: Instantiation should not be time consuming,
                        # otherwise NodeControllerMsgHandler will get blocked and
                        # will not be able to serve any subsequent requests. This
                        # applies to the instantiation of every actuator.
                        self._hdparm_actuator = hdparm_actuator_class()
                        self._log_debug(f"_process_msg, _hdparm_actuator name: {self._hdparm_actuator.name()}")
                        # Perform the hdparm request on the node and get the response
                        hdparm_response = self._hdparm_actuator.perform_request(jsonMsg).strip()
                        self._log_debug(f"_process_msg, hdparm_response: {hdparm_response}")

                        json_msg = AckResponseMsg(node_request, hdparm_response, uuid).getJson()
                        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                        actuator_state_manager.set_state(
                            "Hdparm", actuator_state_manager.INITIALIZED)
                    else:
                        logger.info("Hdparm actuator is not instantiated")

                # If there is no entry for the actuator in the table, we can
                # assume that it is not loaded for some reason.
                else:
                    logger.info("Hdparm actuator is not loaded or not supported")

            elif component == "SMAR":
                # Parse out the drive request field in json msg
                node_request = jsonMsg.get("actuator_request_type").get("node_controller").get("node_request")
                drive_request = node_request[12:].strip()
                self._log_debug(f"perform_request, drive: {drive_request}")

                # If the drive field is an asterisk then send all the smart results for all drives available
                if drive_request == "*":
                    # Send the event to SystemdWatchdog to schedule SMART test
                    internal_json_msg = json.dumps(
                        {"sensor_request_type" : "disk_smart_test",
                         "serial_number" : "*",
                         "node_request" : self.host_id,
                         "uuid" : uuid
                         })

                    self._write_internal_msgQ("SystemdWatchdog", internal_json_msg)
                    return

                # Put together a message to get the serial number of the drive using hdparm tool
                if drive_request.startswith("/"):
                    serial_number, error = self._retrieve_serial_number(drive_request)

                    # Send error response back on ack channel
                    if error != "":
                        json_msg = AckResponseMsg(node_request, error, uuid).getJson()
                        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                        return
                else:
                    if self._smartctl_actuator is None:
                        from actuators.Ismartctl import ISmartctl
                        smartctl_actuator_class = self._queryUtility(ISmartctl)
                        if smartctl_actuator_class:
                            self._smartctl_actuator = smartctl_actuator_class()
                            self._log_debug("_process_msg, _smartctl_actuator name: %s" % self._smartctl_actuator.name())
                        else:
                            logger.error("Smartctl actuator module is not available to load")
                            return
                    serial_compare = self._smartctl_actuator._check_serial_number(drive_request)
                    if not serial_compare:
                        json_msg = AckResponseMsg(node_request, "Drive Not Found", uuid).getJson()
                        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                        return
                    else:
                        serial_number = drive_request

                    # Send the event to SystemdWatchdog to schedule SMART test
                    internal_json_msg = json.dumps(
                        {"sensor_request_type" : "disk_smart_test",
                            "serial_number" : serial_number,
                            "node_request" : node_request,
                            "uuid" : uuid
                        })

                    self._write_internal_msgQ("SystemdWatchdog", internal_json_msg)

            elif component == "DRVM":
                # Requesting the current status from drivemanager
                # Parse out the drive request field in json msg
                node_request = jsonMsg.get("actuator_request_type").get("node_controller").get("node_request")
                drive_request = node_request[15:].strip()
                self._log_debug(f"perform_request, drive: {drive_request}")

                # If the drive field is an asterisk then send all the drivemanager results for all drives available
                if drive_request == "*":
                    # Send a message to the disk message handler to lookup the drivemanager status and send it out
                    internal_json_msg = json.dumps(
                        {"sensor_request_type" : "drvmngr_status",
                         "serial_number" : "*",
                         "node_request" : self.host_id,
                         "uuid" : uuid
                         })

                    # Send the event to disk message handler to generate json message
                    self._write_internal_msgQ(DiskMsgHandler.name(), internal_json_msg)
                    return

                # Put together a message to get the serial number of the drive using hdparm tool
                if drive_request.startswith("/"):
                    serial_number, error = self._retrieve_serial_number(drive_request)

                    # Send error response back on ack channel
                    if error != "":
                        json_msg = AckResponseMsg(node_request, error, uuid).getJson()
                        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                        return
                else:
                    serial_number = drive_request

                # Send a message to the disk message handler to lookup the smart status and send it out
                internal_json_msg = json.dumps(
                    {"sensor_request_type" : "drvmngr_status",
                     "serial_number" : serial_number,
                     "node_request" : node_request,
                     "uuid" : uuid
                    })

                # Send the event to disk message handler to generate json message
                self._write_internal_msgQ(DiskMsgHandler.name(), internal_json_msg)

            elif component == "HPI_":
                # Requesting the current status from HPI data
                # Parse out the drive request field in json msg
                if self._is_env_vm():
                    logger.warn("HPI operations are not supported in current environment")
                    return

                if self.setup == 'cortx':
                    logger.warn("HPIMonitor not loaded")
                    json_msg = AckResponseMsg(node_request, NodeControllerMsgHandler.UNSUPPORTED_REQUEST, uuid).getJson()
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                    return

                node_request = jsonMsg.get("actuator_request_type").get("node_controller").get("node_request")
                drive_request = node_request[11:].strip()
                self._log_debug(f"perform_request, drive: {drive_request}")

                # If the drive field is an asterisk then send all the hpi results for all drives available
                if drive_request == "*":
                    # Send a message to the disk message handler to lookup the hpi status and send it out
                    internal_json_msg = json.dumps(
                        {"sensor_request_type" : "hpi_status",
                         "serial_number" : "*",
                         "node_request" : self.host_id,
                         "uuid" : uuid
                         })

                    # Send the event to disk message handler to generate json message
                    self._write_internal_msgQ(DiskMsgHandler.name(), internal_json_msg)
                    return

                # Put together a message to get the serial number of the drive using hdparm tool
                if drive_request.startswith("/"):
                    serial_number, error = self._retrieve_serial_number(drive_request)

                    # Send error response back on ack channel
                    if error != "":
                        json_msg = AckResponseMsg(node_request, error, uuid).getJson()
                        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                        return
                else:
                    serial_number = drive_request

                # Send a message to the disk message handler to lookup the smart status and send it out
                internal_json_msg = json.dumps(
                    {"sensor_request_type" : "hpi_status",
                     "serial_number" : serial_number,
                     "node_request" : node_request,
                     "uuid" : uuid
                    })

                # Send the event to disk message handler to generate json message
                self._write_internal_msgQ(DiskMsgHandler.name(), internal_json_msg)

            elif component == "SIMU":
                # Requesting to simulate an event
                # Parse out the simulated request field
                node_request = jsonMsg.get("actuator_request_type").get("node_controller").get("node_request")
                sim_request = node_request[9:].strip().split(" ")
                self._log_debug(f"perform_request, sim_request: {str(sim_request)}")

                # Put together a message to get the serial number of the drive using hdparm tool
                if sim_request[1].startswith("/"):
                    serial_number, error = self._retrieve_serial_number(sim_request[1])

                    # Send error response back on ack channel
                    if error != "":
                        json_msg = AckResponseMsg(node_request, error, uuid).getJson()
                        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                        return
                else:
                    serial_number = sim_request[1]

                # SMART simulation requests are sent to SystemdWatchdog
                if sim_request[0] == "SMART_FAILURE":
                    logger.info(f"NodeControllerMsgHandler, simulating SMART_FAILURE on drive: {serial_number}")

                    internal_json_msg = json.dumps(
                        {"sensor_request_type" : "simulate_failure",
                         "serial_number" : serial_number,
                         "node_request" : sim_request[0],
                         "uuid" : uuid
                         })

                    # Send the event to SystemdWatchdog to handle it from here
                    self._write_internal_msgQ("SystemdWatchdog", internal_json_msg)

                else:
                    # Send a message to the disk message handler to handle simulation request
                    internal_json_msg = json.dumps(
                        {"sensor_request_type" : "sim_event",
                         "serial_number" : serial_number,
                         "node_request" : sim_request[0],
                         "uuid" : uuid
                         })

                    # Send the event to disk message handler to generate json message
                    self._write_internal_msgQ(DiskMsgHandler.name(), internal_json_msg)

            elif component == "NDHW":
                # NDHW Stands for Node HW.
                try:
                    # Load and Instantiate the Actuator for the first request
                    if self._NodeHW_actuator is None:
                        from actuators.impl.generic.node_hw import NodeHWactuator
                        from framework.utils.ipmi_client import IpmiFactory
                        self.ipmi_client_name = self._conf_reader._get_value_with_default(
                            self.NODE_HW_ACTUATOR, self.IPMI_IMPLEMENTOR,
                            "ipmitool")
                        ipmi_factory = IpmiFactory()
                        ipmi_client = \
                           ipmi_factory.get_implementor(self.ipmi_client_name)
                        # Instantiate NodeHWactuator only if class is loaded
                        if ipmi_client is not None:
                            self._NodeHW_actuator = NodeHWactuator(ipmi_client, self._conf_reader)
                            self._NodeHW_actuator.initialize()
                        else:
                            logger.error(f"IPMI client: '{self.ipmi_client_name}' doesn't exist")
                            return
                    node_request = jsonMsg.get("actuator_request_type")
                    # Perform the NodeHW request on the node and get the response
                    #TODO: Send message to Ack as well as Sensor in their respective channel.
                    node_hw_response = self._NodeHW_actuator.perform_request(node_request)
                    self._log_debug(f"_process_msg, node_hw_response: {node_hw_response}")
                    json_msg = NodeHwAckResponseMsg(node_request, node_hw_response, uuid).getJson()
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
                except ImportError as e:
                    logger.error(f"Modules could not be loaded: {e}")
                    return
                except Exception as e:
                    logger.error(f"NodeControllerMsgHandler, _process_msg, Exception in request handling: {e}")
                    return

            else:
                response = f"NodeControllerMsgHandler, _process_msg, unknown node controller msg: {node_request}"
                self._log_debug(response)

                json_msg = AckResponseMsg(node_request, response, uuid).getJson()
                self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
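For reference, a minimal standalone sketch of the two internal JSON payloads that _process_msg builds for simulation requests above (the build_simulation_msg helper and the sample serial number/uuid are illustrative assumptions, not part of SSPL): SMART_FAILURE simulations are routed to SystemdWatchdog, all other simulation requests to DiskMsgHandler.

import json

def build_simulation_msg(node_request, serial_number, uuid):
    # Mirrors the internal message assembled in _process_msg above:
    # SMART_FAILURE requests use "simulate_failure" (handled by SystemdWatchdog),
    # all other simulation requests use "sim_event" (handled by DiskMsgHandler).
    request_type = "simulate_failure" if node_request == "SMART_FAILURE" else "sim_event"
    return json.dumps({
        "sensor_request_type": request_type,
        "serial_number": serial_number,
        "node_request": node_request,
        "uuid": uuid
    })

# Illustrative values only (hypothetical serial number and uuid)
print(build_simulation_msg("SMART_FAILURE", "ZXXXXXXX",
                           "00000000-0000-0000-0000-000000000000"))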
Example #17
    def _send_msg(self, iem_components, log_timestamp):
        """Creates JSON message from iem components and sends to RabbitMQ
           channel.
        """
        # IEM format is IEC:DESCRIPTION
        # IEC format is SEVERITY|SOURCEID|COMPONENTID|MODULEID|EVENTID
        # Field lengths ----1---|---1----|------3----|----3---|---4---
        # Example IEM -> "IEC: BO1001000001:Error in connecting to controller"
        # Actual IEC doesn't contain separator between fields. It is shown
        # here just for readability. Each field has fixed length.
        severity, source_id, component_id, module_id, event_id, description = \
            iem_components[:6]

        # Check if severity level is valid
        if severity not in self.SEVERITY_LEVELS:
            logger.warning(f"Invalid Severity level: {severity}")
            return

        # Check for valid source id
        if source_id not in self.SOURCE_IDS:
            logger.warning(f"Invalid Source ID: {source_id}")
            return

        # Check for valid event time
        event_time = self._get_epoch_time_from_timestamp(log_timestamp)
        if not event_time:
            logger.error(
                "Timestamp is not in required format, discarding the message")
            return

        # Check for other components
        args = {
            "_comp_id": component_id,
            "_module_id": module_id,
            "_event_id": event_id
        }
        if not self._are_components_in_range(**args):
            return

        # Update severity and source_id
        alert_type = iem_severity_to_alert_mapping.get(severity)
        severity = iem_severity_types.get(severity, severity)
        source_id = iem_source_types.get(source_id, source_id)

        # Decode component_id, module_id and event_id
        component_id, module_id, event_id = self._decode_msg(
            f"{component_id}{module_id}{event_id}")

        info = {
            "site_id": self._site_id,
            "rack_id": self._rack_id,
            "node_id": self._node_id,
            "cluster_id": self._cluster_id,
            "source_id": source_id,
            "component_id": component_id,
            "module_id": module_id,
            "event_id": event_id,
            "severity": severity,
            "description": description,
            "alert_type": alert_type,
            "event_time": event_time,
            "IEC": "".join(iem_components[:-1])
        }
        iem_data_msg = IEMDataMsg(info)
        json_msg = iem_data_msg.getJson()
        # RAAL stands for - RAise ALert
        logger.info(f"RAAL: {json_msg}")
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
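As a minimal standalone sketch (an illustrative helper, not part of the IEM sensor code), the fixed-width IEC layout documented in _send_msg above (1|1|3|3|4 characters for severity, source, component, module and event, followed by ':' and a free-form description) can be split into the six iem_components like this:

def split_iem(iem):
    # IEM format is IEC:DESCRIPTION; the IEC packs fixed-width fields
    # SEVERITY(1) | SOURCEID(1) | COMPONENTID(3) | MODULEID(3) | EVENTID(4)
    iec, description = iem.split(":", 1)
    return [iec[0], iec[1], iec[2:5], iec[5:8], iec[8:12], description]

# Example IEM taken from the comment in _send_msg above
print(split_iem("BO1001000001:Error in connecting to controller"))
# -> ['B', 'O', '100', '100', '0001', 'Error in connecting to controller']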
    def _generate_host_update(self):
        """Create & transmit a host update message as defined
            by the sensor response json schema"""

        # Notify the node sensor to update its data required for the host_update message
        successful = self._node_sensor.read_data("host_update",
                                                 self._get_debug(),
                                                 self._units)
        if not successful:
            logger.error(
                "NodeDataMsgHandler, _generate_host_update was NOT successful."
            )

        self._host_memory_usage_threshold = str(
            self._host_memory_usage_threshold)
        try:
            if self._host_memory_usage_threshold.isdigit():
                self._host_memory_usage_threshold = int(
                    self._host_memory_usage_threshold)
            else:
                self._host_memory_usage_threshold = float(
                    self._host_memory_usage_threshold)
        except ValueError:
            logger.warning(
                "Host Memory Alert, Invalid host_memory_usage_threshold value entered in config."
            )
            # Assigning default value to _host_memory_usage_threshold
            self._host_memory_usage_threshold = self.DEFAULT_HOST_MEMORY_USAGE_THRESHOLD
        if self._node_sensor.total_memory[
                "percent"] >= self._host_memory_usage_threshold:
            if not self.host_fault:
                self.host_fault = True
                # Host memory usage has crossed the configured threshold; raise a fault alert
                logger.warning("Host Memory usage increased to {}%, beyond configured threshold of {}%".\
                    format(self._node_sensor.total_memory["percent"], self._host_memory_usage_threshold))

                # Create the host update message and hand it over to the egress processor to transmit
                hostUpdateMsg = HostUpdateMsg(
                    self._node_sensor.host_id, self._epoch_time,
                    self._node_sensor.boot_time, self._node_sensor.up_time,
                    self._node_sensor.uname, self._units, self.site_id,
                    self.rack_id, self.node_id, self.cluster_id,
                    self._node_sensor.total_memory,
                    self._node_sensor.logged_in_users,
                    self._node_sensor.process_count,
                    self._node_sensor.running_process_count, self.FAULT)
                # Add in uuid if it was present in the json request
                if self._uuid is not None:
                    hostUpdateMsg.set_uuid(self._uuid)
                jsonMsg = hostUpdateMsg.getJson()
                # Transmit it out over rabbitMQ channel
                self.host_sensor_data = jsonMsg
                self.os_sensor_type["system"] = self.host_sensor_data
                self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                          jsonMsg)

        if (self._node_sensor.total_memory["percent"] <
                self._host_memory_usage_threshold) and self.host_fault:
            logger.warning("Host Memory usage decreased to {}%, below the configured threshold of {}%".\
                format(self._node_sensor.total_memory["percent"], self._host_memory_usage_threshold))
            # Create the host update message and hand it over to the egress processor to transmit
            hostUpdateMsg = HostUpdateMsg(
                self._node_sensor.host_id, self._epoch_time,
                self._node_sensor.boot_time, self._node_sensor.up_time,
                self._node_sensor.uname, self._units, self.site_id,
                self.rack_id, self.node_id, self.cluster_id,
                self._node_sensor.total_memory,
                self._node_sensor.logged_in_users,
                self._node_sensor.process_count,
                self._node_sensor.running_process_count, self.FAULT_RESOLVED)

            # Add in uuid if it was present in the json request
            if self._uuid is not None:
                hostUpdateMsg.set_uuid(self._uuid)
            jsonMsg = hostUpdateMsg.getJson()
            # Transmit it out over rabbitMQ channel
            self.host_sensor_data = jsonMsg
            self.os_sensor_type["system"] = self.host_sensor_data
            self._write_internal_msgQ(RabbitMQegressProcessor.name(), jsonMsg)
            self.host_fault = False
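The threshold-normalisation step above is repeated verbatim in the _generate_cpu_data and _generate_disk_space_alert methods that follow. A minimal standalone sketch of that pattern (the normalize_threshold helper name and the sample values are illustrative assumptions, not SSPL API):

def normalize_threshold(raw_value, default):
    # Accept either an int or a float threshold from config and fall back to
    # the supplied default if the configured value is not numeric, mirroring
    # the str()/isdigit()/float() conversion used in the _generate_* methods.
    raw_value = str(raw_value)
    try:
        return int(raw_value) if raw_value.isdigit() else float(raw_value)
    except ValueError:
        return default

# Illustrative usage with hypothetical config values
print(normalize_threshold("80", 90))      # -> 80
print(normalize_threshold("85.5", 90))    # -> 85.5
print(normalize_threshold("eighty", 90))  # -> 90 (falls back to the default)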
    def _generate_cpu_data(self):
        """Create & transmit a cpu_data message as defined
            by the sensor response json schema"""

        # Notify the node sensor to update its data required for the cpu_data message
        successful = self._node_sensor.read_data("cpu_data", self._get_debug())
        if not successful:
            logger.error(
                "NodeDataMsgHandler, _generate_cpu_data was NOT successful.")

        self._cpu_usage_threshold = str(self._cpu_usage_threshold)
        try:
            if self._cpu_usage_threshold.isdigit():
                self._cpu_usage_threshold = int(self._cpu_usage_threshold)
            else:
                self._cpu_usage_threshold = float(self._cpu_usage_threshold)
        except ValueError:
            logger.warning(
                "CPU Usage Alert, Invalid cpu_usage_threshold value entered in config."
            )
            # Assigning default value to _cpu_usage_threshold
            self._cpu_usage_threshold = self.DEFAULT_CPU_USAGE_THRESHOLD

        if self._node_sensor.cpu_usage >= self._cpu_usage_threshold:

            if not self.cpu_fault:
                self.cpu_fault = True
                # Create the cpu usage data message and hand it over to the egress processor to transmit
                logger.warning("CPU usage increased to {}%, beyond configured threshold of {}%".\
                    format(self._node_sensor.cpu_usage, self._cpu_usage_threshold))

                # Create the cpu data message and hand it over to the egress processor to transmit
                cpuDataMsg = CPUdataMsg(
                    self._node_sensor.host_id, self._epoch_time,
                    self._node_sensor.csps, self._node_sensor.idle_time,
                    self._node_sensor.interrupt_time,
                    self._node_sensor.iowait_time, self._node_sensor.nice_time,
                    self._node_sensor.softirq_time,
                    self._node_sensor.steal_time,
                    self._node_sensor.system_time, self._node_sensor.user_time,
                    self._node_sensor.cpu_core_data,
                    self._node_sensor.cpu_usage, self.site_id, self.rack_id,
                    self.node_id, self.cluster_id, self.FAULT)

                # Add in uuid if it was present in the json request
                if self._uuid is not None:
                    cpuDataMsg.set_uuid(self._uuid)
                jsonMsg = cpuDataMsg.getJson()
                self.cpu_sensor_data = jsonMsg
                self.os_sensor_type["cpu"] = self.cpu_sensor_data
                # Transmit it out over rabbitMQ channel
                self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                          jsonMsg)

        if (self._node_sensor.cpu_usage <
                self._cpu_usage_threshold) and self.cpu_fault:
            # Create the cpu usage data message and hand it over to the egress processor to transmit
            logger.warning("CPU usage decreased to {}%, below the configured threshold of {}%".\
                format(self._node_sensor.cpu_usage, self._cpu_usage_threshold))

            # Create the cpu data message and hand it over to the egress processor to transmit
            cpuDataMsg = CPUdataMsg(
                self._node_sensor.host_id, self._epoch_time,
                self._node_sensor.csps, self._node_sensor.idle_time,
                self._node_sensor.interrupt_time,
                self._node_sensor.iowait_time, self._node_sensor.nice_time,
                self._node_sensor.softirq_time, self._node_sensor.steal_time,
                self._node_sensor.system_time, self._node_sensor.user_time,
                self._node_sensor.cpu_core_data, self._node_sensor.cpu_usage,
                self.site_id, self.rack_id, self.node_id, self.cluster_id,
                self.FAULT_RESOLVED)

            # Add in uuid if it was present in the json request
            if self._uuid is not None:
                cpuDataMsg.set_uuid(self._uuid)
            jsonMsg = cpuDataMsg.getJson()
            self.cpu_sensor_data = jsonMsg
            self.os_sensor_type["cpu"] = self.cpu_sensor_data
            # Transmit it out over rabbitMQ channel
            self._write_internal_msgQ(RabbitMQegressProcessor.name(), jsonMsg)
            self.cpu_fault = False
    def _generate_disk_space_alert(self):
        """Create & transmit a disk_space_alert message as defined
            by the sensor response json schema"""

        # Notify the node sensor to update its data required for the disk_space_data message
        successful = self._node_sensor.read_data("disk_space_alert",
                                                 self._get_debug(),
                                                 self._units)
        if not successful:
            logger.error(
                "NodeDataMsgHandler, _generate_disk_space_alert was NOT successful."
            )
            return

        # Convert disk_usage_threshold to an int or float according to the value type entered in the config file
        self._disk_usage_threshold = str(self._disk_usage_threshold)
        try:
            if self._disk_usage_threshold.isdigit():
                self._disk_usage_threshold = int(self._disk_usage_threshold)
            else:
                self._disk_usage_threshold = float(self._disk_usage_threshold)
        except ValueError:
            logger.warning(
                "Disk Space Alert, Invalid disk_usage_threshold value entered in config."
            )
            # Assigning default value to _disk_usage_threshold
            self._disk_usage_threshold = self.DEFAULT_DISK_USAGE_THRESHOLD

        if self._node_sensor.disk_used_percentage >= self._disk_usage_threshold:
            if not self.disk_fault:
                self.disk_fault = True
                # Create the disk space data message and hand it over to the egress processor to transmit
                logger.warning("Disk usage increased to {}%, beyond configured threshold of {}%".\
                    format(self._node_sensor.disk_used_percentage, self._disk_usage_threshold))
                diskSpaceAlertMsg = DiskSpaceAlertMsg(
                    self._node_sensor.host_id, self._epoch_time,
                    self._node_sensor.total_space,
                    self._node_sensor.free_space,
                    self._node_sensor.disk_used_percentage, self._units,
                    self.site_id, self.rack_id, self.node_id, self.cluster_id,
                    self.FAULT)

                # Add in uuid if it was present in the json request
                if self._uuid is not None:
                    diskSpaceAlertMsg.set_uuid(self._uuid)
                jsonMsg = diskSpaceAlertMsg.getJson()
                self.disk_sensor_data = jsonMsg
                self.os_sensor_type["disk_space"] = self.disk_sensor_data
                # Transmit it out over rabbitMQ channel
                self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                          jsonMsg)

        if (self._node_sensor.disk_used_percentage <
                self._disk_usage_threshold) and self.disk_fault:
            # Create the disk space data message and hand it over to the egress processor to transmit
            logger.warning("Disk usage decreased to {}%, below the configured threshold of {}%".\
                format(self._node_sensor.disk_used_percentage, self._disk_usage_threshold))
            diskSpaceAlertMsg = DiskSpaceAlertMsg(
                self._node_sensor.host_id, self._epoch_time,
                self._node_sensor.total_space, self._node_sensor.free_space,
                self._node_sensor.disk_used_percentage, self._units,
                self.site_id, self.rack_id, self.node_id, self.cluster_id,
                self.FAULT_RESOLVED)

            # Add in uuid if it was present in the json request
            if self._uuid is not None:
                diskSpaceAlertMsg.set_uuid(self._uuid)
            jsonMsg = diskSpaceAlertMsg.getJson()
            self.disk_sensor_data = jsonMsg
            self.os_sensor_type["disk_space"] = self.disk_sensor_data
            # Transmit it out over rabbitMQ channel
            self._write_internal_msgQ(RabbitMQegressProcessor.name(), jsonMsg)
            self.disk_fault = False
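All three _generate_* methods above follow the same fault/fault_resolved pattern: send one FAULT alert when the metric crosses its threshold and one FAULT_RESOLVED alert when it drops back below it. A minimal standalone sketch of that state machine (the ThresholdAlert class and the send callback are illustrative assumptions, not SSPL API):

class ThresholdAlert:
    """Tracks a single metric against a threshold, emitting one "fault" event
    when the threshold is crossed and one "fault_resolved" event when the
    metric falls back below it, as the _generate_* methods above do."""

    def __init__(self, threshold, send):
        self.threshold = threshold
        self.send = send      # callback, e.g. hand the message to an egress processor
        self.fault = False

    def update(self, value):
        if value >= self.threshold and not self.fault:
            self.fault = True
            self.send("fault", value)
        elif value < self.threshold and self.fault:
            self.fault = False
            self.send("fault_resolved", value)

# Illustrative usage with a hypothetical 80% threshold
alert = ThresholdAlert(80, lambda state, value: print(f"{state}: {value}%"))
for usage in (50, 85, 90, 70):
    alert.update(usage)
# -> fault: 85%  then  fault_resolved: 70%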