def when_i_send_the_enclosure_sas_port_message_to_request_the_current_actuator_type_data(step, resource_type):
    egressMsg = {
      "username": "******",
      "description": "Seagate Storage Platform Library - Low Level - Actuator Request",
      "title": "SSPL-LL Actuator Request",
      "expires": 3600,
      "signature": "None",
      "time": "2019-11-21 08:37:27.144640",
      "message": {
        "sspl_ll_debug": {
          "debug_component": "sensor",
          "debug_enabled": True
        },
        "response_dest": {},
        "sspl_ll_msg_header": {
          "msg_version": "1.0.0",
          "uuid": "2ba55744-8218-40c2-8c2c-ea7bddf79c09",
          "schema_version": "1.0.0",
          "sspl_version": "1.0.0"
        },
        "actuator_request_type": {
          "storage_enclosure": {
            "enclosure_request": "ENCL: enclosure:interface:sas",
            "resource": "Expansion Port"
          }
        }
      }
    }
    world.sspl_modules[RabbitMQegressProcessor.name()]._write_internal_msgQ(RabbitMQegressProcessor.name(), egressMsg)
Example #2
def then_i_get_the_stop_successful_json_response_message(step):
    """I get the JSON response msg with 'thread_response': 'Stop Successful' key value"""
    time.sleep(2)
    module_name = None
    thread_response = None
    while not world.sspl_modules[RabbitMQegressProcessor.name()]._is_my_msgQ_empty():
        ingressMsg = world.sspl_modules[RabbitMQegressProcessor.name()]._read_my_msgQ()
        time.sleep(10)
        print("Received: %s" % ingressMsg)

        try:
            # Verify module name and thread response
            module_name = ingressMsg.get("actuator_response_type").get("thread_controller").get("module_name")
            thread_response = ingressMsg.get("actuator_response_type").get("thread_controller").get("thread_response")
            break
        except Exception as exception:
            time.sleep(2)
            print(exception)

    assert(module_name is not None)
    assert(thread_response is not None)
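# Note: the fixed time.sleep() calls above make this step slow and brittle. A
# minimal alternative sketch, assuming the same world.sspl_modules queue
# helpers, polls the egress queue against a deadline instead (the helper name
# _wait_for_thread_response is hypothetical, not part of SSPL):
def _wait_for_thread_response(expected_response, timeout=30, poll_interval=1):
    """Poll the egress queue until a matching thread_controller response
       arrives or the timeout expires; returns the response dict or None."""
    egress = world.sspl_modules[RabbitMQegressProcessor.name()]
    deadline = time.time() + timeout
    while time.time() < deadline:
        if egress._is_my_msgQ_empty():
            time.sleep(poll_interval)
            continue
        msg = egress._read_my_msgQ()
        try:
            thread_controller = msg["actuator_response_type"]["thread_controller"]
            if thread_controller["thread_response"] == expected_response:
                return thread_controller
        except (KeyError, TypeError):
            pass  # not the message we are waiting for; keep polling
    return None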
Example #3
def given_i_send_in_the_actuator_message_to_restart_raid_sensor(step):
    # Clear the message queue buffer out
    while not world.sspl_modules[RabbitMQingressProcessorTests.name()]._is_my_msgQ_empty():
        world.sspl_modules[RabbitMQingressProcessorTests.name()]._read_my_msgQ()

    egressMsg = {
        "title": "SSPL Actuator Request",
        "description": "Seagate Storage Platform Library - Actuator Request",
        "username" : "JohnDoe",
        "signature" : "None",
        "time" : "2015-05-29 14:28:30.974749",
        "expires" : 500,

        "message" : {
            "sspl_ll_msg_header": {
                "schema_version": "1.0.0",
                "sspl_version": "1.0.0",
                "msg_version": "1.0.0"
            },
            "actuator_request_type": {
                "thread_controller": {
                    "module_name" : "RAIDsensor",
                    "thread_request": "restart"
                }
            }
        }
    }
    world.sspl_modules[RabbitMQegressProcessor.name()]._write_internal_msgQ(RabbitMQegressProcessor.name(), egressMsg)
Example #4
def when_i_send_in_the_actuator_message_to_action_the_service(
        step, action, service):
    egressMsg = {
        "title": "SSPL Actuator Request",
        "description": "Seagate Storage Platform Library - Actuator Request",
        "username": "******",
        "signature": "None",
        "time": "2015-05-29 14:28:30.974749",
        "expires": 500,
        "message": {
            "sspl_ll_msg_header": {
                "schema_version": "1.0.0",
                "sspl_version": "1.0.0",
                "msg_version": "1.0.0"
            },
            "actuator_request_type": {
                "service_controller": {
                    "service_name": service,
                    "service_request": action
                }
            }
        }
    }
    world.sspl_modules[RabbitMQegressProcessor.name()]._write_internal_msgQ(
        RabbitMQegressProcessor.name(), egressMsg)
def when_i_send_in_the_controller_sensor_message_to_request_the_current_sensor_type_data(
        step, resource_type):
    egressMsg = {
        "title": "SSPL Actuator Request",
        "description": "Seagate Storage Platform Library - Actuator Request",
        "username": "******",
        "signature": "None",
        "time": "2015-05-29 14:28:30.974749",
        "expires": 500,
        "message": {
            "sspl_ll_msg_header": {
                "schema_version": "1.0.0",
                "sspl_version": "1.0.0",
                "msg_version": "1.0.0"
            },
            "sspl_ll_debug": {
                "debug_component": "sensor",
                "debug_enabled": True
            },
            "sensor_request_type": {
                "enclosure_alert": {
                    "info": {
                        "resource_type": resource_type
                    }
                }
            }
        }
    }
    world.sspl_modules[RabbitMQegressProcessor.name()]._write_internal_msgQ(
        RabbitMQegressProcessor.name(), egressMsg)
Example #6
def given_i_request_to_start_raid_sensor_and_then_i_request_a_thread_status(step):
    egressMsg = {
        "title": "SSPL Actuator Request",
        "description": "Seagate Storage Platform Library - Actuator Request",
        "username" : "JohnDoe",
        "signature" : "None",
        "time" : "2015-05-29 14:28:30.974749",
        "expires" : 500,

        "message" : {
            "sspl_ll_msg_header": {
                 "schema_version": "1.0.0",
                 "sspl_version": "1.0.0",
                 "msg_version": "1.0.0"
            },
            "actuator_request_type": {
                "thread_controller": {
                     "module_name" : "RAIDsensor",
                     "thread_request": "start"
                }
            }
        }
    }
    world.sspl_modules[RabbitMQegressProcessor.name()]._write_internal_msgQ(RabbitMQegressProcessor.name(), egressMsg)

    # Request the status of the RAID sensor thread we just started
    egressMsg = {
        "title": "SSPL Actuator Request",
        "description": "Seagate Storage Platform Library - Actuator Request",
        "username" : "JohnDoe",
        "signature" : "None",
        "time" : "2015-05-29 14:28:30.974749",
        "expires" : 500,

        "message" : {
            "sspl_ll_msg_header": {
                "schema_version": "1.0.0",
                "sspl_version": "1.0.0",
                "msg_version": "1.0.0"
            },
            "actuator_request_type": {
                "thread_controller": {
                    "module_name" : "RAIDsensor",
                    "thread_request": "status"
                }
            }
        }
    }
    world.sspl_modules[RabbitMQegressProcessor.name()]._write_internal_msgQ(RabbitMQegressProcessor.name(), egressMsg)
    def _execute_raid_request(self, node_request, actuator_instance, json_msg,
                              uuid):
        """Performs a RAID request by calling perform_request method of a RAID
           actuator.
        """
        # Perform the RAID request on the node and get the response
        raid_response = actuator_instance.perform_request(json_msg).strip()
        self._log_debug(f"_process_msg, raid_response: {raid_response}")

        ack_json_msg = AckResponseMsg(node_request, raid_response, uuid).getJson()
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), ack_json_msg)

        # Restart openhpid to update HPI data only if it is an H/W environment
        if self.setup in ["hw", "ssu"]:
            self._log_debug("restarting openhpid service to update HPI data")
            if "assemble" in json_msg.get("actuator_request_type").get(
                    "node_controller").get("node_request").lower():
                internal_json_msg = json.dumps({
                    "actuator_request_type": {
                        "service_controller": {
                            "service_name": "openhpid.service",
                            "service_request": "restart"
                        }
                    }
                })
                self._write_internal_msgQ(ServiceMsgHandler.name(),
                                          internal_json_msg)
Example #8
    def _send_to_msg_handler(self, msgType, message, uuid):
        # Hand off to appropriate actuator message handler
        if msgType.get("logging") is not None:
            self._write_internal_msgQ("LoggingMsgHandler", message)

        elif msgType.get("thread_controller") is not None:
            self._write_internal_msgQ("ThreadController", message)

        elif msgType.get("service_controller") is not None:
            self._write_internal_msgQ("ServiceMsgHandler", message)

        elif msgType.get("node_controller") is not None:
            self._write_internal_msgQ("NodeControllerMsgHandler", message)

        elif msgType.get("storage_enclosure") is not None:
            self._write_internal_msgQ("RealStorActuatorMsgHandler", message)

        # Hand off to appropriate sensor message handler
        elif msgType.get("node_data") is not None:
            self._write_internal_msgQ("NodeDataMsgHandler", message)

        elif msgType.get("enclosure_alert") is not None:
            self._write_internal_msgQ("RealStorEnclMsgHandler", message)

        elif msgType.get("storage_enclosure") is not None:
            self._write_internal_msgQ("RealStorActuatorMsgHandler", message)
        # ... handle other incoming messages that have been validated
        else:
            # Send ack about not finding a msg handler
            ack_msg = AckResponseMsg("Error Processing Message",
                                     "Message Handler Not Found",
                                     uuid).getJson()
            self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                      ack_msg)
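# Note: the elif chain above can equally be written as a lookup table; this is
# an illustrative sketch only (the _HANDLER_ROUTES constant is hypothetical,
# not part of SSPL), assuming the same internal handler names:
_HANDLER_ROUTES = {
    "logging": "LoggingMsgHandler",
    "thread_controller": "ThreadController",
    "service_controller": "ServiceMsgHandler",
    "node_controller": "NodeControllerMsgHandler",
    "storage_enclosure": "RealStorActuatorMsgHandler",
    "node_data": "NodeDataMsgHandler",
    "enclosure_alert": "RealStorEnclMsgHandler",
}

def _send_to_msg_handler(self, msgType, message, uuid):
    # Route the message to the first handler whose key is present in msgType
    for key, handler in _HANDLER_ROUTES.items():
        if msgType.get(key) is not None:
            self._write_internal_msgQ(handler, message)
            return
    # No handler matched; acknowledge the failure over the egress channel
    ack_msg = AckResponseMsg("Error Processing Message",
                             "Message Handler Not Found", uuid).getJson()
    self._write_internal_msgQ(RabbitMQegressProcessor.name(), ack_msg)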
Example #9
def _run_thread_capture_errors(curr_module, sspl_modules, msgQlist,
                               conf_reader, product):
    """Run the given thread and log any errors that happen on it.
    Will stop all sspl_modules if one of them fails."""
    try:
        # Each module is passed a reference list to message queues so it can transmit
        #  internal messages to other modules as desired
        curr_module.start_thread(conf_reader, msgQlist, product)

    except BaseException as ex:
        logger.critical(
            "SSPL-LL encountered a fatal error, terminating service. Error: %s"
            % ex)
        logger.exception(ex)

        # Populate an actuator response message and transmit back to HAlon
        error_msg = "SSPL-LL encountered an error, terminating service. " \
                    "Exception: %r" % ex
        json_msg = ThreadControllerMsg(curr_module.name(), error_msg).getJson()

        if product.lower() in [x.lower() for x in enabled_products]:
            curr_module._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                             json_msg)
        elif product.lower() in [x.lower() for x in cs_legacy_products]:
            curr_module._write_internal_msgQ(PlaneCntrlRMQegressProcessor.name(),
                                             json_msg)

        # Shut it down, error is non-recoverable
        for name, other_module in list(sspl_modules.items()):
            if other_module is not curr_module:
                other_module.shutdown()
Example #10
    def run(self):
        """Run the module periodically on its own thread."""
        if (self._product.lower() in [x.lower() for x in enabled_products]) and \
           not self._threads_initialized:
            if self._product.lower() in [x.lower() for x in cs_products]:
                # Wait for the dcs-collector to populate the /tmp/dcs/hpi directory
                while not os.path.isdir(self._hpi_base_dir):
                    logger.info("ThreadController, dir not found: %s " %
                                self._hpi_base_dir)
                    logger.info("ThreadController, rechecking in %s secs" %
                                self._start_delay)
                    time.sleep(int(self._start_delay))

            logger.debug("ThreadController._sspl_modules is {}".format(
                self._sspl_modules))
            # Allow other threads to initialize
            continue_waiting = False
            for (n, m) in self._sspl_modules.items():
                if not isinstance(m, SensorThread):
                    continue
                thread_init_status = m.get_thread_init_status()
                logger.debug("Thread status for {} is {}".format(
                    m.__class__, thread_init_status))
                if thread_init_status == SensorThreadState.FAILED:
                    m.shutdown()
                elif thread_init_status == SensorThreadState.WAITING:
                    continue_waiting = True

            if continue_waiting:
                self._scheduler.enter(10, self._priority, self.run, ())
                return

            # Notify external applications that SSPL-LL has started up successfully
            startup_msg = "SSPL-LL service has started successfully"
            json_msg = ThreadControllerMsg(ThreadController.name(),
                                           startup_msg).getJson()
            self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
            self._threads_initialized = True

            #self._set_debug(True)
            #self._set_debug_persist(True)
            self._log_debug("Start accepting requests")
        try:
            # Block on message queue until it contains an entry
            jsonMsg, _ = self._read_my_msgQ()
            if jsonMsg is not None:
                self._process_msg(jsonMsg)

            # Keep processing until the message queue is empty
            while not self._is_my_msgQ_empty():
                jsonMsg, _ = self._read_my_msgQ()
                if jsonMsg is not None:
                    self._process_msg(jsonMsg)

        except Exception as ex:
            # Log it and restart the whole process when a failure occurs
            logger.exception("ThreadController restarting: %r" % ex)

        self._scheduler.enter(1, self._priority, self.run, ())
        self._log_debug("Finished processing successfully")
Example #11
    def _send_ifdata_json_msg(self,
                              sensor_type,
                              resource_id,
                              resource_type,
                              state,
                              severity,
                              event=""):
        """A resuable method for transmitting IFDataMsg to RMQ and IEM logging"""
        ifDataMsg = IFdataMsg(self._node_sensor.host_id,
                              self._node_sensor.local_time,
                              self._node_sensor.if_data, resource_id,
                              resource_type, self.site_id, self.node_id,
                              self.cluster_id, self.rack_id, state, severity,
                              event)
        # Add in uuid if it was present in the json request
        if self._uuid is not None:
            ifDataMsg.set_uuid(self._uuid)
        jsonMsg = ifDataMsg.getJson()
        self.if_sensor_data = jsonMsg
        self.os_sensor_type[sensor_type] = self.if_sensor_data

        # Send the event to logging msg handler to send IEM message to journald
        #internal_json_msg=json.dumps({
        #                        'actuator_request_type': {
        #                            'logging': {
        #                                'log_level': 'LOG_WARNING',
        #                                'log_type': 'IEM',
        #                                'log_msg': '{}'.format(jsonMsg)}}})
        #self._write_internal_msgQ(LoggingMsgHandler.name(), internal_json_msg)

        # Transmit it out over rabbitMQ channel
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), jsonMsg)
Example #12
    def _generate_local_mount_data(self):
        """Create & transmit a local_mount_data message as defined
            by the sensor response json schema"""

        # Notify the node sensor to update its data required for the local_mount_data message
        successful = self._node_sensor.read_data("local_mount_data",
                                                 self._get_debug(),
                                                 self._units)
        if not successful:
            logger.error(
                "NodeDataMsgHandler, _generate_local_mount_data was NOT successful."
            )

        # Create the local mount data message and hand it over to the egress processor to transmit
        localMountDataMsg = LocalMountDataMsg(
            self._node_sensor.host_id, self._epoch_time,
            self._node_sensor.free_space, self._node_sensor.free_inodes,
            self._node_sensor.free_swap, self._node_sensor.total_space,
            self._node_sensor.total_swap, self._units)

        # Add in uuid if it was present in the json request
        if self._uuid is not None:
            localMountDataMsg.set_uuid(self._uuid)
        jsonMsg = localMountDataMsg.getJson()

        # Transmit it out over rabbitMQ channel
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), jsonMsg)
Example #13
    def check_RabbitMQegressProcessor_is_running(self):
        """Used by the shutdown_handler to allow queued egress msgs to complete"""
        if self._product.lower() in [x.lower() for x in enabled_products]:
            return self._sspl_modules[
                RabbitMQegressProcessor.name()].is_running()
        elif self._product.lower() in [x.lower() for x in cs_legacy_products]:
            return self._sspl_modules[
                PlaneCntrlRMQegressProcessor.name()].is_running()
Example #14
    def _process_msg(self, body):
        """Parses the incoming message and hands off to the appropriate module"""

        ingressMsg = {}
        uuid = None
        try:
            if isinstance(body, dict) is False:
                ingressMsg = json.loads(body)
            else:
                ingressMsg = body

            # Authenticate message using username and signature fields
            username = ingressMsg.get("username")
            signature = ingressMsg.get("signature")
            message = ingressMsg.get("message")
            uuid = ingressMsg.get("uuid")
            msg_len = len(message) + 1

            if uuid is None:
                uuid = "N/A"

            if use_security_lib and \
                    SSPL_SEC.sspl_verify_message(msg_len, str(message),
                                                 username, signature) != 0:
                logger.warn(
                    "RabbitMQingressProcessor, Authentication failed on message: %s" % ingressMsg)
                return

            # Get the incoming message type
            if message.get("actuator_request_type") is not None:
                msgType = message.get("actuator_request_type")

                # Validate against the actuator schema
                validate(ingressMsg, self._actuator_schema)

            elif message.get("sensor_request_type") is not None:
                msgType = message.get("sensor_request_type")

                # Validate against the sensor schema
                validate(ingressMsg, self._sensor_schema)

            else:
                # We only handle incoming actuator and sensor requests, ignore
                # everything else.
                return

            # Check for debugging being activated in the message header
            self._check_debug(message)
            self._log_debug("_process_msg, ingressMsg: %s" % ingressMsg)

            self._send_to_msg_handler(msgType, message, uuid)

        except Exception as ex:
            logger.error(
                "RabbitMQingressProcessor, _process_msg unrecognized message: %r" % ingressMsg)
            ack_msg = AckResponseMsg("Error Processing Msg",
                                     "Msg Handler Not Found", uuid).getJson()
            self._write_internal_msgQ(RabbitMQegressProcessor.name(), ack_msg)
Example #15
    def _process_msg(self, jsonMsg):
        """Parses the incoming message and hands off to the appropriate logger"""
        self._log_debug(f"_process_msg, jsonMsg: {jsonMsg}")

        if isinstance(jsonMsg, dict) is False:
            jsonMsg = json.loads(jsonMsg)

        uuid = None
        if jsonMsg.get("sspl_ll_msg_header") is not None and \
           jsonMsg.get("sspl_ll_msg_header").get("uuid") is not None:
            uuid = jsonMsg.get("sspl_ll_msg_header").get("uuid")
            self._log_debug(f"_process_msg, uuid: {uuid}")

        log_type = jsonMsg.get("actuator_request_type").get("logging").get("log_type")

        result = "N/A"

        # Disabled for LDR_R1
        # if log_type == "IEM":
        #     self._log_debug("_process_msg, msg_type: IEM")
        #     if self._iem_log_locally == "true":
        #         result = self._iem_logger.log_msg(jsonMsg)
        #         self._log_debug(f"Log IEM results: {result}")

        if log_type == "HDS":
            # Retrieve the serial number of the drive
            self._log_debug("_process_msg, msg_type: HDS")
            log_msg = jsonMsg.get("actuator_request_type").get("logging").get("log_msg")

            # Parse out the json data section in the IEM and replace single quotes with double
            json_data = json.loads(str('{' + log_msg.split('{')[1]).replace("'", '"'))

            serial_number = json_data.get("serial_number")
            status = json_data.get("status")
            reason = json_data.get("reason")
            self._log_debug(f"_processMsg, serial_number: {serial_number}, status:{status}, reason: {reason}")

            # Send a message to the disk manager handler to create and transmit json msg
            internal_json_msg = json.dumps(
                 {"sensor_response_type" : "disk_status_HDS",
                  "object_path" : "HDS",
                  "status" : status,
                  "reason" : reason,
                  "serial_number" : serial_number
                 })

            # Send the event to disk message handler to generate json message
            self._write_internal_msgQ("DiskMsgHandler", internal_json_msg)

            # Disabled for LDR_R1
            # Hand off to the IEM logger
            # result = self._iem_logger.log_msg(jsonMsg)

            # Send ack about logging msg
            ack_msg = AckResponseMsg(log_type, result, uuid).getJson()
            self._write_internal_msgQ(RabbitMQegressProcessor.name(), ack_msg)
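# Note: the log_msg parsing above (split on '{', re-quote, json.loads) is
# easiest to follow on a concrete string; the IEM payload below is made up
# purely for illustration:
import json

log_msg = ("IEC: 020003001: Drive status changed: "
           "{'serial_number': 'Z4D02ABC', 'status': 'EMPTY', 'reason': 'removed'}")

# Same technique as _process_msg above: keep everything from the first '{',
# then swap single quotes for double quotes so json.loads accepts it
json_data = json.loads(str('{' + log_msg.split('{')[1]).replace("'", '"'))

print(json_data["serial_number"], json_data["status"], json_data["reason"])
# -> Z4D02ABC EMPTY removed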
def when_i_send_in_the_enclosure_actuator_message_to_request_the_current_sensor_type_data(step, resource_type, resource_id):
    egressMsg = {
        "title": "SSPL Actuator Request",
        "description": "Seagate Storage Platform Library - Actuator Request",

        "username" : "JohnDoe",
        "signature" : "None",
        "time" : "2015-05-29 14:28:30.974749",
        "expires" : 500,

        "message" : {
            "sspl_ll_msg_header": {
                "schema_version": "1.0.0",
                "sspl_version": "1.0.0",
                "msg_version": "1.0.0"
            },
             "sspl_ll_debug": {
                "debug_component" : "sensor",
                "debug_enabled" : True
            },
            "response_dest": {},
            "sspl_ll_msg_header": {
            "msg_version": "1.0.0",
            "uuid": "16476007-a739-4785-b5c7-f3de189cdf9d",
            "schema_version": "1.0.0",
            "sspl_version": "1.0.0"
            },
            "request_path": {
                "site_id": 0,
                "node_id": 1,
                "rack_id": 0,
                "cluster_id": 1
            },
            "actuator_request_type": {
                "storage_enclosure": {
                    "enclosure_request": resource_type,
                    "resource": resource_id
                }
            }
        }
        }
    world.sspl_modules[RabbitMQegressProcessor.name()]._write_internal_msgQ(RabbitMQegressProcessor.name(), egressMsg)
    def _generate_enclosure_alert(self, json_msg, host_name, alert_type, alert_id,
                                            severity, info, specific_info, sensor_type):
        """Parses the json message, also validates it and then send it to the
            RabbitMQ egress processor"""

        self._log_debug(f"RealStorEnclMsgHandler, _generate_enclosure_alert,\
            json_msg {json_msg}")

        real_stor_encl_msg = RealStorEnclDataMsg(host_name, alert_type, alert_id, severity,
                                                info, specific_info)
        json_msg = real_stor_encl_msg.getJson()
        self._enclosure_message = json_msg
        self._fru_type[sensor_type] = self._enclosure_message
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg, self._event)
    def _generate_psu_alert(self, json_msg, host_name, alert_type, alert_id,
                                             severity, info, specific_info, sensor_type):
        """Parses the json message, also validates it and then send it to the
           RabbitMQ egress processor"""

        self._log_debug(f"RealStorEnclMsgHandler, _generate_psu_alert,\
            json_msg {json_msg}")

        real_stor_psu_data_msg = \
            RealStorPSUDataMsg(host_name, alert_type, alert_id, severity, info, specific_info)
        json_msg = real_stor_psu_data_msg.getJson()

        # Saves the json message in memory to serve sspl CLI sensor request
        self._psu_sensor_message = json_msg
        self._fru_type[sensor_type] = self._psu_sensor_message
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg, self._event)
Example #19
    def _check_reset_all_modules(self, jsonMsg):
        """Restarts all modules with debug mode off. Activated by internal_msgQ"""
        if jsonMsg.get("sspl_ll_debug") is not None and \
            jsonMsg.get("sspl_ll_debug").get("debug_component") is not None and \
            jsonMsg.get("sspl_ll_debug").get("debug_component") == "all":
                for module in self._sspl_modules:
                    self._log_debug("_check_reset_all_modules, module: %s" % module)
                    # Don't restart this thread or it won't complete the loop
                    if module != self.name():
                        self._restart_module(module)

                # Populate an actuator response message and transmit
                msgString = ThreadControllerMsg("All Modules", "Restarted with debug mode off").getJson()
                self._write_internal_msgQ(RabbitMQegressProcessor.name(), msgString)
                return True

        return False
Example #20
    def _route_IEM(self, jsonMsg):
        # Send the IEM to the logging msg handler to be processed

        # Get the optional log_level if it exists in msg
        if jsonMsg.get("actuator_request_type").get("logging").get("log_level") is not None:
            log_level = jsonMsg.get("actuator_request_type").get("logging").get("log_level")
        else:
            log_level = "LOG_INFO"

        # Get the message to log in format "IEC: EVENT_CODE: EVENT_STRING: JSON DATA"
        log_msg = f"{log_level} {jsonMsg.get('actuator_request_type').get('logging').get('log_msg')}"

        internal_json_msg = json.dumps(
                 {"message": {
                    "IEM_routing": {
                        "log_msg": log_msg
                        }
                    }
                 })
        # Send the IEM to RabbitMQegressProcessor to be routed to another IEM listener
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), internal_json_msg)
Example #21
    def _generate_node_fru_data(self, jsonMsg):
        """Create & transmit a FRU IPMI data message as defined
            by the sensor response json schema"""

        if self._node_sensor.host_id is None:
            successful = self._node_sensor.read_data("None", self._get_debug(),
                                                     self._units)
            if not successful:
                logger.error(
                    "NodeDataMsgHandler, updating host information was NOT successful."
                )

        if jsonMsg.get("sensor_request_type").get("node_data") is not None:
            self._fru_info = jsonMsg.get("sensor_request_type").get(
                "node_data")
            node_ipmi_data_msg = NodeIPMIDataMsg(self._fru_info)

        if self._uuid is not None:
            node_ipmi_data_msg.set_uuid(self._uuid)
        jsonMsg = node_ipmi_data_msg.getJson()
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), jsonMsg)
    def _process_msg(self, jsonMsg):
        """Parses the incoming message and handles appropriately"""
        self._log_debug(f"RealStorActuatorMsgHandler, _process_msg, jsonMsg: {jsonMsg}")

        if isinstance(jsonMsg, dict) is False:
            jsonMsg = json.loads(jsonMsg)

        # Parse out the uuid so that it can be sent back in Ack message
        uuid = None
        if jsonMsg.get("sspl_ll_msg_header").get("uuid") is not None:
            uuid = jsonMsg.get("sspl_ll_msg_header").get("uuid")
            self._log_debug(f"_processMsg, uuid: {uuid}")

        logger.debug(f"RealStorActuatorMsgHandler: _process_msg: jsonMsg: {jsonMsg}")
        if jsonMsg.get("actuator_request_type").get("storage_enclosure").get("enclosure_request") is not None:
            enclosure_request = jsonMsg.get("actuator_request_type").get("storage_enclosure").get("enclosure_request")
            self._log_debug(f"_processMsg, enclosure_request: {enclosure_request}")
            logger.debug(f"RealStorActuatorMsgHandler: _process_msg: INSIDE: jsonMsg: {jsonMsg}")

            # Parse out the request field in the enclosure_request
            (request, fru) = enclosure_request.split(":", 1)
            request = request.strip()
            fru = fru.strip()

            if self._real_stor_actuator is None:
                try:
                    from actuators.impl.generic.realstor_encl import RealStorActuator
                    self._real_stor_actuator = RealStorActuator()
                except ImportError as e:
                    logger.warn("RealStor Actuator not loaded: %s" % e)
                    return

            # Perform the request and get the response
            real_stor_response = self._real_stor_actuator.perform_request(jsonMsg)
            self._log_debug(f"_process_msg, RealStor response: {real_stor_response}")

            json_msg = RealStorActuatorMsg(real_stor_response, uuid).getJson()
            self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
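# Note: enclosure_request.split(":", 1) above separates the request keyword
# from the FRU path. For the SAS-port request string used in the first example
# it behaves as follows:
enclosure_request = "ENCL: enclosure:interface:sas"

# Split once on the first ':' to isolate the request keyword from the FRU path
request, fru = enclosure_request.split(":", 1)
request = request.strip()   # "ENCL"
fru = fru.strip()           # "enclosure:interface:sas"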
    def _process_msg(self, json_msg):
        """Parses the incoming message and generate the desired data message"""
        self._log_debug(f"RealStorEnclMsgHandler, _process_msg, json_msg: {json_msg}")

        if json_msg.get("sensor_request_type").get("enclosure_alert") is not None:
            internal_sensor_request = json_msg.get("sensor_request_type").\
                                        get("enclosure_alert").get("status")
            if internal_sensor_request:
                resource_type = json_msg.get("sensor_request_type").\
                                get("enclosure_alert").get("info").get("resource_type")
                if ":" in resource_type:
                    sensor_type = resource_type.split(":")[2]
                else:
                    sensor_type = resource_type
                self._propagate_alert(json_msg, sensor_type)
            else:
                # serves the request coming from sspl CLI
                sensor_type = json_msg.get("sensor_request_type").\
                                get("enclosure_alert").get("info").\
                                    get("resource_type")
                if ":" in sensor_type:
                    sensor_type = sensor_type.split(":")[2]
                else:
                    sensor_type = sensor_type
                sensor_message_type = self._fru_type.get(sensor_type, "")

                # get the previously saved json message for the sensor type
                # and send the RabbitMQ Message
                if sensor_message_type:
                    self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                              sensor_message_type, self._event)
                else:
                    self._log_debug(f"RealStorEnclMsgHandler, _process_msg, \
                        No past data found for {sensor_type} sensor type")
        else:
            logger.exception("RealStorEnclMsgHandler, _process_msg,\
                Not a valid sensor request format")
Example #24
    def _transmit_json_msg(self, json_data):
        """Transmit message to halon by passing it to egress msg handler"""
        json_data["trapName"] = self._trap_name
        json_msg = SNMPtrapMsg(json_data).getJson()
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
Example #25
    def _send_msg(self, iem_components, log_timestamp):
        """Creates JSON message from iem components and sends to RabbitMQ
           channel.
        """
        # IEM format is IEC:DESCRIPTION
        # IEC format is SEVERITY|SOURCEID|COMPONENTID|MODULEID|EVENTID
        # Field lengths ----1---|---1----|------3----|----3---|---4---
        # Example IEM -> "IEC: BO1001000001:Error in connecting to controller"
        # Actual IEC doesn't contain separator between fields. It is shown
        # here just for readability. Each field has fixed length.
        severity, source_id, component_id, module_id, event_id, description = \
                                                        [iem_components[i] for i in range(6)]

        # Check if severity level is valid
        if severity not in self.SEVERITY_LEVELS:
            logger.warn(f"Invalid Severity level: {severity}")
            return

        # Check for valid source id
        if source_id not in self.SOURCE_IDS:
            logger.warn(f"Invalid Source ID level: {source_id}")
            return

        # Check for valid event time
        event_time = self._get_epoch_time_from_timestamp(log_timestamp)
        if not event_time:
            logger.error("Timestamp is not in required format, discarding the message")
            return

        # Check for other components
        args = {
            "_comp_id": component_id,
            "_module_id": module_id,
            "_event_id": event_id
        }
        if not self._are_components_in_range(**args):
            return

        # Update severity and source_id
        alert_type = iem_severity_to_alert_mapping.get(severity)
        severity = iem_severity_types.get(severity, severity)
        source_id = iem_source_types.get(source_id, source_id)

        # Decode component_id, module_id and event_id
        component_id, module_id, event_id = self._decode_msg( f"{component_id}{module_id}{event_id}")

        info = {
            "site_id": self._site_id,
            "rack_id": self._rack_id,
            "node_id": self._node_id,
            "cluster_id" : self._cluster_id,
            "source_id": source_id,
            "component_id": component_id,
            "module_id": module_id,
            "event_id": event_id,
            "severity": severity,
            "description": description,
            "alert_type": alert_type,
            "event_time": event_time,
            "IEC": "".join(iem_components[:-1])
        }
        iem_data_msg = IEMDataMsg(info)
        json_msg = iem_data_msg.getJson()
        self._write_internal_msgQ(RabbitMQegressProcessor.name(), json_msg)
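# Note: given the fixed field widths documented above (1|1|3|3|4), an IEC such
# as "BO1001000001" can be sliced positionally. This illustrative helper is
# not part of the handler:
def split_iec(iec):
    """Slice a fixed-width IEC into severity, source, component, module and
       event fields (1, 1, 3, 3 and 4 characters respectively)."""
    return iec[0], iec[1], iec[2:5], iec[5:8], iec[8:12]

severity, source_id, component_id, module_id, event_id = split_iec("BO1001000001")
# -> ('B', 'O', '100', '100', '0001')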
Example #26
    def _generate_host_update(self):
        """Create & transmit a host update message as defined
            by the sensor response json schema"""

        # Notify the node sensor to update its data required for the host_update message
        successful = self._node_sensor.read_data("host_update",
                                                 self._get_debug(),
                                                 self._units)
        if not successful:
            logger.error(
                "NodeDataMsgHandler, _generate_host_update was NOT successful."
            )

        self._host_memory_usage_threshold = str(
            self._host_memory_usage_threshold)
        try:
            if self._host_memory_usage_threshold.isdigit():
                self._host_memory_usage_threshold = int(
                    self._host_memory_usage_threshold)
            else:
                self._host_memory_usage_threshold = float(
                    self._host_memory_usage_threshold)
        except ValueError:
            logger.warning(
                "Host Memory Alert, Invalid host_memory_usage_threshold value entered in config."
            )
            # Assigning default value to _host_memory_usage_threshold
            self._host_memory_usage_threshold = self.DEFAULT_HOST_MEMORY_USAGE_THRESHOLD
        if self._node_sensor.total_memory[
                "percent"] >= self._host_memory_usage_threshold:
            if not self.host_fault:
                self.host_fault = True
                fault_event = "Host memory usage increased to %s, beyond the configured threshold of %s" \
                                %(self._node_sensor.total_memory["percent"], self._host_memory_usage_threshold)

                logger.warning(fault_event)

                logged_in_users = []
                # Create the host update message and hand it over to the egress processor to transmit
                hostUpdateMsg = HostUpdateMsg(
                    self._node_sensor.host_id, self._epoch_time,
                    self._node_sensor.boot_time, self._node_sensor.up_time,
                    self._node_sensor.uname, self._units, self.site_id,
                    self.rack_id, self.node_id, self.cluster_id,
                    self._node_sensor.total_memory,
                    self._node_sensor.logged_in_users,
                    self._node_sensor.process_count,
                    self._node_sensor.running_process_count, self.FAULT,
                    fault_event)
                # Add in uuid if it was present in the json request
                if self._uuid is not None:
                    hostUpdateMsg.set_uuid(self._uuid)
                jsonMsg = hostUpdateMsg.getJson()
                # Transmit it out over rabbitMQ channel
                self.host_sensor_data = jsonMsg
                self.os_sensor_type["memory_usage"] = self.host_sensor_data
                self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                          jsonMsg)

        if (self._node_sensor.total_memory["percent"] <
                self._host_memory_usage_threshold) and self.host_fault:
            fault_resolved_event = "Host memory usage decreased to %s, below the configured threshold of %s" \
                                    %(self._node_sensor.total_memory["percent"], self._host_memory_usage_threshold)
            logger.warning(fault_resolved_event)
            logged_in_users = []
            # Create the host update message and hand it over to the egress processor to transmit
            hostUpdateMsg = HostUpdateMsg(
                self._node_sensor.host_id, self._epoch_time,
                self._node_sensor.boot_time, self._node_sensor.up_time,
                self._node_sensor.uname, self._units, self.site_id,
                self.rack_id, self.node_id, self.cluster_id,
                self._node_sensor.total_memory,
                self._node_sensor.logged_in_users,
                self._node_sensor.process_count,
                self._node_sensor.running_process_count, self.FAULT_RESOLVED,
                fault_resolved_event)

            # Add in uuid if it was present in the json request
            if self._uuid is not None:
                hostUpdateMsg.set_uuid(self._uuid)
            jsonMsg = hostUpdateMsg.getJson()
            # Transmit it out over rabbitMQ channel
            self.host_sensor_data = jsonMsg
            self.os_sensor_type["memory_usage"] = self.host_sensor_data

            self._write_internal_msgQ(RabbitMQegressProcessor.name(), jsonMsg)
            self.host_fault = False
Example #27
    def _generate_cpu_data(self):
        """Create & transmit a cpu_data message as defined
            by the sensor response json schema"""

        # Notify the node sensor to update its data required for the cpu_data message
        successful = self._node_sensor.read_data("cpu_data", self._get_debug())
        if not successful:
            logger.error(
                "NodeDataMsgHandler, _generate_cpu_data was NOT successful.")

        self._cpu_usage_threshold = str(self._cpu_usage_threshold)
        try:
            if self._cpu_usage_threshold.isdigit():
                self._cpu_usage_threshold = int(self._cpu_usage_threshold)
            else:
                self._cpu_usage_threshold = float(self._cpu_usage_threshold)
        except ValueError:
            logger.warning(
                "CPU Usage Alert, Invalid cpu_usage_threshold value entered in config."
            )
            # Assigning default value to _cpu_usage_threshold
            self._cpu_usage_threshold = self.DEFAULT_CPU_USAGE_THRESHOLD

        if self._node_sensor.cpu_usage >= self._cpu_usage_threshold:

            if not self.cpu_fault:
                self.cpu_fault = True
                # Create the cpu usage data message and hand it over to the egress processor to transmit

                fault_event = "CPU usage increased to %s, beyond configured threshold of %s" \
                                %(self._node_sensor.cpu_usage, self._cpu_usage_threshold)
                logger.warning(fault_event)

                # Create the local mount data message and hand it over to the egress processor to transmit
                cpuDataMsg = CPUdataMsg(
                    self._node_sensor.host_id, self._epoch_time,
                    self._node_sensor.csps, self._node_sensor.idle_time,
                    self._node_sensor.interrupt_time,
                    self._node_sensor.iowait_time, self._node_sensor.nice_time,
                    self._node_sensor.softirq_time,
                    self._node_sensor.steal_time,
                    self._node_sensor.system_time, self._node_sensor.user_time,
                    self._node_sensor.cpu_core_data,
                    self._node_sensor.cpu_usage, self.site_id, self.rack_id,
                    self.node_id, self.cluster_id, self.FAULT, fault_event)

                # Add in uuid if it was present in the json request
                if self._uuid is not None:
                    cpuDataMsg.set_uuid(self._uuid)
                jsonMsg = cpuDataMsg.getJson()
                self.cpu_sensor_data = jsonMsg
                self.os_sensor_type["cpu_usage"] = self.cpu_sensor_data

                # Transmit it out over rabbitMQ channel
                self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                          jsonMsg)

        if (self._node_sensor.cpu_usage <=
                self._cpu_usage_threshold) and self.cpu_fault:
            # Create the cpu usage data message and hand it over to the egress processor to transmit
            fault_resolved_event = "CPU usage decreased to %s, below the configured threshold of %s" \
                %(self._node_sensor.cpu_usage, self._cpu_usage_threshold)
            logger.warning(fault_resolved_event)

            # Create the cpu data message and hand it over to the egress processor to transmit
            cpuDataMsg = CPUdataMsg(
                self._node_sensor.host_id, self._epoch_time,
                self._node_sensor.csps, self._node_sensor.idle_time,
                self._node_sensor.interrupt_time,
                self._node_sensor.iowait_time, self._node_sensor.nice_time,
                self._node_sensor.softirq_time, self._node_sensor.steal_time,
                self._node_sensor.system_time, self._node_sensor.user_time,
                self._node_sensor.cpu_core_data, self._node_sensor.cpu_usage,
                self.site_id, self.rack_id, self.node_id, self.cluster_id,
                self.FAULT_RESOLVED, fault_resolved_event)

            # Add in uuid if it was present in the json request
            if self._uuid is not None:
                cpuDataMsg.set_uuid(self._uuid)
            jsonMsg = cpuDataMsg.getJson()
            self.cpu_sensor_data = jsonMsg
            self.os_sensor_type["cpu_usage"] = self.cpu_sensor_data

            # Transmit it out over rabbitMQ channel
            self._write_internal_msgQ(RabbitMQegressProcessor.name(), jsonMsg)
            self.cpu_fault = False
Example #28
    def _process_msg(self, jsonMsg):
        """Parses the incoming message and calls the appropriate method"""
        self._log_debug("_process_msg, jsonMsg: %s" % jsonMsg)

        # Check to see if debug mode is being globally turned off on all modules
        if self._check_reset_all_modules(jsonMsg) is True:
            return

        # Parse out the module name and request
        module_name = jsonMsg.get("actuator_request_type").get(
            "thread_controller").get("module_name")
        thread_request = jsonMsg.get("actuator_request_type").get(
            "thread_controller").get("thread_request")

        # Parse out the uuid so that it can be sent back in Ack message
        uuid = None
        if jsonMsg.get("sspl_ll_msg_header") is not None and \
           jsonMsg.get("sspl_ll_msg_header").get("uuid") is not None:
            uuid = jsonMsg.get("sspl_ll_msg_header").get("uuid")
            self._log_debug("_processMsg, uuid: %s" % uuid)

        # Pass along the debug section to the module
        if jsonMsg.get("sspl_ll_debug") is not None:
            self.debug_section = {"sspl_ll_debug": {}}
            self.debug_section["sspl_ll_debug"] = jsonMsg.get("sspl_ll_debug")
        else:
            self.debug_section = None

        self._log_debug("_process_msg, self.debug_section: %s" %
                        self.debug_section)

        # Parse out thread request and call the appropriate method
        if thread_request == "restart":
            self._restart_module(module_name)
        elif thread_request == "start":
            self._start_module(module_name)
        elif thread_request == "stop":
            # Don't let the outside world stop us from using the RabbitMQ connection or shut down this thread
            if module_name == "RabbitMQegressProcessor" or \
                module_name == "RabbitMQingressProcessor" or \
                module_name == "ThreadController":
                logger.warn(
                    "Attempt to stop RabbitMQ or ThreadController Processors, "
                    "ignoring. Please try 'restart' instead.")
                return
            self._stop_module(module_name)
        elif thread_request == "status":
            self._status_module(module_name)
        elif thread_request == "degrade":
            if module_name.lower() != "all":
                logger.warn(
                    "Invalid module_name {0}. Need 'all' in module_name".
                    format(module_name))
                return
            self._switch_to_degraded_state(self._sspl_modules)
        elif thread_request == "active":
            if module_name.lower() != "all":
                logger.warn(
                    "Invalid module_name {0}. Need 'all' in module_name".
                    format(module_name))
                return
            self._switch_to_active_state(self._sspl_modules)
        else:
            self._thread_response = "Error, unrecognized thread request"

        node_id = []
        if jsonMsg.get("actuator_request_type").get("thread_controller").get("parameters") is not None and \
           jsonMsg.get("actuator_request_type").get("thread_controller").get("parameters").get("node_id"):
            node_id = jsonMsg.get("actuator_request_type").get(
                "thread_controller").get("parameters").get("node_id")

        ack_type = {}
        ack_type["hostname"] = self._hostname
        ack_type["node_id"] = node_id

        # Populate an actuator response message and transmit
        threadControllerMsg = ThreadControllerMsg(module_name, self._thread_response, \
                                                  json.dumps(ack_type))

        if uuid is not None:
            threadControllerMsg.set_uuid(uuid)
        msgString = threadControllerMsg.getJson()
        logger.info("ThreadController, response: %s" % str(msgString))
        if self._product.lower() in [x.lower() for x in enabled_products]:
            self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                      msgString)
        elif self._product.lower() in [x.lower() for x in cs_legacy_products]:
            self._write_internal_msgQ(PlaneCntrlRMQegressProcessor.name(),
                                      msgString)
Example #29
    def _generate_disk_space_alert(self):
        """Create & transmit a disk_space_alert message as defined
            by the sensor response json schema"""

        # Notify the node sensor to update its data required for the disk_space_data message
        successful = self._node_sensor.read_data("disk_space_alert",
                                                 self._get_debug(),
                                                 self._units)
        if not successful:
            logger.error(
                "NodeDataMsgHandler, _generate_disk_space_alert was NOT successful."
            )
            return

        # Coerce disk_usage_threshold to the numeric type entered in the config file
        self._disk_usage_threshold = str(self._disk_usage_threshold)
        try:
            if self._disk_usage_threshold.isdigit():
                self._disk_usage_threshold = int(self._disk_usage_threshold)
            else:
                self._disk_usage_threshold = float(self._disk_usage_threshold)
        except ValueError:
            logger.warning(
                "Disk Space Alert, Invalid disk_usage_threshold value entered in config."
            )
            # Assigning default value to _disk_usage_threshold
            self._disk_usage_threshold = self.DEFAULT_DISK_USAGE_THRESHOLD

        if self._node_sensor.disk_used_percentage >= self._disk_usage_threshold:
            if not self.disk_fault:
                self.disk_fault = True
                # Create the disk space data message and hand it over to the egress processor to transmit
                fault_event = "Disk usage increased to %s, beyond configured threshold of %s" \
                                %(self._node_sensor.disk_used_percentage, self._disk_usage_threshold)
                logger.warning(fault_event)
                diskSpaceAlertMsg = DiskSpaceAlertMsg(
                    self._node_sensor.host_id, self._epoch_time,
                    self._node_sensor.total_space,
                    self._node_sensor.free_space,
                    self._node_sensor.disk_used_percentage, self._units,
                    self.site_id, self.rack_id, self.node_id, self.cluster_id,
                    self.FAULT, fault_event)

                # Add in uuid if it was present in the json request
                if self._uuid is not None:
                    diskSpaceAlertMsg.set_uuid(self._uuid)
                jsonMsg = diskSpaceAlertMsg.getJson()
                self.disk_sensor_data = jsonMsg
                self.os_sensor_type["disk_space"] = self.disk_sensor_data

                # Transmit it out over rabbitMQ channel
                self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                          jsonMsg)

        if (self._node_sensor.disk_used_percentage <=
                self._disk_usage_threshold) and self.disk_fault:
            # Create the disk space data message and hand it over to the egress processor to transmit
            fault_resolved_event = "Disk usage decreased to %s, below the configured threshold of %s" \
                                %(self._node_sensor.disk_used_percentage, self._disk_usage_threshold)
            logger.warning(fault_resolved_event)
            diskSpaceAlertMsg = DiskSpaceAlertMsg(
                self._node_sensor.host_id, self._epoch_time,
                self._node_sensor.total_space, self._node_sensor.free_space,
                self._node_sensor.disk_used_percentage, self._units,
                self.site_id, self.rack_id, self.node_id, self.cluster_id,
                self.FAULT_RESOLVED, fault_resolved_event)

            # Add in uuid if it was present in the json request
            if self._uuid is not None:
                diskSpaceAlertMsg.set_uuid(self._uuid)
            jsonMsg = diskSpaceAlertMsg.getJson()
            self.disk_sensor_data = jsonMsg
            self.os_sensor_type["disk_space"] = self.disk_sensor_data

            # Transmit it out over rabbitMQ channel
            self._write_internal_msgQ(RabbitMQegressProcessor.name(), jsonMsg)
            self.disk_fault = False
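# Note: the int/float coercion of the configured threshold is repeated almost
# verbatim in the host-memory, CPU and disk-space generators above. A hedged
# sketch of a shared helper (the name _parse_threshold is hypothetical) that
# could replace those blocks:
def _parse_threshold(raw_value, default):
    """Return the configured threshold as int or float, falling back to
       `default` when the configured value is not numeric."""
    raw_value = str(raw_value)
    try:
        return int(raw_value) if raw_value.isdigit() else float(raw_value)
    except ValueError:
        logger.warning("Invalid threshold value %r in config, using default %s"
                       % (raw_value, default))
        return default

# e.g. self._disk_usage_threshold = _parse_threshold(
#          self._disk_usage_threshold, self.DEFAULT_DISK_USAGE_THRESHOLD)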
    def _process_msg(self, ch, method, properties, body):
        """Parses the incoming message and hands off to the appropriate module"""

        ingressMsg = {}
        uuid = None
        try:
            if isinstance(body, dict) is False:
                ingressMsg = json.loads(body)
            else:
                ingressMsg = body

            # Authenticate message using username and signature fields
            username = ingressMsg.get("username")
            signature = ingressMsg.get("signature")
            message = ingressMsg.get("message")
            uuid = ingressMsg.get("uuid")
            msg_len = len(message) + 1

            if uuid is None:
                uuid = "N/A"

            if use_security_lib and \
               SSPL_SEC.sspl_verify_message(msg_len, str(message), username, signature) != 0:
                logger.warn(
                    "RabbitMQingressProcessor, Authentication failed on message: %s"
                    % ingressMsg)
                return

            # Get the incoming message type
            if message.get("actuator_request_type") is not None:
                msgType = message.get("actuator_request_type")

                # Validate against the actuator schema
                validate(ingressMsg, self._actuator_schema)

            elif message.get("sensor_request_type") is not None:
                msgType = message.get("sensor_request_type")

                # Validate against the sensor schema
                validate(ingressMsg, self._sensor_schema)

            else:
                # We only handle incoming actuator and sensor requests, ignore
                # everything else.
                return

            # Check for debugging being activated in the message header
            self._check_debug(message)
            self._log_debug("_process_msg, ingressMsg: %s" % ingressMsg)

            # Hand off to appropriate actuator message handler
            if msgType.get("logging") is not None:
                self._write_internal_msgQ("LoggingMsgHandler", message)

            elif msgType.get("thread_controller") is not None:
                self._write_internal_msgQ("ThreadController", message)

            elif msgType.get("service_controller") is not None:
                self._write_internal_msgQ("ServiceMsgHandler", message)

            elif msgType.get("node_controller") is not None:
                self._write_internal_msgQ("NodeControllerMsgHandler", message)

            elif msgType.get("storage_enclosure") is not None:
                self._write_internal_msgQ("RealStorActuatorMsgHandler",
                                          message)

            # Hand off to appropriate sensor message handler
            elif msgType.get("node_data") is not None:
                self._write_internal_msgQ("NodeDataMsgHandler", message)

            elif msgType.get("enclosure_alert") is not None:
                self._write_internal_msgQ("RealStorEnclMsgHandler", message)

            elif msgType.get("storage_enclosure") is not None:
                self._write_internal_msgQ("RealStorActuatorMsgHandler",
                                          message)
            # ... handle other incoming messages that have been validated
            else:
                # Send ack about not finding a msg handler
                ack_msg = AckResponseMsg("Error Processing Message",
                                         "Message Handler Not Found",
                                         uuid).getJson()
                self._write_internal_msgQ(RabbitMQegressProcessor.name(),
                                          ack_msg)

            # Acknowledge message was received
            self._connection.ack(ch, delivery_tag=method.delivery_tag)

        except Exception as ex:
            logger.error(
                "RabbitMQingressProcessor, _process_msg unrecognized message: %r"
                % ingressMsg)
            ack_msg = AckResponseMsg("Error Processing Msg",
                                     "Msg Handler Not Found", uuid).getJson()
            self._write_internal_msgQ(RabbitMQegressProcessor.name(), ack_msg)
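# Note: the (ch, method, properties, body) signature above is the standard
# pika consumer callback. A hedged sketch of how such a callback is typically
# registered with a BlockingConnection (the queue name and connection
# parameters here are illustrative, not SSPL's actual configuration):
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
channel = connection.channel()
channel.queue_declare(queue="sspl-in", durable=True)

def on_message(ch, method, properties, body):
    # Hand the raw body to a processor, then acknowledge the delivery
    print("received:", body)
    ch.basic_ack(delivery_tag=method.delivery_tag)

channel.basic_consume(queue="sspl-in", on_message_callback=on_message)
channel.start_consuming()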