def consume(self):
        """Consume the objects from object recovery queue."""
        self.server = None
        try:
            # Conditionally import ObjectRecoveryMsgbus or ObjectRecoveryRabbitMq, depending on the configured messaging platform.
            if self.config.get_messaging_platform() == MESSAGE_BUS:
                from s3backgrounddelete.object_recovery_msgbus import ObjectRecoveryMsgbus

                self.server = ObjectRecoveryMsgbus(self.config, self.logger)
            elif self.config.get_messaging_platform() == RABBIT_MQ:
                from s3backgrounddelete.object_recovery_queue import ObjectRecoveryRabbitMq

                self.server = ObjectRecoveryRabbitMq(
                    self.config, self.config.get_rabbitmq_username(),
                    self.config.get_rabbitmq_password(),
                    self.config.get_rabbitmq_host(),
                    self.config.get_rabbitmq_exchange(),
                    self.config.get_rabbitmq_queue_name(),
                    self.config.get_rabbitmq_mode(),
                    self.config.get_rabbitmq_durable(), self.logger)
            else:
                self.logger.error(
                    "Invalid argument specified in messaging_platform; use 'message_bus' or 'rabbit_mq'."
                )
                return

            self.logger.info("Consumer started at " +
                             str(datetime.datetime.now()))
            self.server.receive_data()
        except BaseException:
            if self.server:
                self.server.close()
            self.logger.error("main except:" + str(traceback.format_exc()))
Example #2
    def consume(self):
        """Consume the objects from object recovery queue."""
        self.server = None
        try:
            # Conditionally import ObjectRecoveryMsgbus when the configured messaging platform is the message bus.
            if self.config.get_messaging_platform() == MESSAGE_BUS:
                from s3backgrounddelete.object_recovery_msgbus import ObjectRecoveryMsgbus

                self.server = ObjectRecoveryMsgbus(self.config, self.logger)
            else:
                self.logger.error("Invalid argument : " +
                                  self.config.get_messaging_platform() +
                                  "specified in messaging_platform.")
                return

            self.logger.info("Consumer started at " +
                             str(datetime.datetime.now()))
            self.server.receive_data()
        except BaseException:
            if self.server:
                self.server.close()
            self.logger.error("main except:" + str(traceback.format_exc()))
Example #3
    def add_kv_to_msgbus(self, marker=None):
        """Add object key value to msgbus topic."""
        self.logger.info("Inside add_kv_to_msgbus.")
        try:
            from s3backgrounddelete.object_recovery_msgbus import ObjectRecoveryMsgbus

            if not self.producer:
                self.producer = ObjectRecoveryMsgbus(self.config, self.logger)
            threshold = self.config.get_threshold()
            self.logger.debug("Threshold is : " + str(threshold))
            count = self.producer.get_count()
            self.logger.debug("Count of unread msgs is : " + str(count))

            if ((int(count) < threshold) or (threshold == 0)):
                self.logger.debug(
                    "Count of unread messages is less than the threshold value. Hence continuing..."
                )
            else:
                # Backlog is at or above the threshold; do nothing this cycle.
                self.logger.info(
                    "Queue has more messages than threshold value. Hence skipping addition of further entries."
                )
                return
            # Clean up all entries and enqueue only 1000 entries
            # Purge API here
            self.producer.purge()
            result, index_response = CORTXS3IndexApi(
                self.config,
                connectionType=CONNECTION_TYPE_PRODUCER,
                logger=self.logger).list(
                    self.config.get_probable_delete_index_id(),
                    self.config.get_max_keys(), marker)
            if result:
                self.logger.info("Index listing result :" +
                                 str(index_response.get_index_content()))
                probable_delete_json = index_response.get_index_content()
                probable_delete_oid_list = probable_delete_json["Keys"]
                is_truncated = probable_delete_json["IsTruncated"]
                if (probable_delete_oid_list is not None):
                    for record in probable_delete_oid_list:
                        # Check if record is older than the pre-configured 'time to process' delay
                        leak_processing_delay = self.config.get_leak_processing_delay_in_mins()
                        try:
                            objLeakVal = json.loads(record["Value"])
                        except ValueError as error:
                            self.logger.error(
                                "Failed to parse JSON data for: " +
                                str(record) + " due to: " + str(error))
                            continue

                        if (objLeakVal is None):
                            self.logger.error("No value associated with " +
                                              str(record) + ". Skipping entry")
                            continue

                        # Check if object leak entry is older than 15mins or a preconfigured duration
                        if not ObjectRecoveryScheduler.isObjectLeakEntryOlderThan(
                                objLeakVal, leak_processing_delay):
                            self.logger.info("Object leak entry " +
                                             record["Key"] +
                                             " is NOT older than " +
                                             str(leak_processing_delay) +
                                             " mins. Skipping entry")
                            continue

                        self.logger.info(
                            "Object recovery queue sending data :" +
                            str(record))
                        ret = self.producer.send_data(
                            record, producer_id=self.producer_name)
                        if not ret:
                            # TODO - Do Audit logging
                            self.logger.error(
                                "Object recovery queue send data " +
                                str(record) + " failed.")
                        else:
                            self.logger.info(
                                "Object recovery queue send data successfully :"
                                + str(record))
                else:
                    self.logger.info(
                        "Index listing result empty. Ignoring adding entry to object recovery queue"
                    )
            else:
                self.logger.error("Failed to retrive Index listing:")
        except Exception as exception:
            self.logger.error(
                "add_kv_to_msgbus send data exception: {}".format(exception))
            self.logger.debug("traceback : {}".format(traceback.format_exc()))
Example #4
class ObjectRecoveryScheduler(object):
    """Scheduler which will add key value to message_bus queue."""
    def __init__(self, producer_name):
        """Initialise logger and configuration."""
        self.data = None
        self.config = CORTXS3Config()
        self.create_logger_directory()
        self.create_logger()
        self.signal = DynamicConfigHandler(self)
        self.logger.info("Initialising the Object Recovery Scheduler")
        self.producer = None
        self.producer_name = producer_name

    @staticmethod
    def isObjectLeakEntryOlderThan(leakRecord, OlderInMins=15):
        """Return True if the record's create_timestamp is at least OlderInMins minutes old (UTC)."""
        object_leak_time = leakRecord["create_timestamp"]
        now = datetime.datetime.utcnow()
        date_time_obj = datetime.datetime.strptime(object_leak_time,
                                                   "%Y-%m-%dT%H:%M:%S.000Z")
        timeDelta = now - date_time_obj
        timeDeltaInMns = math.floor(timeDelta.total_seconds() / 60)
        return (timeDeltaInMns >= OlderInMins)

    def add_kv_to_msgbus(self, marker=None):
        """Add object key value to msgbus topic."""
        self.logger.info("Inside add_kv_to_msgbus.")
        try:
            from s3backgrounddelete.object_recovery_msgbus import ObjectRecoveryMsgbus

            if not self.producer:
                self.producer = ObjectRecoveryMsgbus(self.config, self.logger)
            threshold = self.config.get_threshold()
            self.logger.debug("Threshold is : " + str(threshold))
            count = self.producer.get_count()
            self.logger.debug("Count of unread msgs is : " + str(count))

            if ((int(count) < threshold) or (threshold == 0)):
                self.logger.debug(
                    "Count of unread messages is less than the threshold value. Hence continuing..."
                )
            else:
                # Backlog is at or above the threshold; do nothing this cycle.
                self.logger.info(
                    "Queue has more messages than threshold value. Hence skipping addition of further entries."
                )
                return
            # Clean up all entries and enqueue only 1000 entries
            # Purge API here
            self.producer.purge()
            result, index_response = CORTXS3IndexApi(
                self.config,
                connectionType=CONNECTION_TYPE_PRODUCER,
                logger=self.logger).list(
                    self.config.get_probable_delete_index_id(),
                    self.config.get_max_keys(), marker)
            if result:
                self.logger.info("Index listing result :" +
                                 str(index_response.get_index_content()))
                probable_delete_json = index_response.get_index_content()
                probable_delete_oid_list = probable_delete_json["Keys"]
                is_truncated = probable_delete_json["IsTruncated"]
                if (probable_delete_oid_list is not None):
                    for record in probable_delete_oid_list:
                        # Check if record is older than the pre-configured 'time to process' delay
                        leak_processing_delay = self.config.get_leak_processing_delay_in_mins()
                        try:
                            objLeakVal = json.loads(record["Value"])
                        except ValueError as error:
                            self.logger.error(
                                "Failed to parse JSON data for: " +
                                str(record) + " due to: " + str(error))
                            continue

                        if (objLeakVal is None):
                            self.logger.error("No value associated with " +
                                              str(record) + ". Skipping entry")
                            continue

                        # Check if object leak entry is older than 15mins or a preconfigured duration
                        if not ObjectRecoveryScheduler.isObjectLeakEntryOlderThan(
                                objLeakVal, leak_processing_delay):
                            self.logger.info("Object leak entry " +
                                             record["Key"] +
                                             " is NOT older than " +
                                             str(leak_processing_delay) +
                                             " mins. Skipping entry")
                            continue

                        self.logger.info(
                            "Object recovery queue sending data :" +
                            str(record))
                        ret = self.producer.send_data(
                            record, producer_id=self.producer_name)
                        if not ret:
                            # TODO - Do Audit logging
                            self.logger.error(
                                "Object recovery queue send data " +
                                str(record) + " failed.")
                        else:
                            self.logger.info(
                                "Object recovery queue send data successfully :"
                                + str(record))
                else:
                    self.logger.info(
                        "Index listing result empty. Ignoring adding entry to object recovery queue"
                    )
            else:
                self.logger.error("Failed to retrive Index listing:")
        except Exception as exception:
            self.logger.error(
                "add_kv_to_msgbus send data exception: {}".format(exception))
            self.logger.debug("traceback : {}".format(traceback.format_exc()))

    def schedule_periodically(self):
        """Schedule producer to add key value to message_bus queue on hourly basis."""
        # Run producer periodically on hourly basis
        self.logger.info("Producer " + str(self.producer_name) +
                         " started at : " + str(datetime.datetime.now()))
        scheduled_run = sched.scheduler(time.time, time.sleep)

        def periodic_run(scheduler):
            """Add key value to queue using scheduler."""
            if self.config.get_messaging_platform() == MESSAGE_BUS:
                self.add_kv_to_msgbus()
            else:
                self.logger.error(
                    "Invalid argument specified in messaging_platform; use 'message_bus'."
                )
                return

            scheduled_run.enter(self.config.get_schedule_interval(), 1,
                                periodic_run, (scheduler, ))

        scheduled_run.enter(self.config.get_schedule_interval(), 1,
                            periodic_run, (scheduled_run, ))
        scheduled_run.run()

    def create_logger(self):
        """Create logger, file handler, console handler and formatter."""
        # create logger with "object_recovery_scheduler"
        self.logger = logging.getLogger(
            self.config.get_scheduler_logger_name())
        self.logger.setLevel(self.config.get_file_log_level())
        # https://docs.python.org/3/library/logging.handlers.html#logging.handlers.RotatingFileHandler
        fhandler = logging.handlers.RotatingFileHandler(
            self.config.get_scheduler_logger_file(),
            mode='a',
            maxBytes=self.config.get_max_bytes(),
            backupCount=self.config.get_backup_count(),
            encoding=None,
            delay=False)
        fhandler.setLevel(self.config.get_file_log_level())
        # create console handler with a higher log level
        chandler = logging.StreamHandler()
        chandler.setLevel(self.config.get_console_log_level())
        # create formatter and add it to the handlers
        formatter = logging.Formatter(self.config.get_log_format())
        fhandler.setFormatter(formatter)
        chandler.setFormatter(formatter)
        # add the handlers to the logger
        self.logger.addHandler(fhandler)
        self.logger.addHandler(chandler)

    def create_logger_directory(self):
        """Create log directory if not exsists."""
        self._logger_directory = os.path.join(
            self.config.get_logger_directory())
        if not os.path.isdir(self._logger_directory):
            try:
                os.mkdir(self._logger_directory)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    pass
                else:
                    raise Exception(" Producer Logger Could not be created")
Example #5
class ObjectRecoveryProcessor(object):
    """Provides consumer for object recovery"""
    def __init__(self):
        """Initialise Server, config and create logger."""
        self.server = None
        self.config = CORTXS3Config()
        self.create_logger_directory()
        self.create_logger()
        self.signal = DynamicConfigHandler(self)
        self.logger.info("Initialising the Object Recovery Processor")

    def consume(self):
        """Consume the objects from object recovery queue."""
        self.server = None
        try:
            # Conditionally import ObjectRecoveryMsgbus when the configured messaging platform is the message bus.
            if self.config.get_messaging_platform() == MESSAGE_BUS:
                from s3backgrounddelete.object_recovery_msgbus import ObjectRecoveryMsgbus

                self.server = ObjectRecoveryMsgbus(self.config, self.logger)
            else:
                self.logger.error("Invalid argument : " +
                                  self.config.get_messaging_platform() +
                                  "specified in messaging_platform.")
                return

            self.logger.info("Consumer started at " +
                             str(datetime.datetime.now()))
            self.server.receive_data()
        except BaseException:
            if self.server:
                self.server.close()
            self.logger.error("main except:" + str(traceback.format_exc()))

    def create_logger(self):
        """Create logger, file handler, formatter."""
        # Create logger with "object_recovery_processor"
        self.logger = logging.getLogger(
            self.config.get_processor_logger_name())
        self.logger.setLevel(self.config.get_file_log_level())
        # create file handler which logs even debug messages
        fhandler = logging.handlers.RotatingFileHandler(
            self.config.get_processor_logger_file(),
            mode='a',
            maxBytes=self.config.get_max_bytes(),
            backupCount=self.config.get_backup_count(),
            encoding=None,
            delay=False)
        fhandler.setLevel(self.config.get_file_log_level())
        # create console handler with a higher log level
        chandler = logging.StreamHandler()
        chandler.setLevel(self.config.get_console_log_level())
        # create formatter and add it to the handlers
        formatter = logging.Formatter(self.config.get_log_format())
        fhandler.setFormatter(formatter)
        chandler.setFormatter(formatter)
        # add the handlers to the logger
        self.logger.addHandler(fhandler)
        self.logger.addHandler(chandler)

    def close(self):
        """Stop processor."""
        self.logger.info("Stopping the processor")
        self.server.close()
        # perform an orderly shutdown by flushing and closing all handlers
        logging.shutdown()

    def create_logger_directory(self):
        """Create log directory if not exsists."""
        self._logger_directory = os.path.join(
            self.config.get_logger_directory())
        if not os.path.isdir(self._logger_directory):
            try:
                os.mkdir(self._logger_directory)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    pass
                else:
                    raise Exception("Consumer Logger Could not be created")
Example #6
class ObjectRecoveryScheduler(object):
    """Scheduler which will add key value to rabbitmq message queue."""

    def __init__(self):
        """Initialise logger and configuration."""
        self.data = None
        self.config = CORTXS3Config()
        self.create_logger_directory()
        self.create_logger()
        self.signal = DynamicConfigHandler(self)
        self.logger.info("Initialising the Object Recovery Scheduler")
        self.producer = None

    @staticmethod
    def isObjectLeakEntryOlderThan(leakRecord, OlderInMins=15):
        """Return True if the record's create_timestamp is at least OlderInMins minutes old (UTC)."""
        object_leak_time = leakRecord["create_timestamp"]
        now = datetime.datetime.utcnow()
        date_time_obj = datetime.datetime.strptime(object_leak_time, "%Y-%m-%dT%H:%M:%S.000Z")
        timeDelta = now - date_time_obj
        timeDeltaInMns = math.floor(timeDelta.total_seconds()/60)
        return (timeDeltaInMns >= OlderInMins)

    def add_kv_to_msgbus(self, marker=None):
        """Add object key value to msgbus topic."""
        self.logger.info("Inside add_kv_to_msgbus.")
        try:
            from s3backgrounddelete.object_recovery_msgbus import ObjectRecoveryMsgbus

            if not self.producer:
                self.producer = ObjectRecoveryMsgbus(
                    self.config,
                    self.logger)
            # Clean up all entries and enqueue only 1000 entries
            # Purge API here
            self.producer.purge()
            result, index_response = CORTXS3IndexApi(
                self.config, logger=self.logger).list(
                    self.config.get_probable_delete_index_id(), self.config.get_max_keys(), marker)
            if result:
                self.logger.info("Index listing result :" +
                                 str(index_response.get_index_content()))
                probable_delete_json = index_response.get_index_content()
                probable_delete_oid_list = probable_delete_json["Keys"]
                is_truncated = probable_delete_json["IsTruncated"]
                if (probable_delete_oid_list is not None):
                    for record in probable_delete_oid_list:
                        # Check if record is older than the pre-configured 'time to process' delay
                        leak_processing_delay = self.config.get_leak_processing_delay_in_mins()
                        try:
                            objLeakVal = json.loads(record["Value"])
                        except ValueError as error:
                            self.logger.error(
                                "Failed to parse JSON data for: " + str(record) + " due to: " + str(error))
                            continue

                        if (objLeakVal is None):
                            self.logger.error("No value associated with " + str(record) + ". Skipping entry")
                            continue

                        # Check if object leak entry is older than 15mins or a preconfigured duration
                        if (not ObjectRecoveryScheduler.isObjectLeakEntryOlderThan(objLeakVal, leak_processing_delay)):
                            self.logger.info("Object leak entry " + record["Key"] +
                                            " is NOT older than " + str(leak_processing_delay) +
                                            "mins. Skipping entry")
                            continue

                        self.logger.info(
                            "Object recovery queue sending data :" +
                            str(record))
                        ret = self.producer.send_data(record)
                        if not ret:
                            # TODO - Do Audit logging
                            self.logger.error(
                                "Object recovery queue send data " + str(record) +
                                " failed.")
                        else:
                            self.logger.info(
                                "Object recovery queue send data successfully :" +
                                str(record))
                else:
                    self.logger.info(
                        "Index listing result empty. Ignoring adding entry to object recovery queue")
            else:
                self.logger.error("Failed to retrive Index listing:")
        except Exception as exception:
            self.logger.error(
                "add_kv_to_msgbus send data exception: {}".format(exception))
            self.logger.debug(
                "traceback : {}".format(traceback.format_exc()))

    def add_kv_to_queue(self, marker=None):
        """Add object key value to object recovery queue."""
        self.logger.info("Adding kv list to queue")
        mq_client = None
        try:
            from s3backgrounddelete.object_recovery_queue import ObjectRecoveryRabbitMq

            mq_client = ObjectRecoveryRabbitMq(
                self.config,
                self.config.get_rabbitmq_username(),
                self.config.get_rabbitmq_password(),
                self.config.get_rabbitmq_host(),
                self.config.get_rabbitmq_exchange(),
                self.config.get_rabbitmq_queue_name(),
                self.config.get_rabbitmq_mode(),
                self.config.get_rabbitmq_durable(),
                self.logger)
            # Clean up all entries and enqueue only 1000 entries
            mq_client.purge_queue(self.config.get_rabbitmq_queue_name())

            result, index_response = CORTXS3IndexApi(
                self.config, logger=self.logger).list(
                    self.config.get_probable_delete_index_id(), self.config.get_max_keys(), marker)
            if result:
                self.logger.info("Index listing result :" +
                                 str(index_response.get_index_content()))
                probable_delete_json = index_response.get_index_content()
                probable_delete_oid_list = probable_delete_json["Keys"]
                is_truncated = probable_delete_json["IsTruncated"]
                if (probable_delete_oid_list is not None):
                    for record in probable_delete_oid_list:
                        # Check if record is older than the pre-configured 'time to process' delay
                        leak_processing_delay = self.config.get_leak_processing_delay_in_mins()
                        try:
                            objLeakVal = json.loads(record["Value"])
                        except ValueError as error:
                            self.logger.error(
                                "Failed to parse JSON data for: " + str(record) + " due to: " + str(error))
                            continue

                        if (objLeakVal is None):
                            self.logger.error("No value associated with " + str(record) + ". Skipping entry")
                            continue

                        # Check if object leak entry is older than 15mins or a preconfigured duration
                        if (not ObjectRecoveryScheduler.isObjectLeakEntryOlderThan(objLeakVal, leak_processing_delay)):
                            self.logger.info("Object leak entry " + record["Key"] +
                                              " is NOT older than " + str(leak_processing_delay) +
                                              "mins. Skipping entry")
                            continue

                        self.logger.info(
                            "Object recovery queue sending data :" +
                            str(record))
                        ret, msg = mq_client.send_data(
                            record, self.config.get_rabbitmq_queue_name())
                        if not ret:
                            IEMutil("ERROR", IEMutil.RABBIT_MQ_CONN_FAILURE, IEMutil.RABBIT_MQ_CONN_FAILURE_STR)
                            self.logger.error(
                                "Object recovery queue send data " + str(record) +
                                " failed: " + msg)
                        else:
                            self.logger.info(
                                "Object recovery queue send data successfully :" +
                                str(record))
                else:
                    self.logger.info(
                        "Index listing result empty. Ignoring adding entry to object recovery queue")
            else:
                self.logger.error("Failed to retrive Index listing:")
        except BaseException:
            self.logger.error(
                "Object recovery queue send data exception:" + traceback.format_exc())
        finally:
            if mq_client:
                self.logger.info("Closing the mqclient")
                mq_client.close()

    def schedule_periodically(self):
        """Schedule RabbitMQ producer to add key value to queue on hourly basis."""
        # Run RabbitMQ producer periodically on hourly basis
        self.logger.info("Producer started at " + str(datetime.datetime.now()))
        scheduled_run = sched.scheduler(time.time, time.sleep)

        def periodic_run(scheduler):
            """Add key value to queue using scheduler."""
            # Dispatch to the message_bus or RabbitMQ producer, depending on the configured messaging platform.
            if self.config.get_messaging_platform() == MESSAGE_BUS:
                self.add_kv_to_msgbus()
            elif self.config.get_messaging_platform() == RABBIT_MQ:
                self.add_kv_to_queue()
            else:
                self.logger.error(
                    "Invalid argument specified in messaging_platform; use 'message_bus' or 'rabbit_mq'.")
                return

            scheduled_run.enter(
                self.config.get_schedule_interval(), 1, periodic_run, (scheduler,))

        scheduled_run.enter(self.config.get_schedule_interval(),
                            1, periodic_run, (scheduled_run,))
        scheduled_run.run()

    def create_logger(self):
        """Create logger, file handler, console handler and formatter."""
        # create logger with "object_recovery_scheduler"
        self.logger = logging.getLogger(
            self.config.get_scheduler_logger_name())
        self.logger.setLevel(self.config.get_file_log_level())
        # https://docs.python.org/3/library/logging.handlers.html#logging.handlers.RotatingFileHandler
        fhandler = logging.handlers.RotatingFileHandler(
            self.config.get_scheduler_logger_file(),
            mode='a',
            maxBytes=self.config.get_max_bytes(),
            backupCount=self.config.get_backup_count(),
            encoding=None,
            delay=False)
        fhandler.setLevel(self.config.get_file_log_level())
        # create console handler with a higher log level
        chandler = logging.StreamHandler()
        chandler.setLevel(self.config.get_console_log_level())
        # create formatter and add it to the handlers
        formatter = logging.Formatter(self.config.get_log_format())
        fhandler.setFormatter(formatter)
        chandler.setFormatter(formatter)
        # add the handlers to the logger
        self.logger.addHandler(fhandler)
        self.logger.addHandler(chandler)

    def create_logger_directory(self):
        """Create log directory if not exsists."""
        self._logger_directory = os.path.join(self.config.get_logger_directory())
        if not os.path.isdir(self._logger_directory):
            try:
                os.mkdir(self._logger_directory)
            except BaseException:
                self.logger.error(
                    "Unable to create log directory at " + self._logger_directory)