Example #1
def check_storage_consistency(context, storage_id, storage_new):
    """Check storage response returned by driver whether it matches the
    storage stored in database.

    :param context: The context of delfin.
    :type context: delfin.context.RequestContext
    :param storage_id: The uuid of storage in database.
    :type storage_id: string
    :param storage_new: The storage response returned by driver.
    :type storage_new: dict
    """
    if not storage_new:
        raise exception.StorageBackendNotFound()

    if not storage_new.get('serial_number'):
        msg = _("Serial number should be provided by storage.")
        raise exception.InvalidResults(msg)

    storage_present = db.storage_get(context, storage_id)
    if storage_new['serial_number'] != storage_present['serial_number']:
        msg = (
            _("Serial number %s does not match "
              "the existing storage serial number %s.") %
            (storage_new['serial_number'], storage_present['serial_number']))
        raise exception.StorageSerialNumberMismatch(msg)
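
A minimal usage sketch for the helper above; refresh_storage and driver are hypothetical names introduced here for illustration, and the context/exception imports are assumed to be delfin's modules:

from delfin import context, exception

def refresh_storage(driver, storage_id):
    # Hypothetical caller: validate the driver response against the DB
    # record before continuing with a sync.
    ctx = context.get_admin_context()
    storage_new = driver.get_storage(ctx)
    try:
        check_storage_consistency(ctx, storage_id, storage_new)
    except exception.StorageSerialNumberMismatch:
        # The backend behind storage_id has changed; abort this sync.
        raise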
Example #2
    def get_storage(self, context):
        # Do something here

        sn = six.text_type(uuidutils.generate_uuid())
        try:
            # use the existing sn if the storage is already registered
            storage = db.storage_get(context, self.storage_id)
            if storage:
                sn = storage['serial_number']
        except exception.StorageNotFound:
            LOG.debug('Registering new storage')
        except Exception:
            LOG.error('Error while retrieving storage from DB')
        total, used, free = self._get_random_capacity()
        raw = random.randint(2000, 3000)
        subscribed = random.randint(3000, 4000)
        return {
            'name': 'fake_driver',
            'description': 'fake driver.',
            'vendor': 'fake_vendor',
            'model': 'fake_model',
            'status': 'normal',
            'serial_number': sn,
            'firmware_version': '1.0.0',
            'location': 'HK',
            'total_capacity': total,
            'used_capacity': used,
            'free_capacity': free,
            'raw_capacity': raw,
            'subscribed_capacity': subscribed
        }
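
The _get_random_capacity helper is referenced above but not shown; a plausible sketch, assuming the three values only need to be self-consistent random numbers:

import random

def _get_random_capacity(self):
    # Hypothetical helper: pick a random total, then split it into
    # used and free so that used + free == total.
    total = random.randint(1000, 2000)
    used = random.randint(0, total)
    free = total - used
    return total, used, free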
Example #3
    def _set_synced_after(func, *args, **kwargs):
        call_args = inspect.getcallargs(func, *args, **kwargs)
        self = call_args['self']
        sync_result = constants.ResourceSync.SUCCEED
        ret = None
        try:
            ret = func(*args, **kwargs)
        except Exception:
            sync_result = constants.ResourceSync.FAILED
        lock = coordination.Lock(self.storage_id)
        with lock:
            try:
                storage = db.storage_get(self.context, self.storage_id)
            except exception.StorageNotFound:
                LOG.warning('Storage %s not found when setting synced' %
                            self.storage_id)
            else:
                # One sync task is done, so decrement sync_status by its
                # result; when sync_status reaches 0, all the sync tasks
                # have completed
                if storage['sync_status'] != constants.SyncStatus.SYNCED:
                    storage['sync_status'] -= sync_result
                    db.storage_update(self.context, self.storage_id,
                                      {'sync_status': storage['sync_status']})

        return ret
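
The (func, *args, **kwargs) signature above matches the convention of the decorator library; a minimal sketch of how the enclosing decorator could be assembled, assuming that library is in use:

from decorator import decorator

def set_synced_after():
    @decorator
    def _set_synced_after(func, *args, **kwargs):
        # The body would be the logic shown in the example above.
        return func(*args, **kwargs)
    return _set_synced_after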
Example #4
    def _check_deleted(func, *args, **kwargs):
        call_args = inspect.getcallargs(func, *args, **kwargs)
        self = call_args['self']
        ret = func(*args, **kwargs)
        # When context.read_deleted is 'yes', db.storage_get returns only
        # storages whose 'deleted' flag is set to a non-default value
        self.context.read_deleted = 'yes'
        try:
            db.storage_get(self.context, self.storage_id)
        except exception.StorageNotFound:
            LOG.debug('Storage %s not found when checking deleted' %
                      self.storage_id)
        else:
            self.remove()
        self.context.read_deleted = 'no'
        return ret
Example #5
    def collect(self, ctx, storage_id, args, start_time, end_time):
        try:
            LOG.debug("Performance collection for storage [%s] with start time"
                      " [%s] and end time [%s]" %
                      (storage_id, start_time, end_time))
            perf_metrics = self.driver_api \
                .collect_perf_metrics(ctx, storage_id,
                                      args,
                                      start_time, end_time)

            # Fill extra labels into the metrics with metadata from the resource DB
            try:
                storage_details = db.storage_get(ctx, storage_id)
                for m in perf_metrics:
                    m.labels["name"] = storage_details.name
                    m.labels["serial_number"] = storage_details.serial_number
            except Exception as e:
                msg = _('Failed to add extra labels to performance '
                        'metrics: {0}'.format(e))
                LOG.error(msg)
                return TelemetryTaskStatus.TASK_EXEC_STATUS_FAILURE

            self.perf_exporter.dispatch(ctx, perf_metrics)
            return TelemetryTaskStatus.TASK_EXEC_STATUS_SUCCESS
        except Exception as e:
            LOG.error("Failed to collect performance metrics for "
                      "storage id :{0}, reason:{1}".format(
                          storage_id, six.text_type(e)))
            return TelemetryTaskStatus.TASK_EXEC_STATUS_FAILURE
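
A hedged sketch of a caller checking the returned status; task, ctx, storage_id, and the time values are placeholders assumed to be set up elsewhere:

status = task.collect(ctx, storage_id, {}, start_time, end_time)
if status == TelemetryTaskStatus.TASK_EXEC_STATUS_FAILURE:
    LOG.warning("Performance collection failed for storage %s", storage_id)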
Example #6
    def show(self, req, id):
        ctx = req.environ['delfin.context']

        query_para = {}
        query_para.update(req.GET)

        try:
            begin_time = None
            end_time = None

            if query_para.get('begin_time'):
                begin_time = int(query_para.get('begin_time'))

            if query_para.get('end_time'):
                end_time = int(query_para.get('end_time'))
        except Exception:
            msg = "begin_time and end_time should be integer values in " \
                  "milliseconds."
            raise exception.InvalidInput(msg)

        # When both begin_time and end_time are provided, end_time should
        # be greater than begin_time
        if begin_time and end_time and end_time <= begin_time:
            msg = "end_time should be greater than begin_time."
            raise exception.InvalidInput(msg)

        storage = db.storage_get(ctx, id)
        alert_list = self.driver_manager.list_alerts(ctx, id, query_para)

        # Update storage attributes in each alert model
        for alert in alert_list:
            alert_util.fill_storage_attributes(alert, storage)

        return alerts_view.build_alerts(alert_list)
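
Assuming this handler is mounted at the usual alerts route, a client could pass the optional time window as query parameters; the host, port, and path below are assumptions, not confirmed routes:

import requests

# Hypothetical client call; storage_id is a placeholder defined elsewhere,
# and host/port/route should be adjusted to the actual deployment.
resp = requests.get(
    "http://localhost:8190/v1/storages/%s/alerts" % storage_id,
    params={"begin_time": 1600000000000, "end_time": 1600000900000})
alerts = resp.json()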
Example #7
    def sync_alerts(self, ctx, storage_id, query_para):
        """ Syncs all alerts from storage side to exporter """

        LOG.info('Syncing alerts for storage id:{0}'.format(storage_id))
        try:
            storage = db.storage_get(ctx, storage_id)

            current_alert_list = self.driver_manager.list_alerts(
                ctx, storage_id, query_para)
            if not current_alert_list:
                # No alerts to sync
                LOG.info('No alerts to sync from storage device for '
                         'storage id:{0}'.format(storage_id))
                return

            for alert in current_alert_list:
                alert_util.fill_storage_attributes(alert, storage)
            self.alert_export_manager.dispatch(ctx, current_alert_list)
            LOG.info(
                'Syncing storage alerts successful for storage id:{0}'.format(
                    storage_id))
        except Exception as e:
            msg = _('Failed to sync alerts from storage device: {0}'.format(
                six.text_type(e)))
            LOG.error(msg)
Example #8
    def process_alert_info(self, alert):
        """Fills alert model using driver manager interface."""
        ctxt = context.get_admin_context()
        storage = db.storage_get(ctxt, alert['storage_id'])
        alert_model = {}

        try:
            alert_model = self.driver_manager.parse_alert(
                ctxt, alert['storage_id'], alert)
            # Fill storage specific info
            if alert_model:
                storage = self.get_storage_from_parsed_alert(
                    ctxt, storage, alert_model)
                alert_util.fill_storage_attributes(alert_model, storage)
        except exception.IncompleteTrapInformation as e:
            LOG.warning(e)
            threading.Thread(target=self.sync_storage_alert,
                             args=(ctxt, alert['storage_id'])).start()
        except exception.AlertSourceNotFound:
            LOG.info("Could not identify alert source from parsed alert. "
                     "Skipping the dispatch of alert")
            return
        except Exception as e:
            LOG.error(e)
            raise exception.InvalidResults(
                "Failed to fill the alert model from driver.")

        # Export to base exporter which handles dispatch for all exporters
        if alert_model:
            LOG.info("Dispatching one SNMP Trap to {} with sn {}".format(
                alert_model['storage_id'], alert_model['serial_number']))
            self.exporter_manager.dispatch(ctxt, [alert_model])
Example #9
    def _storage_exist(self, context, access_info):
        access_info_dict = copy.deepcopy(access_info)

        # Remove unrelated query fields
        unrelated_fields = ['username', 'password']
        for access in constants.ACCESS_TYPE:
            if access_info_dict.get(access):
                for key in unrelated_fields:
                    access_info_dict[access].pop(key)

        # Check if storage is registered
        access_info_list = db.access_info_get_all(context,
                                                  filters=access_info_dict)
        for _access_info in access_info_list:
            try:
                storage = db.storage_get(context, _access_info['storage_id'])
                if storage:
                    LOG.error("Storage %s has same access "
                              "information." % storage['id'])
                    return True
            except exception.StorageNotFound:
                # The storage may not have been saved successfully after its
                # access information was stored during registration, so remove
                # the access info to keep the database free of residual data.
                LOG.debug("Remove residual access information.")
                db.access_info_delete(context, _access_info['storage_id'])

        return False
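
For reference, the loop above expects access_info to be keyed by access type with the credentials nested inside; a hedged sketch of that shape (the ACCESS_TYPE values are assumptions):

# Hypothetical data shape; delfin's real ACCESS_TYPE may differ.
ACCESS_TYPE = ['rest', 'ssh', 'cli', 'smis']
access_info = {
    'storage_id': 'storage-uuid',
    'rest': {'host': '10.0.0.1', 'port': 443,
             'username': 'admin', 'password': 'secret'},
}
# Popping 'username' and 'password' per access type leaves only the
# connection fields, which is what the duplicate-registration query uses.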
Example #10
    def collect(self):
        """
        :return:
        """
        LOG.info(
            'Collecting array performance metrics for storage id:{0}'.format(
                self.storage_id))
        try:
            # collect the performance metrics from driver and push to
            # prometheus exporter api
            array_metrics = self.driver_api.collect_array_metrics(
                self.context, self.storage_id, self.interval, self.is_historic)
            # fill extra labels into the metrics with metadata from the resource DB
            try:
                array_details = db.storage_get(self.context,
                                               storage_id=self.storage_id)
                for m in array_metrics:
                    m.labels["name"] = array_details.name
                    m.labels["serial_number"] = array_details.serial_number

            except Exception as e:
                msg = _('Failed to add extra labels to array performance '
                        'metrics: {0}'.format(e))
                LOG.error(msg)

            self.perf_exporter.dispatch(self.context, array_metrics)

        except Exception as e:
            msg = _('Failed to collect array performance metrics from '
                    'driver: {0}'.format(e))
            LOG.error(msg)
        else:
            LOG.info("Array performance metrics collection done!!!")
Example #11
    def delete(self, req, id):
        ctxt = req.environ['delfin.context']
        storage = db.storage_get(ctxt, id)

        for subclass in task.StorageResourceTask.__subclasses__():
            self.task_rpcapi.remove_storage_resource(
                ctxt, storage['id'],
                subclass.__module__ + '.' + subclass.__name__)
        self.task_rpcapi.remove_storage_in_cache(ctxt, storage['id'])
Example #12
    def put(self, req, id, body):
        """Create a new alert source or update an exist one."""
        ctx = req.environ['delfin.context']
        alert_source = body

        alert_source["storage_id"] = id
        db.storage_get(ctx, id)
        alert_source = self._input_check(alert_source)

        snmp_config_to_del = self._get_snmp_config_brief(ctx, id)
        if snmp_config_to_del is not None:
            alert_source = db.alert_source_update(ctx, id, alert_source)
        else:
            alert_source = db.alert_source_create(ctx, alert_source)
        snmp_config_to_add = alert_source
        self.alert_rpcapi.sync_snmp_config(ctx, snmp_config_to_del,
                                           snmp_config_to_add)

        return alert_view.build_alert_source(alert_source.to_dict())
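
A hedged example of a request body this handler might accept; the field names follow common SNMP trap-source settings and are not confirmed against delfin's schema:

# Hypothetical PUT body for registering an SNMPv2c trap source.
body = {
    "host": "192.168.0.10",
    "version": "snmpv2c",
    "community_string": "public",
    "port": 162,
}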
Example #13
    def sync(self, req, id):
        """Trigger a resource sync for the given storage.

        :param req: The API request.
        :param id: The uuid of the storage.
        """
        ctxt = req.environ['delfin.context']
        storage = db.storage_get(ctxt, id)
        resource_count = len(resources.StorageResourceTask.__subclasses__())
        _set_synced_if_ok(ctxt, storage['id'], resource_count)
        for subclass in resources.StorageResourceTask.__subclasses__():
            self.task_rpcapi.sync_storage_resource(
                ctxt, storage['id'],
                subclass.__module__ + '.' + subclass.__name__)
Example #14
    def _get_alert_source_by_host(source_ip):
        """Gets alert source for given source ip address."""
        filters = {'host~': source_ip}
        ctxt = context.RequestContext()

        # Using the known filter and db exceptions are handled by api
        alert_sources = db.alert_source_get_all(ctxt, filters=filters)
        if not alert_sources:
            raise exception.AlertSourceNotFoundWithHost(source_ip)

        # Make sure a unique host is configured for each alert source
        unique_alert_source = None
        if len(alert_sources) > 1:
            # Clear invalid alert_source
            for alert_source in alert_sources:
                try:
                    db.storage_get(ctxt, alert_source['storage_id'])
                except exception.StorageNotFound:
                    LOG.warning('Found redundant alert source for storage %s'
                                % alert_source['storage_id'])
                    try:
                        db.alert_source_delete(
                            ctxt, alert_source['storage_id'])
                    except Exception as e:
                        LOG.warning('Deleting the invalid alert source '
                                    'failed, reason is %s' % six.text_type(e))
                else:
                    unique_alert_source = alert_source
        else:
            unique_alert_source = alert_sources[0]

        if unique_alert_source is None:
            msg = (_("Failed to get unique alert source with host %s.")
                   % source_ip)
            raise exception.InvalidResults(msg)

        return unique_alert_source
Example #15
    def _handle_validation_result(self, ctxt, storage_id,
                                  category=constants.Category.FAULT):
        try:
            storage = db.storage_get(ctxt, storage_id)
            serial_number = storage.get('serial_number')
            if category == constants.Category.FAULT:
                self.snmp_error_flag[serial_number] = True
                self._dispatch_snmp_validation_alert(ctxt, storage, category)
            elif self.snmp_error_flag.get(serial_number, True):
                self.snmp_error_flag[serial_number] = False
                self._dispatch_snmp_validation_alert(ctxt, storage, category)
        except Exception as e:
            msg = six.text_type(e)
            LOG.error("Exception occurred when handling validation "
                      "error: %s." % msg)
Example #16
    def get_capabilities(self, req, id):
        """
        The API fetches capabilities from driver
          associated with the storage device.
        """
        # Check and fetch storage with storage_id
        ctx = req.environ['delfin.context']
        storage_info = db.storage_get(ctx, id)

        # Fetch supported driver's capability
        capabilities = self.driver_api. \
            get_capabilities(ctx, storage_info['id'])

        # validate capabilities
        validation.validate_capabilities(capabilities)

        return storage_view.build_capabilities(storage_info, capabilities)
Example #17
    def process_alert_info(self, alert):
        """Fills alert model using driver manager interface."""
        ctxt = context.get_admin_context()
        storage = db.storage_get(ctxt, alert['storage_id'])

        try:
            alert_model = self.driver_manager.parse_alert(
                ctxt, alert['storage_id'], alert)
            # Fill storage specific info
            alert_util.fill_storage_attributes(alert_model, storage)
        except Exception as e:
            LOG.error(e)
            raise exception.InvalidResults(
                "Failed to fill the alert model from driver.")

        # Export to base exporter which handles dispatch for all exporters
        self.exporter_manager.dispatch(ctxt, alert_model)
Example #18
def _set_synced_if_ok(context, storage_id, resource_count):
    try:
        storage = db.storage_get(context, storage_id)
    except exception.StorageNotFound:
        msg = 'Storage %s not found when trying to set sync_status' \
              % storage_id
        raise exception.InvalidInput(message=msg)
    else:
        last_update = storage['updated_at'] or storage['created_at']
        current_time = timeutils.utcnow()
        interval = (current_time - last_update).seconds
        # If the last synchronization happened within
        # CONF.sync_task_expiration seconds and the sync status is
        # greater than 0, some sync tasks are still running and a new
        # sync task must not be launched
        if interval < CONF.sync_task_expiration and \
                storage['sync_status'] > 0:
            raise exception.StorageIsSyncing(storage['id'])
        storage['sync_status'] = resource_count * constants.ResourceSync.START
        db.storage_update(context, storage['id'], storage)
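
To make the counter arithmetic concrete, a worked sketch; the constant values here are illustrative assumptions, not delfin's actual definitions:

# Hypothetical constants for illustration only.
START, SUCCEED, FAILED = 100, 100, 99
resource_count = 5
sync_status = resource_count * START   # 500 when the sync launches
for result in (SUCCEED, SUCCEED, FAILED, SUCCEED, SUCCEED):
    sync_status -= result              # each finished task decrements once
# sync_status is now 1: zero would mean every task succeeded; under these
# assumed constants a positive remainder counts the failed tasks.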
Example #19
    def _storage_exist(self, context, access_info):
        access_info_dict = copy.deepcopy(access_info)
        access_info_list = access_info_filter(context, access_info_dict)

        for _access_info in access_info_list:
            try:
                storage = db.storage_get(context, _access_info['storage_id'])
                if storage:
                    LOG.error("Storage %s has same access "
                              "information." % storage['id'])
                    return True
            except exception.StorageNotFound:
                # The storage may not have been saved successfully after its
                # access information was stored during registration, so remove
                # the access info to keep the database free of residual data.
                LOG.debug("Remove residual access information.")
                db.access_info_delete(context, _access_info['storage_id'])

        return False
Example #20
    def sync(self, req, id, body):
        ctx = req.environ['delfin.context']

        # begin_time and end_time are optional parameters
        begin_time = body.get('begin_time')
        end_time = body.get('end_time')

        # When both begin_time and end_time are provided, end_time should
        # be greater than begin_time
        if begin_time and end_time and end_time <= begin_time:
            msg = "end_time should be greater than begin_time."
            raise exception.InvalidInput(msg)

        # Check for the storage existence
        _ = db.storage_get(ctx, id)

        query_para = {
            'begin_time': body.get('begin_time'),
            'end_time': body.get('end_time')
        }

        # Trigger asynchronous alert syncing from storage backend
        self.task_rpcapi.sync_storage_alerts(ctx, id, query_para)
Example #21
    def process_alert_info(self, alert):
        """Fills alert model using driver manager interface."""
        ctxt = context.get_admin_context()
        storage = db.storage_get(ctxt, alert['storage_id'])
        alert_model = {}

        try:
            alert_model = self.driver_manager.parse_alert(
                ctxt, alert['storage_id'], alert)
            # Fill storage specific info
            if alert_model:
                alert_util.fill_storage_attributes(alert_model, storage)
        except exception.IncompleteTrapInformation as e:
            LOG.warning(e)
            threading.Thread(target=self.sync_storage_alert,
                             args=(ctxt, alert['storage_id'])).start()
        except Exception as e:
            LOG.error(e)
            raise exception.InvalidResults(
                "Failed to fill the alert model from driver.")

        # Export to base exporter which handles dispatch for all exporters
        if alert_model:
            self.exporter_manager.dispatch(ctxt, alert_model)
Example #22
    def show(self, req, id):
        ctxt = req.environ['delfin.context']
        storage = db.storage_get(ctxt, id)
        return storage_view.build_storage(storage)
Example #23
    def metrics_config(self, req, body, id):
        """
        :param req:
        :param body:
        :param id:
        :return:
        """
        ctxt = req.environ['delfin.context']

        # check storage is registered
        db.storage_get(ctxt, id)

        metrics_config_dict = body

        # get scheduler object
        schedule = config.Scheduler.getInstance()

        # The path of scheduler config file
        config_file = CONF.scheduler.config_path

        try:
            # Load the scheduler configuration file
            data = config.load_json_file(config_file)
            storage_found = False
            for storage in data.get("storages"):
                config_storage_id = storage.get('id')
                if config_storage_id == id:
                    for resource in metrics_config_dict.keys():
                        storage_dict = storage.get(resource)
                        metric_dict = metrics_config_dict.get(resource)
                        storage_dict.update(metric_dict)

                        interval = storage_dict.get('interval')
                        is_historic = storage_dict.get('is_historic')

                        job_id = id + resource

                        if schedule.get_job(job_id):
                            schedule.reschedule_job(job_id=job_id,
                                                    trigger='interval',
                                                    seconds=interval)
                        else:
                            schedule.add_job(
                                self.perf_collect,
                                'interval',
                                args=[id, interval, is_historic, resource],
                                seconds=interval,
                                next_run_time=datetime.now(),
                                id=job_id)

                        storage_found = True

            if not storage_found:
                temp_dict = {'id': id}
                temp_dict.update(metrics_config_dict)
                data.get("storages").append(temp_dict)

                for resource in metrics_config_dict.keys():
                    resource_dict = metrics_config_dict.get(resource)
                    interval = resource_dict.get('interval')
                    is_historic = resource_dict.get('is_historic')

                    job_id = id + resource

                    schedule.add_job(
                        self.perf_collect,
                        'interval',
                        args=[id, interval, is_historic, resource],
                        seconds=interval,
                        next_run_time=datetime.now(),
                        id=job_id)

            with open(config_file, "w") as jsonFile:
                json.dump(data, jsonFile)

        except TypeError as e:
            LOG.error("Error occurred during parsing of config file")
            raise exception.InvalidContentType(e)
        except json.decoder.JSONDecodeError as e:
            msg = ("Not able to open the config file: {0}".format(config_file))
            LOG.error(msg)
            raise exception.InvalidInput(e.msg)
        else:
            return metrics_config_dict
        finally:
            try:
                schedule.start()
            except Exception as e:
                LOG.debug("Scheduler is already running.{0}".format(e))
Example #24
    def delete(self, req, id, sequence_number):
        ctx = req.environ['delfin.context']
        _ = db.storage_get(ctx, id)
        self.driver_manager.clear_alert(ctx, id, sequence_number)