Beispiel #1
0
def _set_synced_if_ok(context, storage_id, resource_count):
    """Mark a storage as sync-in-progress if no other sync task is running.

    Sets ``sync_status`` to ``resource_count * ResourceSync.START`` and
    refreshes ``updated_at`` so that concurrent callers can detect a
    running sync task.

    :param context: request context passed through to the db layer
    :param storage_id: id of the storage to update
    :param resource_count: number of resources that will be synchronized
    :raises exception.InvalidInput: if the storage does not exist
    :raises exception.StorageIsSyncing: if a previous sync task started
        within CONF.sync_task_expiration seconds and has not finished
    """
    try:
        storage = db.storage_get(context, storage_id)
    except exception.StorageNotFound:
        msg = 'Storage %s not found when try to set sync_status' \
              % storage_id
        raise exception.InvalidInput(message=msg)
    else:
        last_update = storage['updated_at'] or storage['created_at']
        current_time = timeutils.utcnow()
        # BUG FIX: timedelta.seconds holds only the seconds *component*
        # (0-86399) and silently drops whole days, so a sync stuck for
        # more than a day would still look "recent". total_seconds()
        # returns the real elapsed interval.
        interval = (current_time - last_update).total_seconds()
        # If last synchronization was within
        # CONF.sync_task_expiration(in seconds), and the sync status
        # is bigger than 0, it means some sync task is still running,
        # the new sync task should not launch
        if interval < CONF.sync_task_expiration and \
                storage['sync_status'] > 0:
            raise exception.StorageIsSyncing(storage['id'])
        storage['sync_status'] = resource_count * constants.ResourceSync.START
        storage['updated_at'] = current_time
        db.storage_update(context, storage['id'], storage)
Beispiel #2
0
    def init_connection(self, access_info):
        """Establish a PyU4V session to the VMAX storage backend.

        Reads ``array_id`` from the ``extra_attributes`` of *access_info*
        and opens a Unisphere REST connection with the given credentials.
        """
        extra_attributes = access_info.get('extra_attributes', {})
        self.array_id = extra_attributes.get('array_id', None)
        if not self.array_id:
            raise exception.InvalidInput('Input array_id is missing')

        try:
            # Initialise PyU4V connection to VMAX
            self.conn = PyU4V.U4VConn(
                u4v_version=SUPPORTED_VERSION,
                server_ip=access_info['host'],
                port=access_info['port'],
                verify=False,
                array_id=self.array_id,
                username=access_info['username'],
                password=access_info['password'])

        except Exception as err:
            msg = "Failed to connect to VMAX: {}".format(err)
            LOG.error(msg)
            raise exception.StorageBackendException(msg)
Beispiel #3
0
    def parse_alert(context, alert):
        """Parse alert data got from alert manager and fill the alert model."""
        # Reject the alert if any mandatory attribute is absent
        for required in AlertHandler._mandatory_alert_attributes:
            if not alert.get(required):
                msg = "Mandatory information %s missing in alert message. " \
                      % required
                raise exception.InvalidInput(msg)

        try:
            # These information are sourced from device registration info
            message_code = alert.get(AlertHandler.OID_MESSAGECODE)
            alert_model = {
                'alert_id': "0x%07x" % int(message_code),
                'alert_name': AlertHandler.get_alert_type(message_code),
                'severity': AlertHandler.SEVERITY_MAP.get(
                    alert.get(AlertHandler.OID_SEVERITY),
                    constants.Severity.NOT_SPECIFIED),
                'category': AlertHandler.CATEGORY_MAP.get(
                    alert.get(AlertHandler.OID_STATE),
                    constants.Category.NOT_SPECIFIED),
                'type': constants.EventType.EQUIPMENT_ALARM,
                'sequence_number': alert.get(AlertHandler.OID_ID),
                'occur_time': AlertHandler.get_time_stamp(
                    alert.get(AlertHandler.OID_TIMEOCCURRED)),
                'description': alert.get(AlertHandler.OID_DETAILS),
                'resource_type': constants.DEFAULT_RESOURCE_TYPE,
                'location': alert.get(AlertHandler.OID_COMPONENT),
            }

            # A state value of '5' means the device cleared the alert itself
            if alert.get(AlertHandler.OID_STATE) == '5':
                alert_model['clear_category'] = constants.ClearType.AUTOMATIC
            return alert_model
        except Exception as e:
            LOG.error(e)
            msg = (_("Failed to build alert model as some attributes missing "
                     "in alert message."))
            raise exception.InvalidResults(msg)
Beispiel #4
0
    def sync(self, req, id, body):
        """Trigger an asynchronous alert sync for the given storage."""
        ctx = req.environ['delfin.context']

        # begin_time and end_time are optional parameters
        begin_time = body.get('begin_time')
        end_time = body.get('end_time')

        # When both bounds are supplied, the time window must be
        # non-empty: end_time strictly after begin_time.
        if begin_time and end_time and end_time <= begin_time:
            raise exception.InvalidInput(
                "end_time should be greater than begin_time.")

        # Fetch the storage purely to verify it exists; result is unused.
        db.storage_get(ctx, id)

        query_para = {'begin_time': begin_time, 'end_time': end_time}

        # Trigger asynchronous alert syncing from storage backend
        self.task_rpcapi.sync_storage_alerts(ctx, id, query_para)
Beispiel #5
0
    def init_connection(self, access_info):
        """ Given the access_info get a connection to VMAX storage """
        try:
            version, self.uni_version = self.rest.get_uni_version()
            LOG.info('Connected to Unisphere Version: {0}'.format(version))
        except exception.InvalidUsernameOrPassword as e:
            LOG.error("Failed to connect VMAX. Reason: {}".format(e.msg))
            # Propagate the credential failure to the caller as-is.
            raise e
        except (exception.SSLCertificateFailed,
                exception.SSLHandshakeFailed) as e:
            LOG.error("Failed to connect to VMAX: {}".format(e))
            # Re-raise the original SSL exception unchanged.
            raise
        except Exception as err:
            # Any other failure is treated as a bad host/port combination.
            LOG.error("Failed to connect to VMAX. Host or Port is not "
                      "correct: {}".format(err))
            raise exception.InvalidIpOrPort()

        if not self.uni_version:
            raise exception.InvalidInput(
                "Invalid input. Failed to get vmax unisphere version")
Beispiel #6
0
    def init_connection(self, access_info):
        """ Given the access_info get a connection to VMAX storage """
        try:
            ver, self.uni_version = self.rest.get_uni_version()
            LOG.info('Connected to Unisphere Version: {0}'.format(ver))
        except exception.InvalidUsernameOrPassword as e:
            msg = "Failed to connect VMAX. Reason: {}".format(e.msg)
            LOG.error(msg)
            raise e
        except (exception.SSLCertificateFailed,
                exception.SSLHandshakeFailed) as e:
            msg = ("Failed to connect to VMAX: {}".format(e))
            LOG.error(msg)
            # Re-raise the original SSL exception unchanged for the caller.
            raise
        except Exception as err:
            # Any other failure is assumed to be a bad host/port combination.
            msg = ("Failed to connect to VMAX. Host or Port is not correct: "
                   "{}".format(err))
            LOG.error(msg)
            raise exception.InvalidIpOrPort()

        if not self.uni_version:
            msg = "Invalid input. Failed to get vmax unisphere version"
            raise exception.InvalidInput(msg)

        # array_id may be omitted by the user; it is then discovered below.
        self.array_id = access_info.get('extra_attributes', {}). \
            get('array_id', None)

        try:
            # Get array details from unisphere
            array = self.rest.get_array_detail(version=self.uni_version)
            if not array:
                msg = "Failed to get array details"
                raise exception.InvalidInput(msg)

            # Embedded Unisphere manages exactly one array: adopt its id,
            # or verify a user-supplied id matches it.
            if len(array['symmetrixId']) == EMBEDDED_UNISPHERE_ARRAY_COUNT:
                if not self.array_id:
                    self.array_id = array['symmetrixId'][0]
                elif self.array_id != array['symmetrixId'][0]:
                    msg = "Invalid array_id. Expected id: {}". \
                        format(array['symmetrixId'])
                    raise exception.InvalidInput(msg)
            else:
                # Get first local array id
                array_ids = array.get('symmetrixId', list())
                for array_id in array_ids:
                    array_info = self.rest.get_array_detail(
                        version=self.uni_version, array=array_id)
                    if array_info.get('local'):
                        LOG.info("Adding local VMAX array {}".format(array_id))
                        if not self.array_id:
                            self.array_id = array_id
                        # NOTE(review): the loop stops at the first local
                        # array even when a user-supplied array_id has not
                        # been matched against it — confirm this is intended.
                        break
                    else:
                        LOG.info(
                            "Skipping remote VMAX array {}".format(array_id))
            if not self.array_id:
                msg = "Failed to get VMAX array id from Unisphere"
                raise exception.InvalidInput(msg)
        except Exception:
            LOG.error("Failed to init_connection to VMAX")
            raise

        # NOTE(review): this final check looks unreachable — the try block
        # above either sets self.array_id or raises before reaching here.
        if not self.array_id:
            msg = "Input array_id is missing. Supported ids: {}". \
                format(array['symmetrixId'])
            raise exception.InvalidInput(msg)
Beispiel #7
0
def process_sort_params(sort_keys,
                        sort_dirs,
                        default_keys=None,
                        default_dir='asc'):
    """Process the sort parameters to include default keys.

    Creates a list of sort keys and a list of sort directions. Adds the default
    keys to the end of the list if they are not already included.

    When adding the default keys to the sort keys list, the associated
    direction is:
    1) The first element in the 'sort_dirs' list (if specified), else
    2) 'default_dir' value (Note that 'asc' is the default value since this is
    the default in sqlalchemy.utils.paginate_query)

    :param sort_keys: List of sort keys to include in the processed list
    :param sort_dirs: List of sort directions to include in the processed list
    :param default_keys: List of sort keys that need to be included in the
                         processed list, they are added at the end of the list
                         if not already specified.
    :param default_dir: Sort direction associated with each of the default
                        keys that are not supplied, used when they are added
                        to the processed list
    :returns: list of sort keys, list of sort directions
    :raise exception.InvalidInput: If more sort directions than sort keys
                                   are specified or if an invalid sort
                                   direction is specified
    """
    if default_keys is None:
        default_keys = ['created_at']

    # Direction used for any key without an explicit direction:
    # first supplied direction if any, otherwise the default.
    fill_dir = sort_dirs[0] if sort_dirs else default_dir

    # Work on copies; never mutate the caller's lists.
    result_keys = list(sort_keys) if sort_keys else []

    if sort_dirs:
        # Validate every supplied direction before accepting the list.
        for direction in sort_dirs:
            if direction not in ('asc', 'desc'):
                msg = _("Unknown sort direction, must be 'desc' or 'asc'.")
                raise exception.InvalidInput(msg)
        result_dirs = list(sort_dirs)
    else:
        result_dirs = [fill_dir] * len(result_keys)

    # More directions than keys is an error ...
    if len(result_dirs) > len(result_keys):
        msg = _("Sort direction array size exceeds sort key array size.")
        raise exception.InvalidInput(msg)
    # ... fewer is fine: pad with the fill direction.
    result_dirs += [fill_dir] * (len(result_keys) - len(result_dirs))

    # Append any default keys not already requested.
    for key in default_keys:
        if key not in result_keys:
            result_keys.append(key)
            result_dirs.append(fill_dir)

    return result_keys, result_dirs
Beispiel #8
0
    def metrics_config(self, req, body, id):
        """Configure performance-metric collection jobs for a storage.

        Merges the requested per-resource metric settings into the
        scheduler config file and creates (or reschedules) one interval
        job per resource.

        :param req: API request carrying the delfin context in its environ.
        :param body: per-resource metric settings, e.g.
                     ``{resource: {'interval': ..., 'is_historic': ...}}``.
        :param id: storage id being configured; must be registered.
        :return: the merged metrics configuration dict.
        :raises exception.InvalidContentType: when the config file content
            has an unexpected structure (TypeError while traversing it).
        :raises exception.InvalidInput: when the config file is not valid
            JSON.
        """
        ctxt = req.environ['delfin.context']

        # check storage is registered
        db.storage_get(ctxt, id)

        # Shallow-copy the request body so the caller's dict is not
        # aliased. (The former ``metrics_config_dict.update(body)``
        # self-update was a no-op and has been removed.)
        metrics_config_dict = dict(body)

        # get scheduler object
        schedule = config.Scheduler.getInstance()

        # The path of scheduler config file
        config_file = CONF.scheduler.config_path

        try:
            # Load the scheduler configuration file
            data = config.load_json_file(config_file)
            storage_found = False
            for storage in data.get("storages"):
                config_storage_id = storage.get('id')
                if config_storage_id == id:
                    for resource in metrics_config_dict.keys():
                        storage_dict = storage.get(resource)
                        metric_dict = metrics_config_dict.get(resource)
                        storage_dict.update(metric_dict)

                        interval = storage_dict.get('interval')
                        is_historic = storage_dict.get('is_historic')

                        # Job id = storage id + resource name, so every
                        # resource gets a distinct scheduler job.
                        job_id = id + resource

                        if schedule.get_job(job_id):
                            schedule.reschedule_job(job_id=job_id,
                                                    trigger='interval',
                                                    seconds=interval)
                        else:
                            schedule.add_job(
                                self.perf_collect,
                                'interval',
                                args=[id, interval, is_historic, resource],
                                seconds=interval,
                                next_run_time=datetime.now(),
                                id=job_id)

                        storage_found = True

            if not storage_found:
                # First configuration for this storage: append a new
                # entry to the config file and create its jobs.
                temp_dict = {'id': id}
                temp_dict.update(metrics_config_dict)
                data.get("storages").append(temp_dict)

                for resource in metrics_config_dict.keys():
                    resource_dict = metrics_config_dict.get(resource)
                    interval = resource_dict.get('interval')
                    is_historic = resource_dict.get('is_historic')

                    job_id = id + resource

                    schedule.add_job(
                        self.perf_collect,
                        'interval',
                        args=[id, interval, is_historic, resource],
                        seconds=interval,
                        next_run_time=datetime.now(),
                        id=job_id)

            # Persist the merged configuration; the ``with`` block closes
            # the file (the redundant explicit close() was removed).
            with open(config_file, "w") as config_fp:
                json.dump(data, config_fp)

        except TypeError as e:
            LOG.error("Error occurred during parsing of config file")
            raise exception.InvalidContentType(e)
        except json.decoder.JSONDecodeError as e:
            msg = ("Not able to open the config file: {0}".format(config_file))
            LOG.error(msg)
            raise exception.InvalidInput(e.msg)
        else:
            return metrics_config_dict
        finally:
            # Ensure the scheduler is running; tolerate "already running".
            try:
                schedule.start()
            except Exception as e:
                LOG.debug("Scheduler is already running.{0}".format(e))
Beispiel #9
0
    def parse_alert(self, context, alert):
        """Parse alert data got from alert manager and fill the alert model."""

        # Reject the trap if any mandatory attribute is absent
        for required in self.mandatory_alert_attributes:
            if not alert.get(required):
                msg = "Mandatory information %s missing in alert message. " \
                      % required
                raise exception.InvalidInput(msg)

        # Seed the model with device registration info and handler
        # defaults. Trap info does not carry clear_type, an alert
        # sequence number or an occur time, so the blanks below are
        # placeholders (TBD) and the received time stands in for occur
        # time. Time format will be like: Wed May 20 01:53:29 2020
        alert_model = {
            'me_dn': alert['storage_id'],
            'me_name': alert['storage_name'],
            'manufacturer': alert['vendor'],
            'product_name': alert['model'],
            'category': self.default_category,
            'location': self.default_location,
            'event_type': self.default_event_type,
            'severity': self.default_severity,
            'probable_cause': self.default_probable_cause,
            'me_category': self.default_me_category,
            'clear_type': "",
            'device_alert_sn': "",
            'match_key': "",
            'occur_time': datetime.now().strftime('%c'),
            # Array id is used to fill unique id at source system side
            'native_me_dn': alert['connUnitName'],
        }

        if alert.get('category'):
            alert_model['category'] = alert['category']

        # Location is name-value pair having component type and component name
        if alert.get('emcAsyncEventComponentType') \
                and alert.get('emcAsyncEventComponentName'):
            component_type = alert_mapper.component_type_mapping.get(
                alert.get('emcAsyncEventComponentType'), "")
            alert_model['location'] = ('Component type: '
                                       + component_type
                                       + ',Component name: '
                                       + alert['emcAsyncEventComponentName'])

        # Override remaining defaults with trap-supplied values when present
        for source_key, model_key in (
                ('connUnitEventType', 'event_type'),
                ('connUnitEventSeverity', 'severity'),
                ('connUnitEventDescr', 'probable_cause'),
                ('connUnitType', 'me_category')):
            if alert.get(source_key):
                alert_model[model_key] = alert[source_key]

        # Fill alarm id and fill alarm_name with corresponding mapping names
        alarm_id = alert['emcAsyncEventCode']
        alert_model['alarm_id'] = alarm_id
        alert_model['alarm_name'] = alert_mapper.alarm_id_name_mapping.get(
            alarm_id, alarm_id)

        return alert_model
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None, offset=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we returns the next
                    results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
    :param offset: the number of items to skip from the marker or from the
                    first element.

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    # sort_dir (single) and sort_dirs (per-column) are mutually exclusive.
    if sort_dir and sort_dirs:
        raise AssertionError('Both sort_dir and sort_dirs specified.')

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    if len(sort_dirs) != len(sort_keys):
        raise AssertionError(
            'sort_dirs length is not equal to sort_keys length.')

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        sort_dir_func = {
            'asc': sqlalchemy.asc,
            'desc': sqlalchemy.desc,
        }[current_sort_dir]

        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise exception.InvalidInput('Invalid sort key')
        # Reject attributes that are not mapped ORM values so arbitrary
        # caller input cannot reach order_by.
        if not api.is_orm_value(sort_key_attr):
            raise exception.InvalidInput('Invalid sort key')
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            # Coalesce NULL marker values to the column default so the
            # comparisons built below stay well-defined.
            if v is None:
                v = _get_default_column_value(model, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(0, len(sort_keys)):
            crit_attrs = []
            for j in range(0, i):
                model_attr = getattr(model, sort_keys[j])
                default = _get_default_column_value(model, sort_keys[j])
                # CASE substitutes the default for NULL column values,
                # mirroring the marker-value coalescing above.
                # NOTE(review): the list-of-whens form of case() is the
                # SQLAlchemy 1.x calling convention — confirm pinned version.
                attr = sa_sql.expression.case([(model_attr.isnot(None),
                                                model_attr), ],
                                              else_=default)
                crit_attrs.append((attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            default = _get_default_column_value(model, sort_keys[i])
            attr = sa_sql.expression.case([(model_attr.isnot(None),
                                            model_attr), ],
                                          else_=default)
            if sort_dirs[i] == 'desc':
                crit_attrs.append((attr < marker_values[i]))
            elif sort_dirs[i] == 'asc':
                crit_attrs.append((attr > marker_values[i]))
            else:
                raise ValueError(_("Unknown sort direction, "
                                   "must be 'desc' or 'asc'"))

            # One disjunct of the lexicographic ordering: all preceding
            # keys equal the marker AND the current key is strictly beyond.
            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    # Offset is applied after the marker filter, i.e. it skips rows
    # relative to the marker (or from the start when no marker is given).
    if offset:
        query = query.offset(offset)

    return query
Beispiel #11
0
    def _input_check(self, alert_source):
        """Validate an alert-source dict and normalise version-specific keys.

        For SNMPv3 the auth/privacy secrets are encrypted in place and any
        keys that do not apply to the chosen security level are cleared;
        for SNMPv1/v2c the community string is encrypted and all SNMPv3
        keys are cleared.
        """
        version = alert_source.get('version')

        if version.lower() != 'snmpv3':
            # SNMPv1 / SNMPv2c path
            community_string = alert_source.get('community_string')
            if not community_string:
                msg = "If snmp version is SNMPv1 or SNMPv2c, " \
                      "community_string is required."
                raise exception.InvalidInput(msg)
            alert_source['community_string'] = cryptor.encode(
                alert_source['community_string'])

            # Clear keys for SNMPv3
            for k in SNMPv3_keys:
                alert_source[k] = None

            return alert_source

        # SNMPv3 path
        user_name = alert_source.get('username')
        security_level = alert_source.get('security_level')
        engine_id = alert_source.get('engine_id')

        # Validate engine_id, check octet string can be formed from it
        if engine_id:
            try:
                OctetString.fromHexString(engine_id)
            except (TypeError, ValueError):
                msg = "engine_id should be a set of octets in " \
                      "hexadecimal format."
                raise exception.InvalidInput(msg)

        if not user_name or not security_level:
            msg = "If snmp version is SNMPv3, then username, " \
                  "security_level are required."
            raise exception.InvalidInput(msg)

        if security_level in (constants.SecurityLevel.AUTHNOPRIV,
                              constants.SecurityLevel.AUTHPRIV):
            auth_protocol = alert_source.get('auth_protocol')
            auth_key = alert_source.get('auth_key')
            if not auth_protocol or not auth_key:
                msg = "If snmp version is SNMPv3 and security_level is " \
                      "authPriv or authNoPriv, auth_protocol and " \
                      "auth_key are required."
                raise exception.InvalidInput(msg)
            alert_source['auth_key'] = cryptor.encode(
                alert_source['auth_key'])

            if security_level == constants.SecurityLevel.AUTHPRIV:
                privacy_protocol = alert_source.get('privacy_protocol')
                privacy_key = alert_source.get('privacy_key')
                if not privacy_protocol or not privacy_key:
                    msg = "If snmp version is SNMPv3 and security_level" \
                          " is authPriv, privacy_protocol and " \
                          "privacy_key are  required."
                    raise exception.InvalidInput(msg)
                alert_source['privacy_key'] = cryptor.encode(
                    alert_source['privacy_key'])
            else:
                # authNoPriv: privacy settings do not apply.
                alert_source['privacy_key'] = None
                alert_source['privacy_protocol'] = None
        else:
            # noAuthNoPriv: neither auth nor privacy settings apply.
            alert_source['auth_key'] = None
            alert_source['auth_protocol'] = None
            alert_source['privacy_key'] = None
            alert_source['privacy_protocol'] = None

        # Clear keys for other versions.
        alert_source['community_string'] = None

        return alert_source
Beispiel #12
0
    def _input_check(self, alert_source):
        """Validate and normalise an alert-source configuration dict.

        For SNMPv3, username/security_level/engine_id are mandatory, the
        auth/privacy secrets are encrypted in place, and keys that do not
        apply to the chosen security level are cleared. For SNMPv1/v2c a
        community_string is mandatory and all SNMPv3 keys are cleared.
        The configuration is then verified against the device via an SNMP
        connectivity check.

        :param alert_source: mutable dict of alert source attributes.
        :return: the validated (and possibly updated) alert_source dict.
        :raises exception.InvalidInput: when mandatory fields are missing.
        """
        version = alert_source.get('version')
        # Plain-text secrets are retained only to drive the connectivity
        # check at the end; the dict itself stores encrypted values.
        plain_auth_key = None
        plain_priv_key = None

        if version.lower() == 'snmpv3':
            user_name = alert_source.get('username')
            security_level = alert_source.get('security_level')
            engine_id = alert_source.get('engine_id')

            # Validate engine_id
            snmp_validator.validate_engine_id(engine_id)

            if not user_name or not security_level or not engine_id:
                msg = "If snmp version is SNMPv3, then username, " \
                      "security_level and engine_id are required."
                raise exception.InvalidInput(msg)

            if security_level == "AuthNoPriv" or security_level == "AuthPriv":
                auth_protocol = alert_source.get('auth_protocol')
                auth_key = alert_source.get('auth_key')
                if not auth_protocol or not auth_key:
                    msg = "If snmp version is SNMPv3 and security_level is " \
                          "AuthPriv or AuthNoPriv, auth_protocol and " \
                          "auth_key are required."
                    raise exception.InvalidInput(msg)
                plain_auth_key = alert_source['auth_key']
                alert_source['auth_key'] = cryptor.encode(
                    alert_source['auth_key'])

                if security_level == "AuthPriv":
                    privacy_protocol = alert_source.get('privacy_protocol')
                    privacy_key = alert_source.get('privacy_key')
                    if not privacy_protocol or not privacy_key:
                        msg = "If snmp version is SNMPv3 and security_level" \
                              " is AuthPriv, privacy_protocol and " \
                              "privacy_key are  required."
                        raise exception.InvalidInput(msg)
                    plain_priv_key = alert_source['privacy_key']
                    alert_source['privacy_key'] = cryptor.encode(
                        alert_source['privacy_key'])
                else:
                    # AuthNoPriv: privacy settings do not apply.
                    alert_source['privacy_key'] = None
                    alert_source['privacy_protocol'] = None
            else:
                # NoAuthNoPriv: neither auth nor privacy settings apply.
                alert_source['auth_key'] = None
                alert_source['auth_protocol'] = None
                alert_source['privacy_key'] = None
                alert_source['privacy_protocol'] = None

            # Clear keys for other versions.
            alert_source['community_string'] = None
        else:
            community_string = alert_source.get('community_string', None)
            if not community_string:
                msg = "If snmp version is SNMPv1 or SNMPv2c, " \
                      "community_string is required."
                raise exception.InvalidInput(msg)

            # Clear keys for SNMPv3
            for k in SNMPv3_keys:
                alert_source[k] = None

        # Validate configuration with alert source using snmp connectivity and
        # update if valid
        alert_source = snmp_validator.validate_connectivity(
            alert_source, plain_auth_key, plain_priv_key)

        return alert_source