Example #1
def check_exclusive_options(**kwargs):
    """Checks that only one of the provided options is actually not-none.

    Iterates over all the kwargs passed in and checks that only one of said
    arguments is not-none, if more than one is not-none then an exception will
    be raised with the names of those arguments who were not-none.
    """

    if not kwargs:
        return

    pretty_keys = kwargs.pop("pretty_keys", True)
    exclusive_options = {}
    for (k, v) in kwargs.items():
        if v is not None:
            exclusive_options[k] = True

    if len(exclusive_options) > 1:
        # Change the format of the names from pythonic to
        # something that is more readable.
        #
        # Ex: 'the_key' -> 'the key'
        if pretty_keys:
            names = [k.replace('_', ' ') for k in exclusive_options]
        else:
            names = exclusive_options.keys()
        names = ", ".join(sorted(names))
        msg = (_("May specify only one of %s") % (names))
        raise exception.InvalidInput(reason=msg)
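A minimal usage sketch (not part of the original snippet): the `_` translation hook and the `exception` namespace are stubbed here as assumptions so the helper can run standalone, and the option names are made up for illustration.

import types

# Stand-ins for project-level helpers the snippet expects (assumptions).
class InvalidInput(Exception):
    def __init__(self, reason):
        super(InvalidInput, self).__init__(reason)

exception = types.SimpleNamespace(InvalidInput=InvalidInput)
_ = lambda s: s  # gettext no-op stub

check_exclusive_options(snapshot_id='s1', source_volid=None)  # ok: one value set
try:
    check_exclusive_options(snapshot_id='s1', source_volid='v1')
except InvalidInput as exc:
    print(exc)  # May specify only one of snapshot id, source volid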
Example #2
    def check_gcs_options(self):
        required_options = ('backup_gcs_bucket', 'backup_gcs_credential_file',
                            'backup_gcs_project_id')
        unset_options = [
            opt for opt in required_options if not getattr(CONF, opt, None)
        ]
        if unset_options:
            msg = _('Unset gcs options: %s') % unset_options
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)
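The unset-option scan in isolation; CONF is normally an oslo.config object, so the plain namespace below is an assumption for the demo. Note that an empty string also counts as unset, since the check is `not getattr(...)`.

import types

CONF = types.SimpleNamespace(backup_gcs_bucket='my-bucket',
                             backup_gcs_credential_file=None,
                             backup_gcs_project_id='')

required_options = ('backup_gcs_bucket', 'backup_gcs_credential_file',
                    'backup_gcs_project_id')
unset_options = [opt for opt in required_options
                 if not getattr(CONF, opt, None)]
print(unset_options)  # ['backup_gcs_credential_file', 'backup_gcs_project_id']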
Example #3
    def __init__(self,
                 ip,
                 port,
                 conn_timeout,
                 login,
                 password=None,
                 privatekey=None,
                 *args,
                 **kwargs):
        self.ip = ip
        self.port = port
        self.login = login
        self.password = password
        self.conn_timeout = conn_timeout if conn_timeout else None
        self.privatekey = privatekey
        self.hosts_key_file = None

        # Validate the hosts key file setting here.
        # Paramiko handles the case where the file is inaccessible.
        if not CONF.ssh_hosts_key_file:
            raise exception.ParameterNotFound(param='ssh_hosts_key_file')
        elif not os.path.isfile(CONF.ssh_hosts_key_file):
            # If using the default path, just create the file.
            if CONF.state_path in CONF.ssh_hosts_key_file:
                open(CONF.ssh_hosts_key_file, 'a').close()
            else:
                msg = (_("Unable to find ssh_hosts_key_file: %s") %
                       CONF.ssh_hosts_key_file)
                raise exception.InvalidInput(reason=msg)

        if 'hosts_key_file' in kwargs:
            self.hosts_key_file = kwargs.pop('hosts_key_file')
            LOG.info(
                _LI("Secondary ssh hosts key file %(kwargs)s will be "
                    "loaded along with %(conf)s from /etc/storage.conf."), {
                        'kwargs': self.hosts_key_file,
                        'conf': CONF.ssh_hosts_key_file
                    })

        LOG.debug(
            "Setting strict_ssh_host_key_policy to '%(policy)s' "
            "using ssh_hosts_key_file '%(key_file)s'.", {
                'policy': CONF.strict_ssh_host_key_policy,
                'key_file': CONF.ssh_hosts_key_file
            })

        self.strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy

        if not self.hosts_key_file:
            self.hosts_key_file = CONF.ssh_hosts_key_file
        else:
            self.hosts_key_file += ',' + CONF.ssh_hosts_key_file

        super(SSHPool, self).__init__(*args, **kwargs)
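A small sketch of the key-file merge at the end of the constructor: a secondary file passed through kwargs ends up first in a comma-separated list, ahead of the configured one. The paths are illustrative assumptions.

configured_file = '/etc/cinder/ssh_known_hosts'  # stands in for CONF.ssh_hosts_key_file
secondary_file = '/opt/stack/extra_known_hosts'  # stands in for kwargs['hosts_key_file']

hosts_key_file = secondary_file
hosts_key_file += ',' + configured_file
print(hosts_key_file)  # /opt/stack/extra_known_hosts,/etc/cinder/ssh_known_hosts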
Example #4
    def _check_encryption_input(self, encryption, create=True):
        if encryption.get('key_size') is not None:
            encryption['key_size'] = utils.validate_integer(
                encryption['key_size'], 'key_size',
                min_value=0, max_value=db.MAX_INT)

        if create:
            msg = None
            if 'provider' not in encryption:
                msg = _('provider must be defined')
            elif 'control_location' not in encryption:
                msg = _('control_location must be defined')

            if msg is not None:
                raise exception.InvalidInput(reason=msg)

        # Check control location
        if 'control_location' in encryption:
            if encryption['control_location'] not in CONTROL_LOCATION:
                msg = _("Valid control locations are: %s") % CONTROL_LOCATION
                raise exception.InvalidInput(reason=msg)
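A rough sketch of which request bodies pass this validation; CONTROL_LOCATION is assumed here to be ('front-end', 'back-end'), and the dict contents are illustrative.

CONTROL_LOCATION = ('front-end', 'back-end')  # assumed valid values

ok_create = {'provider': 'luks', 'control_location': 'front-end',
             'key_size': 256}                          # accepted on create
missing_provider = {'control_location': 'front-end'}  # rejected on create
bad_location = {'provider': 'luks',
                'control_location': 'middle'}          # rejected always

for body in (ok_create, missing_provider, bad_location):
    create_ok = 'provider' in body and 'control_location' in body
    location_ok = body.get('control_location') in CONTROL_LOCATION
    print(create_ok and location_ok)  # True, False, False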
Example #5
def check_string_length(value, name, min_length=0, max_length=None):
    """Check the length of specified string.

    :param value: the string to validate
    :param name: the name of the string, used in error messages
    :param min_length: the minimum allowed length of the string
    :param max_length: the maximum allowed length of the string, or None
    """
    if not isinstance(value, six.string_types):
        msg = _("%s is not a string or unicode") % name
        raise exception.InvalidInput(message=msg)

    if len(value) < min_length:
        msg = _("%(name)s has a minimum character requirement of "
                "%(min_length)s.") % {'name': name, 'min_length': min_length}
        raise exception.InvalidInput(message=msg)

    if max_length is not None and len(value) > max_length:
        msg = _("%(name)s has more than %(max_length)s "
                "characters.") % {'name': name, 'max_length': max_length}
        raise exception.InvalidInput(message=msg)
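A usage sketch; `six` is the snippet's own dependency, while the `_` hook and the `exception` namespace are stubbed as assumptions so the call runs outside the project.

import types
import six  # the snippet itself depends on six

class InvalidInput(Exception):
    def __init__(self, message):
        super(InvalidInput, self).__init__(message)

exception = types.SimpleNamespace(InvalidInput=InvalidInput)
_ = lambda s: s  # gettext no-op stub

check_string_length('backup-1', 'name', min_length=1, max_length=255)  # passes
try:
    check_string_length('', 'name', min_length=1)
except InvalidInput as exc:
    print(exc)  # name has a minimum character requirement of 1.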
Example #6
    @staticmethod
    def decode_record(backup_url):
        """Deserialize backup metadata from string into a dictionary.

        :raises: InvalidInput
        """
        try:
            return jsonutils.loads(base64.decode_as_text(backup_url))
        except TypeError:
            msg = _("Can't decode backup record.")
        except ValueError:
            msg = _("Can't parse backup record.")
        raise exception.InvalidInput(reason=msg)
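The round trip this method reverses, sketched with the stdlib in place of the oslo helpers the snippet imports (jsonutils behaves like json here, and base64.decode_as_text like b64decode plus a UTF-8 decode); the record contents are illustrative.

import base64
import json

record = {'id': 'example-id', 'display_name': 'nightly'}  # illustrative data
backup_url = base64.b64encode(json.dumps(record).encode('utf-8'))

print(json.loads(base64.b64decode(backup_url).decode('utf-8')))
# {'id': 'example-id', 'display_name': 'nightly'}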
Example #7
    def _extract_availability_zone(self, availability_zone):
        if availability_zone is None:
            if CONF.default_availability_zone:
                availability_zone = CONF.default_availability_zone
            else:
                # For backwards compatibility use the storage_availability_zone
                availability_zone = CONF.storage_availability_zone

        valid = self._valid_availability_zone(availability_zone)
        if not valid:
            msg = _LW("Availability zone '%s' is invalid") % (
                availability_zone)
            LOG.warning(msg)
            raise exception.InvalidInput(reason=msg)

        return availability_zone
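The fallback chain in isolation; CONF and the zone lookup are stubbed as assumptions ('nova' happens to be the documented default for storage_availability_zone, but treat it as illustrative here).

import types

CONF = types.SimpleNamespace(default_availability_zone=None,
                             storage_availability_zone='nova')

def _valid_availability_zone(az):
    return az in ('nova',)  # stand-in for the real zone lookup

requested = None
az = requested or CONF.default_availability_zone or CONF.storage_availability_zone
print(az, _valid_availability_zone(az))  # nova True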
Example #8
def db_sync(version=None, init_version=INIT_VERSION, engine=None):
    """Migrate the database to `version` or the most recent version."""

    if engine is None:
        engine = db_api.get_engine()

    current_db_version = get_backend().db_version(engine, MIGRATE_REPO_PATH,
                                                  init_version)

    # TODO(e0ne): drop version validation when a new oslo.db is released
    if version and int(version) < current_db_version:
        msg = _('Database schema downgrade is not allowed.')
        raise exception.InvalidInput(reason=msg)
    return get_backend().db_sync(engine=engine,
                                 abs_path=MIGRATE_REPO_PATH,
                                 version=version,
                                 init_version=init_version)
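The downgrade guard in isolation; the version numbers are illustrative, and in the real function current_db_version comes from the migration backend.

current_db_version = 100

for version in (None, '100', '101', '98'):
    refused = bool(version and int(version) < current_db_version)
    print(version, '->', 'refused' if refused else 'allowed')
# None -> allowed, 100 -> allowed, 101 -> allowed, 98 -> refused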
Example #9
def paginate_query(query,
                   model,
                   limit,
                   sort_keys,
                   marker=None,
                   sort_dir=None,
                   sort_dirs=None,
                   offset=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the sort order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-valued sort_key (k1, k2, k3), we must expand this to
    preserve the lexicographic ordering:
    (k1 > X1) or (k1 == X1 and k2 > X2) or (k1 == X1 and k2 == X2 and k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
    :param offset: the number of items to skip before returning results

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming it is 'id'
        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))

    assert (not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert (len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        sort_dir_func = {
            'asc': sqlalchemy.asc,
            'desc': sqlalchemy.desc,
        }[current_sort_dir]

        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise exception.InvalidInput(reason='Invalid sort key')
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(0, len(sort_keys)):
            crit_attrs = []
            for j in range(0, i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            elif sort_dirs[i] == 'asc':
                crit_attrs.append((model_attr > marker_values[i]))
            else:
                raise ValueError(
                    _("Unknown sort direction, "
                      "must be 'desc' or 'asc'"))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    if offset:
        query = query.offset(offset)

    return query
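To make the docstring's lexicographic-marker formula concrete, here is a minimal sketch with a toy table (names and marker values are assumptions) showing the filter the marker loop assembles for sort_keys=('created_at', 'id'), both ascending:

import datetime

import sqlalchemy

# hypothetical table for illustration
metadata = sqlalchemy.MetaData()
volumes = sqlalchemy.Table(
    'volumes', metadata,
    sqlalchemy.Column('id', sqlalchemy.String),
    sqlalchemy.Column('created_at', sqlalchemy.DateTime),
)

x1 = datetime.datetime(2020, 1, 1)  # marker row's created_at
x2 = 'abc'                          # marker row's id

# (created_at > X1) or (created_at == X1 and id > X2)
criteria = sqlalchemy.sql.or_(
    sqlalchemy.sql.and_(volumes.c.created_at > x1),
    sqlalchemy.sql.and_(volumes.c.created_at == x1, volumes.c.id > x2),
)
print(criteria)
# volumes.created_at > :created_at_1 OR
# volumes.created_at = :created_at_2 AND volumes.id > :id_1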
Example #10
    def _get_import_backup(self, context, backup_url):
        """Prepare database backup record for import.

        This method decodes provided backup_url and expects to find the id of
        the backup in there.

        It then checks the DB for the presence of this backup record; if the
        record is found and is not deleted, an exception is raised because
        the record cannot be created or reused.

        If the record is in deleted status then we must be trying to recover
        it, so we'll reuse the deleted record.

        If the record doesn't already exist, we create it with the provided
        id.

        :param context: running context
        :param backup_url: backup description to be used by the backup driver
        :return: BackupImport object
        :raises: InvalidBackup
        :raises: InvalidInput
        """
        # Deserialize string backup record into a dictionary
        backup_record = storage.Backup.decode_record(backup_url)

        # ID is a required field since it's what links incremental backups
        if 'id' not in backup_record:
            msg = _('Provided backup record is missing an id')
            raise exception.InvalidInput(reason=msg)

        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'volume_id': '0000-0000-0000-0000',
            'status': fields.BackupStatus.CREATING,
        }

        try:
            # Try to get the backup with that ID in all projects even among
            # deleted entries.
            backup = storage.BackupImport.get_by_id(context,
                                                    backup_record['id'],
                                                    read_deleted='yes',
                                                    project_only=False)

            # If the record exists and is not deleted, we cannot proceed
            # with the import
            if backup.status != fields.BackupStatus.DELETED:
                msg = _('Backup already exists in database.')
                raise exception.InvalidBackup(reason=msg)

            # Otherwise we'll "revive" the deleted backup record
            backup.update(kwargs)
            backup.save()

        except exception.BackupNotFound:
            # If the record doesn't exist, create it with the specified ID
            backup = storage.BackupImport(context=context,
                                          id=backup_record['id'],
                                          **kwargs)
            backup.create()

        return backup
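The revive-or-create decision reduced to a table; the statuses and the lookup result are stubbed as assumptions (in the real method they come from storage.BackupImport.get_by_id and fields.BackupStatus).

DELETED = 'deleted'  # stand-in for fields.BackupStatus.DELETED

def import_decision(existing_status):
    if existing_status is None:      # BackupNotFound -> create with that id
        return 'create'
    if existing_status != DELETED:   # live record -> refuse the import
        return 'reject'
    return 'revive'                  # deleted record -> reuse it

for status in (None, 'available', DELETED):
    print(status, '->', import_decision(status))
# None -> create, available -> reject, deleted -> revive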