Esempio n. 1
0
def process_reserve_over_quota(context, overs, usages, quotas, size):
    """Translate an over-quota reservation into the proper exception.

    :param context: request context (only the project id is read, for logs)
    :param overs: list of quota resource names that were exceeded
    :param usages: per-resource usage dicts with 'reserved' and 'in_use'
    :param quotas: per-resource quota limits
    :param size: requested snapshot size in GB
    :raises VolumeSizeExceedsAvailableQuota: when a gigabytes quota is hit
    :raises SnapshotLimitExceeded: when a snapshots quota is hit
    """
    def _consumed(name):
        # Total consumption is what is in use plus what is already reserved.
        return (usages[name]['reserved'] + usages[name]['in_use'])

    for over in overs:
        if 'gigabytes' in over:
            msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                      "%(s_size)sG snapshot (%(d_consumed)dG of "
                      "%(d_quota)dG already consumed).")
            LOG.warning(
                msg, {
                    's_pid': context.project_id,
                    's_size': size,
                    'd_consumed': _consumed(over),
                    'd_quota': quotas[over]
                })
            # Report the matched resource (which may be a per-volume-type
            # gigabytes counter) so the exception agrees with the warning
            # logged above instead of always citing the global quota.
            raise exception.VolumeSizeExceedsAvailableQuota(
                requested=size,
                consumed=_consumed(over),
                quota=quotas[over])
        elif 'snapshots' in over:
            msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                      "snapshot (%(d_consumed)d snapshots "
                      "already consumed).")
            LOG.warning(msg, {
                's_pid': context.project_id,
                'd_consumed': _consumed(over)
            })
            raise exception.SnapshotLimitExceeded(allowed=quotas[over])
Esempio n. 2
0
    def _check_goodness_function(self, stats):
        """Gets a host's goodness rating based on its goodness function.

        :param stats: host capability dict; 'goodness_function' holds the
                      evaluator expression (or None when unset)
        :returns: a rating in [0, 100]; 0 when the function is unset,
                  raises, or produces an out-of-range result
        """
        goodness_rating = 0

        if stats['goodness_function'] is None:
            LOG.warning(_LW("Goodness function not set :: defaulting to "
                            "minimal goodness rating of 0"))
        else:
            try:
                goodness_result = self._run_evaluator(
                    stats['goodness_function'],
                    stats)
            except Exception as ex:
                # A broken goodness function must not break scheduling;
                # warn the admin and fall back to the minimal rating.
                LOG.warning(_LW("Error in goodness_function function "
                                "'%(function)s' : '%(error)s' :: Defaulting "
                                "to a goodness of 0"),
                            {'function': stats['goodness_function'],
                             'error': ex, })
                return goodness_rating

            # isinstance() is the idiomatic check; bool cannot be
            # subclassed, so this matches the old `type(...) is bool`.
            if isinstance(goodness_result, bool):
                if goodness_result:
                    goodness_rating = 100
            elif goodness_result < 0 or goodness_result > 100:
                LOG.warning(_LW("Invalid goodness result.  Result must be "
                                "between 0 and 100.  Result generated: '%s' "
                                ":: Defaulting to a goodness of 0"),
                            goodness_result)
            else:
                goodness_rating = goodness_result

        return goodness_rating
Esempio n. 3
0
def get_volume_type_reservation(ctxt,
                                volume,
                                type_id,
                                reserve_vol_type_only=False):
    """Reserve quota for a volume of the given volume type.

    :param ctxt: request context
    :param volume: volume dict; 'size' and 'project_id' are read
    :param type_id: volume type whose per-type counters are reserved
    :param reserve_vol_type_only: when True, reserve only the per-type
        counters, not the global 'volumes'/'gigabytes' quota
    :returns: the reservations handle returned by QUOTAS.reserve
    :raises VolumeSizeExceedsAvailableQuota: gigabytes quota exceeded
    :raises VolumeLimitExceeded: volume-count quota exceeded
    :raises OverQuota: some other quota resource was exceeded
    """
    from jacket.storage import quota
    QUOTAS = quota.QUOTAS
    # Reserve quotas for the given volume type
    try:
        reserve_opts = {'volumes': 1, 'gigabytes': volume['size']}
        QUOTAS.add_volume_type_opts(ctxt, reserve_opts, type_id)
        # If reserve_vol_type_only is True, just reserve volume_type quota,
        # not volume quota.
        if reserve_vol_type_only:
            reserve_opts.pop('volumes')
            reserve_opts.pop('gigabytes')
        # Note that usually the project_id on the volume will be the same as
        # the project_id in the context. But, if they are different then the
        # reservations must be recorded against the project_id that owns the
        # volume.
        project_id = volume['project_id']
        reservations = QUOTAS.reserve(ctxt,
                                      project_id=project_id,
                                      **reserve_opts)
    except exception.OverQuota as e:
        overs = e.kwargs['overs']
        usages = e.kwargs['usages']
        quotas = e.kwargs['quotas']

        def _consumed(name):
            return (usages[name]['reserved'] + usages[name]['in_use'])

        for over in overs:
            if 'gigabytes' in over:
                s_size = volume['size']
                d_quota = quotas[over]
                d_consumed = _consumed(over)
                LOG.warning(
                    _LW("Quota exceeded for %(s_pid)s, tried to create "
                        "%(s_size)sG volume - (%(d_consumed)dG of "
                        "%(d_quota)dG already consumed)"), {
                            's_pid': ctxt.project_id,
                            's_size': s_size,
                            'd_consumed': d_consumed,
                            'd_quota': d_quota
                        })
                raise exception.VolumeSizeExceedsAvailableQuota(
                    requested=s_size, quota=d_quota, consumed=d_consumed)
            elif 'volumes' in over:
                LOG.warning(
                    _LW("Quota exceeded for %(s_pid)s, tried to create "
                        "volume (%(d_consumed)d volumes "
                        "already consumed)"), {
                            's_pid': ctxt.project_id,
                            'd_consumed': _consumed(over)
                        })
                raise exception.VolumeLimitExceeded(allowed=quotas[over])
        # No recognized resource matched: re-raise the original OverQuota
        # rather than falling through to `return reservations`, which
        # would raise UnboundLocalError and mask the real error.
        raise
    return reservations
Esempio n. 4
0
def get_volume_type_reservation(ctxt, volume, type_id,
                                reserve_vol_type_only=False):
    """Reserve quota for a volume of the given volume type.

    :param ctxt: request context
    :param volume: volume dict; 'size' and 'project_id' are read
    :param type_id: volume type whose per-type counters are reserved
    :param reserve_vol_type_only: when True, reserve only the per-type
        counters, not the global 'volumes'/'gigabytes' quota
    :returns: the reservations handle returned by QUOTAS.reserve
    :raises VolumeSizeExceedsAvailableQuota: gigabytes quota exceeded
    :raises VolumeLimitExceeded: volume-count quota exceeded
    :raises OverQuota: some other quota resource was exceeded
    """
    from jacket.storage import quota
    QUOTAS = quota.QUOTAS
    # Reserve quotas for the given volume type
    try:
        reserve_opts = {'volumes': 1, 'gigabytes': volume['size']}
        QUOTAS.add_volume_type_opts(ctxt,
                                    reserve_opts,
                                    type_id)
        # If reserve_vol_type_only is True, just reserve volume_type quota,
        # not volume quota.
        if reserve_vol_type_only:
            reserve_opts.pop('volumes')
            reserve_opts.pop('gigabytes')
        # Note that usually the project_id on the volume will be the same as
        # the project_id in the context. But, if they are different then the
        # reservations must be recorded against the project_id that owns the
        # volume.
        project_id = volume['project_id']
        reservations = QUOTAS.reserve(ctxt,
                                      project_id=project_id,
                                      **reserve_opts)
    except exception.OverQuota as e:
        overs = e.kwargs['overs']
        usages = e.kwargs['usages']
        quotas = e.kwargs['quotas']

        def _consumed(name):
            return (usages[name]['reserved'] + usages[name]['in_use'])

        for over in overs:
            if 'gigabytes' in over:
                s_size = volume['size']
                d_quota = quotas[over]
                d_consumed = _consumed(over)
                LOG.warning(
                    _LW("Quota exceeded for %(s_pid)s, tried to create "
                        "%(s_size)sG volume - (%(d_consumed)dG of "
                        "%(d_quota)dG already consumed)"),
                    {'s_pid': ctxt.project_id,
                     's_size': s_size,
                     'd_consumed': d_consumed,
                     'd_quota': d_quota})
                raise exception.VolumeSizeExceedsAvailableQuota(
                    requested=s_size, quota=d_quota, consumed=d_consumed)
            elif 'volumes' in over:
                LOG.warning(
                    _LW("Quota exceeded for %(s_pid)s, tried to create "
                        "volume (%(d_consumed)d volumes "
                        "already consumed)"),
                    {'s_pid': ctxt.project_id,
                     'd_consumed': _consumed(over)})
                raise exception.VolumeLimitExceeded(
                    allowed=quotas[over])
        # Nothing matched in `overs`: re-raise the original OverQuota
        # instead of reaching `return reservations`, where `reservations`
        # is unbound and would raise UnboundLocalError.
        raise
    return reservations
    def host_passes(self, host_state, filter_properties):
        """Accept the backend host only if it runs the hinted instance.

        Honors the 'local_to_instance' scheduler hint when present;
        without the hint, every host passes.
        """
        context = filter_properties['context']
        backend = volume_utils.extract_host(host_state.host, 'host')

        hints = filter_properties.get('scheduler_hints') or {}
        instance_uuid = hints.get(HINT_KEYWORD, None)

        # No 'local_to_instance' hint: nothing to enforce.
        if not instance_uuid:
            return True

        if not uuidutils.is_uuid_like(instance_uuid):
            raise exception.InvalidUUID(uuid=instance_uuid)

        # TODO(adrienverge): Currently it is not recommended to allow instance
        # migrations for hypervisors where this hint will be used. In case of
        # instance migration, a previously locally-created volume will not be
        # automatically migrated. Also in case of instance migration during the
        # volume's scheduling, the result is unpredictable. A future
        # enhancement would be to subscribe to Nova migration events (e.g. via
        # Ceilometer).

        # Serve already-known answers from the local cache.
        if instance_uuid in self._cache:
            return self._cache[instance_uuid] == backend

        if not self._nova_has_extended_server_attributes(context):
            LOG.warning(
                _LW('Hint "%s" dropped because '
                    'ExtendedServerAttributes not active in Nova.'),
                HINT_KEYWORD)
            raise exception.CinderException(
                _('Hint "%s" not supported.') % HINT_KEYWORD)

        server = nova.API().get_server(context,
                                       instance_uuid,
                                       privileged_user=True,
                                       timeout=REQUESTS_TIMEOUT)

        if not hasattr(server, INSTANCE_HOST_PROP):
            LOG.warning(
                _LW('Hint "%s" dropped because Nova did not return '
                    'enough information. Either Nova policy needs to '
                    'be changed or a privileged account for Nova '
                    'should be specified in conf.'), HINT_KEYWORD)
            raise exception.CinderException(
                _('Hint "%s" not supported.') % HINT_KEYWORD)

        self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP)

        # Pass only when the hinted instance lives on this backend's host.
        return self._cache[instance_uuid] == backend
Esempio n. 6
0
    def _check_filter_function(self, stats):
        """Checks if a volume passes a host's filter function.

        :param stats: host capability dict; 'filter_function' holds the
                      evaluator expression (or None when unset)
        :returns: the filter result — truthy when the host passes. A host
                  with no filter function set always passes; a host whose
                  filter function raises is failed.
        """
        if stats['filter_function'] is None:
            LOG.debug("Filter function not set :: passing host")
            return True

        try:
            filter_result = self._run_evaluator(stats['filter_function'],
                                                stats)
        except Exception as ex:
            # Warn the admin for now that there is an error in the
            # filter function.
            LOG.warning(
                _LW("Error in filtering function "
                    "'%(function)s' : '%(error)s' :: failing host"), {
                        'function': stats['filter_function'],
                        'error': ex,
                    })
            return False

        return filter_result
Esempio n. 7
0
def root_app_factory(loader, global_conf, **local_conf):
    """Paste factory for the root URL map; warns when the v1 API is on."""
    if CONF.enable_v1_api:
        msg = _LW('The v1 api is deprecated and is not under active '
                  'development. You should set enable_v1_api=false '
                  'and enable_v3_api=true in your storage.conf file.')
        LOG.warning(msg)
    return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)
Esempio n. 8
0
    def _create_from_image_cache(self, context, internal_context, volume_ref,
                                 image_id, image_meta):
        """Attempt to create the volume using the image cache.

        Best case this will simply clone the existing volume in the cache.
        Worst case the image is out of date and will be evicted. In that case
        a clone will not be created and the image must be downloaded again.

        :returns: (model_update, cloned) — cloned is True only when a cache
                  entry was found and the clone succeeded
        """
        LOG.debug('Attempting to retrieve cache entry for image = '
                  '%(image_id)s on host %(host)s.',
                  {'image_id': image_id, 'host': volume_ref['host']})
        try:
            entry = self.image_volume_cache.get_entry(internal_context,
                                                      volume_ref,
                                                      image_id,
                                                      image_meta)
            if not entry:
                return None, False
            LOG.debug('Creating from source image-volume %(volume_id)s',
                      {'volume_id': entry['volume_id']})
            model_update = self._create_from_source_volume(
                context, volume_ref, entry['volume_id'])
            return model_update, True
        except exception.CinderException as e:
            # Fall back to the normal (non-cached) creation path.
            LOG.warning(_LW('Failed to create volume from image-volume cache, '
                            'will fall back to default behavior. Error: '
                            '%(exception)s'), {'exception': e})
        return None, False
Esempio n. 9
0
def associate_qos_with_type(context, specs_id, type_id):
    """Associate qos_specs with volume type.

    Associate target qos specs with specific volume type. Would raise
    following exceptions:
        VolumeTypeNotFound  - if volume type doesn't exist;
        QoSSpecsNotFound  - if qos specs doesn't exist;
        InvalidVolumeType  - if volume type is already associated with
                             qos specs other than given one.
        QoSSpecsAssociateFailed -  if there was general DB error
    :param specs_id: qos specs ID to associate with
    :param type_id: volume type ID to associate with
    """
    try:
        # Raises QoSSpecsNotFound when the specs do not exist.
        get_qos_specs(context, specs_id)
        res = volume_types.get_volume_type_qos_specs(type_id)
        existing = res.get('qos_specs', None)
        if not existing:
            db.qos_specs_associate(context, specs_id, type_id)
        elif existing.get('id') != specs_id:
            # Already bound to a *different* qos specs — refuse.
            msg = (_("Type %(type_id)s is already associated with another "
                     "qos specs: %(qos_specs_id)s") %
                   {'type_id': type_id,
                    'qos_specs_id': existing['id']})
            raise exception.InvalidVolumeType(reason=msg)
    except db_exc.DBError:
        LOG.exception(_LE('DB error:'))
        LOG.warning(
            _LW('Failed to associate qos specs '
                '%(id)s with type: %(vol_type_id)s'),
            dict(id=specs_id, vol_type_id=type_id))
        raise exception.QoSSpecsAssociateFailed(specs_id=specs_id,
                                                type_id=type_id)
Esempio n. 10
0
def associate_qos_with_type(context, specs_id, type_id):
    """Associate qos_specs with volume type.

    Associate target qos specs with specific volume type. Would raise
    following exceptions:
        VolumeTypeNotFound  - if volume type doesn't exist;
        QoSSpecsNotFound  - if qos specs doesn't exist;
        InvalidVolumeType  - if volume type is already associated with
                             qos specs other than given one.
        QoSSpecsAssociateFailed -  if there was general DB error
    :param specs_id: qos specs ID to associate with
    :param type_id: volume type ID to associate with
    """
    try:
        get_qos_specs(context, specs_id)
        res = volume_types.get_volume_type_qos_specs(type_id)
        if res.get('qos_specs', None):
            # Type already has qos specs; only an association with a
            # different specs id is an error.
            associated_id = res['qos_specs'].get('id')
            if associated_id != specs_id:
                msg = (_("Type %(type_id)s is already associated with another "
                         "qos specs: %(qos_specs_id)s") %
                       {'type_id': type_id,
                        'qos_specs_id': associated_id})
                raise exception.InvalidVolumeType(reason=msg)
        else:
            db.qos_specs_associate(context, specs_id, type_id)
    except db_exc.DBError:
        LOG.exception(_LE('DB error:'))
        LOG.warning(_LW('Failed to associate qos specs '
                        '%(id)s with type: %(vol_type_id)s'),
                    dict(id=specs_id, vol_type_id=type_id))
        raise exception.QoSSpecsAssociateFailed(specs_id=specs_id,
                                                type_id=type_id)
Esempio n. 11
0
    def delete(self, backup):
        """Delete the given backup."""
        container = backup['container']
        prefix = backup['service_metadata']
        LOG.debug('delete started, backup: %(id)s, container: %(cont)s, '
                  'prefix: %(pre)s.',
                  {'id': backup['id'],
                   'cont': container,
                   'pre': prefix})

        if container is not None and prefix is not None:
            try:
                names = self._generate_object_names(backup)
            except Exception:
                # Best effort: if we cannot list objects, still remove
                # whatever bookkeeping remains for the backup itself.
                LOG.warning(_LW('Error while listing storage, continuing'
                                ' with delete.'))
                names = []

            for name in names:
                self.delete_object(container, name)
                LOG.debug('deleted object: %(object_name)s'
                          ' in container: %(container)s.',
                          {'object_name': name,
                           'container': container})
                # Deleting a backup's storage can take some time.
                # Yield so other threads can run
                eventlet.sleep(0)

        LOG.debug('delete %s finished.', backup['id'])
Esempio n. 12
0
    def delete(self, backup):
        """Delete the given backup from Ceph object store."""
        backup_id = backup['id']
        volume_id = backup['volume_id']
        LOG.debug('Delete started for backup=%s', backup_id)

        base_image_missing = False
        try:
            self._try_delete_base_image(backup_id, volume_id)
        except self.rbd.ImageNotFound:
            # The backing image is already gone; still clean up metadata.
            LOG.warning(
                _LW("RBD image for backup %(backup)s of volume %(volume)s "
                    "not found. Deleting backup metadata."),
                {'backup': backup_id, 'volume': volume_id})
            base_image_missing = True

        # Remove the volume metadata backup regardless of image outcome.
        with rbd_driver.RADOSClient(self) as client:
            VolumeMetadataBackup(client, backup_id).remove_if_exists()

        if base_image_missing:
            LOG.info(_LI("Delete of backup '%(backup)s' "
                         "for volume '%(volume)s' "
                         "finished with warning."),
                     {'backup': backup_id, 'volume': volume_id})
        else:
            LOG.debug("Delete of backup '%(backup)s' for volume "
                      "'%(volume)s' finished.",
                      {'backup': backup_id, 'volume': volume_id})
Esempio n. 13
0
 def _generate_hex_key(self, **kwargs):
     """Return the configured fixed key, warning and raising when unset."""
     fixed_key = CONF.storage_keymgr.fixed_key
     if fixed_key is None:
         LOG.warning(
             _LW('config option storage_keymgr.fixed_key has not been defined:'
                 ' some operations may fail unexpectedly'))
         raise ValueError(_('storage_keymgr.fixed_key not defined'))
     return fixed_key
Esempio n. 14
0
 def _generate_hex_key(self, **kwargs):
     """Return the configured fixed key; warn and raise if not configured."""
     if CONF.storage_keymgr.fixed_key is not None:
         return CONF.storage_keymgr.fixed_key
     LOG.warning(
         _LW('config option storage_keymgr.fixed_key has not been defined:'
             ' some operations may fail unexpectedly'))
     raise ValueError(_('storage_keymgr.fixed_key not defined'))
Esempio n. 15
0
    def _do_iscsi_discovery(self, volume):
        """Discover the iSCSI target for *volume* via iscsiadm sendtargets.

        :param volume: volume dict; 'id' and 'host' are read
        :returns: the matching target line from iscsiadm output, or None
                  when discovery fails or nothing matches
        """
        # TODO(justinsb): Deprecate discovery and use stored info
        # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
        LOG.warning(_LW("ISCSI provider_location not stored, using discovery"))

        volume_id = volume['id']

        try:
            # NOTE(griff) We're doing the split straight away which should be
            # safe since using '@' in hostname is considered invalid

            (out, _err) = utils.execute('iscsiadm', '-m', 'discovery',
                                        '-t', 'sendtargets', '-p',
                                        volume['host'].split('@')[0],
                                        run_as_root=True)
        except processutils.ProcessExecutionError as ex:
            # Pass format args to the logger (lazy interpolation) instead
            # of eagerly %-formatting the message strings.
            LOG.error(_LE("ISCSI discovery attempt failed for:%s"),
                      volume['host'].split('@')[0])
            LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr)
            return None

        for target in out.splitlines():
            if (self.configuration.safe_get('iscsi_ip_address') in target
                    and volume_id in target):
                return target
        return None
Esempio n. 16
0
    def _recreate_backing_lun(self, iqn, tid, name, path):
        """Best-effort recreation of a target's backing logical unit."""
        LOG.warning(_LW('Attempting recreate of backing lun...'))

        # Since we think the most common case of this is a dev busy
        # (create vol from snapshot) we're going to add a sleep here
        # this will hopefully give things enough time to stabilize
        # how long should we wait??  I have no idea, let's go big
        # and error on the side of caution
        time.sleep(10)

        stdout, stderr = None, None
        try:
            stdout, stderr = utils.execute('tgtadm', '--lld', 'iscsi',
                                           '--op', 'new', '--mode',
                                           'logicalunit', '--tid',
                                           tid, '--lun', '1', '-b',
                                           path, run_as_root=True)
        except putils.ProcessExecutionError as e:
            # Recovery failed; log it but let the caller decide what next.
            LOG.error(_LE("Failed recovery attempt to create "
                          "iscsi backing lun for Volume "
                          "ID:%(vol_id)s: %(e)s"),
                      {'vol_id': name, 'e': e})
        finally:
            LOG.debug('StdOut from recreate backing lun: %s', stdout)
            LOG.debug('StdErr from recreate backing lun: %s', stderr)
Esempio n. 17
0
def get_qemu_img_version():
    """Return the installed qemu-img version, or None when not installed."""
    help_text = utils.execute('qemu-img', '--help', check_exit_code=False)[0]
    match = re.match(r"qemu-img version ([0-9\.]*)", help_text)
    if match is None:
        LOG.warning(_LW("qemu-img is not installed."))
        return None
    return _get_version_from_string(match.groups()[0])
Esempio n. 18
0
    def _restore_configuration(self):
        """Reload the persisted iSCSI LIO configuration.

        A persistence failure is only logged, not raised, since the
        target itself has already been created successfully.
        """
        try:
            self._execute('storage-rtstool', 'restore', run_as_root=True)
        except putils.ProcessExecutionError:
            LOG.warning(_LW("Failed to restore iscsi LIO configuration."))
Esempio n. 19
0
    def _restore_configuration(self):
        """Restore LIO target state from the saved configuration."""
        restore_cmd = ('storage-rtstool', 'restore')
        try:
            self._execute(*restore_cmd, run_as_root=True)
        # On persistence failure we don't raise an exception, as target has
        # been successfully created.
        except putils.ProcessExecutionError:
            LOG.warning(_LW("Failed to restore iscsi LIO configuration."))
Esempio n. 20
0
    def delete_key(self, ctxt, key_id, **kwargs):
        """Pretend to delete the single managed key.

        :raises NotAuthorized: when no context is supplied
        :raises KeyManagerError: when key_id is not the managed key
        """
        if ctxt is None:
            raise exception.NotAuthorized()

        if key_id == self.key_id:
            # The fixed key is never actually removed.
            LOG.warning(_LW("Not deleting key %s"), key_id)
            return

        raise exception.KeyManagerError(
            reason="cannot delete non-existent key")
Esempio n. 21
0
    def delete_key(self, ctxt, key_id, **kwargs):
        """No-op delete for the fixed key manager.

        :raises NotAuthorized: when no context is supplied
        :raises KeyManagerError: when key_id is not the managed key
        """
        if ctxt is None:
            raise exception.NotAuthorized()
        if key_id != self.key_id:
            raise exception.KeyManagerError(
                reason="cannot delete non-existent key")
        # Intentionally left in place: the fixed key cannot be deleted.
        LOG.warning(_LW("Not deleting key %s"), key_id)
Esempio n. 22
0
def disassociate_all(context, specs_id):
    """Disassociate qos_specs from all entities.

    :raises QoSSpecsDisassociateFailed: on any database error
    """
    try:
        # Raises if the qos specs do not exist.
        get_qos_specs(context, specs_id)
        db.qos_specs_disassociate_all(context, specs_id)
    except db_exc.DBError:
        LOG.exception(_LE('DB error:'))
        LOG.warning(_LW('Failed to disassociate qos specs %s.'), specs_id)
        raise exception.QoSSpecsDisassociateFailed(
            specs_id=specs_id, type_id=None)
Esempio n. 23
0
    def _persist_configuration(self, vol_id):
        """Persist the current LIO target configuration to disk.

        A persistence failure is only logged, not raised, because the
        target itself has already been created successfully.
        """
        try:
            self._execute('storage-rtstool', 'save', run_as_root=True)
        except putils.ProcessExecutionError:
            LOG.warning(
                _LW("Failed to save iscsi LIO configuration when "
                    "modifying volume id: %(vol_id)s."), {'vol_id': vol_id})
Esempio n. 24
0
def disassociate_all(context, specs_id):
    """Remove the association between *specs_id* and every volume type."""
    try:
        get_qos_specs(context, specs_id)  # existence check; raises if absent
        db.qos_specs_disassociate_all(context, specs_id)
    except db_exc.DBError:
        LOG.exception(_LE('DB error:'))
        LOG.warning(_LW('Failed to disassociate qos specs %s.'), specs_id)
        raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
                                                   type_id=None)
Esempio n. 25
0
    def _persist_configuration(self, vol_id):
        """Save LIO state so the target survives a service restart."""
        try:
            self._execute('storage-rtstool', 'save', run_as_root=True)
        # On persistence failure we don't raise an exception, as target has
        # been successfully created.
        except putils.ProcessExecutionError:
            msg = _LW("Failed to save iscsi LIO configuration when "
                      "modifying volume id: %(vol_id)s.")
            LOG.warning(msg, {'vol_id': vol_id})
Esempio n. 26
0
    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        """Remove an IET iSCSI target and its stanza in the IET conf file.

        Deletes the logical unit, force-closes any open session, deletes
        the target, then strips the volume's entry from the ietd
        configuration file so the target is not recreated on restart.

        :param tid: target id
        :param lun: logical unit number to delete
        :param vol_id: volume id (used for logging and error reporting)
        :param vol_name: volume name; also the marker searched for in the
                         conf file
        :raises ISCSITargetRemoveFailed: when target deletion or the conf
                                         file update fails
        """
        LOG.info(_LI("Removing iscsi_target for volume: %s"), vol_id)

        try:
            self._delete_logicalunit(tid, lun)
            session_info = self._find_sid_cid_for_target(tid, vol_name, vol_id)
            if session_info:
                # An initiator still has a session open; force-close it
                # before the target itself can be deleted.
                sid, cid = session_info
                self._force_delete_target(tid, sid, cid)

            self._delete_target(tid)
        except putils.ProcessExecutionError:
            LOG.exception(
                _LE("Failed to remove iscsi target for volume "
                    "id:%s"), vol_id)
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

        vol_uuid_file = vol_name
        conf_file = self.iet_conf
        if os.path.exists(conf_file):
            try:
                with utils.temporary_chown(conf_file):
                    with open(conf_file, 'r+') as iet_conf_text:
                        full_txt = iet_conf_text.readlines()
                        new_iet_conf_txt = []
                        count = 0
                        for line in full_txt:
                            # The volume's stanza spans the matching line
                            # plus the two lines after it; `count` skips
                            # those trailing lines once a match is found.
                            if count > 0:
                                count -= 1
                                continue
                            elif vol_uuid_file in line:
                                count = 2
                                continue
                            else:
                                new_iet_conf_txt.append(line)

                        # Rewrite the file in place with the stanza removed.
                        iet_conf_text.seek(0)
                        iet_conf_text.truncate(0)
                        iet_conf_text.writelines(new_iet_conf_txt)
            except Exception:
                LOG.exception(
                    _LE("Failed to update %(conf)s for volume id "
                        "%(vol_id)s after removing iscsi target"), {
                            'conf': conf_file,
                            'vol_id': vol_id
                        })
                raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        else:
            # Missing conf file is only warned about: the target itself
            # has already been removed successfully.
            LOG.warning(
                _LW("Failed to update %(conf)s for volume id "
                    "%(vol_id)s after removing iscsi target. "
                    "%(conf)s does not exist."), {
                        'conf': conf_file,
                        'vol_id': vol_id
                    })
Esempio n. 27
0
 def _limit_bps(self, rw, dev, bps):
     """Set a blkio throttle (bytes/sec) for *dev* in this cgroup."""
     rule = 'blkio.throttle.%s_bps_device=%s %d' % (rw, dev, bps)
     try:
         utils.execute('cgset', '-r', rule, self.cgroup, run_as_root=True)
     except processutils.ProcessExecutionError:
         # Throttling is best-effort; failure to set it is not fatal.
         LOG.warning(
             _LW('Failed to setup blkio cgroup to throttle the '
                 'device \'%(device)s\'.'), {'device': dev})
Esempio n. 28
0
def disassociate_qos_specs(context, specs_id, type_id):
    """Disassociate qos_specs from volume type.

    :raises QoSSpecsDisassociateFailed: on any database error
    """
    try:
        # Raises if the qos specs do not exist.
        get_qos_specs(context, specs_id)
        db.qos_specs_disassociate(context, specs_id, type_id)
    except db_exc.DBError:
        LOG.exception(_LE('DB error:'))
        LOG.warning(_LW('Failed to disassociate qos specs '
                        '%(id)s with type: %(vol_type_id)s'),
                    {'id': specs_id, 'vol_type_id': type_id})
        raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
                                                   type_id=type_id)
Esempio n. 29
0
    def _load_extensions(self):
        """Load extensions specified on the command line."""
        # Iterate a copy so failures cannot affect the source list.
        for ext_factory in list(self.cls_list):
            try:
                self.load_extension(ext_factory)
            except Exception as exc:
                # One broken extension must not block the others.
                LOG.warning(_LW('Failed to load extension %(ext_factory)s: '
                                '%(exc)s'),
                            {'ext_factory': ext_factory, 'exc': exc})
Esempio n. 30
0
 def _get_volume_type_id(self, volume_type, source_volume, snapshot):
     if not volume_type and source_volume:
         return source_volume['volume_type_id']
     elif snapshot is not None:
         if volume_type:
             current_volume_type_id = volume_type.get('id')
             if current_volume_type_id != snapshot['volume_type_id']:
                 msg = _LW("Volume type will be changed to "
                           "be the same as the source volume.")
                 LOG.warning(msg)
         return snapshot['volume_type_id']
     else:
         return volume_type.get('id')
Esempio n. 31
0
def disassociate_qos_specs(context, specs_id, type_id):
    """Break the association between *specs_id* and *type_id*."""
    try:
        get_qos_specs(context, specs_id)  # existence check; raises if absent
        db.qos_specs_disassociate(context, specs_id, type_id)
    except db_exc.DBError:
        LOG.exception(_LE('DB error:'))
        LOG.warning(_LW('Failed to disassociate qos specs '
                        '%(id)s with type: %(vol_type_id)s'),
                    dict(id=specs_id, vol_type_id=type_id))
        raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
                                                   type_id=type_id)
Esempio n. 32
0
def process_reserve_over_quota(context, overs, usages, quotas, size):
    """Raise the appropriate quota exception for an over-quota reservation.

    :param context: request context (project id is used in log messages)
    :param overs: list of quota resource names that were exceeded
    :param usages: per-resource usage dicts with 'reserved' and 'in_use'
    :param quotas: per-resource quota limits
    :param size: requested snapshot size in GiB
    :raises VolumeSizeExceedsAvailableQuota: when a gigabytes quota is hit
    :raises SnapshotLimitExceeded: when a snapshot-count quota is hit
    """
    def _consumed(name):
        return (usages[name]['reserved'] + usages[name]['in_use'])

    for over in overs:
        if 'gigabytes' in over:
            msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                      "%(s_size)sG snapshot (%(d_consumed)dG of "
                      "%(d_quota)dG already consumed).")
            LOG.warning(msg, {'s_pid': context.project_id,
                              's_size': size,
                              'd_consumed': _consumed(over),
                              'd_quota': quotas[over]})
            # Use 'over' (which may be a per-volume-type key such as
            # 'gigabytes_<type>') instead of the literal 'gigabytes', so
            # the exception reports the same numbers that were logged.
            raise exception.VolumeSizeExceedsAvailableQuota(
                requested=size,
                consumed=_consumed(over),
                quota=quotas[over])
        elif 'snapshots' in over:
            msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                      "snapshot (%(d_consumed)d snapshots "
                      "already consumed).")
            LOG.warning(msg, {'s_pid': context.project_id,
                              'd_consumed': _consumed(over)})
            raise exception.SnapshotLimitExceeded(allowed=quotas[over])
Esempio n. 33
0
    def _load_extensions(self):
        """Load extensions specified on the command line."""

        extensions = list(self.cls_list)

        for ext_factory in extensions:
            try:
                self.load_extension(ext_factory)
            except Exception as exc:
                LOG.warning(
                    _LW('Failed to load extension %(ext_factory)s: '
                        '%(exc)s'), {
                            'ext_factory': ext_factory,
                            'exc': exc
                        })
Esempio n. 34
0
    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        """Tear down the IET iSCSI target for a volume and scrub its config.

        Deletes the logical unit, force-closes any open initiator session,
        removes the target, and finally strips the volume's stanza from the
        IET config file so the target does not reappear on service restart.

        :raises ISCSITargetRemoveFailed: if target removal or the config
            file rewrite fails
        """
        LOG.info(_LI("Removing iscsi_target for volume: %s"), vol_id)

        try:
            self._delete_logicalunit(tid, lun)
            # If an initiator still has a session open, force-close it
            # before deleting the target itself.
            session_info = self._find_sid_cid_for_target(tid, vol_name, vol_id)
            if session_info:
                sid, cid = session_info
                self._force_delete_target(tid, sid, cid)

            self._delete_target(tid)
        except putils.ProcessExecutionError:
            LOG.exception(_LE("Failed to remove iscsi target for volume "
                              "id:%s"), vol_id)
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

        vol_uuid_file = vol_name
        conf_file = self.iet_conf
        if os.path.exists(conf_file):
            try:
                with utils.temporary_chown(conf_file):
                    with open(conf_file, 'r+') as iet_conf_text:
                        full_txt = iet_conf_text.readlines()
                        new_iet_conf_txt = []
                        # Drop the volume's stanza: the line containing the
                        # volume name plus the two lines that follow it
                        # (assumes a fixed three-line stanza in the IET
                        # conf -- TODO confirm the stanza layout).
                        count = 0
                        for line in full_txt:
                            if count > 0:
                                count -= 1
                                continue
                            elif vol_uuid_file in line:
                                count = 2
                                continue
                            else:
                                new_iet_conf_txt.append(line)

                        # Rewrite the file in place with the stanza removed.
                        iet_conf_text.seek(0)
                        iet_conf_text.truncate(0)
                        iet_conf_text.writelines(new_iet_conf_txt)
            except Exception:
                LOG.exception(_LE("Failed to update %(conf)s for volume id "
                                  "%(vol_id)s after removing iscsi target"),
                              {'conf': conf_file, 'vol_id': vol_id})
                raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        else:
            LOG.warning(_LW("Failed to update %(conf)s for volume id "
                            "%(vol_id)s after removing iscsi target. "
                            "%(conf)s does not exist."),
                        {'conf': conf_file, 'vol_id': vol_id})
Esempio n. 35
0
 def _reconnect(self):
     """Reconnect with jittered exponential backoff increase."""
     LOG.info(_LI('Reconnecting to coordination backend.'))
     conf = cfg.CONF.coordination
     cap = conf.max_reconnect_backoff
     base = conf.initial_reconnect_backoff
     backoff = base
     attempt = 0
     while True:
         attempt += 1
         try:
             self._start()
             break
         except coordination.ToozError:
             # Jittered exponential growth, capped at the configured max.
             backoff = min(cap, random.uniform(base, backoff * 3))
             LOG.warning(_LW('Reconnect attempt %(attempt)s failed. '
                             'Next try in %(backoff).2fs.'),
                         {'attempt': attempt, 'backoff': backoff})
             self._dead.wait(backoff)
     LOG.info(_LI('Reconnected to coordination backend.'))
Esempio n. 36
0
 def _reconnect(self):
     """Reconnect with jittered exponential backoff increase."""
     LOG.info(_LI('Reconnecting to coordination backend.'))
     # Retries continue until _start() succeeds; the delay grows
     # geometrically with jitter, capped at the configured maximum.
     cap = cfg.CONF.coordination.max_reconnect_backoff
     backoff = base = cfg.CONF.coordination.initial_reconnect_backoff
     for attempt in itertools.count(1):
         try:
             self._start()
             break
         except coordination.ToozError:
             # Next delay drawn uniformly from [base, backoff * 3],
             # never exceeding the cap (jittered exponential backoff).
             backoff = min(cap, random.uniform(base, backoff * 3))
             msg = _LW('Reconnect attempt %(attempt)s failed. '
                       'Next try in %(backoff).2fs.')
             LOG.warning(msg, {'attempt': attempt, 'backoff': backoff})
             # self._dead is presumably an Event, so wait() doubles as an
             # interruptible sleep -- TODO confirm against the class.
             self._dead.wait(backoff)
     LOG.info(_LI('Reconnected to coordination backend.'))
Esempio n. 37
0
    def _extract_availability_zone(self, availability_zone):
        if availability_zone is None:
            if CONF.default_availability_zone:
                availability_zone = CONF.default_availability_zone
            else:
                # For backwards compatibility use the storage_availability_zone
                availability_zone = CONF.storage_availability_zone

        valid = self._valid_availability_zone(availability_zone)
        if not valid:
            msg = _LW("Availability zone '%s' is invalid") % (
                availability_zone)
            LOG.warning(msg)
            raise exception.InvalidInput(reason=msg)

        return availability_zone
Esempio n. 38
0
    def decorator(self, *args, **kwargs):
        """Wrap terminate_connection, removing the FC zone when applicable.

        After the driver terminates the connection, if the returned info
        describes a fibre_channel attach with an initiator_target_map,
        ask the zone manager to delete the corresponding zone.
        """
        conn_info = terminate_connection(self, *args, **kwargs)
        if not conn_info:
            LOG.warning(_LW("Driver didn't return connection info from "
                            "terminate_connection call."))
            return None

        vol_type = conn_info.get('driver_volume_type', None)
        if vol_type == 'fibre_channel':
            # Only FC attaches that carry an initiator/target map involve
            # SAN zoning.
            if 'initiator_target_map' in conn_info['data']:
                zm = create_zone_manager()
                if zm:
                    LOG.debug("RemoveFCZone connection info: %(conninfo)s.",
                              {'conninfo': conn_info})
                    zm.delete_connection(conn_info)

        return conn_info
Esempio n. 39
0
    def decorator(self, *args, **kwargs):
        """Wrap initialize_connection, adding an FC zone when applicable.

        After the driver initializes the connection, if the returned info
        describes a fibre_channel attach with an initiator_target_map,
        ask the zone manager to create the corresponding zone.
        """
        conn_info = initialize_connection(self, *args, **kwargs)
        if not conn_info:
            LOG.warning(_LW("Driver didn't return connection info, "
                            "can't add zone."))
            return None

        vol_type = conn_info.get('driver_volume_type', None)
        if vol_type == 'fibre_channel':
            # Only FC attaches that carry an initiator/target map involve
            # SAN zoning.
            if 'initiator_target_map' in conn_info['data']:
                zm = create_zone_manager()
                if zm:
                    LOG.debug("AddFCZone connection info: %(conninfo)s.",
                              {'conninfo': conn_info})
                    zm.add_connection(conn_info)

        return conn_info
Esempio n. 40
0
    def decorator(self, *args, **kwargs):
        """Wrap terminate_connection and remove the FC zone if one exists."""
        conn_info = terminate_connection(self, *args, **kwargs)
        if not conn_info:
            LOG.warning(
                _LW("Driver didn't return connection info from "
                    "terminate_connection call."))
            return None

        is_fc = conn_info.get('driver_volume_type', None) == 'fibre_channel'
        if is_fc and 'initiator_target_map' in conn_info['data']:
            zm = create_zone_manager()
            if zm:
                LOG.debug("RemoveFCZone connection info: %(conninfo)s.",
                          {'conninfo': conn_info})
                zm.delete_connection(conn_info)

        return conn_info
Esempio n. 41
0
    def decorator(self, *args, **kwargs):
        """Wrap initialize_connection and add an FC zone if applicable."""
        conn_info = initialize_connection(self, *args, **kwargs)
        if not conn_info:
            LOG.warning(
                _LW("Driver didn't return connection info, "
                    "can't add zone."))
            return None

        if conn_info.get('driver_volume_type', None) == 'fibre_channel':
            data = conn_info['data']
            if 'initiator_target_map' in data:
                zm = create_zone_manager()
                if zm:
                    LOG.debug("AddFCZone connection info: %(conninfo)s.",
                              {'conninfo': conn_info})
                    zm.add_connection(conn_info)

        return conn_info
Esempio n. 42
0
def setup_tracing(trace_flags):
    """Set global variables for each trace flag.

    Sets variables TRACE_METHOD and TRACE_API, which represent
    whether to log method and api traces.

    :param trace_flags: a list of strings
    """
    global TRACE_METHOD
    global TRACE_API
    try:
        flags = [flag.strip() for flag in trace_flags]
    except TypeError:  # Handle when trace_flags is None or a test mock
        flags = []
    # Warn about (but otherwise ignore) any unrecognized flags.
    for invalid_flag in set(flags) - VALID_TRACE_FLAGS:
        LOG.warning(_LW('Invalid trace flag: %s'), invalid_flag)
    TRACE_METHOD = 'method' in flags
    TRACE_API = 'api' in flags
Esempio n. 43
0
 def _schedule(self, context, request_spec, filter_properties=None):
     weighed_hosts = self._get_weighted_candidates(context, request_spec,
                                                   filter_properties)
     # When we get the weighed_hosts, we clear those hosts whose backend
     # is not same as consistencygroup's backend.
     CG_backend = request_spec.get('CG_backend')
     if weighed_hosts and CG_backend:
         # Get host name including host@backend#pool info from
         # weighed_hosts.
         for host in weighed_hosts[::-1]:
             backend = utils.extract_host(host.obj.host)
             if backend != CG_backend:
                 weighed_hosts.remove(host)
     if not weighed_hosts:
         LOG.warning(_LW('No weighed hosts found for volume '
                         'with properties: %s'),
                     filter_properties['request_spec']['volume_type'])
         return None
     return self._choose_top_host(weighed_hosts, request_spec)
Esempio n. 44
0
    def _attach_volume(self, context, volume, properties):
        """Attach a volume."""

        try:
            conn = self.jacket_rpcapi.initialize_connection(
                context, volume, properties)
            return self._connect_device(conn)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self.jacket_rpcapi.terminate_connection(context,
                                                            volume,
                                                            properties,
                                                            force=True)
                except Exception:
                    LOG.warning(
                        _LW("Failed to terminate the connection "
                            "of volume %(volume_id)s, but it is "
                            "acceptable."), {'volume_id', volume.id})
Esempio n. 45
0
    def _attach_volume(self, context, volume, properties):
        """Attach a volume."""

        try:
            conn = self.jacket_rpcapi.initialize_connection(context,
                                                            volume,
                                                            properties)
            return self._connect_device(conn)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self.jacket_rpcapi.terminate_connection(context, volume,
                                                            properties,
                                                            force=True)
                except Exception:
                    LOG.warning(_LW("Failed to terminate the connection "
                                    "of volume %(volume_id)s, but it is "
                                    "acceptable."),
                                {'volume_id', volume.id})
Esempio n. 46
0
    def _get_image_volume_type(self, context, image_id):
        """Get cinder_img_volume_type property from the image metadata."""

        # Check image existence
        if image_id is None:
            return None

        image_meta = self.image_service.show(context, image_id)

        # check whether image is active
        if image_meta['status'] != 'active':
            msg = (_('Image %(image_id)s is not active.') %
                   {'image_id': image_id})
            raise exception.InvalidInput(reason=msg)

        # Retrieve 'cinder_img_volume_type' property from glance image
        # metadata.
        image_volume_type = "cinder_img_volume_type"
        properties = image_meta.get('properties')
        if properties:
            try:
                img_vol_type = properties.get(image_volume_type)
                if img_vol_type is None:
                    return None
                volume_type = volume_types.get_volume_type_by_name(
                    context,
                    img_vol_type)
            except exception.VolumeTypeNotFoundByName:
                LOG.warning(_LW("Failed to retrieve volume_type from image "
                                "metadata. '%(img_vol_type)s' doesn't match "
                                "any volume types."),
                            {'img_vol_type': img_vol_type})
                return None

            LOG.debug("Retrieved volume_type from glance image metadata. "
                      "image_id: %(image_id)s, "
                      "image property: %(image_volume_type)s, "
                      "volume_type: %(volume_type)s." %
                      {'image_id': image_id,
                       'image_volume_type': image_volume_type,
                       'volume_type': volume_type})
            return volume_type
Esempio n. 47
0
    def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id):
        """Restores the volume_type_id for encryption if needed.

        Only allow restoration of an encrypted backup if the destination
        volume has the same volume type as the source volume. Otherwise
        encryption will not work. If volume types are already the same,
        no action is needed.

        :param volume_id: id of the destination (restore target) volume
        :param src_volume_type_id: volume type id of the backup's source
        :raises EncryptedBackupOperationFailed: if the types cannot be
            reconciled
        """
        dest_vol = self.db.volume_get(self.context, volume_id)
        if dest_vol['volume_type_id'] != src_volume_type_id:
            LOG.debug("Volume type id's do not match.")
            # If the volume types do not match, and the destination volume
            # does not have a volume type, force the destination volume
            # to have the encrypted volume type, provided it still exists.
            if dest_vol['volume_type_id'] is None:
                try:
                    # Verify the source type still exists before adopting it.
                    self.db.volume_type_get(
                        self.context, src_volume_type_id)
                except exception.VolumeTypeNotFound:
                    LOG.debug("Volume type of source volume has been "
                              "deleted. Encrypted backup restore has "
                              "failed.")
                    msg = _("The source volume type '%s' is not "
                            "available.") % (src_volume_type_id)
                    raise exception.EncryptedBackupOperationFailed(msg)
                # Update dest volume with src volume's volume_type_id.
                LOG.debug("The volume type of the destination volume "
                          "will become the volume type of the source "
                          "volume.")
                self.db.volume_update(self.context, volume_id,
                                      {'volume_type_id': src_volume_type_id})
            else:
                # Volume type id's do not match, and destination volume
                # has a volume type. Throw exception.
                LOG.warning(_LW("Destination volume type is different from "
                                "source volume type for an encrypted volume. "
                                "Encrypted backup restore has failed."))
                msg = (_("The source volume type '%(src)s' is different "
                         "than the destination volume type '%(dest)s'.") %
                       {'src': src_volume_type_id,
                        'dest': dest_vol['volume_type_id']})
                raise exception.EncryptedBackupOperationFailed(msg)
Esempio n. 48
0
 def _schedule(self, context, request_spec, filter_properties=None):
     """Select the top weighed host for the request.

     When the request targets a consistency group, candidates on any
     other backend are discarded so the volume lands with the group.

     :returns: the chosen host, or None when no candidate survives
     """
     weighed_hosts = self._get_weighted_candidates(context, request_spec,
                                                   filter_properties)
     # When we get the weighed_hosts, we clear those hosts whose backend
     # is not same as consistencygroup's backend.
     CG_backend = request_spec.get('CG_backend')
     if weighed_hosts and CG_backend:
         # Get host name including host@backend#pool info from
         # weighed_hosts.
         # Iterate a reversed copy so removing from the original list
         # does not skip elements.
         for host in weighed_hosts[::-1]:
             backend = utils.extract_host(host.obj.host)
             if backend != CG_backend:
                 weighed_hosts.remove(host)
     if not weighed_hosts:
         LOG.warning(
             _LW('No weighed hosts found for volume '
                 'with properties: %s'),
             filter_properties['request_spec']['volume_type'])
         return None
     return self._choose_top_host(weighed_hosts, request_spec)
Esempio n. 49
0
    def _setup_extensions(self, ext_mgr):
        """Register controller extensions against their target resources."""
        for extension in ext_mgr.get_controller_extensions():
            collection = extension.collection
            log_args = {'ext_name': extension.extension.name,
                        'collection': collection}

            if collection not in self.resources:
                LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
                                'resource %(collection)s: No such resource'),
                            log_args)
                continue

            LOG.debug('Extension %(ext_name)s extending resource: '
                      '%(collection)s', log_args)

            resource = self.resources[collection]
            resource.register_actions(extension.controller)
            resource.register_extensions(extension.controller)
Esempio n. 50
0
    def _update_host_state_map(self, context):
        """Refresh the scheduler's host -> HostState cache.

        Queries all enabled volume services, skips hosts that are down or
        have not reported capabilities yet, and prunes cache entries for
        hosts that are no longer active.
        """
        # Get resource usage across the available volume nodes:
        topic = CONF.volume_topic
        volume_services = storage.ServiceList.get_all_by_topic(context,
                                                               topic,
                                                               disabled=False)
        active_hosts = set()
        no_capabilities_hosts = set()
        for service in volume_services.objects:
            host = service.host
            if not utils.service_is_up(service):
                LOG.warning(_LW("volume service is down. (host: %s)"), host)
                continue
            capabilities = self.service_states.get(host, None)
            if capabilities is None:
                # Service is up but has not reported capabilities yet;
                # track it separately instead of caching a host state.
                no_capabilities_hosts.add(host)
                continue

            host_state = self.host_state_map.get(host)
            if not host_state:
                host_state = self.host_state_cls(host,
                                                 capabilities=capabilities,
                                                 service=
                                                 dict(service))
                self.host_state_map[host] = host_state
            # update capabilities and attributes in host_state
            host_state.update_from_volume_capability(capabilities,
                                                     service=
                                                     dict(service))
            active_hosts.add(host)

        self._no_capabilities_hosts = no_capabilities_hosts

        # remove non-active hosts from host_state_map
        nonactive_hosts = set(self.host_state_map.keys()) - active_hosts
        for host in nonactive_hosts:
            LOG.info(_LI("Removing non-active host: %(host)s from "
                         "scheduler cache."), {'host': host})
            del self.host_state_map[host]
Esempio n. 51
0
    def _run_ssh(self, cmd_list, check_exit_code=True):
        """Run a command on the switch over SSH, using a pooled connection.

        :param cmd_list: command tokens; joined with spaces before sending
        :param check_exit_code: passed through to processutils.ssh_execute
        :returns: the result of processutils.ssh_execute
        """
        command = ' '.join(cmd_list)

        # Lazily build the connection pool on first use.
        if not self.sshpool:
            self.sshpool = ssh_utils.SSHPool(self.switch_ip,
                                             self.switch_port,
                                             None,
                                             self.switch_user,
                                             self.switch_pwd,
                                             min_size=1,
                                             max_size=5)
        try:
            with self.sshpool.item() as ssh:
                return processutils.ssh_execute(
                    ssh, command, check_exit_code=check_exit_code)
        except Exception:
            # Log the failing command, then let the exception propagate.
            with excutils.save_and_reraise_exception():
                LOG.warning(_LW("Error running SSH command: %s"), command)
Esempio n. 52
0
def _calculate_count(size_in_m, blocksize):
    """Work out the dd block count for copying size_in_m MiB.

    Falls back to the default volume_dd_blocksize when the configured
    value cannot be parsed as a positive, whole-number size.

    :returns: tuple of (effective blocksize string, block count)
    """
    try:
        # strutils alone accepts zero/negative/float sizes, so screen
        # those out before converting.
        if blocksize.startswith(('-', '0')) or '.' in blocksize:
            raise ValueError
        bs = strutils.string_to_bytes('%sB' % blocksize)
    except ValueError:
        LOG.warning(_LW("Incorrect value error: %(blocksize)s, "
                        "it may indicate that \'volume_dd_blocksize\' "
                        "was configured incorrectly. Fall back to default."),
                    {'blocksize': blocksize})
        # Fall back to default blocksize
        CONF.clear_override('volume_dd_blocksize')
        blocksize = CONF.volume_dd_blocksize
        bs = strutils.string_to_bytes('%sB' % blocksize)

    return blocksize, int(math.ceil(size_in_m * units.Mi / bs))
Esempio n. 53
0
def convert_config_string_to_dict(config_string):
    """Convert config file replication string to a dict.

    The only supported form is as follows:
    "{'key-1'='val-1' 'key-2'='val-2'...}"

    :param config_string: Properly formatted string to convert to dict.
    :response: dict of string values
    """
    result = {}
    try:
        # Turn the "'k'='v' 'k2'='v2'" form into a Python dict literal,
        # then let ast parse it safely (no eval of arbitrary code).
        normalized = config_string.replace("=", ":").replace(" ", ", ")
        result = ast.literal_eval(normalized)
    except Exception:
        LOG.warning(_LW("Error encountered translating config_string: "
                        "%(config_string)s to dict"),
                    {'config_string': config_string})
    return result
Esempio n. 54
0
    def _update_host_state_map(self, context):
        """Refresh the host -> HostState cache from current service state."""
        # Get resource usage across the available volume nodes:
        volume_services = storage.ServiceList.get_all_by_topic(
            context, CONF.volume_topic, disabled=False)

        active_hosts = set()
        no_capabilities_hosts = set()
        for service in volume_services.objects:
            host = service.host
            if not utils.service_is_up(service):
                LOG.warning(_LW("volume service is down. (host: %s)"), host)
                continue

            capabilities = self.service_states.get(host, None)
            if capabilities is None:
                # Up, but nothing reported yet; track separately.
                no_capabilities_hosts.add(host)
                continue

            state = self.host_state_map.get(host)
            if not state:
                state = self.host_state_cls(host,
                                            capabilities=capabilities,
                                            service=dict(service))
                self.host_state_map[host] = state
            # Refresh capabilities and service attributes on every pass.
            state.update_from_volume_capability(capabilities,
                                                service=dict(service))
            active_hosts.add(host)

        self._no_capabilities_hosts = no_capabilities_hosts

        # Drop cache entries for hosts that are gone or inactive.
        for host in set(self.host_state_map) - active_hosts:
            LOG.info(
                _LI("Removing non-active host: %(host)s from "
                    "scheduler cache."), {'host': host})
            del self.host_state_map[host]