Example #1
        def do_attach():
            # check the volume status before attaching
            volume = self.db.volume_get(context, volume_id)
            if volume['status'] == 'attaching':
                if (volume['instance_uuid']
                        and volume['instance_uuid'] != instance_uuid):
                    msg = _("being attached by another instance")
                    raise exception.InvalidVolume(reason=msg)
            elif volume['status'] != "available":
                msg = _("status must be available")
                raise exception.InvalidVolume(reason=msg)
            self.db.volume_update(context, volume_id, {
                "instance_uuid": instance_uuid,
                "status": "attaching"
            })

            # TODO(vish): refactor this into a more general "reserve"
            # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
            if not uuidutils.is_uuid_like(instance_uuid):
                raise exception.InvalidUUID(uuid=instance_uuid)

            try:
                self.driver.attach_volume(context, volume_id, instance_uuid,
                                          mountpoint)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.db.volume_update(context, volume_id,
                                          {'status': 'error_attaching'})

            self.db.volume_attached(context.elevated(), volume_id,
                                    instance_uuid, mountpoint)
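
In the full Cinder source this nested do_attach() is defined inside attach_volume() and wrapped in a per-volume lock (Cinder's utils.synchronized helper) before being called, so concurrent attach requests for the same volume are serialized. A minimal self-contained sketch of that wrapping pattern; the lock registry and decorator below are illustrative stand-ins, not Cinder's helper:

    import threading
    from collections import defaultdict

    _locks = defaultdict(threading.Lock)  # one lock per volume id (illustrative)

    def synchronized(key):
        """Serialize callers that share the same key (here, a volume id)."""
        def decorator(func):
            def wrapper(*args, **kwargs):
                with _locks[key]:
                    return func(*args, **kwargs)
            return wrapper
        return decorator

    def attach_volume(volume_id, instance_uuid, mountpoint):
        @synchronized(volume_id)
        def do_attach():
            # status checks, DB updates and the driver call go here,
            # as in the example above
            return (volume_id, instance_uuid, mountpoint)
        return do_attach()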
Example #2
    def host_passes(self, host_state, filter_properties):
        context = filter_properties['context']
        host = volume_utils.extract_host(host_state.host, 'host')

        scheduler_hints = filter_properties.get('scheduler_hints') or {}
        instance_uuid = scheduler_hints.get(HINT_KEYWORD, None)

        # Without 'local_to_instance' hint
        if not instance_uuid:
            return True

        if not uuidutils.is_uuid_like(instance_uuid):
            raise exception.InvalidUUID(uuid=instance_uuid)

        # TODO(adrienverge): Currently it is not recommended to allow instance
        # migrations for hypervisors where this hint will be used. In case of
        # instance migration, a previously locally-created volume will not be
        # automatically migrated. Also in case of instance migration during the
        # volume's scheduling, the result is unpredictable. A future
        # enhancement would be to subscribe to Nova migration events (e.g. via
        # Ceilometer).

        # First, lookup for already-known information in local cache
        if instance_uuid in self._cache:
            return self._cache[instance_uuid] == host

        if not self._nova_has_extended_server_attributes(context):
            LOG.warning(
                _LW('Hint "%s" dropped because '
                    'ExtendedServerAttributes not active in Nova.'),
                HINT_KEYWORD)
            raise exception.CinderException(
                _('Hint "%s" not supported.') % HINT_KEYWORD)

        server = nova.API().get_server(context,
                                       instance_uuid,
                                       privileged_user=True,
                                       timeout=REQUESTS_TIMEOUT)

        if not hasattr(server, INSTANCE_HOST_PROP):
            LOG.warning(
                _LW('Hint "%s" dropped because Nova did not return '
                    'enough information. Either Nova policy needs to '
                    'be changed or a privileged account for Nova '
                    'should be specified in conf.'), HINT_KEYWORD)
            raise exception.CinderException(
                _('Hint "%s" not supported.') % HINT_KEYWORD)

        self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP)

        # Match if given instance is hosted on host
        return self._cache[instance_uuid] == host
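
host_passes() above relies on module-level names (HINT_KEYWORD, INSTANCE_HOST_PROP, REQUESTS_TIMEOUT) and a per-filter self._cache defined elsewhere in the filter module. The values below are illustrative assumptions based on the 'local_to_instance' hint mentioned in the comment, not a verbatim copy of the Cinder source; they show the shape of the inputs the filter expects:

    # Assumed module-level definitions (illustrative values).
    HINT_KEYWORD = 'local_to_instance'
    INSTANCE_HOST_PROP = 'OS-EXT-SRV-ATTR:host'  # Nova extended server attribute
    REQUESTS_TIMEOUT = 5                         # seconds

    # A volume-create request opts in by carrying the hint in its
    # filter properties:
    filter_properties = {
        'context': None,  # the RequestContext of the volume-create request
        'scheduler_hints': {HINT_KEYWORD: '7f3b5e42-8c1d-4f7a-9b2e-1a2b3c4d5e6f'},
    }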
Example #3
        def do_attach():
            # check the volume status before attaching
            volume = self.db.volume_get(context, volume_id)
            if volume['status'] == 'attaching':
                if (volume['instance_uuid']
                        and volume['instance_uuid'] != instance_uuid):
                    msg = _("being attached by another instance")
                    raise exception.InvalidVolume(reason=msg)
                if (volume['attached_host']
                        and volume['attached_host'] != host_name):
                    msg = _("being attached by another host")
                    raise exception.InvalidVolume(reason=msg)
            elif volume['status'] != "available":
                msg = _("status must be available")
                raise exception.InvalidVolume(reason=msg)

            # TODO(jdg): attach_time column is currently varchar
            # we should update this to a date-time object
            # also consider adding detach_time?
            now = timeutils.strtime()
            self.db.volume_update(
                context, volume_id, {
                    "instance_uuid": instance_uuid,
                    "attached_host": host_name,
                    "status": "attaching",
                    "attach_time": now
                })

            if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_attaching'})
                raise exception.InvalidUUID(uuid=instance_uuid)

            host_name_sanitized = utils.sanitize_hostname(
                host_name) if host_name else None

            volume = self.db.volume_get(context, volume_id)
            try:
                self.driver.attach_volume(context, volume, instance_uuid,
                                          host_name_sanitized, mountpoint)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.db.volume_update(context, volume_id,
                                          {'status': 'error_attaching'})

            self.db.volume_attached(context.elevated(), volume_id,
                                    instance_uuid, host_name_sanitized,
                                    mountpoint)
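
Compared with Example #1, this revision lets a volume be attached either to an instance (by UUID) or directly to a host (by name), and it records an attach_time. The conflict check at the top of do_attach() can be restated as a small pure function; the name below is illustrative, not part of the Cinder API:

    def attach_conflict(volume, instance_uuid, host_name):
        """Return an error message if the attach must be rejected, else None."""
        if volume['status'] == 'attaching':
            if (volume.get('instance_uuid')
                    and volume['instance_uuid'] != instance_uuid):
                return 'being attached by another instance'
            if (volume.get('attached_host')
                    and volume['attached_host'] != host_name):
                return 'being attached by another host'
            return None
        if volume['status'] != 'available':
            return 'status must be available'
        return None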
Example #4
    def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
        """Updates db to show volume is attached"""
        # TODO(vish): refactor this into a more general "reserve"
        # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
        if not utils.is_uuid_like(instance_uuid):
            raise exception.InvalidUUID(uuid=instance_uuid)

        try:
            self.driver.attach_volume(context,
                                      volume_id,
                                      instance_uuid,
                                      mountpoint)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_id,
                                      {'status': 'error_attaching'})

        self.db.volume_attached(context.elevated(),
                                volume_id,
                                instance_uuid,
                                mountpoint)
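
This simpler variant performs no status check or per-volume locking before calling the driver: it validates the instance UUID, calls the driver, and records the attachment. All versions share the same error-handling idiom: on a driver failure the volume is flagged 'error_attaching' inside save_and_reraise_exception(), so the original exception still propagates. A self-contained, illustrative stand-in for that context manager (not the oslo implementation):

    import contextlib
    import logging
    import sys

    LOG = logging.getLogger(__name__)

    @contextlib.contextmanager
    def save_and_reraise():
        # Capture the exception currently being handled by the caller.
        _, exc_value, exc_tb = sys.exc_info()
        try:
            yield  # the cleanup code in the 'with' body runs here
        except Exception:
            # The cleanup itself failed; log it, but keep the original error.
            LOG.exception('cleanup failed while handling an exception')
        if exc_value is not None:
            raise exc_value.with_traceback(exc_tb)

    # Usage, mirroring the pattern above:
    #     except Exception:
    #         with save_and_reraise():
    #             mark_volume_error_attaching()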
Example #5
        def do_attach():
            # check the volume status before attaching
            volume = self.db.volume_get(context, volume_id)
            volume_metadata = self.db.volume_admin_metadata_get(
                context.elevated(), volume_id)
            if volume['status'] == 'attaching':
                if (volume['instance_uuid']
                        and volume['instance_uuid'] != instance_uuid):
                    msg = _("being attached by another instance")
                    raise exception.InvalidVolume(reason=msg)
                if (volume['attached_host']
                        and volume['attached_host'] != host_name):
                    msg = _("being attached by another host")
                    raise exception.InvalidVolume(reason=msg)
                if (volume_metadata.get('attached_mode')
                        and volume_metadata.get('attached_mode') != mode):
                    msg = _("being attached by different mode")
                    raise exception.InvalidVolume(reason=msg)
            elif volume['status'] != "available":
                msg = _("status must be available")
                raise exception.InvalidVolume(reason=msg)

            # TODO(jdg): attach_time column is currently varchar
            # we should update this to a date-time object
            # also consider adding detach_time?
            self._notify_about_volume_usage(context, volume, "attach.start")
            self.db.volume_update(
                context, volume_id, {
                    "instance_uuid": instance_uuid,
                    "attached_host": host_name,
                    "status": "attaching",
                    "attach_time": timeutils.strtime()
                })
            self.db.volume_admin_metadata_update(context.elevated(), volume_id,
                                                 {"attached_mode": mode},
                                                 False)

            if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_attaching'})
                raise exception.InvalidUUID(uuid=instance_uuid)

            host_name_sanitized = utils.sanitize_hostname(
                host_name) if host_name else None

            volume = self.db.volume_get(context, volume_id)

            if volume_metadata.get('readonly') == 'True' and mode != 'ro':
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_attaching'})
                raise exception.InvalidVolumeAttachMode(mode=mode,
                                                        volume_id=volume_id)
            try:
                self.driver.attach_volume(context, volume, instance_uuid,
                                          host_name_sanitized, mountpoint)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.db.volume_update(context, volume_id,
                                          {'status': 'error_attaching'})

            volume = self.db.volume_attached(context.elevated(), volume_id,
                                             instance_uuid,
                                             host_name_sanitized, mountpoint)
            self._notify_about_volume_usage(context, volume, "attach.end")
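
This revision additionally tracks the attach mode in admin metadata, emits attach.start/attach.end usage notifications, and rejects any attach mode other than 'ro' for volumes flagged readonly. That last rule, restated as a small self-contained check (the function name and exception are illustrative):

    def validate_attach_mode(admin_metadata, requested_mode):
        """Reject any non-'ro' attach of a volume flagged readonly."""
        if admin_metadata.get('readonly') == 'True' and requested_mode != 'ro':
            raise ValueError('readonly volumes may only be attached in ro mode')

    validate_attach_mode({'readonly': 'True'}, 'ro')    # accepted
    # validate_attach_mode({'readonly': 'True'}, 'rw')  # would raise ValueError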