Example No. 1
        def do_attach():
            # check the volume status before attaching
            volume = self.db.volume_get(context, volume_id)
            volume_metadata = self.db.volume_admin_metadata_get(context.elevated(), volume_id)
            if volume["status"] == "attaching":
                if volume["instance_uuid"] and volume["instance_uuid"] != instance_uuid:
                    msg = _("being attached by another instance")
                    raise exception.InvalidVolume(reason=msg)
                if volume["attached_host"] and volume["attached_host"] != host_name:
                    msg = _("being attached by another host")
                    raise exception.InvalidVolume(reason=msg)
                if volume_metadata.get("attached_mode") and volume_metadata.get("attached_mode") != mode:
                    msg = _("being attached by different mode")
                    raise exception.InvalidVolume(reason=msg)
            elif volume["status"] != "available":
                msg = _("status must be available or attaching")
                raise exception.InvalidVolume(reason=msg)

            # TODO(jdg): attach_time column is currently varchar
            # we should update this to a date-time object
            # also consider adding detach_time?
            self._notify_about_volume_usage(context, volume, "attach.start")
            self.db.volume_update(
                context,
                volume_id,
                {
                    "instance_uuid": instance_uuid,
                    "attached_host": host_name,
                    "status": "attaching",
                    "attach_time": timeutils.strtime(),
                },
            )
            self.db.volume_admin_metadata_update(context.elevated(), volume_id, {"attached_mode": mode}, False)

            if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
                self.db.volume_update(context, volume_id, {"status": "error_attaching"})
                raise exception.InvalidUUID(uuid=instance_uuid)

            host_name_sanitized = utils.sanitize_hostname(host_name) if host_name else None

            volume = self.db.volume_get(context, volume_id)

            if volume_metadata.get("readonly") == "True" and mode != "ro":
                self.db.volume_update(context, volume_id, {"status": "error_attaching"})
                raise exception.InvalidVolumeAttachMode(mode=mode, volume_id=volume_id)
            try:
                # NOTE(flaper87): Verify the driver is enabled
                # before going forward. The exception will be caught
                # and the volume status updated.
                utils.require_driver_initialized(self.driver)

                self.driver.attach_volume(context, volume, instance_uuid, host_name_sanitized, mountpoint)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.db.volume_update(context, volume_id, {"status": "error_attaching"})

            volume = self.db.volume_attached(
                context.elevated(), volume_id, instance_uuid, host_name_sanitized, mountpoint
            )
            self._notify_about_volume_usage(context, volume, "attach.end")
Example No. 2
    def detach_volume(self, context, volume_id):
        """Updates db to show volume is detached."""
        # TODO(vish): refactor this into a more general "unreserve"
        # TODO(sleepsonthefloor): Is this 'elevated' appropriate?

        volume = self.db.volume_get(context, volume_id)
        self._notify_about_volume_usage(context, volume, "detach.start")
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)

            self.driver.detach_volume(context, volume)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_id, {"status": "error_detaching"})

        self.db.volume_detached(context.elevated(), volume_id)
        self.db.volume_admin_metadata_delete(context.elevated(), volume_id, "attached_mode")

        # Check for https://bugs.launchpad.net/cinder/+bug/1065702
        volume = self.db.volume_get(context, volume_id)
        if volume["provider_location"] and volume["name"] not in volume["provider_location"]:
            self.driver.ensure_export(context, volume)
        self._notify_about_volume_usage(context, volume, "detach.end")
Example No. 3
        def do_attach():
            # check the volume status before attaching
            volume = self.db.volume_get(context, volume_id)
            volume_metadata = self.db.volume_admin_metadata_get(
                context.elevated(), volume_id)
            if volume['status'] == 'attaching':
                if (volume['instance_uuid'] and volume['instance_uuid'] !=
                        instance_uuid):
                    msg = _("being attached by another instance")
                    raise exception.InvalidVolume(reason=msg)
                if (volume['attached_host'] and volume['attached_host'] !=
                        host_name):
                    msg = _("being attached by another host")
                    raise exception.InvalidVolume(reason=msg)
                if (volume_metadata.get('attached_mode') and
                        volume_metadata.get('attached_mode') != mode):
                    msg = _("being attached by different mode")
                    raise exception.InvalidVolume(reason=msg)
            elif volume['status'] != "available":
                msg = _("status must be available")
                raise exception.InvalidVolume(reason=msg)
            # TODO(jdg): attach_time column is currently varchar
            # we should update this to a date-time object
            # also consider adding detach_time?
            self.db.volume_update(context, volume_id,
                                  {"instance_uuid": instance_uuid,
                                   "mountpoint": mountpoint,
                                   "attached_host": host_name
                                   })

            self.db.volume_admin_metadata_update(context.elevated(),
                                                 volume_id,
                                                 {"attached_mode": mode},
                                                 False)
Example No. 4
    def detach_volume(self, context, volume_id):
        """Updates db to show volume is detached"""
        # TODO(vish): refactor this into a more general "unreserve"
        # TODO(sleepsonthefloor): Is this 'elevated' appropriate?

        volume = self.db.volume_get(context, volume_id)
        self._notify_about_volume_usage(context, volume, "detach.start")
        try:
            self.driver.detach_volume(context, volume)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_id,
                                      {'status': 'error_detaching'})

        self.db.volume_detached(context.elevated(), volume_id)
        self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
                                             'attached_mode')

        # Check for https://bugs.launchpad.net/cinder/+bug/1065702
        volume = self.db.volume_get(context, volume_id)
        if (volume['provider_location'] and
                volume['name'] not in volume['provider_location']):
            self.driver.ensure_export(context, volume)
        self._notify_about_volume_usage(context, volume, "detach.end")
Example No. 5
    def attach(self, context, volume, instance_uuid, host_name, mountpoint, mode):
        volume_metadata = self.get_volume_admin_metadata(context.elevated(), volume)
        if "readonly" not in volume_metadata:
            # NOTE(zhiyan): set a default value for read-only flag to metadata.
            self.update_volume_admin_metadata(context.elevated(), volume, {"readonly": "False"})
            volume_metadata["readonly"] = "False"

        if volume_metadata["readonly"] == "True" and mode != "ro":
            raise exception.InvalidVolumeAttachMode(mode=mode, volume_id=volume["id"])

        return self.volume_rpcapi.attach_volume(context, volume, instance_uuid, host_name, mountpoint, mode)
Example No. 6
    def delete_volume(self, context, volume_id):
        """Deletes and unexports volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        if volume_ref['attach_status'] == "attached":
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume_id)
        if volume_ref['host'] != self.host:
            raise exception.InvalidVolume(
                    reason=_("Volume is not local to this node"))

        self._notify_about_volume_usage(context, volume_ref, "delete.start")
        self._reset_stats()
        try:
            LOG.debug(_("volume %s: removing export"), volume_ref['name'])
            self.driver.remove_export(context, volume_ref)
            LOG.debug(_("volume %s: deleting"), volume_ref['name'])
            self.driver.delete_volume(volume_ref)
        except exception.VolumeIsBusy:
            LOG.debug(_("volume %s: volume is busy"), volume_ref['name'])
            self.driver.ensure_export(context, volume_ref)
            self.db.volume_update(context, volume_ref['id'],
                                  {'status': 'available'})
            return True
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_ref['id'],
                                      {'status': 'error_deleting'})

        self.db.volume_destroy(context, volume_id)
        LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
        self._notify_about_volume_usage(context, volume_ref, "delete.end")
        return True
Example No. 7
    def copy_volume_to_image(self, context, volume_id, image_meta):
        """Uploads the specified volume to Glance.

        image_meta is a dictionary containing the following keys:
        'id', 'container_format', 'disk_format'

        """
        payload = {"volume_id": volume_id, "image_id": image_meta["id"]}
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)

            volume = self.db.volume_get(context, volume_id)
            self.driver.ensure_export(context.elevated(), volume)
            image_service, image_id = glance.get_remote_image_service(context, image_meta["id"])
            self.driver.copy_volume_to_image(context, volume, image_service, image_meta)
            LOG.debug(
                _("Uploaded volume %(volume_id)s to " "image (%(image_id)s) successfully"),
                {"volume_id": volume_id, "image_id": image_id},
            )
        except Exception as error:
            with excutils.save_and_reraise_exception():
                payload["message"] = unicode(error)
        finally:
            if volume["instance_uuid"] is None and volume["attached_host"] is None:
                self.db.volume_update(context, volume_id, {"status": "available"})
            else:
                self.db.volume_update(context, volume_id, {"status": "in-use"})
Example No. 8
    def copy_volume_to_image(self, context, volume_id, image_meta):
        """Uploads the specified volume to Glance.

        image_meta is a dictionary containing the following keys:
        'id', 'container_format', 'disk_format'

        """
        payload = {"volume_id": volume_id, "image_id": image_meta["id"]}
        try:
            volume = self.db.volume_get(context, volume_id)
            self.driver.ensure_export(context.elevated(), volume)
            image_service, image_id = glance.get_remote_image_service(context, image_meta["id"])
            self.driver.copy_volume_to_image(context, volume, image_service, image_meta)
            LOG.debug(
                _("Uploaded volume %(volume_id)s to " "image (%(image_id)s) successfully"),
                {"volume_id": volume_id, "image_id": image_id},
            )
        except Exception as error:
            with excutils.save_and_reraise_exception():
                payload["message"] = unicode(error)
        finally:
            if volume["instance_uuid"] is None:
                self.db.volume_update(context, volume_id, {"status": "available"})
            else:
                self.db.volume_update(context, volume_id, {"status": "in-use"})
Example No. 9
    def detach_volume(self, context, volume_id):
        """Updates db to show volume is detached"""
        # TODO(vish): refactor this into a more general "unreserve"
        # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
        # self.db.volume_detached(context.elevated(), volume_id)
        self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
                                             'attached_mode')
Example No. 10
    def get(self, context, volume_id, viewable_admin_meta=False):
        if viewable_admin_meta:
            context = context.elevated()
        rv = self.db.volume_get(context, volume_id)
        volume = dict(rv.iteritems())
        check_policy(context, 'get', volume)
        return volume
Example No. 11
    def create_volume(self, context, volume_id, request_spec=None,
                      filter_properties=None, allow_reschedule=True,
                      snapshot_id=None, image_id=None, source_volid=None):
        """Creates and exports the volume."""
        context_saved = context.deepcopy()
        context = context.elevated()
        if filter_properties is None:
            filter_properties = {}

        try:
            flow_engine = create_volume.get_manager_flow(
                context,
                self.db,
                self.driver,
                self.scheduler_rpcapi,
                self.host,
                volume_id,
                snapshot_id=snapshot_id,
                image_id=image_id,
                source_volid=source_volid,
                allow_reschedule=allow_reschedule,
                reschedule_context=context_saved,
                request_spec=request_spec,
                filter_properties=filter_properties)
        except Exception:
            LOG.exception(_("Failed to create manager volume flow"))
            raise exception.CinderException(
                _("Failed to create manager volume flow"))

        if snapshot_id is not None:
            # Make sure the snapshot is not deleted until we are done with it.
            locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
        elif source_volid is not None:
            # Make sure the volume is not deleted until we are done with it.
            locked_action = "%s-%s" % (source_volid, 'delete_volume')
        else:
            locked_action = None

        def _run_flow():
            # This code executes create volume flow. If something goes wrong,
            # flow reverts all job that was done and reraises an exception.
            # Otherwise, all data that was generated by flow becomes available
            # in flow engine's storage.
            flow_engine.run()

        @utils.synchronized(locked_action, external=True)
        def _run_flow_locked():
            _run_flow()

        if locked_action is None:
            _run_flow()
        else:
            _run_flow_locked()

        # Fetch created volume from storage
        volume_ref = flow_engine.storage.fetch('volume')
        # Update volume stats
        self.stats['allocated_capacity_gb'] += volume_ref['size']
        return volume_ref['id']
Example No. 12
def update(context, id, name, description, is_public=None):
    """Update volume type by id."""
    if id is None:
        msg = _("id cannot be None")
        raise exception.InvalidVolumeType(reason=msg)
    elevated = context if context.is_admin else context.elevated()
    old_volume_type = get_volume_type(elevated, id)
    try:
        db.volume_type_update(elevated, id,
                              dict(name=name, description=description,
                                   is_public=is_public))
        # Rename resource in quota if volume type name is changed.
        if name:
            old_type_name = old_volume_type.get('name')
            if old_type_name != name:
                old_description = old_volume_type.get('description')
                old_public = old_volume_type.get('is_public')
                try:
                    QUOTAS.update_quota_resource(elevated,
                                                 old_type_name,
                                                 name)
                # Rollback the updated information to the original
                except db_exc.DBError:
                    db.volume_type_update(elevated, id,
                                          dict(name=old_type_name,
                                               description=old_description,
                                               is_public=old_public))
                    raise
    except db_exc.DBError:
        LOG.exception('DB error:')
        raise exception.VolumeTypeUpdateFailed(id=id)
Example No. 13
    def delete_snapshot(self, context, snapshot_id):
        """Deletes and unexports snapshot."""
        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        LOG.info(_("snapshot %s: deleting"), snapshot_ref['name'])

        try:
            LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name'])
            self.driver.delete_snapshot(snapshot_ref)
        except exception.SnapshotIsBusy:
            LOG.debug(_("snapshot %s: snapshot is busy"), snapshot_ref['name'])
            self.db.snapshot_update(context,
                                    snapshot_ref['id'],
                                    {'status': 'available'})
            return True
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.snapshot_update(context,
                                        snapshot_ref['id'],
                                        {'status': 'error_deleting'})

        self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
        self.db.snapshot_destroy(context, snapshot_id)
        LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
        return True
Example No. 14
def destroy(context, id):
    """Marks volume types as deleted."""
    if id is None:
        msg = _("id cannot be None")
        raise exception.InvalidVolumeType(reason=msg)
    elevated = context if context.is_admin else context.elevated()
    return db.volume_type_destroy(elevated, id)
Example No. 15
    def create_snapshot(self, context, volume_id, snapshot_id):
        """Creates and exports the snapshot."""
        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        LOG.info(_("snapshot %s: creating"), snapshot_ref['name'])

        try:
            snap_name = snapshot_ref['name']
            LOG.debug(_("snapshot %(snap_name)s: creating") % locals())
            model_update = self.driver.create_snapshot(snapshot_ref)
            if model_update:
                self.db.snapshot_update(context, snapshot_ref['id'],
                                        model_update)

        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.snapshot_update(context,
                                        snapshot_ref['id'],
                                        {'status': 'error'})

        self.db.snapshot_update(context,
                                snapshot_ref['id'], {'status': 'available',
                                                     'progress': '100%'})
        self.db.volume_glance_metadata_copy_to_snapshot(context,
                                                        snapshot_ref['id'],
                                                        volume_id)
        LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name'])
        return snapshot_id
Example No. 16
    def delete_snapshot(self, context, snapshot_id):
        """Deletes and unexports snapshot."""
        caller_context = context
        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        project_id = snapshot_ref['project_id']

        LOG.info(_("snapshot %s: deleting"), snapshot_ref['id'])
        self._notify_about_snapshot_usage(
            context, snapshot_ref, "delete.start")

        try:
            LOG.debug(_("snapshot %s: deleting"), snapshot_ref['id'])

            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            snapshot_ref['context'] = caller_context

            self.driver.delete_snapshot(snapshot_ref)
        except exception.SnapshotIsBusy:
            LOG.error(_("Cannot delete snapshot %s: snapshot is busy"),
                      snapshot_ref['id'])
            self.db.snapshot_update(context,
                                    snapshot_ref['id'],
                                    {'status': 'available'})
            return True
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.snapshot_update(context,
                                        snapshot_ref['id'],
                                        {'status': 'error_deleting'})

        # Get reservations
        try:
            if CONF.no_snapshot_gb_quota:
                reserve_opts = {'snapshots': -1}
            else:
                reserve_opts = {
                    'snapshots': -1,
                    'gigabytes': -snapshot_ref['volume_size'],
                }
            volume_ref = self.db.volume_get(context, snapshot_ref['volume_id'])
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume_ref.get('volume_type_id'))
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            reservations = None
            LOG.exception(_("Failed to update usages deleting snapshot"))
        self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
        self.db.snapshot_destroy(context, snapshot_id)
        LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['id'])
        self._notify_about_snapshot_usage(context, snapshot_ref, "delete.end")

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)
        return True
Example No. 17
        def do_attach():
            # check the volume status before attaching
            volume = self.db.volume_get(context, volume_id)
            if volume['status'] == 'attaching':
                if (volume['instance_uuid'] and volume['instance_uuid'] !=
                        instance_uuid):
                    msg = _("being attached by another instance")
                    raise exception.InvalidVolume(reason=msg)
            elif volume['status'] != "available":
                msg = _("status must be available")
                raise exception.InvalidVolume(reason=msg)
            self.db.volume_update(context, volume_id,
                                  {"instance_uuid": instance_uuid,
                                   "status": "attaching"})

            # TODO(vish): refactor this into a more general "reserve"
            # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
            if not uuidutils.is_uuid_like(instance_uuid):
                raise exception.InvalidUUID(uuid=instance_uuid)

            try:
                self.driver.attach_volume(context,
                                          volume_id,
                                          instance_uuid,
                                          mountpoint)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.db.volume_update(context,
                                          volume_id,
                                          {'status': 'error_attaching'})

            self.db.volume_attached(context.elevated(),
                                    volume_id,
                                    instance_uuid,
                                    mountpoint)
Example No. 18
    def copy_volume_to_image(self, context, volume_id, image_meta):
        """Uploads the specified volume to Glance.

        image_meta is a dictionary containing the following keys:
        'id', 'container_format', 'disk_format'

        """
        payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
        try:
            volume = self.db.volume_get(context, volume_id)
            self.driver.ensure_export(context.elevated(), volume)
            image_service, image_id = \
                glance.get_remote_image_service(context, image_meta['id'])
            self.driver.copy_volume_to_image(context, volume, image_service,
                                             image_meta)
            LOG.debug(_("Uploaded volume %(volume_id)s to "
                        "image (%(image_id)s) successfully"),
                      {'volume_id': volume_id, 'image_id': image_id})
        except Exception as error:
            with excutils.save_and_reraise_exception():
                payload['message'] = unicode(error)
        finally:
            if (volume['instance_uuid'] is None and
                    volume['attached_host'] is None):
                self.db.volume_update(context, volume_id,
                                      {'status': 'available'})
            else:
                self.db.volume_update(context, volume_id,
                                      {'status': 'in-use'})
Example No. 19
    def delete_volume(self, context, volume_id):
        """Deletes and unexports volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)

        if context.project_id != volume_ref['project_id']:
            project_id = volume_ref['project_id']
        else:
            project_id = context.project_id

        LOG.info(_("volume %s: deleting"), volume_ref['name'])
        if volume_ref['attach_status'] == "attached":
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume_id)
        if volume_ref['host'] != self.host:
            raise exception.InvalidVolume(
                reason=_("volume is not local to this node"))

        self._notify_about_volume_usage(context, volume_ref, "delete.start")
        self._reset_stats()
        try:
            LOG.debug(_("volume %s: removing export"), volume_ref['name'])
            self.driver.remove_export(context, volume_ref)
            LOG.debug(_("volume %s: deleting"), volume_ref['name'])
            self.driver.delete_volume(volume_ref)
        except exception.VolumeIsBusy:
            LOG.error(_("Cannot delete volume %s: volume is busy"),
                      volume_ref['name'])
            self.driver.ensure_export(context, volume_ref)
            self.db.volume_update(context, volume_ref['id'],
                                  {'status': 'available'})
            return True
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_ref['id'],
                                      {'status': 'error_deleting'})

        # Get reservations
        try:
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          volumes=-1,
                                          gigabytes=-volume_ref['size'])
        except Exception:
            reservations = None
            LOG.exception(_("Failed to update usages deleting volume"))

        self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
        self.db.volume_destroy(context, volume_id)
        LOG.info(_("volume %s: deleted successfully"), volume_ref['name'])
        self._notify_about_volume_usage(context, volume_ref, "delete.end")

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)

        self.publish_service_capabilities(context)

        return True
Example No. 20
    def create_volume(self, context, volume_id, request_spec=None,
                      filter_properties=None, allow_reschedule=True,
                      snapshot_id=None, image_id=None, source_volid=None):
        """Creates and exports the volume."""

        flow = create_volume.get_manager_flow(
            self.db,
            self.driver,
            self.scheduler_rpcapi,
            self.host,
            volume_id,
            request_spec=request_spec,
            filter_properties=filter_properties,
            allow_reschedule=allow_reschedule,
            snapshot_id=snapshot_id,
            image_id=image_id,
            source_volid=source_volid,
            reschedule_context=context.deepcopy())

        assert flow, _('Manager volume flow not retrieved')

        flow.run(context.elevated())
        if flow.state != states.SUCCESS:
            raise exception.CinderException(_("Failed to successfully complete"
                                              " manager volume workflow"))

        self._reset_stats()
        return volume_id
Example No. 21
    def migrate_volume(self, context, volume, host, force_host_copy):
        """Migrate the volume to the specified host."""

        # We only handle "available" volumes for now
        if volume['status'] not in ['available', 'in-use']:
            msg = _('Volume status must be available/in-use.')
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        # Make sure volume is not part of a migration
        if volume['migration_status'] is not None:
            msg = _("Volume is already part of an active migration")
            raise exception.InvalidVolume(reason=msg)

        # We only handle volumes without snapshots for now
        snaps = self.db.snapshot_get_all_for_volume(context, volume['id'])
        if snaps:
            msg = _("volume must not have snapshots")
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        # Make sure the host is in the list of available hosts
        elevated = context.elevated()
        topic = CONF.volume_topic
        services = self.db.service_get_all_by_topic(elevated,
                                                    topic,
                                                    disabled=False)
        found = False
        for service in services:
            if utils.service_is_up(service) and service['host'] == host:
                found = True
        if not found:
            msg = (_('No available service named %s') % host)
            LOG.error(msg)
            raise exception.InvalidHost(reason=msg)

        # Make sure the destination host is different than the current one
        if host == volume['host']:
            msg = _('Destination host must be different than current host')
            LOG.error(msg)
            raise exception.InvalidHost(reason=msg)

        self.update(context, volume, {'migration_status': 'starting'})

        # Call the scheduler to ensure that the host exists and that it can
        # accept the volume
        volume_type = {}
        volume_type_id = volume['volume_type_id']
        if volume_type_id:
            volume_type = volume_types.get_volume_type(context, volume_type_id)
        request_spec = {'volume_properties': volume,
                        'volume_type': volume_type,
                        'volume_id': volume['id']}
        self.scheduler_rpcapi.migrate_volume_to_host(context,
                                                     CONF.volume_topic,
                                                     volume['id'],
                                                     host,
                                                     force_host_copy,
                                                     request_spec)
Example No. 22
    def delete(self, context, volume, force=False):
        if context.is_admin and context.project_id != volume['project_id']:
            project_id = volume['project_id']
        else:
            project_id = context.project_id

        volume_id = volume['id']
        if not volume['host']:
            # NOTE(vish): scheduling failed, so delete it
            # Note(zhiteng): update volume quota reservation
            try:
                reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume['volume_type_id'])
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
            except Exception:
                reservations = None
                LOG.exception(_("Failed to update quota for deleting volume"))
            self.db.volume_destroy(context.elevated(), volume_id)

            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)
            return
        if not force and volume['status'] not in ["available", "error",
                                                  "error_restoring",
                                                  "error_extending"]:
            msg = _("Volume status must be available or error, "
                    "but current status is: %s") % volume['status']
            raise exception.InvalidVolume(reason=msg)

        if volume['attach_status'] == "attached":
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume_id)

        if volume['migration_status'] is not None:
            # Volume is migrating, wait until done
            msg = _("Volume cannot be deleted while migrating")
            raise exception.InvalidVolume(reason=msg)

        snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
        if len(snapshots):
            msg = _("Volume still has %d dependent snapshots") % len(snapshots)
            raise exception.InvalidVolume(reason=msg)

        # If the volume is encrypted, delete its encryption key from the key
        # manager. This operation makes volume deletion an irreversible process
        # because the volume cannot be decrypted without its key.
        encryption_key_id = volume.get('encryption_key_id', None)
        if encryption_key_id is not None:
            self.key_manager.delete_key(context, encryption_key_id)

        now = timeutils.utcnow()
        self.db.volume_update(context, volume_id, {'status': 'deleting',
                                                   'terminated_at': now})

        self.volume_rpcapi.delete_volume(context, volume)
Example No. 23
    def delete_volume(self, context, volume_id, unmanage_only=False):
        """Deletes and unexports volume."""
        context = context.elevated()

        volume_ref = self.db.volume_get(context, volume_id)

        if context.project_id != volume_ref['project_id']:
            project_id = volume_ref['project_id']
        else:
            project_id = context.project_id

        LOG.info(_("volume %s: deleting"), volume_ref['id'])
        if volume_ref['attach_status'] == "attached":
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume_id)
        if volume_ref['host'] != self.host:
            raise exception.InvalidVolume(
                reason=_("volume is not local to this node"))

        self._notify_about_volume_usage(context, volume_ref, "delete.start")
        self._reset_stats()

        try:
            self._delete_cascaded_volume(context, volume_id)
        except Exception:
            LOG.exception(_("Failed to delete volume"))
        # Get reservations
        try:
            reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume_ref.get('volume_type_id'))
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            reservations = None
            LOG.exception(_("Failed to update usages deleting volume"))

        # Delete glance metadata if it exists
        try:
            self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
            LOG.debug(_("volume %s: glance metadata deleted"),
                      volume_ref['id'])
        except exception.GlanceMetadataNotFound:
            LOG.debug(_("no glance metadata found for volume %s"),
                      volume_ref['id'])

        self.db.volume_destroy(context, volume_id)
        LOG.info(_("volume %s: deleted successfully"), volume_ref['id'])
        self._notify_about_volume_usage(context, volume_ref, "delete.end")

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)

        self.publish_service_capabilities(context)

        return True
Example No. 24
    def delete_backup(self, context, backup_id):
        """Delete volume backup from configured backup service."""
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the backup status updated. Fail early since there
            # are no other status to change but backup's
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized as err:
            with excutils.save_and_reraise_exception():
                self.db.backup_update(context, backup_id,
                                      {'status': 'error',
                                       'fail_reason': unicode(err)})

        LOG.info(_('Delete backup started, backup: %s.'), backup_id)
        backup = self.db.backup_get(context, backup_id)
        self.db.backup_update(context, backup_id, {'host': self.host})

        expected_status = 'deleting'
        actual_status = backup['status']
        if actual_status != expected_status:
            err = _('Delete_backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                'expected_status': expected_status,
                'actual_status': actual_status,
            }
            self.db.backup_update(context, backup_id, {'status': 'error',
                                                       'fail_reason': err})
            raise exception.InvalidBackup(reason=err)

        backup_service = self._map_service_to_driver(backup['service'])
        if backup_service is not None:
            configured_service = self.driver_name
            if backup_service != configured_service:
                err = _('Delete backup aborted, the backup service currently'
                        ' configured [%(configured_service)s] is not the'
                        ' backup service that was used to create this'
                        ' backup [%(backup_service)s].') % {
                    'configured_service': configured_service,
                    'backup_service': backup_service,
                }
                self.db.backup_update(context, backup_id,
                                      {'status': 'error'})
                raise exception.InvalidBackup(reason=err)

            try:
                backup_service = self.service.get_backup_driver(context)
                backup_service.delete(backup)
            except Exception as err:
                with excutils.save_and_reraise_exception():
                    self.db.backup_update(context, backup_id,
                                          {'status': 'error',
                                           'fail_reason':
                                           unicode(err)})

        context = context.elevated()
        self.db.backup_destroy(context, backup_id)
        LOG.info(_('Delete backup finished, backup %s deleted.'), backup_id)
Example No. 25
def destroy(context, id):
    """Marks group types as deleted."""
    if id is None:
        msg = _("id cannot be None")
        raise exception.InvalidGroupType(reason=msg)
    else:
        elevated = context if context.is_admin else context.elevated()
        db.group_type_destroy(elevated, id)
Example No. 26
    def detach_volume(self, context, volume_id):
        """Updates db to show volume is detached"""
        # TODO(vish): refactor this into a more general "unreserve"
        # TODO(sleepsonthefloor): Is this 'elevated' appropriate?

        volume = self.db.volume_get(context, volume_id)
        try:
            self.driver.detach_volume(context, volume)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_id, {"status": "error_detaching"})

        self.db.volume_detached(context.elevated(), volume_id)
        self.db.volume_admin_metadata_delete(context.elevated(), volume_id, "attached_mode")

        # Check for https://bugs.launchpad.net/cinder/+bug/1065702
        volume = self.db.volume_get(context, volume_id)
        if volume["provider_location"] and volume["name"] not in volume["provider_location"]:
            self.driver.ensure_export(context, volume)
Example No. 27
    def accept_transfer(self, context, volume_id, new_user, new_project):
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)

        # NOTE(jdg): need elevated context as we haven't "given" the vol
        # yet
        volume_ref = self.db.volume_get(context.elevated(), volume_id)
        self.driver.accept_transfer(context, volume_ref, new_user, new_project)
Example No. 28
def remove_group_type_access(context, group_type_id, project_id):
    """Remove access to group type for project_id."""
    if group_type_id is None:
        msg = _("group_type_id cannot be None")
        raise exception.InvalidGroupType(reason=msg)
    elevated = context if context.is_admin else context.elevated()
    if is_public_group_type(elevated, group_type_id):
        msg = _("Type access modification is not applicable to public group " "type.")
        raise exception.InvalidGroupType(reason=msg)
    return db.group_type_access_remove(elevated, group_type_id, project_id)
Example No. 29
    def create_volume(self, context, volume_id, snapshot_id=None, image_id=None):
        """Creates and exports the volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        self._notify_about_volume_usage(context, volume_ref, "create.start")
        LOG.info(_("volume %s: creating"), volume_ref["name"])

        self.db.volume_update(context, volume_id, {"host": self.host})
        # NOTE(vish): so we don't have to get volume from db again
        #             before passing it to the driver.
        volume_ref["host"] = self.host

        status = "available"
        model_update = False

        try:
            vol_name = volume_ref["name"]
            vol_size = volume_ref["size"]
            LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals())
            if snapshot_id is None and image_id is None:
                model_update = self.driver.create_volume(volume_ref)
            elif snapshot_id is not None:
                snapshot_ref = self.db.snapshot_get(context, snapshot_id)
                model_update = self.driver.create_volume_from_snapshot(volume_ref, snapshot_ref)
            else:
                # create the volume from an image
                image_service, image_id = glance.get_remote_image_service(context, image_id)
                image_location = image_service.get_location(context, image_id)
                cloned = self.driver.clone_image(volume_ref, image_location)
                if not cloned:
                    model_update = self.driver.create_volume(volume_ref)
                    status = "downloading"

            if model_update:
                self.db.volume_update(context, volume_ref["id"], model_update)

            LOG.debug(_("volume %s: creating export"), volume_ref["name"])
            model_update = self.driver.create_export(context, volume_ref)
            if model_update:
                self.db.volume_update(context, volume_ref["id"], model_update)

        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_ref["id"], {"status": "error"})

        now = timeutils.utcnow()
        self.db.volume_update(context, volume_ref["id"], {"status": status, "launched_at": now})
        LOG.debug(_("volume %s: created successfully"), volume_ref["name"])
        self._reset_stats()

        if image_id and not cloned:
            # copy the image onto the volume.
            self._copy_image_to_volume(context, volume_ref, image_id)
        self._notify_about_volume_usage(context, volume_ref, "create.end")
        return volume_ref["id"]
Example No. 30
    def attach(self, context, volume, instance_uuid, host_name,
               mountpoint, mode):
        volume_metadata = self.get_volume_admin_metadata(context.elevated(),
                                                         volume)
        if 'readonly' not in volume_metadata:
            # NOTE(zhiyan): set a default value for read-only flag to metadata.
            self.update_volume_admin_metadata(context.elevated(), volume,
                                              {'readonly': 'False'})
            volume_metadata['readonly'] = 'False'

        if volume_metadata['readonly'] == 'True' and mode != 'ro':
            raise exception.InvalidVolumeAttachMode(mode=mode,
                                                    volume_id=volume['id'])

        return self.volume_rpcapi.attach_volume(context,
                                                volume,
                                                instance_uuid,
                                                host_name,
                                                mountpoint,
                                                mode)
Example No. 31
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)

        if volume['status'] != "available":
            msg = _('Volume to be backed up must be available')
            raise exception.InvalidVolume(reason=msg)

        volume_host = volume_utils.extract_host(volume['host'], 'host')
        if not self._is_backup_service_enabled(volume, volume_host):
            raise exception.ServiceNotFound(service_id='cinder-backup')

        # do quota reservation before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(
                        msg, {
                            's_pid': context.project_id,
                            's_size': volume['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.BackupLimitExceeded(allowed=quotas[over])

        # Find the latest backup of the volume and use it as the parent
        # backup to do an incremental backup.
        latest_backup = None
        # Added for periodic backup
        if getattr(context, 'periodic', False):
            latest_backup = None
            if description:
                description = PERIODICSTR + description
            else:
                description = PERIODICSTR
        else:
            if incremental:
                backups = self.db.backup_get_all_by_volume(
                    context.elevated(), volume_id)
                if backups:
                    normal_backups = []
                    for bk in backups:
                        if not bk.display_description or \
                                PERIODICSTR not in bk.display_description:
                            LOG.debug("Found normal backup %(bak)s "
                                      "for volume %(vol)s." % {
                                          "bak": bk.id,
                                          "vol": volume_id
                                      })
                            normal_backups.append(bk)
                    if normal_backups:
                        LOG.debug(
                            "The normal backups for volume "
                            "%(vol)s: %(baks)s." % {
                                "vol": volume_id,
                                "baks": [bk.id for bk in normal_backups]
                            })
                        latest_backup = max(normal_backups,
                                            key=lambda x: x['created_at'])
        parent_id = None
        if latest_backup:
            if latest_backup['status'] == "available":
                parent_id = latest_backup['id']
                LOG.info(
                    _LI("Found parent backup %(bak)s for volume "
                        "%(volume)s. Do an incremental backup."), {
                            'bak': latest_backup['id'],
                            'volume': volume['id']
                        })
            elif latest_backup['status'] == "creating":
                msg = _('The parent backup is creating.')
                LOG.info(_LI("The parent backup %(bak)s is creating."),
                         {'bak': latest_backup['id']})
                raise exception.InvalidBackup(reason=msg)
            else:
                LOG.info(
                    _LI("No backups available to do an incremental "
                        "backup, do a full backup for volume %(volume)s."),
                    {'volume': volume['id']})
        else:
            LOG.info(
                _LI("No backups available to do an incremental "
                    "backup, do a full backup for volume %(volume)s."),
                {'volume': volume['id']})

        self.db.volume_update(context, volume_id, {'status': 'backing-up'})

        options = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'display_name': name,
            'display_description': description,
            'volume_id': volume_id,
            'status': 'creating',
            'container': container,
            'parent_id': parent_id,
            # Set backup size to "0" which means
            # it's not available. Backup driver
            # will return the exact size when
            # backing up is done. We lined up with OP
            # that when backup is in "creating" status,
            # OP will show "--" in the "size" field
            # instead of "0".
            # 'size': volume['size'],
            'size': 0,
            'host': volume_host,
        }
        try:
            backup = self.db.backup_create(context, options)
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self.db.backup_destroy(context, backup['id'])
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup['host'], backup['id'],
                                         volume_id)

        return backup
Example No. 32
    def delete(self, context, volume, force=False, unmanage_only=False):
        if context.is_admin and context.project_id != volume['project_id']:
            project_id = volume['project_id']
        else:
            project_id = context.project_id

        volume_id = volume['id']
        if not volume['host']:
            volume_utils.notify_about_volume_usage(context, volume,
                                                   "delete.start")
            # NOTE(vish): scheduling failed, so delete it
            # Note(zhiteng): update volume quota reservation
            try:
                reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
                QUOTAS.add_volume_type_opts(context, reserve_opts,
                                            volume['volume_type_id'])
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
            except Exception:
                reservations = None
                LOG.exception(_("Failed to update quota for deleting volume"))
            self.db.volume_destroy(context.elevated(), volume_id)

            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)

            volume_utils.notify_about_volume_usage(context, volume,
                                                   "delete.end")
            return
        if not force and volume['status'] not in [
                "available", "error", "error_restoring", "error_extending"
        ]:
            msg = _("Volume status must be available or error, "
                    "but current status is: %s") % volume['status']
            raise exception.InvalidVolume(reason=msg)

        if volume['attach_status'] == "attached":
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume_id)

        if volume['migration_status'] is not None:
            # Volume is migrating, wait until done
            msg = _("Volume cannot be deleted while migrating")
            raise exception.InvalidVolume(reason=msg)

        snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
        if len(snapshots):
            msg = _("Volume still has %d dependent snapshots") % len(snapshots)
            raise exception.InvalidVolume(reason=msg)

        # If the volume is encrypted, delete its encryption key from the key
        # manager. This operation makes volume deletion an irreversible process
        # because the volume cannot be decrypted without its key.
        encryption_key_id = volume.get('encryption_key_id', None)
        if encryption_key_id is not None:
            self.key_manager.delete_key(context, encryption_key_id)

        now = timeutils.utcnow()
        self.db.volume_update(context, volume_id, {
            'status': 'deleting',
            'terminated_at': now
        })

        self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
Ejemplo n.º 33
0
    def create_instance_backup(self,
                               context,
                               instance_uuid,
                               name,
                               description,
                               volume_ids,
                               container,
                               incremental=False,
                               availability_zone=None,
                               force=True):
        """Make the RPC call to create backup for volume-based instance."""
        # Use the same policy as backup creation
        check_policy(context, 'create')

        server = nova.API().get_server(context, instance_uuid)
        if server.status not in [
                "ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED", "SHELVED_OFFLOADED"
        ]:
            msg = (_("Instance %(instance_uuid)s in %(status)s status "
                     "which is not allowed to be backed up.") % {
                         'instance_uuid': instance_uuid,
                         'status': server.status
                     })
            raise exception.InvalidInstanceStatus(reason=msg)

        volumes = [
            self.volume_api.get(context, volume_id) for volume_id in volume_ids
        ]

        for volume in volumes:
            # Verify all volumes are in 'in-use' state
            if volume['status'] != "in-use":
                msg = (_('Volume to be backed up must be in-use '
                         'but the current status is "%s".') % volume['status'])
                raise exception.InvalidVolume(reason=msg)

            # Verify backup service is enabled on host
            volume_host = volume_utils.extract_host(volume['host'], 'host')
            if not self._is_backup_service_enabled(volume, volume_host):
                raise exception.ServiceNotFound(service_id='cinder-backup')

        backups = []
        inst_backup_kwargs = []

        # Add a 32-character UUID prefix to display_description in order to
        # tell apart backups that are created in the same request
        desc_prefix = str(uuid.uuid4()).replace('-', '')

        for volume in volumes:
            # Reserve a quota before setting volume status and backup status
            try:
                reserve_opts = {
                    'backups': 1,
                    'backup_gigabytes': volume['size']
                }
                LOG.info(
                    _LI("create_instance_backup "
                        "reserve_opts: %(reserve_opts)s"),
                    {'reserve_opts': reserve_opts})
                reservations = QUOTAS.reserve(context, **reserve_opts)
            except exception.OverQuota as e:
                overs = e.kwargs['overs']
                usages = e.kwargs['usages']
                quotas = e.kwargs['quotas']

                # reset status for the other volumes and
                # remove the related backup
                for backup in backups:
                    self.db.volume_update(context, backup['volume_id'],
                                          {'status': 'in-use'})
                    self.db.backup_update(context, backup['id'],
                                          {'status': 'error'})
                    self.delete(context, backup['id'])

                def _consumed(resource_name):
                    return (usages[resource_name]['reserved'] +
                            usages[resource_name]['in_use'])

                for over in overs:
                    if 'gigabytes' in over:
                        msg = _LW("Quota exceeded for %(s_pid)s, tried to "
                                  "create "
                                  "%(s_size)sG backup "
                                  "(%(d_consumed)dG of "
                                  "%(d_quota)dG already consumed)")
                        LOG.warning(
                            msg, {
                                's_pid': context.project_id,
                                's_size': volume['size'],
                                'd_consumed': _consumed(over),
                                'd_quota': quotas[over]
                            })
                        raise exception.VolumeBackupSizeExceedsAvailableQuota(
                            requested=volume['size'],
                            consumed=_consumed('backup_gigabytes'),
                            quota=quotas['backup_gigabytes'])
                    elif 'backups' in over:
                        msg = _LW("Quota exceeded for %(s_pid)s, tried to "
                                  "create backups (%(d_consumed)d backups "
                                  "already consumed)")
                        LOG.warning(
                            msg, {
                                's_pid': context.project_id,
                                'd_consumed': _consumed(over)
                            })
                        raise exception.BackupLimitExceeded(
                            allowed=quotas[over])

            # Since Ceph doesn't use parent_id to determine an incremental
            # backup, this block is left commented out.
            #
            # Find the latest backup of the volume and use it as the parent
            # backup to do an incremental backup.
            # latest_backup = None
            # if incremental:
            #     backups = \
            #              objects.BackupList.get_all_by_volume(context.elevated(),
            #                                                   volume['id'])
            #     if backups.objects:
            #         latest_backup = max(backups.objects,
            #                             key=lambda x: x['created_at'])
            #     else:
            #         msg = _('No backups available \
            #                  to do an incremental backup.')
            #         raise exception.InvalidBackup(reason=msg)
            latest_backup = None
            # Added for periodic backup
            if getattr(context, 'periodic', False):
                latest_backup = None
                description = (PERIODICSTR + description if description
                               else PERIODICSTR)
            else:
                if incremental:
                    all_backups = self.db.backup_get_all_by_volume(
                        context.elevated(), volume['id'])
                    if all_backups:
                        normal_backups = []
                        for bk in all_backups:
                            if (not bk.display_description or
                                    PERIODICSTR not in bk.display_description):
                                normal_backups.append(bk)
                        if normal_backups:
                            latest_backup = max(normal_backups,
                                                key=lambda x: x['created_at'])

            parent_id = None
            if latest_backup:
                if latest_backup['status'] == "available":
                    parent_id = latest_backup['id']
                    LOG.info(
                        _LI("Found parent backup %(bak)s for volume "
                            "%(volume)s. Do an incremental backup."), {
                                'bak': latest_backup['id'],
                                'volume': volume['id']
                            })
                elif latest_backup['status'] == "creating":
                    msg = _('The parent backup is creating.')
                    LOG.info(_LI("The parent backup %(bak)s is creating."),
                             {'bak': latest_backup['id']})
                    raise exception.InvalidBackup(reason=msg)
                else:
                    LOG.info(
                        _LI("No backups available to do an incremental "
                            "backup, do a full backup for "
                            "volume %(volume)s."), {'volume': volume['id']})
            else:
                LOG.info(
                    _LI("No backups available to do an incremental "
                        "backup, do a full backup for volume %(volume)s."),
                    {'volume': volume['id']})

            options = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': (desc_prefix + description
                                        if description else desc_prefix),
                'volume_id': volume['id'],
                'status': 'creating',
                'container': container,
                'parent_id': parent_id,
                # Set backup size to "0", which means it's not yet
                # available. The backup driver will report the exact
                # size once the backup completes. We agreed with OP
                # that while a backup is in "creating" status, OP
                # will show "--" in the "size" field instead of "0".
                # 'size': volume['size'],
                'size': 0,
                'host': volume_host,
            }

            # (maqi) Use the volume display_description field to save the
            # volume's previous status, since volumes in Kilo don't have a
            # previous_status field in the database
            previous_status = volume['status']
            self.db.volume_update(context, volume['id'], {
                'status': 'backing-up',
                'display_description': previous_status
            })

            try:
                backup = self.db.backup_create(context, options)
                QUOTAS.commit(context, reservations)
            except Exception:
                with excutils.save_and_reraise_exception():
                    try:
                        self.db.backup_destroy(context, backup['id'])
                    finally:
                        QUOTAS.rollback(context, reservations)
            backups.append(backup)
            kwargs = {
                'host': backup['host'],
                'backup_id': backup['id'],
                'volume_id': volume['id'],
            }
            inst_backup_kwargs.append(kwargs)

        self.backup_rpcapi.create_instance_backup(context, instance_uuid,
                                                  inst_backup_kwargs)
        LOG.debug(
            "Returning from create_instance_backup with result: "
            "%(backups)s", {'backups': backups})
        return backups
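
Because every backup created in a single create_instance_backup() call shares the same 32-character prefix in display_description, the backups belonging to one request can later be regrouped by that prefix. A small illustrative sketch follows, treating backups as plain dicts rather than Cinder objects.

import uuid
from collections import defaultdict

PREFIX_LEN = 32  # uuid.uuid4().hex is 32 characters


def group_by_request(backups):
    # Group backup records by the shared description prefix that
    # create_instance_backup() prepends to display_description.
    groups = defaultdict(list)
    for backup in backups:
        desc = backup.get('display_description') or ''
        groups[desc[:PREFIX_LEN]].append(backup)
    return dict(groups)


if __name__ == '__main__':
    prefix = uuid.uuid4().hex
    sample = [
        {'id': 'b1', 'display_description': prefix + 'periodic'},
        {'id': 'b2', 'display_description': prefix + 'periodic'},
        {'id': 'b3', 'display_description': uuid.uuid4().hex + 'manual'},
    ]
    for key, items in group_by_request(sample).items():
        print(key[:8], [b['id'] for b in items])
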
Ejemplo n.º 34
0
    def retype(self, context, volume, new_type, migration_policy=None):
        """Attempt to modify the type associated with an existing volume."""
        if volume['status'] not in ['available', 'in-use']:
            msg = _('Unable to update type due to incorrect status '
                    'on volume: %s') % volume['id']
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        if volume['migration_status'] is not None:
            msg = (_("Volume %s is already part of an active migration.") %
                   volume['id'])
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        if migration_policy and migration_policy not in ['on-demand', 'never']:
            msg = _('migration_policy must be \'on-demand\' or \'never\', '
                    'passed: %s') % migration_policy
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)

        # Support specifying volume type by ID or name
        try:
            if uuidutils.is_uuid_like(new_type):
                vol_type = volume_types.get_volume_type(context, new_type)
            else:
                vol_type = volume_types.get_volume_type_by_name(
                    context, new_type)
        except exception.InvalidVolumeType:
            msg = _('Invalid volume_type passed: %s') % new_type
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)

        vol_type_id = vol_type['id']
        vol_type_qos_id = vol_type['qos_specs_id']

        old_vol_type = None
        old_vol_type_id = volume['volume_type_id']
        old_vol_type_qos_id = None

        # Error if the original and new type are the same
        if volume['volume_type_id'] == vol_type_id:
            msg = (_('New volume_type same as original: %s') % new_type)
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)

        if volume['volume_type_id']:
            old_vol_type = volume_types.get_volume_type(
                context, old_vol_type_id)
            old_vol_type_qos_id = old_vol_type['qos_specs_id']

        # We don't support changing encryption requirements yet
        old_enc = volume_types.get_volume_type_encryption(
            context, old_vol_type_id)
        new_enc = volume_types.get_volume_type_encryption(context, vol_type_id)
        if old_enc != new_enc:
            msg = _('Retype cannot change encryption requirements')
            raise exception.InvalidInput(reason=msg)

        # We don't support changing QoS at the front-end yet for in-use volumes
        # TODO(avishay): Call Nova to change QoS setting (libvirt has support
        # - virDomainSetBlockIoTune() - Nova does not have support yet).
        if (volume['status'] != 'available'
                and old_vol_type_qos_id != vol_type_qos_id):
            for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
                if qos_id:
                    specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
                    if specs['qos_specs']['consumer'] != 'back-end':
                        msg = _('Retype cannot change front-end qos specs for '
                                'in-use volumes')
                        raise exception.InvalidInput(reason=msg)

        # We're checking here so that we can report any quota issues as
        # early as possible, but we won't commit until we change the type.
        # We pass the reservations onward in case we need to roll back.
        reservations = quota_utils.get_volume_type_reservation(
            context, volume, vol_type_id)

        self.update(context, volume, {'status': 'retyping'})

        request_spec = {
            'volume_properties': volume,
            'volume_id': volume['id'],
            'volume_type': vol_type,
            'migration_policy': migration_policy,
            'quota_reservations': reservations
        }

        self.scheduler_rpcapi.retype(context,
                                     CONF.volume_topic,
                                     volume['id'],
                                     request_spec=request_spec,
                                     filter_properties={})
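
The ID-or-name lookup at the top of retype() is a small pattern of its own: if the value parses as a UUID, look the type up by ID, otherwise fall back to a name lookup. The standalone sketch below illustrates it; the in-memory _TYPES list and the simplified is_uuid_like() are stand-ins for the volume_types helpers and oslo's uuidutils, not the real implementations.

import uuid

_TYPES = [
    {'id': 'c8b4f8a0-0000-4000-8000-000000000001', 'name': 'gold'},
    {'id': 'c8b4f8a0-0000-4000-8000-000000000002', 'name': 'silver'},
]


def is_uuid_like(value):
    # Simplified stand-in for oslo's uuidutils.is_uuid_like().
    try:
        return str(uuid.UUID(value)) == value.lower()
    except (TypeError, ValueError, AttributeError):
        return False


def get_volume_type(new_type):
    # Accept either a UUID or a human-readable type name.
    key = 'id' if is_uuid_like(new_type) else 'name'
    for vol_type in _TYPES:
        if vol_type[key] == new_type:
            return vol_type
    raise ValueError('Invalid volume_type passed: %s' % new_type)


if __name__ == '__main__':
    print(get_volume_type('gold'))
    print(get_volume_type('c8b4f8a0-0000-4000-8000-000000000002'))
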
Ejemplo n.º 35
0
    def update_readonly_flag(self, context, volume, flag):
        if volume['status'] != 'available':
            msg = _('Volume status must be available to update readonly flag.')
            raise exception.InvalidVolume(reason=msg)
        self.update_volume_admin_metadata(context.elevated(), volume,
                                          {'readonly': str(flag)})
Ejemplo n.º 36
0
    def _get_weighted_candidates_by_group_type(
            self, context: context.RequestContext, group_spec: dict,
            group_filter_properties: Optional[dict] = None) \
            -> list[WeighedHost]:
        """Finds backends that supports the group type.

        Returns a list of backends that meet the required specs,
        ordered by their fitness.
        """
        elevated = context.elevated()

        weighed_backends = []
        volume_properties = group_spec['volume_properties']
        # Since Cinder is using mixed filters from Oslo and its own, which
        # take 'resource_XX' and 'volume_XX' as input respectively, copying
        # 'volume_XX' to 'resource_XX' will make both filters happy.
        resource_properties = volume_properties.copy()
        group_type = group_spec.get("group_type", None)
        resource_type = group_spec.get("group_type", None)
        group_spec.update({'resource_properties': resource_properties})

        config_options = self._get_configuration_options()

        if group_filter_properties is None:
            group_filter_properties = {}
        self._populate_retry(group_filter_properties, resource_properties)

        group_filter_properties.update({
            'context': context,
            'request_spec': group_spec,
            'config_options': config_options,
            'group_type': group_type,
            'resource_type': resource_type
        })

        self.populate_filter_properties(group_spec, group_filter_properties)

        # Find our local list of acceptable backends by filtering and
        # weighing our options. We virtually consume resources on
        # them so that subsequent selections can adjust accordingly.

        # Note: remember, we are using an iterator here. So only
        # traverse this list once.
        all_backends = self.host_manager.get_all_backend_states(elevated)
        if not all_backends:
            return []

        # Filter local backends based on requirements ...
        backends = self.host_manager.get_filtered_backends(
            all_backends, group_filter_properties)

        if not backends:
            return []

        LOG.debug("Filtered %s", backends)

        # weighted_backends = WeightedHost() ... the best backend for the job.
        weighed_backends = self.host_manager.get_weighed_backends(
            backends, group_filter_properties)
        if not weighed_backends:
            return []

        return weighed_backends
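
Stripped of the host-manager plumbing, the flow above is: collect all backend states, filter out the ones that cannot satisfy the request, then rank the survivors. The tiny standalone sketch below illustrates that shape with made-up backend dicts and a single capacity-style filter and weigher; these are not Cinder's real filter or weigher classes.

def filter_backends(backends, required_gb):
    # Keep only backends with enough free capacity for the request.
    return [b for b in backends if b['free_gb'] >= required_gb]


def weigh_backends(backends):
    # Rank survivors: more free space wins, mirroring a capacity weigher.
    return sorted(backends, key=lambda b: b['free_gb'], reverse=True)


if __name__ == '__main__':
    all_backends = [
        {'name': 'pool-a', 'free_gb': 50},
        {'name': 'pool-b', 'free_gb': 500},
        {'name': 'pool-c', 'free_gb': 5},
    ]
    candidates = filter_backends(all_backends, required_gb=10)
    for backend in weigh_backends(candidates):
        print(backend['name'], backend['free_gb'])
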
Ejemplo n.º 37
0
    def delete_volume(self, context, volume_id):
        """Deletes and unexports volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)

        if context.project_id != volume_ref['project_id']:
            project_id = volume_ref['project_id']
        else:
            project_id = context.project_id

        LOG.info(_("volume %s: deleting"), volume_ref['id'])
        if volume_ref['attach_status'] == "attached":
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume_id)
        if volume_ref['host'] != self.host:
            raise exception.InvalidVolume(
                reason=_("volume is not local to this node"))

        self._notify_about_volume_usage(context, volume_ref, "delete.start")
        self._reset_stats()
        try:
            LOG.debug(_("volume %s: removing export"), volume_ref['id'])
            self.driver.remove_export(context, volume_ref)
            LOG.debug(_("volume %s: deleting"), volume_ref['id'])
            self.driver.delete_volume(volume_ref)
        except exception.VolumeIsBusy:
            LOG.error(_("Cannot delete volume %s: volume is busy"),
                      volume_ref['id'])
            self.driver.ensure_export(context, volume_ref)
            self.db.volume_update(context, volume_ref['id'],
                                  {'status': 'available'})
            return True
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_ref['id'],
                                      {'status': 'error_deleting'})

        # If deleting the source volume in a migration, we want to skip quotas
        # and other database updates.
        if volume_ref['migration_status']:
            return True

        # Get reservations
        try:
            reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume_ref.get('volume_type_id'))
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            reservations = None
            LOG.exception(_("Failed to update usages deleting volume"))

        # Delete glance metadata if it exists
        try:
            self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
            LOG.debug(_("volume %s: glance metadata deleted"),
                      volume_ref['id'])
        except exception.GlanceMetadataNotFound:
            LOG.debug(_("no glance metadata found for volume %s"),
                      volume_ref['id'])

        self.db.volume_destroy(context, volume_id)
        LOG.info(_("volume %s: deleted successfully"), volume_ref['id'])
        self._notify_about_volume_usage(context, volume_ref, "delete.end")

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)

        self.publish_service_capabilities(context)

        return True
Ejemplo n.º 38
0
    def create_volume(self,
                      context,
                      volume_id,
                      request_spec=None,
                      filter_properties=None,
                      allow_reschedule=True,
                      snapshot_id=None,
                      image_id=None,
                      source_volid=None):
        """Creates and exports the volume."""
        context_saved = context.deepcopy()
        context = context.elevated()
        if filter_properties is None:
            filter_properties = {}
        volume_ref = self.db.volume_get(context, volume_id)
        self._notify_about_volume_usage(context, volume_ref, "create.start")

        # NOTE(vish): so we don't have to get volume from db again
        #             before passing it to the driver.
        volume_ref['host'] = self.host

        if volume_ref['status'] == 'migration_target_creating':
            status = 'migration_target'
        else:
            status = 'available'
        model_update = False
        image_meta = None
        cloned = False

        try:
            LOG.debug(
                _("volume %(vol_name)s: creating lv of"
                  " size %(vol_size)sG"), {
                      'vol_name': volume_ref['name'],
                      'vol_size': volume_ref['size']
                  })
            snapshot_ref = None
            sourcevol_ref = None
            image_service = None
            image_location = None
            image_meta = None

            if snapshot_id is not None:
                LOG.info(_("volume %s: creating from snapshot"),
                         volume_ref['name'])
                snapshot_ref = self.db.snapshot_get(context, snapshot_id)
            elif source_volid is not None:
                LOG.info(_("volume %s: creating from existing volume"),
                         volume_ref['name'])
                sourcevol_ref = self.db.volume_get(context, source_volid)
            elif image_id is not None:
                LOG.info(_("volume %s: creating from image"),
                         volume_ref['name'])
                # create the volume from an image
                image_service, image_id = \
                    glance.get_remote_image_service(context,
                                                    image_id)
                image_location = image_service.get_location(context, image_id)
                image_meta = image_service.show(context, image_id)
            else:
                LOG.info(_("volume %s: creating"), volume_ref['name'])

            try:
                model_update, cloned = self._create_volume(
                    context, volume_ref, snapshot_ref, sourcevol_ref,
                    image_service, image_id, image_location)
            except exception.ImageCopyFailure as ex:
                LOG.error(
                    _('Setting volume %s status to error '
                      'after failed image copy.'), volume_ref['id'])
                self.db.volume_update(context, volume_ref['id'],
                                      {'status': 'error'})
                return
            except Exception:
                exc_info = sys.exc_info()
                # restore source volume status before reschedule
                # FIXME(zhiteng) do all the clean-up before reschedule
                if sourcevol_ref is not None:
                    self.db.volume_update(context, sourcevol_ref['id'],
                                          {'status': sourcevol_ref['status']})
                rescheduled = False
                # try to re-schedule volume:
                if allow_reschedule:
                    rescheduled = self._reschedule_or_error(
                        context_saved, volume_id, exc_info, snapshot_id,
                        image_id, request_spec, filter_properties)

                if rescheduled:
                    LOG.error(_('Unexpected Error: '), exc_info=exc_info)
                    msg = (_('Creating %(volume_id)s %(snapshot_id)s '
                             '%(image_id)s was rescheduled due to '
                             '%(reason)s') % {
                                 'volume_id': volume_id,
                                 'snapshot_id': snapshot_id,
                                 'image_id': image_id,
                                 'reason': unicode(exc_info[1])
                             })
                    raise exception.CinderException(msg)
                else:
                    # not re-scheduling
                    raise exc_info[0], exc_info[1], exc_info[2]

            if model_update:
                volume_ref = self.db.volume_update(context, volume_ref['id'],
                                                   model_update)
            if sourcevol_ref is not None:
                self.db.volume_glance_metadata_copy_from_volume_to_volume(
                    context, source_volid, volume_id)

            LOG.debug(_("volume %s: creating export"), volume_ref['name'])
            model_update = self.driver.create_export(context, volume_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)
        except Exception:
            with excutils.save_and_reraise_exception():
                volume_ref['status'] = 'error'
                self.db.volume_update(context, volume_ref['id'],
                                      {'status': volume_ref['status']})
                LOG.error(_("volume %s: create failed"), volume_ref['name'])
                self._notify_about_volume_usage(context, volume_ref,
                                                "create.end")

        if snapshot_id:
            # Copy any Glance metadata from the original volume
            self.db.volume_glance_metadata_copy_to_volume(
                context, volume_ref['id'], snapshot_id)

        if image_id and image_meta:
            # Copy all of the Glance image properties to the
            # volume_glance_metadata table for future reference.
            self.db.volume_glance_metadata_create(context, volume_ref['id'],
                                                  'image_id', image_id)
            name = image_meta.get('name', None)
            if name:
                self.db.volume_glance_metadata_create(context,
                                                      volume_ref['id'],
                                                      'image_name', name)
            # Save some more attributes into the volume metadata
            IMAGE_ATTRIBUTES = [
                'size', 'disk_format', 'container_format', 'checksum',
                'min_disk', 'min_ram'
            ]
            for key in IMAGE_ATTRIBUTES:
                value = image_meta.get(key, None)
                if value is not None:
                    self.db.volume_glance_metadata_create(
                        context, volume_ref['id'], key, value)
            image_properties = image_meta.get('properties', {})
            for key, value in image_properties.items():
                self.db.volume_glance_metadata_create(context,
                                                      volume_ref['id'], key,
                                                      value)

        now = timeutils.utcnow()
        volume_ref['status'] = status
        self.db.volume_update(context, volume_ref['id'], {
            'status': volume_ref['status'],
            'launched_at': now
        })
        LOG.info(_("volume %s: created successfully"), volume_ref['name'])
        self._reset_stats()

        self._notify_about_volume_usage(context, volume_ref, "create.end")
        return volume_ref['id']
Ejemplo n.º 39
0
    def _get_import_backup(self, context, backup_url):
        """Prepare database backup record for import.

        This method decodes provided backup_url and expects to find the id of
        the backup in there.

        It then checks the DB for the presence of this backup record; if the
        record exists and is not deleted, an exception is raised because the
        record cannot be created or reused.

        If the record is in deleted status then we must be trying to recover
        this record, so we'll reuse it.

        If the record doesn't already exist, we create it with the provided id.

        :param context: running context
        :param backup_url: backup description to be used by the backup driver
        :return: BackupImport object
        :raises InvalidBackup:
        :raises InvalidInput:
        """
        reservations = None
        backup = None
        # Deserialize string backup record into a dictionary
        backup_record = objects.Backup.decode_record(backup_url)

        # ID is a required field since it's what links incremental backups
        if 'id' not in backup_record:
            msg = _('Provided backup record is missing an id')
            raise exception.InvalidInput(reason=msg)

        # Since we use size to reserve and commit quota, size is another
        # required field.
        if 'size' not in backup_record:
            msg = _('Provided backup record is missing size attribute')
            raise exception.InvalidInput(reason=msg)

        try:
            reserve_opts = {
                'backups': 1,
                'backup_gigabytes': backup_record['size']
            }
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            quota_utils.process_reserve_over_quota(context,
                                                   e,
                                                   resource='backups',
                                                   size=backup_record['size'])

        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'volume_id': IMPORT_VOLUME_ID,
            'status': fields.BackupStatus.CREATING,
            'deleted_at': None,
            'deleted': False,
            'metadata': {}
        }

        try:
            try:
                # Try to get the backup with that ID in all projects even among
                # deleted entries.
                backup = objects.BackupImport.get_by_id(
                    context.elevated(read_deleted='yes'),
                    backup_record['id'],
                    project_only=False)

                # If record exists and it's not deleted we cannot proceed
                # with the import
                if backup.status != fields.BackupStatus.DELETED:
                    msg = _('Backup already exists in database.')
                    raise exception.InvalidBackup(reason=msg)

                # Otherwise we'll "revive" delete backup record
                backup.update(kwargs)
                backup.save()
                QUOTAS.commit(context, reservations)
            except exception.BackupNotFound:
                # If record doesn't exist create it with the specific ID
                backup = objects.BackupImport(context=context,
                                              id=backup_record['id'],
                                              **kwargs)
                backup.create()
                QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if backup and 'id' in backup:
                        backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)
        return backup
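
The backup_url handled above is an opaque, driver-agnostic description of the backup that decode_record() turns back into a dict. The sketch below shows the decode-and-validate step, assuming the URL is base64-encoded JSON; the real objects.Backup.decode_record() may use a different encoding, so treat this as illustrative only.

import base64
import json

# Sketch: decode a backup_url into a record dict and validate the two
# fields _get_import_backup() requires. Assumes base64-encoded JSON,
# which is an assumption about the encoding, not a guarantee.


def decode_record(backup_url):
    return json.loads(base64.b64decode(backup_url))


def validate_import_record(record):
    if 'id' not in record:
        raise ValueError('Provided backup record is missing an id')
    if 'size' not in record:
        raise ValueError('Provided backup record is missing size attribute')
    return record


if __name__ == '__main__':
    url = base64.b64encode(
        json.dumps({'id': 'abc', 'size': 5}).encode()).decode()
    print(validate_import_record(decode_record(url)))
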
Ejemplo n.º 40
0
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)

        if volume['status'] != "available":
            msg = (_('Volume to be backed up must be available '
                     'but the current status is "%s".') % volume['status'])
            raise exception.InvalidVolume(reason=msg)

        volume_host = volume_utils.extract_host(volume['host'], 'host')
        if not self._is_backup_service_enabled(volume, volume_host):
            raise exception.ServiceNotFound(service_id='cinder-backup')

        # Do a quota reservation before setting volume status and
        # backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(
                        msg, {
                            's_pid': context.project_id,
                            's_size': volume['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.BackupLimitExceeded(allowed=quotas[over])

        # Find the latest backup of the volume and use it as the parent
        # backup to do an incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(
                context.elevated(), volume_id)
            if backups.objects:
                latest_backup = max(backups.objects,
                                    key=lambda x: x['created_at'])
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != "available":
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        self.db.volume_update(context, volume_id, {'status': 'backing-up'})
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': 'creating',
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': volume_host,
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
Ejemplo n.º 41
0
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None,
               force=False,
               snapshot_id=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

            if volume_id != snapshot.volume_id:
                msg = (_('Volume %(vol1)s does not match with '
                         'snapshot.volume_id %(vol2)s.') % {
                             'vol1': volume_id,
                             'vol2': snapshot.volume_id
                         })
                raise exception.InvalidVolume(reason=msg)
            if snapshot['status'] not in ["available"]:
                msg = (_('Snapshot to be backed up must be available, '
                         'but the current status is "%s".') %
                       snapshot['status'])
                raise exception.InvalidSnapshot(reason=msg)
        elif volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".') %
                   volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not force:
            msg = _('Backing up an in-use volume must use the force flag.')
            raise exception.InvalidVolume(reason=msg)

        previous_status = volume['status']
        volume_host = volume_utils.extract_host(volume.host, 'host')
        host = self._get_available_backup_service_host(
            volume_host, volume.availability_zone)

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            quota_utils.process_reserve_over_quota(context,
                                                   e,
                                                   resource='backups',
                                                   size=volume.size)
        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(
                context.elevated(), volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or (snapshot and x['data_timestamp'] <
                                         snapshot['created_at'])) else
                    datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at
            self.db.snapshot_update(
                context, snapshot_id,
                {'status': fields.SnapshotStatus.BACKING_UP})
        else:
            self.db.volume_update(context, volume_id, {
                'status': 'backing-up',
                'previous_status': previous_status
            })

        backup = None
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': host,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if backup and 'id' in backup:
                        backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
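
The NOTE(xyang) comment above describes the parent-selection rule with an 8:00 / 8:10 / 8:20 timeline. The standalone sketch below works that example through with plain dicts and naive datetimes; pick_parent() mirrors the max(...) key above but is a simplified illustration, not Cinder code.

from datetime import datetime

EPOCH = datetime(1, 1, 1)


def pick_parent(backups, snapshot=None):
    # When backing up from a snapshot, only backups whose data_timestamp
    # predates the snapshot's created_at are eligible; the latest of
    # those becomes the parent. Without a snapshot, just take the latest.
    def key(backup):
        if snapshot is None or backup['data_timestamp'] < snapshot['created_at']:
            return backup['data_timestamp']
        return EPOCH  # ineligible backups sort to the bottom
    return max(backups, key=key) if backups else None


if __name__ == '__main__':
    backups = [
        {'id': 'b-0800', 'data_timestamp': datetime(2024, 1, 1, 8, 0)},
        {'id': 'b-0820', 'data_timestamp': datetime(2024, 1, 1, 8, 20)},
    ]
    snapshot = {'created_at': datetime(2024, 1, 1, 8, 10)}
    print(pick_parent(backups, snapshot)['id'])  # b-0800
    print(pick_parent(backups)['id'])            # b-0820
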
Ejemplo n.º 42
0
    def accept_transfer(self, context, volume_id, new_user, new_project):
        # NOTE(jdg): need elevated context as we haven't "given" the vol
        # yet
        volume_ref = self.db.volume_get(context.elevated(), volume_id)
        self.driver.accept_transfer(context, volume_ref, new_user, new_project)
Ejemplo n.º 43
0
    def initialize_connection(self, context, volume_id, connector):
        """Prepare volume for connection from host represented by connector.

        This method calls the driver initialize_connection and returns
        it to the caller.  The connector parameter is a dictionary with
        information about the host that will connect to the volume in the
        following format::

            {
                'ip': ip,
                'initiator': initiator,
            }

        ip: the ip address of the connecting machine

        initiator: the iscsi initiator name of the connecting machine.
        This can be None if the connecting machine does not support iscsi
        connections.

        The driver is responsible for doing any necessary security setup and
        returning a connection_info dictionary in the following format::

            {
                'driver_volume_type': driver_volume_type,
                'data': data,
            }

        driver_volume_type: a string to identify the type of volume.  This
                           can be used by the calling code to determine the
                           strategy for connecting to the volume. This could
                           be 'iscsi', 'rbd', 'sheepdog', etc.

        data: this is the data that the calling code will use to connect
              to the volume. Keep in mind that this will be serialized to
              json in various places, so it should not contain any non-json
              data types.
        """
        volume = self.db.volume_get(context, volume_id)
        self.driver.validate_connector(connector)
        conn_info = self.driver.initialize_connection(volume, connector)

        # Add qos_specs to connection info
        typeid = volume['volume_type_id']
        specs = {}
        if typeid:
            res = volume_types.get_volume_type_qos_specs(typeid)
            specs = res['qos_specs']

        # Don't pass qos_spec as empty dict
        qos_spec = dict(qos_spec=specs if specs else None)

        conn_info['data'].update(qos_spec)

        # Add access_mode to connection info
        volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
                                                            volume_id)
        if conn_info['data'].get('access_mode') is None:
            access_mode = volume_metadata.get('attached_mode')
            if access_mode is None:
                # NOTE(zhiyan): client didn't call 'os-attach' before
                access_mode = ('ro'
                               if volume_metadata.get('readonly') == 'True'
                               else 'rw')
            conn_info['data']['access_mode'] = access_mode
        return conn_info
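
The connector and connection_info shapes described in the docstring can be made concrete with a small example. The field values below are made up for illustration, not taken from a real deployment; real drivers return backend-specific data.

import json

connector = {
    'ip': '192.168.1.20',
    # None if the connecting machine does not support iSCSI
    'initiator': 'iqn.1993-08.org.debian:01:abcdef',
}

conn_info = {
    'driver_volume_type': 'iscsi',
    'data': {
        'target_portal': '192.168.1.10:3260',
        'target_iqn': 'iqn.2010-10.org.openstack:volume-0001',
        'target_lun': 1,
        'access_mode': 'rw',
        'qos_spec': None,
    },
}

# Everything under conn_info['data'] must stay JSON-serializable, since
# it is serialized in various places along the attach path.
print(json.dumps(conn_info, indent=2))
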
Ejemplo n.º 44
0
        def do_attach():
            # check the volume status before attaching
            volume = self.db.volume_get(context, volume_id)
            volume_metadata = self.db.volume_admin_metadata_get(
                context.elevated(), volume_id)
            if volume['status'] == 'attaching':
                if (volume['instance_uuid'] and volume['instance_uuid'] !=
                        instance_uuid):
                    msg = _("being attached by another instance")
                    raise exception.InvalidVolume(reason=msg)
                if (volume['attached_host'] and volume['attached_host'] !=
                        host_name):
                    msg = _("being attached by another host")
                    raise exception.InvalidVolume(reason=msg)
                if (volume_metadata.get('attached_mode') and
                        volume_metadata.get('attached_mode') != mode):
                    msg = _("being attached by different mode")
                    raise exception.InvalidVolume(reason=msg)
            elif volume['status'] != "available":
                msg = _("status must be available")
                raise exception.InvalidVolume(reason=msg)

            # TODO(jdg): attach_time column is currently varchar
            # we should update this to a date-time object
            # also consider adding detach_time?
            self.db.volume_update(context, volume_id,
                                  {"instance_uuid": instance_uuid,
                                   "attached_host": host_name,
                                   "status": "attaching",
                                   "attach_time": timeutils.strtime()})
            self.db.volume_admin_metadata_update(context.elevated(),
                                                 volume_id,
                                                 {"attached_mode": mode},
                                                 False)

            if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_attaching'})
                raise exception.InvalidUUID(uuid=instance_uuid)

            host_name_sanitized = utils.sanitize_hostname(
                host_name) if host_name else None

            volume = self.db.volume_get(context, volume_id)

            if volume_metadata.get('readonly') == 'True' and mode != 'ro':
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_attaching'})
                raise exception.InvalidVolumeAttachMode(mode=mode,
                                                        volume_id=volume_id)
            try:
                self.driver.attach_volume(context,
                                          volume,
                                          instance_uuid,
                                          host_name_sanitized,
                                          mountpoint)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.db.volume_update(context, volume_id,
                                          {'status': 'error_attaching'})

            self.db.volume_attached(context.elevated(),
                                    volume_id,
                                    instance_uuid,
                                    host_name_sanitized,
                                    mountpoint)
Ejemplo n.º 45
0
    def create_volume(self,
                      context,
                      volume_id,
                      request_spec=None,
                      filter_properties=None,
                      allow_reschedule=True,
                      snapshot_id=None,
                      image_id=None,
                      source_volid=None):
        """Creates and exports the volume."""
        context_saved = context.deepcopy()
        context = context.elevated()
        if filter_properties is None:
            filter_properties = {}

        try:
            flow_engine = create_volume.get_manager_flow(
                context,
                self.db,
                self.driver,
                self.scheduler_rpcapi,
                self.host,
                volume_id,
                snapshot_id=snapshot_id,
                image_id=image_id,
                source_volid=source_volid,
                allow_reschedule=allow_reschedule,
                reschedule_context=context_saved,
                request_spec=request_spec,
                filter_properties=filter_properties)
        except Exception:
            LOG.exception(_("Failed to create manager volume flow"))
            raise exception.CinderException(
                _("Failed to create manager volume flow"))

        if snapshot_id is not None:
            # Make sure the snapshot is not deleted until we are done with it.
            locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
        elif source_volid is not None:
            # Make sure the volume is not deleted until we are done with it.
            locked_action = "%s-%s" % (source_volid, 'delete_volume')
        else:
            locked_action = None

        def _run_flow():
            # This code executes the create volume flow. If something goes
            # wrong, the flow reverts all of the work that was done and
            # re-raises the exception. Otherwise, all data generated by the
            # flow becomes available in the flow engine's storage.
            flow_engine.run()

        @utils.synchronized(locked_action, external=True)
        def _run_flow_locked():
            _run_flow()

        if locked_action is None:
            _run_flow()
        else:
            _run_flow_locked()

        # Fetch created volume from storage
        volume_ref = flow_engine.storage.fetch('volume')
        return volume_ref['id']
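
The locked_action logic above serializes operations that must not interleave: creating a volume from a snapshot and deleting that snapshot both take the lock named "<snapshot_id>-delete_snapshot". The sketch below shows that idea with a simple in-process lock registry; the utils.synchronized(..., external=True) decorator used in the code above relies on inter-process locks, so this is an illustration of the pattern, not a replacement.

import threading
from contextlib import contextmanager

_locks = {}
_registry_lock = threading.Lock()


@contextmanager
def synchronized(name):
    # Look up (or create) one lock per action name, then hold it.
    with _registry_lock:
        lock = _locks.setdefault(name, threading.Lock())
    with lock:
        yield


def create_volume_from_snapshot(snapshot_id):
    with synchronized('%s-delete_snapshot' % snapshot_id):
        print('creating volume from snapshot %s' % snapshot_id)


def delete_snapshot(snapshot_id):
    with synchronized('%s-delete_snapshot' % snapshot_id):
        print('deleting snapshot %s' % snapshot_id)


if __name__ == '__main__':
    create_volume_from_snapshot('snap-1')
    delete_snapshot('snap-1')
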
Ejemplo n.º 46
0
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None,
               force=False,
               snapshot_id=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

        if volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".') %
                   volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not snapshot_id and not force:
            msg = _('Backing up an in-use volume must use the force flag.')
            raise exception.InvalidVolume(reason=msg)
        elif snapshot_id and snapshot['status'] not in ["available"]:
            msg = (_('Snapshot to be backed up must be available, '
                     'but the current status is "%s".') % snapshot['status'])
            raise exception.InvalidSnapshot(reason=msg)

        previous_status = volume['status']
        volume_host = volume_utils.extract_host(volume['host'], 'host')
        if not self._is_backup_service_enabled(volume['availability_zone'],
                                               volume_host):
            raise exception.ServiceNotFound(service_id='cinder-backup')

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(
                        msg, {
                            's_pid': context.project_id,
                            's_size': volume['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.BackupLimitExceeded(allowed=quotas[over])

        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(
                context.elevated(), volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from a volume, 'data_timestamp' will be the same
                # as 'created_at' for the backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for the snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or (snapshot and x['data_timestamp'] <
                                         snapshot['created_at'])) else
                    datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at

        self.db.volume_update(context, volume_id, {
            'status': 'backing-up',
            'previous_status': previous_status
        })
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': volume_host,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
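
The parent-selection rule described in the NOTE above can be exercised with plain dictionaries. A small, self-contained sketch follows; the timestamps are illustrative only, and `datetime.min` stands in for the pytz-based sentinel used in the snippet:

    from datetime import datetime, timezone

    snapshot = {'created_at': datetime(2024, 1, 1, 8, 10, tzinfo=timezone.utc)}
    backups = [
        {'id': 'b1', 'data_timestamp': datetime(2024, 1, 1, 8, 0, tzinfo=timezone.utc)},
        {'id': 'b2', 'data_timestamp': datetime(2024, 1, 1, 8, 20, tzinfo=timezone.utc)},
    ]

    # Backups newer than the snapshot are mapped to the minimum possible key,
    # so only backups taken before the snapshot can be chosen as the parent.
    latest = max(
        backups,
        key=lambda b: b['data_timestamp']
        if (not snapshot or b['data_timestamp'] < snapshot['created_at'])
        else datetime.min.replace(tzinfo=timezone.utc))
    assert latest['id'] == 'b1'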
Ejemplo n.º 47
0
    def _get_import_backup(self, context, backup_url):
        """Prepare database backup record for import.

        This method decodes the provided backup_url and expects to find the
        id of the backup in it.

        It then checks the DB for the presence of this backup record; if the
        record is found and is not deleted, an exception is raised because
        the record cannot be created or reused.

        If the record is in deleted status then we must be trying to recover
        this record, so we'll reuse it.

        If the record doesn't already exist we create it with the provided
        id.

        :param context: running context
        :param backup_url: backup description to be used by the backup driver
        :return: BackupImport object
        :raises InvalidBackup:
        :raises InvalidInput:
        """
        # Deserialize string backup record into a dictionary
        backup_record = objects.Backup.decode_record(backup_url)

        # ID is a required field since it's what links incremental backups
        if 'id' not in backup_record:
            msg = _('Provided backup record is missing an id')
            raise exception.InvalidInput(reason=msg)

        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'volume_id': IMPORT_VOLUME_ID,
            'status': fields.BackupStatus.CREATING,
            'deleted_at': None,
            'deleted': False,
            'metadata': {}
        }

        try:
            # Try to get the backup with that ID in all projects even among
            # deleted entries.
            backup = objects.BackupImport.get_by_id(
                context.elevated(read_deleted='yes'),
                backup_record['id'],
                project_only=False)

            # If record exists and it's not deleted we cannot proceed with the
            # import
            if backup.status != fields.BackupStatus.DELETED:
                msg = _('Backup already exists in database.')
                raise exception.InvalidBackup(reason=msg)

            # Otherwise we'll "revive" delete backup record
            backup.update(kwargs)
            backup.save()

        except exception.BackupNotFound:
            # If record doesn't exist create it with the specific ID
            backup = objects.BackupImport(context=context,
                                          id=backup_record['id'],
                                          **kwargs)
            backup.create()

        return backup
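
For context, the backup_url consumed here is an opaque string produced by `Backup.encode_record()`. Assuming it is base64-encoded JSON (the assumption this sketch makes; the helpers below are hypothetical stand-ins, not cinder's implementation), a minimal round trip looks like:

    import base64
    import json

    def encode_record(record):
        # Hypothetical stand-in for objects.Backup.encode_record().
        return base64.urlsafe_b64encode(json.dumps(record).encode()).decode()

    def decode_record(backup_url):
        # Hypothetical stand-in for objects.Backup.decode_record().
        return json.loads(base64.urlsafe_b64decode(backup_url.encode()))

    url = encode_record({'id': '9f1c2c2e-0000-4a6b-8e2f-123456789abc',
                         'display_name': 'nightly'})
    assert 'id' in decode_record(url)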
Ejemplo n.º 48
0
    def _get_weighted_candidates_generic_group(
            self,
            context: context.RequestContext,
            group_spec: dict,
            request_spec_list: list[dict],
            group_filter_properties: Optional[dict] = None,
            filter_properties_list: Optional[list[dict]] = None) -> list:
        """Finds backends that supports the group.

        Returns a list of backends that meet the required specs,
        ordered by their fitness.
        """
        elevated = context.elevated()

        backends_by_group_type = self._get_weighted_candidates_by_group_type(
            context, group_spec, group_filter_properties)

        weighed_backends = []
        backends_by_vol_type = []
        index = 0
        for request_spec in request_spec_list:
            volume_properties = request_spec['volume_properties']
            # Since Cinder mixes filters from Oslo and its own, which take
            # 'resource_XX' and 'volume_XX' as input respectively, copying
            # 'volume_XX' to 'resource_XX' makes both filters happy.
            resource_properties = volume_properties.copy()
            volume_type = request_spec.get("volume_type", None)
            resource_type = request_spec.get("volume_type", None)
            request_spec.update({'resource_properties': resource_properties})

            config_options = self._get_configuration_options()

            filter_properties = {}
            if filter_properties_list:
                filter_properties = filter_properties_list[index]
                if filter_properties is None:
                    filter_properties = {}
            self._populate_retry(filter_properties, request_spec)

            # Add group_support in extra_specs if it is not there.
            # Make sure it is populated in filter_properties
            # if 'group_support' not in resource_type.get(
            #         'extra_specs', {}):
            #     resource_type['extra_specs'].update(
            #         group_support='<is> True')

            filter_properties.update({
                'context': context,
                'request_spec': request_spec,
                'config_options': config_options,
                'volume_type': volume_type,
                'resource_type': resource_type
            })

            self.populate_filter_properties(request_spec, filter_properties)

            # Find our local list of acceptable backends by filtering and
            # weighing our options. We virtually consume resources on
            # them so subsequent selections can adjust accordingly.

            # Note: remember, we are using an iterator here. So only
            # traverse this list once.
            all_backends = self.host_manager.get_all_backend_states(elevated)
            if not all_backends:
                return []

            # Filter local backends based on requirements ...
            backends = self.host_manager.get_filtered_backends(
                all_backends, filter_properties)

            if not backends:
                return []

            LOG.debug("Filtered %s", backends)

            # weighted_backend = WeightedHost() ... the best
            # backend for the job.
            temp_weighed_backends = self.host_manager.get_weighed_backends(
                backends, filter_properties)
            if not temp_weighed_backends:
                return []
            if index == 0:
                backends_by_vol_type = temp_weighed_backends
            else:
                backends_by_vol_type = self._find_valid_backends(
                    backends_by_vol_type, temp_weighed_backends)
                if not backends_by_vol_type:
                    return []

            index += 1

        # Find backends selected by both the group type and volume types.
        weighed_backends = self._find_valid_backends(backends_by_vol_type,
                                                     backends_by_group_type)

        return weighed_backends
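
`_find_valid_backends()` is not shown in this snippet; the intersection it performs can be sketched as follows. This is a simplification that assumes each weighed entry exposes a `backend_id` attribute, which is an assumption of the sketch rather than the real weighed-host API:

    from collections import namedtuple

    def find_valid_backends(weighed_a, weighed_b):
        # Keep entries from the first list whose backend also appears in the
        # second list; ordering (and thus weighting) of the first list wins.
        ids_b = {w.backend_id for w in weighed_b}
        return [w for w in weighed_a if w.backend_id in ids_b]

    Weighed = namedtuple('Weighed', ['backend_id', 'weight'])
    a = [Weighed('lvm-1', 90), Weighed('lvm-2', 50)]
    b = [Weighed('lvm-2', 70), Weighed('lvm-3', 60)]
    assert [w.backend_id for w in find_valid_backends(a, b)] == ['lvm-2']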
Ejemplo n.º 49
0
    def delete_backup(self, context, backup_id):
        """Delete volume backup from configured backup service."""
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the backup status updated. Fail early since there
            # is no other status to change but the backup's.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized as err:
            with excutils.save_and_reraise_exception():
                self.db.backup_update(context, backup_id, {
                    'status': 'error',
                    'fail_reason': six.text_type(err)
                })

        LOG.info(_LI('Delete backup started, backup: %s.'), backup_id)
        backup = self.db.backup_get(context, backup_id)
        self._notify_about_backup_usage(context, backup, "delete.start")
        self.db.backup_update(context, backup_id, {'host': self.host})

        expected_status = 'deleting'
        actual_status = backup['status']
        if actual_status != expected_status:
            err = _('Delete_backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') \
                % {'expected_status': expected_status,
                   'actual_status': actual_status}
            self.db.backup_update(context, backup_id, {
                'status': 'error',
                'fail_reason': err
            })
            raise exception.InvalidBackup(reason=err)

        backup_service = self._map_service_to_driver(backup['service'])
        if backup_service is not None:
            configured_service = self.driver_name
            if backup_service != configured_service:
                err = _('Delete backup aborted, the backup service currently'
                        ' configured [%(configured_service)s] is not the'
                        ' backup service that was used to create this'
                        ' backup [%(backup_service)s].')\
                    % {'configured_service': configured_service,
                       'backup_service': backup_service}
                self.db.backup_update(context, backup_id, {'status': 'error'})
                raise exception.InvalidBackup(reason=err)

            try:
                backup_service = self.service.get_backup_driver(context)
                backup_service.delete(backup)
            except Exception as err:
                with excutils.save_and_reraise_exception():
                    self.db.backup_update(context, backup_id, {
                        'status': 'error',
                        'fail_reason': six.text_type(err)
                    })

        # Get reservations
        try:
            reserve_opts = {
                'backups': -1,
                'backup_gigabytes': -backup['size'],
            }
            reservations = QUOTAS.reserve(context,
                                          project_id=backup['project_id'],
                                          **reserve_opts)
        except Exception:
            reservations = None
            LOG.exception(_LE("Failed to update usages deleting backup"))

        context = context.elevated()
        self.db.backup_destroy(context, backup_id)

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context,
                          reservations,
                          project_id=backup['project_id'])

        LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup_id)
        self._notify_about_backup_usage(context, backup, "delete.end")
Ejemplo n.º 50
0
    def create_volume(self, context, volume_id, request_spec=None,
                      filter_properties=None, allow_reschedule=True,
                      snapshot_id=None, image_id=None, source_volid=None):
        """Creates and exports the volume."""
        context = context.elevated()
        if filter_properties is None:
            filter_properties = {}
        volume_ref = self.db.volume_get(context, volume_id)
        self._notify_about_volume_usage(context, volume_ref, "create.start")

        # NOTE(vish): so we don't have to get volume from db again
        #             before passing it to the driver.
        volume_ref['host'] = self.host

        status = 'available'
        model_update = False
        image_meta = None
        cloned = False

        try:
            vol_name = volume_ref['name']
            vol_size = volume_ref['size']
            LOG.debug(_("volume %(vol_name)s: creating lv of"
                        " size %(vol_size)sG") % locals())
            snapshot_ref = None
            sourcevol_ref = None
            image_service = None
            image_location = None
            image_meta = None

            if snapshot_id is not None:
                LOG.info(_("volume %s: creating from snapshot"),
                         volume_ref['name'])
                snapshot_ref = self.db.snapshot_get(context, snapshot_id)
            elif source_volid is not None:
                LOG.info(_("volume %s: creating from existing volume"),
                         volume_ref['name'])
                sourcevol_ref = self.db.volume_get(context, source_volid)
            elif image_id is not None:
                LOG.info(_("volume %s: creating from image"),
                         volume_ref['name'])
                # create the volume from an image
                image_service, image_id = \
                    glance.get_remote_image_service(context,
                                                    image_id)
                image_location = image_service.get_location(context, image_id)
                image_meta = image_service.show(context, image_id)
            else:
                LOG.info(_("volume %s: creating"), volume_ref['name'])

            try:
                model_update, cloned = self._create_volume(context,
                                                           volume_ref,
                                                           snapshot_ref,
                                                           sourcevol_ref,
                                                           image_service,
                                                           image_id,
                                                           image_location)
            except Exception:
                # restore source volume status before reschedule
                if sourcevol_ref is not None:
                    self.db.volume_update(context, sourcevol_ref['id'],
                                          {'status': sourcevol_ref['status']})
                exc_info = sys.exc_info()
                # try to re-schedule volume:
                self._reschedule_or_reraise(context, volume_id, exc_info,
                                            snapshot_id, image_id,
                                            request_spec, filter_properties,
                                            allow_reschedule)
                return

            if model_update:
                volume_ref = self.db.volume_update(
                    context, volume_ref['id'], model_update)
            if sourcevol_ref is not None:
                self.db.volume_glance_metadata_copy_from_volume_to_volume(
                    context,
                    source_volid,
                    volume_id)

            LOG.debug(_("volume %s: creating export"), volume_ref['name'])
            model_update = self.driver.create_export(context, volume_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)

        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_ref['id'], {'status': 'error'})
                LOG.error(_("volume %s: create failed"), volume_ref['name'])

        if snapshot_id:
            # Copy any Glance metadata from the original volume
            self.db.volume_glance_metadata_copy_to_volume(context,
                                                          volume_ref['id'],
                                                          snapshot_id)

        if image_id and not cloned:
            if image_meta:
                # Copy all of the Glance image properties to the
                # volume_glance_metadata table for future reference.
                self.db.volume_glance_metadata_create(context,
                                                      volume_ref['id'],
                                                      'image_id', image_id)
                name = image_meta.get('name', None)
                if name:
                    self.db.volume_glance_metadata_create(context,
                                                          volume_ref['id'],
                                                          'image_name', name)
                image_properties = image_meta.get('properties', {})
                for key, value in image_properties.items():
                    self.db.volume_glance_metadata_create(context,
                                                          volume_ref['id'],
                                                          key, value)

        now = timeutils.utcnow()
        self.db.volume_update(context,
                              volume_ref['id'], {'status': status,
                                                 'launched_at': now})
        LOG.info(_("volume %s: created successfully"), volume_ref['name'])
        self._reset_stats()

        self._notify_about_volume_usage(context, volume_ref, "create.end")
        return volume_ref['id']
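
The error path above restores the source volume's status and then defers to `_reschedule_or_reraise()`. That decision can be reduced to a small sketch; the names are hypothetical, and like the original it must be called from inside an `except` block so that `sys.exc_info()` still holds the failure:

    import sys

    def reschedule_or_reraise(schedule_again, allow_reschedule, retry_count,
                              max_retries=3):
        # Capture the active exception before doing anything that might
        # clear it.
        exc_type, exc_value, exc_tb = sys.exc_info()
        if allow_reschedule and retry_count < max_retries:
            # Hand the request back to the scheduler instead of failing it.
            schedule_again()
            return
        # Out of retries (or rescheduling disabled): surface the original error.
        raise exc_value.with_traceback(exc_tb)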
Ejemplo n.º 51
0
    def create_volume(self, context, volume_id, snapshot_id=None,
                      image_id=None):
        """Creates and exports the volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        self._notify_about_volume_usage(context, volume_ref, "create.start")
        LOG.info(_("volume %s: creating"), volume_ref['name'])

        self.db.volume_update(context,
                              volume_id,
                              {'host': self.host})
        # NOTE(vish): so we don't have to get volume from db again
        #             before passing it to the driver.
        volume_ref['host'] = self.host

        status = 'available'
        model_update = False
        # Ensure 'cloned' is always defined for the image check below.
        cloned = False

        try:
            vol_name = volume_ref['name']
            vol_size = volume_ref['size']
            LOG.debug(_("volume %(vol_name)s: creating lv of"
                    " size %(vol_size)sG") % locals())
            if snapshot_id is None and image_id is None:
                model_update = self.driver.create_volume(volume_ref)
            elif snapshot_id is not None:
                snapshot_ref = self.db.snapshot_get(context, snapshot_id)
                model_update = self.driver.create_volume_from_snapshot(
                    volume_ref,
                    snapshot_ref)
            else:
                # create the volume from an image
                image_service, image_id = \
                               glance.get_remote_image_service(context,
                                                               image_id)
                image_location = image_service.get_location(context, image_id)
                cloned = self.driver.clone_image(volume_ref, image_location)
                if not cloned:
                    model_update = self.driver.create_volume(volume_ref)
                    status = 'downloading'

            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)

            LOG.debug(_("volume %s: creating export"), volume_ref['name'])
            model_update = self.driver.create_export(context, volume_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)

        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_ref['id'], {'status': 'error'})

        now = timeutils.utcnow()
        self.db.volume_update(context,
                              volume_ref['id'], {'status': status,
                                                 'launched_at': now})
        LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
        self._reset_stats()

        if image_id and not cloned:
            # Copy the image onto the volume.
            self._copy_image_to_volume(context, volume_ref, image_id)
        self._notify_about_volume_usage(context, volume_ref, "create.end")
        return volume_ref['id']
Ejemplo n.º 52
0
    def _get_weighted_candidates(
            self,
            context: context.RequestContext,
            request_spec: dict,
            filter_properties: Optional[dict] = None) -> list:
        """Return a list of backends that meet required specs.

        Returned list is ordered by their fitness.
        """
        elevated = context.elevated()

        # Since Cinder mixes filters from Oslo and its own, which take
        # 'resource_XX' and 'volume_XX' as input respectively, copying
        # 'volume_XX' to 'resource_XX' makes both filters happy.
        volume_type = request_spec.get("volume_type")
        # When creating snapshots, the value of volume_type is None here
        # which causes issues in filters (Eg: Bug #1856126).
        # To prevent that, we set it as an empty dictionary here.
        if volume_type is None:
            volume_type = {}
        resource_type = volume_type

        config_options = self._get_configuration_options()

        if filter_properties is None:
            filter_properties = {}
        self._populate_retry(filter_properties, request_spec)

        request_spec_dict = jsonutils.to_primitive(request_spec)

        filter_properties.update({
            'context': context,
            'request_spec': request_spec_dict,
            'config_options': config_options,
            'volume_type': volume_type,
            'resource_type': resource_type
        })

        self.populate_filter_properties(request_spec, filter_properties)

        # If multiattach is enabled on a volume, we need to add
        # multiattach to extra specs, so that the capability
        # filtering is enabled.
        multiattach = request_spec['volume_properties'].get(
            'multiattach', False)
        if multiattach and 'multiattach' not in resource_type.get(
                'extra_specs', {}):
            if 'extra_specs' not in resource_type:
                resource_type['extra_specs'] = {}

            resource_type['extra_specs'].update(multiattach='<is> True')

        # Revert volume consumed capacity if it's a rescheduled request
        retry = filter_properties.get('retry', {})
        if retry.get('backends', []):
            self.host_manager.revert_volume_consumed_capacity(
                retry['backends'][-1],
                request_spec['volume_properties']['size'])
        # Find our local list of acceptable backends by filtering and
        # weighing our options. We virtually consume resources on
        # them so subsequent selections can adjust accordingly.

        # Note: remember, we are using an iterator here. So only
        # traverse this list once.
        backends = self.host_manager.get_all_backend_states(elevated)

        # Filter local backends based on requirements ...
        backends = self.host_manager.get_filtered_backends(
            backends, filter_properties)
        if not backends:
            return []

        LOG.debug("Filtered %s", backends)
        # weighted_backends = WeightedHost() ... the best
        # backend for the job.
        weighed_backends = self.host_manager.get_weighed_backends(
            backends, filter_properties)
        return weighed_backends
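
Both schedulers above follow the same filter-then-weigh shape. Stripped of the HostManager machinery, the idea reduces to the sketch below, which uses hypothetical capacity data and simply weighs by free space:

    def schedule(backends, required_gb):
        # Filter: drop backends that cannot satisfy the request.
        candidates = [b for b in backends if b['free_gb'] >= required_gb]
        if not candidates:
            return []
        # Weigh: order the survivors, best first.
        return sorted(candidates, key=lambda b: b['free_gb'], reverse=True)

    pools = [{'name': 'lvm-1', 'free_gb': 120}, {'name': 'lvm-2', 'free_gb': 40}]
    assert schedule(pools, 50)[0]['name'] == 'lvm-1'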