Example #1
0
    def _check_target_flavor(self, instance, flavor, block_device_info):
        """Validate that the target flavor does not shrink any disk.

        Raises InstanceFaultRollback wrapping CannotResizeDisk when the
        requested root size or total ephemeral size is smaller than the
        current one; resizing a disk down is not supported.
        """
        requested_root = flavor.root_gb
        current_root = instance.root_gb

        # Total size of the ephemeral disks currently attached, taken from
        # the block device mappings.
        eph_devices = driver.block_device_info_get_ephemerals(
            block_device_info)
        current_eph = sum(eph.get('size', 0) for eph in eph_devices)
        # NOTE(review): instance.ephemeral_gb appears to already hold the
        # target flavor's ephemeral size at this point -- confirm with caller.
        requested_eph = instance.ephemeral_gb

        if requested_root < current_root:
            reason = _("Cannot resize the root disk to a smaller size. "
                       "Current size: %(curr_root_gb)s GB. Requested "
                       "size: %(new_root_gb)s GB.") % {
                           'curr_root_gb': current_root,
                           'new_root_gb': requested_root}
            raise exception.InstanceFaultRollback(
                exception.CannotResizeDisk(reason=reason))

        if requested_eph < current_eph:
            reason = _("Cannot resize the ephemeral disk(s) to a smaller"
                       " size. Current total ephemeral size: "
                       "%(curr_eph_gb)s GB. Requested total size: "
                       "%(new_eph_gb)s GB") % {
                           'curr_eph_gb': current_eph,
                           'new_eph_gb': requested_eph}
            raise exception.InstanceFaultRollback(
                exception.CannotResizeDisk(reason=reason))
Example #2
0
    def migrate_disk_and_power_off(self,
                                   context,
                                   instance,
                                   dest,
                                   flavor,
                                   network_info,
                                   block_device_info=None,
                                   timeout=0,
                                   retry_interval=0):
        """Transfers the disk of a running instance in multiple phases, turning
        off the instance before the end.
        """
        LOG.debug("`Migrate disk and power off` method called.",
                  instance=instance)
        # The instance must be down before its disks can be moved.
        self._vbox_ops.power_off(instance, timeout, retry_interval)

        # Shrinking the root disk is not supported; abort and roll back.
        if flavor['root_gb'] < instance['root_gb']:
            message = i18n._("Cannot resize the root disk to a smaller size. "
                             "Current size: %(current_size)s GB. Requested "
                             "size: "
                             "%(new_size)s GB") % {
                                 'current_size': instance['root_gb'],
                                 'new_size': flavor['root_gb']}
            raise exception.InstanceFaultRollback(message)

        # Detach the storage and, if anything was attached, copy the disk
        # files over to the destination host.
        disks = self._detach_storage(instance)
        if disks:
            self._migrate_disk_files(instance, disks, dest)
Example #3
0
    def instance_claim(self, context, instance_ref, limits=None):
        """Indicate that some resources are needed for an upcoming compute
        instance build operation.

        This should be called before the compute node is about to perform
        an instance build operation that will consume additional resources.

        :param context: security context
        :param instance_ref: instance to reserve resources for
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs.
        :returns: A Claim ticket representing the reserved resources.  It can
                  be used to revert the resource usage if an error occurs
                  during the instance build.
        :raises: exception.InstanceFaultRollback if the claim cannot be
                 satisfied; partial stats are pushed to the scheduler first.
        """
        if self.disabled:
            # compute_driver doesn't support resource tracking, just
            # set the 'host' and node fields and continue the build:
            self._set_instance_host_and_node(context, instance_ref)
            return claims.NopClaim()

        # sanity checks: host/node are assigned below only after the claim
        # succeeds, so they should not be set already.
        if instance_ref['host']:
            LOG.warning(_("Host field should not be set on the instance until "
                          "resources have been claimed."),
                          instance=instance_ref)

        if instance_ref['node']:
            LOG.warning(_("Node field should not be set on the instance "
                          "until resources have been claimed."),
                          instance=instance_ref)

        # get memory overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(instance_ref)
        LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
                  "MB", {'flavor': instance_ref['memory_mb'],
                          'overhead': overhead['memory_mb']})

        try:
            claim = h_claims.HuaweiClaim(context, instance_ref, self,
                                         self.compute_node,
                                         overhead=overhead, limits=limits)
        except Exception as e:
            # Update partial stats locally and populate them to Scheduler
            # before rolling the build back.
            self._update(context.elevated(), self.compute_node, force=True)
            raise exception.InstanceFaultRollback(inner_exception=e)

        self._set_instance_host_and_node(context, instance_ref)
        instance_ref['numa_topology'] = claim.claimed_numa_topology

        # Mark resources in-use and update stats
        self._update_usage_from_instance(context, self.compute_node,
                                         instance_ref)

        elevated = context.elevated()
        # persist changes to the compute node:
        self._update(elevated, self.compute_node)

        return claim
Example #4
0
    def delete_vm(self,
                  context,
                  instance,
                  block_device_info=None,
                  destroy_disks=True):
        """Delete VM on FC.

        During a resize revert/confirm the VM is only stopped, never
        deleted; otherwise the VM is looked up on FC, cinder volumes are
        detached, and the VM is deleted.

        :param context: security context for the request
        :param instance: nova instance to delete
        :param block_device_info: block device mapping info; any cinder
            volumes listed here are detached (after stopping the VM) before
            the deletion
        :param destroy_disks: whether to destroy the VM's disks as well
        :return: None
        """

        # if revert resize, only stop vm. when resize operation
        # task state will be resize_reverting or resize_confirming
        if instance and (instance.get('task_state') == 'resize_reverting'
                         or instance.get('task_state') == 'resize_confirming'):
            LOG.info(_('revert resize now, here only stop vm.'))
            try:
                self.stop_vm(instance)
            except Exception as e:
                # LOG.warn is a deprecated alias of LOG.warning.
                LOG.warning(_('stop vm failed, trigger rollback'))
                raise exception.InstanceFaultRollback(inner_exception=e)
            return

        try:
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
        except exception.InstanceNotFound:
            # The VM is already gone on FC; treat the delete as a no-op.
            LOG.warning(_('instance exist no more. ignore this deleting.'))
            return

        # detach volume created by cinder
        if block_device_info:
            LOG.info(_('now will stop vm before detach cinder volumes.'))
            self.stop_vm(instance)
            for vol in block_device_info['block_device_mapping']:
                self.detach_volume(vol['connection_info'], instance)

        # if vm is in fault-resuming or unknown status, stop it before delete
        if fc_vm.status == constant.VM_STATUS.UNKNOWN \
        or fc_vm.status == constant.VM_STATUS.FAULTRESUMING:
            LOG.debug(
                _("vm %s status is fault-resuming or unknown, "
                  "stop it before delete."), fc_vm.uri)
            self.stop_vm(instance)

        self._delete_vm_with_fc_vm(fc_vm, destroy_disks)

        # update affinity group info if needed; a failure here must not
        # fail the delete, so log the exception and continue.
        # pylint: disable=W0703
        try:
            self._update_drs_rules(instance)
            self._update_affinity_groups(context, instance)
        except Exception as excp:
            utils.log_exception(excp)
            LOG.error(_('update affinity group info failed !'))
Example #5
0
    def _check_target_flavor(self, instance, flavor):
        new_root_gb = flavor.root_gb
        curr_root_gb = instance.flavor.root_gb

        if new_root_gb < curr_root_gb:
            raise exception.InstanceFaultRollback(
                exception.CannotResizeDisk(
                    reason=_("Cannot resize the root disk to a smaller size. "
                             "Current size: %(curr_root_gb)s GB. Requested "
                             "size: %(new_root_gb)s GB.") % {
                                 'curr_root_gb': curr_root_gb,
                                 'new_root_gb': new_root_gb}))
Example #6
0
    def _check_target_flavor(self, instance, flavor):
        new_root_gb = flavor['root_gb']
        curr_root_gb = instance.root_gb

        if new_root_gb < curr_root_gb:
            raise exception.InstanceFaultRollback(
                vmutils.VHDResizeException(
                    _("Cannot resize the root disk to a smaller size. "
                      "Current size: %(curr_root_gb)s GB. Requested size: "
                      "%(new_root_gb)s GB") %
                    {'curr_root_gb': curr_root_gb,
                     'new_root_gb': new_root_gb}))
Example #7
0
    def resize_claim(self, context, instance, instance_type,
                     image_meta=None, limits=None):
        """Indicate that resources are needed for a resize operation to this
        compute host.

        :param context: security context
        :param instance: instance object to reserve resources for
        :param instance_type: new instance_type being resized to
        :param image_meta: image metadata dict for the target image, if any
        :param limits: Dict of oversubscription limits for memory, disk,
            and CPUs
        :returns: A Claim ticket representing the reserved resources.  It
            should be used to finalize the resource claim or free the
            resources after the compute operation is finished.
        :raises: exception.InstanceFaultRollback if the claim cannot be
            satisfied; partial stats are pushed to the scheduler first.
        """
        image_meta = image_meta or {}

        if self.disabled:
            # compute_driver doesn't support resource tracking, just
            # generate the migration record and continue the resize:
            migration = self._create_migration(context, instance,
                                               instance_type)
            return claims.NopClaim(migration=migration)

        # get memory overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(instance_type)
        LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
                  "MB", {'flavor': instance_type['memory_mb'],
                          'overhead': overhead['memory_mb']})

        # The claim code works on primitive dicts rather than objects.
        instance_ref = obj_base.obj_to_primitive(instance)
        try:
            claim = h_claims.HuaweiResizeClaim(context, instance_ref,
                                               instance_type, image_meta,
                                               self, self.compute_node,
                                               overhead=overhead,
                                               limits=limits)
        except Exception as e:
            # Update partial stats locally and populate them to Scheduler
            self._update(context.elevated(), self.compute_node, force=True)
            LOG.exception("Failed to claim when resize %s." % instance['uuid'])
            raise exception.InstanceFaultRollback(inner_exception=e)
        migration = self._create_migration(context, instance_ref,
                                           instance_type)
        # save pci_requests
        if claim.pci_requests:
            claim.pci_requests.save(context)
        claim.migration = migration
        # Stash the claimed NUMA topology / bind info in system metadata so
        # they can be picked up when the resize lands on this host.
        system_metadata = instance.system_metadata
        if claim.claimed_numa_topology:
            system_metadata['new_numa_topo'] = jsonutils.dumps(
                claim.claimed_numa_topology)
        if claim.bind_info:
            system_metadata['new_bind_info'] = jsonutils.dumps(claim.bind_info)
        instance.system_metadata = system_metadata
        instance.save()
        # Mark the resources in-use for the resize landing on this
        # compute host:
        self._update_usage_from_migration(context, instance_ref, image_meta,
                                          self.compute_node, migration)
        elevated = context.elevated()
        self._update(elevated, self.compute_node)

        return claim