Example #1
    def test_unshelve_volume_backed(self, mock_notify, mock_image_meta):
        instance = self._create_fake_instance_obj()
        node = test_compute.NODENAME
        limits = {}
        filter_properties = {'limits': limits}
        instance.task_state = task_states.UNSHELVING
        instance.save()
        image_meta = {'properties': {'base_image_ref': uuids.image_id}}
        mock_image_meta.return_value = image_meta

        self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute, '_prep_block_device')
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.mox.StubOutWithMock(self.rt, 'instance_claim')
        self.mox.StubOutWithMock(self.compute.network_api,
                                 'setup_instance_network_on_host')

        tracking = {'last_state': instance.task_state}

        def check_save(expected_task_state=None):
            if tracking['last_state'] == task_states.UNSHELVING:
                self.assertEqual(task_states.SPAWNING, instance.task_state)
                tracking['last_state'] = instance.task_state
            elif tracking['last_state'] == task_states.SPAWNING:
                self.assertEqual(123, instance.power_state)
                self.assertEqual(vm_states.ACTIVE, instance.vm_state)
                self.assertIsNone(instance.task_state)
                self.assertIsNone(instance.key_data)
                self.assertFalse(instance.auto_disk_config)
                tracking['last_state'] = instance.task_state
            else:
                self.fail('Unexpected save!')

        self.compute._notify_about_instance_usage(self.context, instance,
                'unshelve.start')

        self.compute._prep_block_device(self.context, instance,
                mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
        self.compute.network_api.setup_instance_network_on_host(
                self.context, instance, self.compute.host)
        self.rt.instance_claim(self.context, instance, node, limits).AndReturn(
                claims.Claim(self.context, instance, self.rt,
                             _fake_resources(),
                             objects.InstancePCIRequests(requests=[])))
        self.compute.driver.spawn(self.context, instance,
                mox.IsA(objects.ImageMeta),
                injected_files=[], admin_password=None,
                network_info=[],
                block_device_info='fake_bdm')
        self.compute._get_power_state(self.context, instance).AndReturn(123)
        self.compute._notify_about_instance_usage(self.context, instance,
                'unshelve.end')
        self.mox.ReplayAll()

        with mock.patch.object(instance, 'save') as mock_save:
            mock_save.side_effect = check_save
            self.compute.unshelve_instance(self.context, instance, image=None,
                    filter_properties=filter_properties, node=node)
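Note: Examples #1, #15 and #16 drive their stubs through the long-deprecated mox library. As a point of comparison, here is a minimal mock-based sketch of the _prep_block_device expectation from Example #1; it assumes the remaining collaborators (spawn, instance_claim, and so on) are stubbed as in the original, and reuses the names self.compute, instance and filter_properties from above.

# Hedged sketch, not upstream test code: the mox record/replay pair for
# _prep_block_device re-expressed in mock's patch-then-assert style.
with mock.patch.object(self.compute, '_prep_block_device',
                       return_value='fake_bdm') as mock_prep:
    self.compute.unshelve_instance(self.context, instance, image=None,
                                   filter_properties=filter_properties,
                                   node=node)
# mock verifies calls after the fact instead of mox's ReplayAll():
mock_prep.assert_called_once_with(self.context, instance, mock.ANY,
                                  do_check_attach=False)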
Example #2
 def fake_claim(context, instance, node, allocations, limits):
     instance.host = self.compute.host
     instance.node = node
     requests = objects.InstancePCIRequests(requests=[])
     return claims.Claim(context, instance, node,
                         self.rt, _fake_resources(),
                         requests, limits=limits)
Example #3
 def _claim(self, limits=None, overhead=None, **kwargs):
     numa_topology = kwargs.pop('numa_topology', None)
     instance = self._fake_instance(**kwargs)
     if numa_topology:
         db_numa_topology = {
             'id': 1,
             'created_at': None,
             'updated_at': None,
             'deleted_at': None,
             'deleted': None,
             'instance_uuid': instance['uuid'],
             'numa_topology': numa_topology.to_json()
         }
     else:
         db_numa_topology = None
     if overhead is None:
         overhead = {'memory_mb': 0}
     with mock.patch.object(db,
                            'instance_extra_get_by_instance_uuid',
                            return_value=db_numa_topology):
         return claims.Claim('context',
                             instance,
                             self.tracker,
                             self.resources,
                             overhead=overhead,
                             limits=limits)
Example #4
    def instance_claim(self, context, instance_ref, limits=None):
        """Indicate that some resources are needed for an upcoming compute
        instance build operation.

        This should be called before the compute node is about to perform
        an instance build operation that will consume additional resources.

        :param context: security context
        :param instance_ref: instance to reserve resources for
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs.
        :returns: A Claim ticket representing the reserved resources.  It can
                  be used to revert the resource usage if an error occurs
                  during the instance build.
        """
        if self.disabled:
            # compute_driver doesn't support resource tracking, just
            # set the 'host' and node fields and continue the build:
            self._set_instance_host_and_node(context, instance_ref)
            return claims.NopClaim()

        # sanity checks:
        if instance_ref['host']:
            LOG.warning(_("Host field should not be set on the instance until "
                          "resources have been claimed."),
                        instance=instance_ref)

        if instance_ref['node']:
            LOG.warning(_("Node field should not be set on the instance "
                          "until resources have been claimed."),
                        instance=instance_ref)

        # get memory overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(instance_ref)
        LOG.debug(
            "Memory overhead for %(flavor)d MB instance; %(overhead)d "
            "MB", {
                'flavor': instance_ref['memory_mb'],
                'overhead': overhead['memory_mb']
            })

        claim = claims.Claim(context,
                             instance_ref,
                             self,
                             self.compute_node,
                             overhead=overhead,
                             limits=limits)

        self._set_instance_host_and_node(context, instance_ref)
        instance_ref['numa_topology'] = claim.claimed_numa_topology

        # Mark resources in-use and update stats
        self._update_usage_from_instance(context, self.compute_node,
                                         instance_ref)

        elevated = context.elevated()
        # persist changes to the compute node:
        self._update(elevated, self.compute_node)

        return claim
Example #5
 def get_claim(mock_extra_get, mock_pci_get):
     return claims.Claim(self.context,
                         instance,
                         self.tracker,
                         self.resources,
                         overhead=overhead,
                         limits=limits)
Example #6
 def get_claim(mock_extra_get):
     return claims.Claim(self.context,
                         instance,
                         _NODENAME,
                         self.tracker,
                         self.resources,
                         requests,
                         limits=limits)
Example #7
    def instance_claim(self, context, instance_ref, limits=None):
        """Indicate that some resources are needed for an upcoming compute
        instance build operation.

        This should be called before the compute node is about to perform
        an instance build operation that will consume additional resources.

        :param context: security context
        :param instance_ref: instance to reserve resources for.
        :type instance_ref: nova.objects.instance.Instance object
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs.
        :returns: A Claim ticket representing the reserved resources.  It can
                  be used to revert the resource usage if an error occurs
                  during the instance build.
        """
        if self.disabled:
            # compute_driver doesn't support resource tracking, just
            # set the 'host' and node fields and continue the build:
            self._set_instance_host_and_node(context, instance_ref)
            return claims.NopClaim()

        # sanity checks:
        if instance_ref.host:
            LOG.warning(_LW("Host field should not be set on the instance "
                            "until resources have been claimed."),
                        instance=instance_ref)

        if instance_ref.node:
            LOG.warning(_LW("Node field should not be set on the instance "
                            "until resources have been claimed."),
                        instance=instance_ref)

        # get memory overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(instance_ref)
        LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
                  "MB", {'flavor': instance_ref.memory_mb,
                          'overhead': overhead['memory_mb']})

        claim = claims.Claim(context, instance_ref, self, self.compute_node,
                             overhead=overhead, limits=limits)

        # self._set_instance_host_and_node() will save instance_ref to the DB
        # so set instance_ref['numa_topology'] first.  We need to make sure
        # that numa_topology is saved while under COMPUTE_RESOURCE_SEMAPHORE
        # so that the resource audit knows about any cpus we've pinned.
        instance_ref.numa_topology = claim.claimed_numa_topology
        self._set_instance_host_and_node(context, instance_ref)

        # Mark resources in-use and update stats
        self._update_usage_from_instance(context, instance_ref)

        elevated = context.elevated()
        # persist changes to the compute node:
        self._update(elevated)

        return claim
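The docstrings in Examples #4, #7, #11 and #12 all note that the returned Claim ticket "can be used to revert the resource usage if an error occurs during the instance build." A minimal consumption sketch, assuming the context-manager behavior nova's claim objects provide (abort() runs when the body raises); rt, driver, image_meta, network_info and block_device_info here are placeholders, not names taken from the examples.

# Hedged sketch: using the claim ticket as a context manager. Assumes
# the claim implements __enter__/__exit__ so that abort() reverts the
# reserved memory/disk/CPU if the build below raises.
with rt.instance_claim(context, instance, limits=limits):
    driver.spawn(context, instance, image_meta,
                 injected_files=[], admin_password=None,
                 network_info=network_info,
                 block_device_info=block_device_info)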
Example #8
 def _claim(self, limits=None, overhead=None, **kwargs):
     instance = self._fake_instance(**kwargs)
     if overhead is None:
         overhead = {'memory_mb': 0}
     return claims.Claim(instance,
                         self.tracker,
                         self.resources,
                         overhead=overhead,
                         limits=limits)
Example #9
    def instance_claim(self, context, instance_ref, limits=None):
        """Indicate that some resources are needed for an upcoming compute
        instance build operation.

        This should be called before the compute node is about to perform
        an instance build operation that will consume additional resources.

        :param context: security context
        :param instance_ref: instance to reserve resources for
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs.
        :returns: A Claim ticket representing the reserved resources.  It can
                  be used to revert the resource usage if an error occurs
                  during the instance build.
        """
        if self.disabled:
            # compute_driver doesn't support resource tracking, just
            # set the 'host' and node fields and continue the build:
            self._set_instance_host_and_node(context, instance_ref)
            return claims.NopClaim()

        # sanity checks:
        if instance_ref['host']:
            LOG.warning(_("Host field should not be set on the instance until "
                          "resources have been claimed."),
                        instance=instance_ref)

        if instance_ref['node']:
            LOG.warning(_("Node field should be not be set on the instance "
                          "until resources have been claimed."),
                        instance=instance_ref)

        claim = claims.Claim(instance_ref, self)

        if claim.test(self.compute_node, limits):

            self._set_instance_host_and_node(context, instance_ref)

            # Mark resources in-use and update stats
            self._update_usage_from_instance(self.compute_node, instance_ref)

            # persist changes to the compute node:
            self._update(context, self.compute_node)

            return claim

        else:
            raise exception.ComputeResourcesUnavailable()
Example #10
 def fake_claim(context, instance, limits):
     instance.host = self.compute.host
     return claims.Claim(context, instance, self.rt, _fake_resources())
Example #11
    def instance_claim(self, context, instance, nodename, limits=None):
        """Indicate that some resources are needed for an upcoming compute
        instance build operation.

        This should be called before the compute node is about to perform
        an instance build operation that will consume additional resources.

        :param context: security context
        :param instance: instance to reserve resources for.
        :type instance: nova.objects.instance.Instance object
        :param nodename: The Ironic nodename selected by the scheduler
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs.
        :returns: A Claim ticket representing the reserved resources.  It can
                  be used to revert the resource usage if an error occurs
                  during the instance build.
        """
        if self.disabled(nodename):
            # instance_claim() was called before update_available_resource()
            # (which ensures that a compute node exists for nodename). We
            # shouldn't get here but in case we do, just set the instance's
            # host and nodename attribute (probably incorrect) and return a
            # NoopClaim.
            # TODO(jaypipes): Remove all the disabled junk from the resource
            # tracker. Servicegroup API-level active-checking belongs in the
            # nova-compute manager.
            self._set_instance_host_and_node(instance, nodename)
            return claims.NopClaim()

        # sanity checks:
        if instance.host:
            LOG.warning(_LW("Host field should not be set on the instance "
                            "until resources have been claimed."),
                        instance=instance)

        if instance.node:
            LOG.warning(_LW("Node field should not be set on the instance "
                            "until resources have been claimed."),
                        instance=instance)

        # get the overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(instance)
        LOG.debug(
            "Memory overhead for %(flavor)d MB instance; %(overhead)d "
            "MB", {
                'flavor': instance.flavor.memory_mb,
                'overhead': overhead['memory_mb']
            })
        LOG.debug(
            "Disk overhead for %(flavor)d GB instance; %(overhead)d "
            "GB", {
                'flavor': instance.flavor.root_gb,
                'overhead': overhead.get('disk_gb', 0)
            })
        LOG.debug(
            "CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
            "vCPU(s)", {
                'flavor': instance.flavor.vcpus,
                'overhead': overhead.get('vcpus', 0)
            })

        cn = self.compute_nodes[nodename]
        pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
            context, instance.uuid)
        claim = claims.Claim(context,
                             instance,
                             nodename,
                             self,
                             cn,
                             pci_requests,
                             overhead=overhead,
                             limits=limits)

        # self._set_instance_host_and_node() will save instance to the DB
        # so set instance.numa_topology first.  We need to make sure
        # that numa_topology is saved while under COMPUTE_RESOURCE_SEMAPHORE
        # so that the resource audit knows about any cpus we've pinned.
        instance_numa_topology = claim.claimed_numa_topology
        instance.numa_topology = instance_numa_topology
        self._set_instance_host_and_node(instance, nodename)

        if self.pci_tracker:
            # NOTE(jaypipes): ComputeNode.pci_device_pools is set below
            # in _update_usage_from_instance().
            self.pci_tracker.claim_instance(context, pci_requests,
                                            instance_numa_topology)

        # Mark resources in-use and update stats
        self._update_usage_from_instance(context, instance, nodename)

        elevated = context.elevated()
        # persist changes to the compute node:
        self._update(elevated, cn)

        return claim
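When the node cannot satisfy the request, the claim machinery signals failure rather than handing back a usable ticket (Example #9 shows the older explicit claim.test() variant raising ComputeResourcesUnavailable). A hedged sketch of the explicit-revert alternative to the context-manager form above; the names are illustrative placeholders.

# Hedged sketch: keeping the ticket and aborting it by hand. Assumes the
# claim exposes abort(), per the "revert the resource usage" docstring.
claim = rt.instance_claim(context, instance, nodename, limits=limits)
try:
    driver.spawn(context, instance, image_meta,
                 injected_files=[], admin_password=None,
                 network_info=network_info,
                 block_device_info=block_device_info)
except Exception:
    claim.abort()  # release the memory/disk/CPU (and PCI) reserved above
    raise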
Example #12
    def instance_claim(self,
                       context,
                       instance,
                       limits=None,
                       filter_properties=None):
        """Indicate that some resources are needed for an upcoming compute
        instance build operation.

        This should be called before the compute node is about to perform
        an instance build operation that will consume additional resources.

        :param context: security context
        :param instance: instance to reserve resources for.
        :type instance: nova.objects.instance.Instance object
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs.
        :param filter_properties: Dict of the scheduler hints for the instance.
        :returns: A Claim ticket representing the reserved resources.  It can
                  be used to revert the resource usage if an error occurs
                  during the instance build.
        """
        if self.disabled:
            # compute_driver doesn't support resource tracking, just
            # set the 'host' and node fields and continue the build:
            self._set_instance_host_and_node(instance)
            return claims.NopClaim()

        # sanity checks:
        if instance.host:
            LOG.warning(_LW("Host field should not be set on the instance "
                            "until resources have been claimed."),
                        instance=instance)

        if instance.node:
            LOG.warning(_LW("Node field should not be set on the instance "
                            "until resources have been claimed."),
                        instance=instance)

        # get the overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(instance)
        LOG.debug(
            "Memory overhead for %(flavor)d MB instance; %(overhead)d "
            "MB", {
                'flavor': instance.flavor.memory_mb,
                'overhead': overhead['memory_mb']
            })
        LOG.debug(
            "Disk overhead for %(flavor)d GB instance; %(overhead)d "
            "GB", {
                'flavor': instance.flavor.root_gb,
                'overhead': overhead.get('disk_gb', 0)
            })

        pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
            context, instance.uuid)
        claim = claims.Claim(context,
                             instance,
                             self,
                             self.compute_node,
                             pci_requests,
                             overhead=overhead,
                             limits=limits,
                             filter_properties=filter_properties)

        # self._set_instance_host_and_node() will save instance to the DB
        # so set instance.numa_topology first.  We need to make sure
        # that numa_topology is saved while under COMPUTE_RESOURCE_SEMAPHORE
        # so that the resource audit knows about any cpus we've pinned.
        instance_numa_topology = claim.claimed_numa_topology
        instance.numa_topology = instance_numa_topology
        self._set_instance_host_and_node(instance)

        if self.pci_tracker:
            # NOTE(jaypipes): ComputeNode.pci_device_pools is set below
            # in _update_usage_from_instance().
            self.pci_tracker.claim_instance(context, pci_requests,
                                            instance_numa_topology)

        # Mark resources in-use and update stats
        self._update_usage_from_instance(context, instance)

        elevated = context.elevated()
        # persist changes to the compute node:
        self._update(elevated)

        return claim
Example #13
 def _claim(self, **kwargs):
     instance = self._fake_instance(**kwargs)
     return claims.Claim(instance, self.tracker)
Example #14
 def fake_claim(context, instance, node, limits):
     instance.host = self.compute.host
     requests = objects.InstancePCIRequests(requests=[])
     return claims.Claim(context, instance, test_compute.NODENAME,
                         self.rt, _fake_resources(),
                         requests)
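The fake_claim helpers in Examples #2, #10 and #14 stand in for ResourceTracker.instance_claim so a test receives a real claims.Claim built from _fake_resources(). A hedged sketch of how such a helper is typically wired in; the patch target mirrors the self.rt attribute used throughout these examples.

# Hedged sketch: routing the tracker's instance_claim through fake_claim.
# fake_claim's signature must match the instance_claim variant under test
# (here the (context, instance, node, limits) form from Example #14).
with mock.patch.object(self.rt, 'instance_claim',
                       side_effect=fake_claim) as mock_claim:
    self.compute.unshelve_instance(self.context, instance, image=None,
                                   filter_properties=filter_properties,
                                   node=test_compute.NODENAME)
mock_claim.assert_called_once()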
Example #15
    def test_unshelve_volume_backed(self):
        db_instance = jsonutils.to_primitive(self._create_fake_instance())
        host = 'fake-mini'
        node = test_compute.NODENAME
        limits = {}
        filter_properties = {'limits': limits}
        cur_time = timeutils.utcnow()
        cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
        timeutils.set_time_override(cur_time)
        self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
                                  None, True, None, False)
        instance = instance_obj.Instance.get_by_uuid(
            self.context,
            db_instance['uuid'],
            expected_attrs=['metadata', 'system_metadata'])
        instance.task_state = task_states.UNSHELVING
        instance.save()
        sys_meta = dict(instance.system_metadata)
        sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
        sys_meta['shelved_image_id'] = None
        sys_meta['shelved_host'] = host

        self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute, '_prep_block_device')
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.mox.StubOutWithMock(self.rt, 'instance_claim')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')

        self.compute._notify_about_instance_usage(self.context, instance,
                                                  'unshelve.start')
        db.instance_update_and_get_original(
            self.context,
            instance['uuid'], {
                'task_state': task_states.SPAWNING
            },
            update_cells=False,
            columns_to_join=['metadata', 'system_metadata']).AndReturn(
                (db_instance, db_instance))
        self.compute._prep_block_device(self.context, instance,
                                        mox.IgnoreArg()).AndReturn('fake_bdm')
        db_instance['key_data'] = None
        db_instance['auto_disk_config'] = None
        self.rt.instance_claim(self.context, instance, limits).AndReturn(
            claims.Claim(db_instance, self.rt, _fake_resources()))
        self.compute.driver.spawn(self.context,
                                  instance,
                                  None,
                                  injected_files=[],
                                  admin_password=None,
                                  network_info=[],
                                  block_device_info='fake_bdm')
        self.compute._get_power_state(self.context, instance).AndReturn(123)
        db.instance_update_and_get_original(
            self.context,
            instance['uuid'], {
                'power_state': 123,
                'vm_state': vm_states.ACTIVE,
                'task_state': None,
                'key_data': None,
                'auto_disk_config': False,
                'expected_task_state': task_states.SPAWNING,
                'launched_at': cur_time_tz
            },
            update_cells=False,
            columns_to_join=['metadata', 'system_metadata']).AndReturn(
                (db_instance, db_instance))
        self.compute._notify_about_instance_usage(self.context, instance,
                                                  'unshelve.end')
        self.mox.ReplayAll()

        self.compute.unshelve_instance(self.context,
                                       instance,
                                       image=None,
                                       filter_properties=filter_properties,
                                       node=node)
Example #16
    def test_unshelve_volume_backed(self):
        instance = self._create_fake_instance_obj()
        db_instance = obj_base.obj_to_primitive(instance)
        node = test_compute.NODENAME
        limits = {}
        filter_properties = {'limits': limits}
        cur_time = timeutils.utcnow()
        cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
        timeutils.set_time_override(cur_time)
        instance.task_state = task_states.UNSHELVING
        instance.save()

        self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute, '_prep_block_device')
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.mox.StubOutWithMock(self.rt, 'instance_claim')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(self.compute.network_api,
                                 'migrate_instance_finish')

        self.compute._notify_about_instance_usage(self.context, instance,
                                                  'unshelve.start')
        db.instance_update_and_get_original(
            self.context,
            instance['uuid'], {
                'task_state': task_states.SPAWNING
            },
            update_cells=False,
            columns_to_join=[
                'metadata', 'system_metadata', 'info_cache', 'security_groups'
            ]).AndReturn((db_instance, db_instance))
        self.compute._prep_block_device(
            self.context, instance, mox.IgnoreArg(),
            do_check_attach=False).AndReturn('fake_bdm')
        db_instance['key_data'] = None
        db_instance['auto_disk_config'] = None
        self.compute.network_api.migrate_instance_finish(
            self.context, instance, {
                'source_compute': '',
                'dest_compute': self.compute.host
            })
        self.rt.instance_claim(self.context, instance, limits).AndReturn(
            claims.Claim(self.context, db_instance, self.rt,
                         _fake_resources()))
        self.compute.driver.spawn(self.context,
                                  instance,
                                  None,
                                  injected_files=[],
                                  admin_password=None,
                                  network_info=[],
                                  block_device_info='fake_bdm',
                                  instance_type=None)
        self.compute._get_power_state(self.context, instance).AndReturn(123)
        db.instance_update_and_get_original(
            self.context,
            instance['uuid'], {
                'power_state': 123,
                'vm_state': vm_states.ACTIVE,
                'task_state': None,
                'key_data': None,
                'auto_disk_config': False,
                'expected_task_state': task_states.SPAWNING,
                'launched_at': cur_time_tz
            },
            update_cells=False,
            columns_to_join=[
                'metadata', 'system_metadata', 'info_cache', 'security_groups'
            ]).AndReturn((db_instance, db_instance))
        self.compute._notify_about_instance_usage(self.context, instance,
                                                  'unshelve.end')
        self.mox.ReplayAll()

        self.compute.unshelve_instance(self.context,
                                       instance,
                                       image=None,
                                       filter_properties=filter_properties,
                                       node=node)
Example #17
 def _claim(self, overhead=None, **kwargs):
     instance = self._fake_instance(**kwargs)
     if overhead is None:
         overhead = {'memory_mb': 0}
     return claims.Claim(instance, self.tracker, overhead=overhead)