Code example #1
File: utils.py Project: grwl/nova
def notify_usage_exists(context, instance_ref, current_period=False,
                        ignore_missing_network_data=True,
                        system_metadata=None, extra_usage_info=None):
    """Generates 'exists' notification for an instance for usage auditing
    purposes.

    :param current_period: if True, this will generate a usage for the
        current usage period; if False, this will generate a usage for the
        previous audit period.

    :param ignore_missing_network_data: if True, log any exceptions generated
        while getting network info; if False, raise the exception.
    :param system_metadata: system_metadata DB entries for the instance,
        if not None.  *NOTE*: Currently unused here in trunk, but needed for
        potential custom modifications.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification if not None.
    """

    audit_start, audit_end = notifications.audit_period_bounds(current_period)

    bw = notifications.bandwidth_usage(instance_ref, audit_start,
            ignore_missing_network_data)

    if system_metadata is None:
        try:
            if instance_ref.get('deleted'):
                with utils.temporary_mutation(context, read_deleted='yes'):
                    system_metadata = db.instance_system_metadata_get(
                            context, instance_ref.uuid)
            else:
                system_metadata = db.instance_system_metadata_get(
                        context, instance_ref.uuid)
        except exception.NotFound:
            system_metadata = {}

    # add image metadata to the notification:
    image_meta = notifications.image_meta(system_metadata)

    extra_info = dict(audit_period_beginning=str(audit_start),
                      audit_period_ending=str(audit_end),
                      bandwidth=bw, image_meta=image_meta)

    if extra_usage_info:
        extra_info.update(extra_usage_info)

    notify_about_instance_usage(context, instance_ref, 'exists',
            system_metadata=system_metadata, extra_usage_info=extra_info)
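
A minimal call sketch for the helper above, as it might be invoked from a periodic usage-audit task; the context and instance_ref are assumed to be a request context and an instance record already loaded from the DB, and the extra_usage_info key is purely illustrative:

    # Emit an 'exists' notification for the previous audit period,
    # logging (rather than raising) any missing network data.
    notify_usage_exists(context, instance_ref,
                        current_period=False,
                        ignore_missing_network_data=True,
                        extra_usage_info={'audit_note': 'periodic-exists'})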
Code example #2
File: test_manager.py Project: alanmeadows/cobalt
    def test_bless_instance_exception(self):
        self.vmsconn.set_return_val("bless", utils.TestInducedException())
        self.vmsconn.set_return_val("get_instance_info",
            {'state': power_state.RUNNING})
        self.vmsconn.set_return_val("unpause_instance", None)

        blessed_uuid = utils.create_pre_blessed_instance(self.context)

        blessed_instance = db.instance_get_by_uuid(self.context, blessed_uuid)
        self.assertTrue(blessed_instance['disable_terminate'])

        try:
            self.cobalt.bless_instance(self.context,
                                       instance_uuid=blessed_uuid,
                                       migration_url=None)
            self.fail("The bless error should have been re-raised up.")
        except utils.TestInducedException:
            pass

        blessed_instance = db.instance_get_by_uuid(self.context, blessed_uuid)
        self.assertEquals(vm_states.ERROR, blessed_instance['vm_state'])
        system_metadata = db.instance_system_metadata_get(self.context, blessed_uuid)
        self.assertEquals(None, system_metadata.get('images', None))
        self.assertEquals(None, system_metadata.get('blessed', None))
        self.assertEquals(None, blessed_instance['launched_at'])
        self.assertTrue(blessed_instance['disable_terminate'])
Code example #3
    def test_instance_update_with_instance_uuid(self):
        """ test instance_update() works when an instance UUID is passed """
        ctxt = context.get_admin_context()

        # Create an instance with some metadata
        values = {
            'metadata': {
                'host': 'foo'
            },
            'system_metadata': {
                'original_image_ref': 'blah'
            }
        }
        instance = db.instance_create(ctxt, values)

        # Update the metadata
        values = {
            'metadata': {
                'host': 'bar'
            },
            'system_metadata': {
                'original_image_ref': 'baz'
            }
        }
        db.instance_update(ctxt, instance.uuid, values)

        # Retrieve the user-provided metadata to ensure it was successfully
        # updated
        instance_meta = db.instance_metadata_get(ctxt, instance.id)
        self.assertEqual('bar', instance_meta['host'])

        # Retrieve the system metadata to ensure it was successfully updated
        system_meta = db.instance_system_metadata_get(ctxt, instance.uuid)
        self.assertEqual('baz', system_meta['original_image_ref'])
Code example #4
File: test_api.py Project: peterfeiner/cobalt
    def test_launch_instance(self):

        instance_uuid = utils.create_instance(self.context)
        blessed_instance = self.cobalt_api.bless_instance(self.context, instance_uuid)
        blessed_instance_uuid = blessed_instance["uuid"]

        launched_instance = self.cobalt_api.launch_instance(self.context, blessed_instance_uuid)

        launched_instance_uuid = launched_instance["uuid"]
        metadata = db.instance_metadata_get(self.context, launched_instance["uuid"])
        self.assertTrue(
            metadata.has_key("launched_from"),
            "The instance should have a 'launched from' metadata after being launched.",
        )
        self.assertTrue(
            metadata["launched_from"] == "%s" % (blessed_instance_uuid),
            "The instance should have the 'launched from' metadata set to blessed instanced id after being launched. "
            + "(value=%s)" % (metadata["launched_from"]),
        )

        system_metadata = db.instance_system_metadata_get(self.context, launched_instance["uuid"])
        self.assertTrue(
            system_metadata.has_key("launched_from"),
            "The instance should have a 'launched from' system_metadata after being launched.",
        )
        self.assertTrue(
            system_metadata["launched_from"] == "%s" % (blessed_instance_uuid),
            "The instance should have the 'launched from' system_metadata set to blessed instanced id after being launched. "
            + "(value=%s)" % (system_metadata["launched_from"]),
        )
Code example #5
    def test_bless_instance(self):
        instance_uuid = utils.create_instance(self.context)

        num_instance_before = len(db.instance_get_all(self.context))

        blessed_instance = self.cobalt_api.bless_instance(self.context, instance_uuid)

        self.assertEquals(vm_states.BUILDING, blessed_instance['vm_state'])
        # Ensure that we have a 2nd instance in the database that is a "clone"
        # of our original instance.
        instances = db.instance_get_all(self.context)
        self.assertTrue(len(instances) == (num_instance_before + 1),
                        "There should be one new instance after blessing.")

        # The virtual machine should be marked that it is now blessed.
        metadata = db.instance_metadata_get(self.context, blessed_instance['uuid'])
        self.assertTrue(metadata.has_key('blessed_from'),
                        "The instance should have a bless metadata after being blessed.")
        self.assertTrue(metadata['blessed_from'] == '%s' % instance_uuid,
            "The instance should have the blessed_from metadata set to true after being blessed. " \
          + "(value=%s)" % (metadata['blessed_from']))

        system_metadata = db.instance_system_metadata_get(self.context, blessed_instance['uuid'])
        self.assertTrue(system_metadata.has_key('blessed_from'),
            "The instance should have a bless system_metadata after being blessed.")
        self.assertTrue(system_metadata['blessed_from'] == '%s' % instance_uuid,
            "The instance should have the blessed_from system_metadata set to true after being blessed. "\
            + "(value=%s)" % (system_metadata['blessed_from']))

        db_blessed_instance = db.instance_get_by_uuid(self.context,
                                                      blessed_instance['uuid'])
        self.assertTrue(db_blessed_instance['info_cache'])
        self.assertIsNotNone(db_blessed_instance['info_cache']['network_info'])
Code example #6
    def test_bless_instance_migrate(self):
        self.vmsconn.set_return_val(
            "bless",
            ("newname", "migration_url", ["file1", "file2", "file3"], []))
        self.vmsconn.set_return_val("post_bless",
                                    ["file1_ref", "file2_ref", "file3_ref"])
        self.vmsconn.set_return_val("bless_cleanup", None)
        self.vmsconn.set_return_val("get_instance_info",
                                    {'state': power_state.RUNNING})

        blessed_uuid = utils.create_instance(self.context)
        pre_bless_instance = db.instance_get_by_uuid(self.context,
                                                     blessed_uuid)
        migration_url, instance_ref = self.cobalt.bless_instance(
            self.context,
            instance_uuid=blessed_uuid,
            migration_url="mcdist://migrate_addr")
        post_bless_instance = db.instance_get_by_uuid(self.context,
                                                      blessed_uuid)

        self.assertEquals(pre_bless_instance['vm_state'],
                          post_bless_instance['vm_state'])
        self.assertEquals("migration_url", migration_url)
        system_metadata = db.instance_system_metadata_get(
            self.context, blessed_uuid)
        self.assertEquals("file1_ref,file2_ref,file3_ref",
                          system_metadata['images'])
        self.assertEquals(pre_bless_instance['launched_at'],
                          post_bless_instance['launched_at'])
        self.assertFalse(pre_bless_instance.get('disable_terminate', None),
                         post_bless_instance.get('disable_terminate', None))
Code example #7
    def test_bless_instance_exception(self):
        self.vmsconn.set_return_val("bless", utils.TestInducedException())
        self.vmsconn.set_return_val("get_instance_info",
                                    {'state': power_state.RUNNING})
        self.vmsconn.set_return_val("unpause_instance", None)

        blessed_uuid = utils.create_pre_blessed_instance(self.context)

        blessed_instance = db.instance_get_by_uuid(self.context, blessed_uuid)
        self.assertTrue(blessed_instance['disable_terminate'])

        try:
            self.cobalt.bless_instance(self.context,
                                       instance_uuid=blessed_uuid,
                                       migration_url=None)
            self.fail("The bless error should have been re-raised up.")
        except utils.TestInducedException:
            pass

        blessed_instance = db.instance_get_by_uuid(self.context, blessed_uuid)
        self.assertEquals(vm_states.ERROR, blessed_instance['vm_state'])
        system_metadata = db.instance_system_metadata_get(
            self.context, blessed_uuid)
        self.assertEquals(None, system_metadata.get('images', None))
        self.assertEquals(None, system_metadata.get('blessed', None))
        self.assertEquals(None, blessed_instance['launched_at'])
        self.assertTrue(blessed_instance['disable_terminate'])
Code example #8
File: test_manager.py Project: alanmeadows/cobalt
    def test_bless_instance(self):

        self.vmsconn.set_return_val("bless",
                                    ("newname", "migration_url", ["file1", "file2", "file3"],[]))
        self.vmsconn.set_return_val("post_bless", ["file1_ref", "file2_ref", "file3_ref"])
        self.vmsconn.set_return_val("bless_cleanup", None)
        self.vmsconn.set_return_val("get_instance_info",
                                    {'state': power_state.RUNNING})

        pre_bless_time = datetime.utcnow()
        blessed_uuid = utils.create_pre_blessed_instance(self.context)
        migration_url, instance_ref = self.cobalt.bless_instance(
                                                    self.context,
                                                    instance_uuid=blessed_uuid,
                                                    migration_url=None)

        blessed_instance = db.instance_get_by_uuid(self.context, blessed_uuid)
        self.assertEquals("blessed", blessed_instance['vm_state'])
        self.assertEquals("migration_url", migration_url)
        system_metadata = db.instance_system_metadata_get(self.context, blessed_uuid)
        self.assertEquals("file1_ref,file2_ref,file3_ref", system_metadata['images'])

        self.assertTrue(pre_bless_time <= blessed_instance['launched_at'])

        self.assertTrue(blessed_instance['disable_terminate'])
Code example #9
File: resource_tracker.py Project: xww/nova-old
def get_instance_network_qos(instance_type, instance_uuid):
    qos_json_file = FLAGS.network_qos_config
    qos_info = None
    private_qos = 0
    public_qos = 0

    # First, get the default qos configuration from qos_json_file.
    if os.path.exists(qos_json_file):
        with open(qos_json_file, 'r') as f:
            qos_info = jsonutils.load(f)

    if qos_info:
        qos_key = qos_info['policy']['key']
        qos_inst = qos_info['shaping']

        inst_type = instance_type

        if qos_key == 'ecu':
            key = (inst_type["vcpus"] *
                        int(inst_type["extra_specs"]["ecus_per_vcpu:"]))
        else:
            key = inst_type["vcpus"]

        key = str(key)

        if qos_inst['private'].get(key):
            private_qos = qos_inst['private'][key]['rate']
        else:
            private_qos = qos_inst['private']['default']['rate']

        if qos_inst['public'].get(key):
            public_qos = qos_inst['public'][key]['rate']
        else:
            public_qos = qos_inst['public']['default']['rate']

    # If instance_uuid is None, just return the default qos configuration.
    if instance_uuid is None:
        return dict(private_qos=private_qos, public_qos=public_qos)

    # If qos has been set manually, use that configuration instead.
    system_meta = db.instance_system_metadata_get(context.get_admin_context(),
                                                  instance_uuid)
    network_qos = system_meta.get('network-qos')
    if network_qos:
        network_qos_info = jsonutils.loads(network_qos)
        for info in network_qos_info:
            if info['type'] == 'private':
                private_qos = int(info['rate'])
            else:
                public_qos = int(info['rate'])

    return dict(private_qos=private_qos, public_qos=public_qos)
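
For reference, a hedged sketch of the JSON layout that get_instance_network_qos expects to find in FLAGS.network_qos_config; the key names ('policy'/'key', and 'shaping' with 'private'/'public' buckets keyed by vcpu or ecu count, each carrying a 'rate') follow the lookups above, while the concrete numbers are illustrative:

    {
        "policy": {"key": "ecu"},
        "shaping": {
            "private": {
                "default": {"rate": 100},
                "4": {"rate": 200}
            },
            "public": {
                "default": {"rate": 10},
                "4": {"rate": 20}
            }
        }
    }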
Code example #10
File: notifications.py Project: Nesrine85/nova
def _send_instance_update_notification(context,
                                       instance,
                                       old_vm_state,
                                       old_task_state,
                                       new_vm_state,
                                       new_task_state,
                                       service=None,
                                       host=None):
    """Send 'compute.instance.exists' notification to inform observers
    about instance state changes"""

    payload = usage_from_instance(context, instance, None, None)

    states_payload = {
        "old_state": old_vm_state,
        "state": new_vm_state,
        "old_task_state": old_task_state,
        "new_task_state": new_task_state,
    }

    payload.update(states_payload)

    # add audit fields:
    (audit_start, audit_end) = audit_period_bounds(current_period=True)
    payload["audit_period_beginning"] = audit_start
    payload["audit_period_ending"] = audit_end

    # add bw usage info:
    bw = bandwidth_usage(instance, audit_start)
    payload["bandwidth"] = bw

    try:
        system_metadata = db.instance_system_metadata_get(
            context, instance.uuid)
    except exception.NotFound:
        system_metadata = {}

    # add image metadata
    image_meta_props = image_meta(system_metadata)
    payload["image_meta"] = image_meta_props

    # if the service name (e.g. api/scheduler/compute) is not provided, default
    # to "compute"
    if not service:
        service = "compute"

    publisher_id = notifier_api.publisher_id(service, host)

    notifier_api.notify(context, publisher_id, 'compute.instance.update',
                        notifier_api.INFO, payload)
Code example #11
File: utils.py Project: linets/nova
def notify_usage_exists(context,
                        instance_ref,
                        current_period=False,
                        ignore_missing_network_data=True,
                        system_metadata=None,
                        extra_usage_info=None):
    """Generates 'exists' notification for an instance for usage auditing
    purposes.

    :param current_period: if True, this will generate a usage for the
        current usage period; if False, this will generate a usage for the
        previous audit period.

    :param ignore_missing_network_data: if True, log any exceptions generated
        while getting network info; if False, raise the exception.
    :param system_metadata: system_metadata DB entries for the instance,
        if not None.  *NOTE*: Currently unused here in trunk, but needed for
        potential custom modifications.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification if not None.
    """

    audit_start, audit_end = notifications.audit_period_bounds(current_period)

    bw = notifications.bandwidth_usage(instance_ref, audit_start,
                                       ignore_missing_network_data)

    if system_metadata is None:
        try:
            system_metadata = db.instance_system_metadata_get(
                context, instance_ref['uuid'])
        except exception.NotFound:
            system_metadata = {}

    # add image metadata to the notification:
    image_meta = notifications.image_meta(system_metadata)

    extra_info = dict(audit_period_beginning=str(audit_start),
                      audit_period_ending=str(audit_end),
                      bandwidth=bw,
                      image_meta=image_meta)

    if extra_usage_info:
        extra_info.update(extra_usage_info)

    notify_about_instance_usage(context,
                                instance_ref,
                                'exists',
                                system_metadata=system_metadata,
                                extra_usage_info=extra_info)
Code example #12
File: test_manager.py Project: alanmeadows/cobalt
    def test_launch_instance_images(self):
        self.vmsconn.set_return_val("launch", None)
        blessed_uuid = utils.create_blessed_instance(self.context,
            instance={'system_metadata':{'images':'image1'}})

        instance = db.instance_get_by_uuid(self.context, blessed_uuid)
        system_metadata = db.instance_system_metadata_get(self.context, instance['uuid'])
        self.assertEquals('image1', system_metadata.get('images', ''))

        launched_uuid = utils.create_pre_launched_instance(self.context, source_uuid=blessed_uuid)

        self.cobalt.launch_instance(self.context, instance_uuid=launched_uuid)

        # Ensure that image1 was passed to vmsconn.launch
        self.assertEquals(['image1'], self.vmsconn.params_passed[0]['kwargs']['image_refs'])
Code example #13
File: resource_tracker.py Project: stuartbyma/nova
    def update_available_resource(self, context):
        """Override in-memory calculations of compute node resource usage based
        on data audited from the hypervisor layer.

        Add in resource claims in progress to account for operations that have
        declared a need for resources, but not necessarily retrieved them from
        the hypervisor layer yet.
        """
        if self.nodename is None:
            resources = self.driver.get_available_resource()
        else:
            resources = self.driver.get_available_node_resource(self.nodename)
        if not resources:
            # The virt driver does not support this function
            method = 'get_available_resource'
            if self.nodename is not None:
                method = 'get_available_node_resource'
            LOG.audit(
                _("Virt driver does not support "
                  "'%s'  Compute tracking is disabled.") % method)
            self.compute_node = None
            self.claims = {}
            return

        self._verify_resources(resources)

        self._report_hypervisor_resource_view(resources)

        self._purge_expired_claims()

        # Grab all instances assigned to this host:
        instances = db.instance_get_all_by_host(context, self.host)
        if self.nodename is not None:
            # Collect instances belonging to the node
            node_instances = []
            for instance in instances:
                smd = db.instance_system_metadata_get(context,
                                                      instance['uuid'])
                if smd.get('node') == self.nodename:
                    node_instances.append(instance)
            instances = node_instances

        # Now calculate usage based on instance utilization:
        self._update_usage_from_instances(resources, instances)
        self._report_final_resource_view(resources)

        self._sync_compute_node(context, resources)
Code example #14
File: resource_tracker.py Project: hesamrahimi/nova
    def update_available_resource(self, context):
        """Override in-memory calculations of compute node resource usage based
        on data audited from the hypervisor layer.

        Add in resource claims in progress to account for operations that have
        declared a need for resources, but not necessarily retrieved them from
        the hypervisor layer yet.
        """
        if self.nodename is None:
            resources = self.driver.get_available_resource()
        else:
            resources = self.driver.get_available_node_resource(self.nodename)
        if not resources:
            # The virt driver does not support this function
            method = 'get_available_resource'
            if self.nodename is not None:
                method = 'get_available_node_resource'
            LOG.audit(_("Virt driver does not support "
                "'%s'  Compute tracking is disabled.") % method)
            self.compute_node = None
            self.claims = {}
            return

        self._verify_resources(resources)

        self._report_hypervisor_resource_view(resources)

        self._purge_expired_claims()

        # Grab all instances assigned to this host:
        instances = db.instance_get_all_by_host(context, self.host)
        if self.nodename is not None:
            # Collect instances belonging to the node
            node_instances = []
            for instance in instances:
                smd = db.instance_system_metadata_get(context,
                                                      instance['uuid'])
                if smd.get('node') == self.nodename:
                    node_instances.append(instance)
            instances = node_instances

        # Now calculate usage based on instance utilization:
        self._update_usage_from_instances(resources, instances)
        self._report_final_resource_view(resources)

        self._sync_compute_node(context, resources)
Code example #15
def get_network_qos(request, instance):
    ctxt = context.get_admin_context()
    sys_metadata = db.instance_system_metadata_get(ctxt, instance['uuid'])

    qos_pub = {}
    qos_pvt = {}
    qos_info = sys_metadata.get('network-qos', None)
    if qos_info:
        qos_info = json.loads(qos_info)
        for i in qos_info:
            if i['type'] == 'public':
                i.pop('type')
                qos_pub = i
            else:
                i.pop('type')
                qos_pvt = i

    return (qos_pub, qos_pvt)
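
For context, the 'network-qos' entry read from system metadata above is a JSON-encoded list of per-direction settings; a hedged illustration (rates made up) of a value this parser would accept:

    [{"type": "public", "rate": 10}, {"type": "private", "rate": 100}]

After the 'type' key is popped, the remaining dicts are returned as the (qos_pub, qos_pvt) pair.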
Code example #16
File: notifications.py Project: AartiKriplani/nova
def _send_instance_update_notification(context, instance, old_vm_state,
        old_task_state, new_vm_state, new_task_state, service=None, host=None):
    """Send 'compute.instance.exists' notification to inform observers
    about instance state changes"""

    payload = usage_from_instance(context, instance, None, None)

    states_payload = {
        "old_state": old_vm_state,
        "state": new_vm_state,
        "old_task_state": old_task_state,
        "new_task_state": new_task_state,
    }

    payload.update(states_payload)

    # add audit fields:
    (audit_start, audit_end) = audit_period_bounds(current_period=True)
    payload["audit_period_beginning"] = audit_start
    payload["audit_period_ending"] = audit_end

    # add bw usage info:
    bw = bandwidth_usage(instance, audit_start)
    payload["bandwidth"] = bw

    try:
        system_metadata = db.instance_system_metadata_get(
                context, instance.uuid)
    except exception.NotFound:
        system_metadata = {}

    # add image metadata
    image_meta_props = image_meta(system_metadata)
    payload["image_meta"] = image_meta_props

    # if the service name (e.g. api/scheduler/compute) is not provided, default
    # to "compute"
    if not service:
        service = "compute"

    publisher_id = notifier_api.publisher_id(service, host)

    notifier_api.notify(context, publisher_id, 'compute.instance.update',
            notifier_api.INFO, payload)
Code example #17
File: test_manager.py Project: peterfeiner/cobalt
    def test_bless_instance_migrate(self):
        self.vmsconn.set_return_val("bless",
                                    ("newname", "migration_url", ["file1", "file2", "file3"], []))
        self.vmsconn.set_return_val("post_bless", ["file1_ref", "file2_ref", "file3_ref"])
        self.vmsconn.set_return_val("bless_cleanup", None)

        blessed_uuid = utils.create_instance(self.context)
        pre_bless_instance = db.instance_get_by_uuid(self.context, blessed_uuid)
        migration_url = self.cobalt.bless_instance(self.context, instance_uuid=blessed_uuid,
                                                        migration_url="mcdist://migrate_addr")
        post_bless_instance = db.instance_get_by_uuid(self.context, blessed_uuid)

        self.assertEquals(pre_bless_instance['vm_state'], post_bless_instance['vm_state'])
        self.assertEquals("migration_url", migration_url)
        system_metadata = db.instance_system_metadata_get(self.context, blessed_uuid)
        self.assertEquals("file1_ref,file2_ref,file3_ref", system_metadata['images'])
        self.assertEquals(pre_bless_instance['launched_at'], post_bless_instance['launched_at'])
        self.assertFalse(pre_bless_instance.get('disable_terminate', None),
                         post_bless_instance.get('disable_terminate', None))
Code example #18
File: servers.py Project: xww/nova-old
def get_network_qos(request, instance):
    ctxt = context.get_admin_context()
    sys_metadata = db.instance_system_metadata_get(ctxt,
                                                   instance['uuid'])

    qos_pub = {}
    qos_pvt = {}
    qos_info = sys_metadata.get('network-qos', None)
    if qos_info:
        qos_info = json.loads(qos_info)
        for i in qos_info:
            if i['type'] == 'public':
                i.pop('type')
                qos_pub = i
            else:
                i.pop('type')
                qos_pvt = i

    return (qos_pub, qos_pvt)
Code example #19
    def test_launch_instance_images(self):
        self.vmsconn.set_return_val("launch", None)
        blessed_uuid = utils.create_blessed_instance(
            self.context, instance={'system_metadata': {
                'images': 'image1'
            }})

        instance = db.instance_get_by_uuid(self.context, blessed_uuid)
        system_metadata = db.instance_system_metadata_get(
            self.context, instance['uuid'])
        self.assertEquals('image1', system_metadata.get('images', ''))

        launched_uuid = utils.create_pre_launched_instance(
            self.context, source_uuid=blessed_uuid)

        self.cobalt.launch_instance(self.context, instance_uuid=launched_uuid)

        # Ensure that image1 was passed to vmsconn.launch
        self.assertEquals(
            ['image1'], self.vmsconn.params_passed[0]['kwargs']['image_refs'])
Code example #20
File: test_db_api.py Project: edwardt/nova
    def test_instance_update_with_instance_uuid(self):
        """ test instance_update() works when an instance UUID is passed """
        ctxt = context.get_admin_context()

        # Create an instance with some metadata
        values = {"metadata": {"host": "foo"}, "system_metadata": {"original_image_ref": "blah"}}
        instance = db.instance_create(ctxt, values)

        # Update the metadata
        values = {"metadata": {"host": "bar"}, "system_metadata": {"original_image_ref": "baz"}}
        db.instance_update(ctxt, instance.uuid, values)

        # Retrieve the user-provided metadata to ensure it was successfully
        # updated
        instance_meta = db.instance_metadata_get(ctxt, instance.uuid)
        self.assertEqual("bar", instance_meta["host"])

        # Retrieve the system metadata to ensure it was successfully updated
        system_meta = db.instance_system_metadata_get(ctxt, instance.uuid)
        self.assertEqual("baz", system_meta["original_image_ref"])
Code example #21
File: notifications.py Project: AsylumCorp/nova
def _send_instance_update_notification(context, instance, old_vm_state,
        old_task_state, new_vm_state, new_task_state, host=None):
    """Send 'compute.instance.exists' notification to inform observers
    about instance state changes"""

    payload = usage_from_instance(context, instance, None, None)

    states_payload = {
        "old_state": old_vm_state,
        "state": new_vm_state,
        "old_task_state": old_task_state,
        "new_task_state": new_task_state,
    }

    payload.update(states_payload)

    # add audit fields:
    (audit_start, audit_end) = audit_period_bounds(current_period=True)
    payload["audit_period_beginning"] = audit_start
    payload["audit_period_ending"] = audit_end

    # add bw usage info:
    bw = bandwidth_usage(instance, audit_start)
    payload["bandwidth"] = bw

    try:
        system_metadata = db.instance_system_metadata_get(
                context, instance.uuid)
    except exception.NotFound:
        system_metadata = {}

    # add image metadata
    image_meta_props = image_meta(system_metadata)
    payload["image_meta"] = image_meta_props

    if not host:
        host = FLAGS.host

    notifier_api.notify(context, host, 'compute.instance.update',
            notifier_api.INFO, payload)
Code example #22
File: test_db_api.py Project: matiu2/nova
    def test_instance_update_with_instance_uuid(self):
        """ test instance_update() works when an instance UUID is passed """
        ctxt = context.get_admin_context()

        # Create an instance with some metadata
        values = {'metadata': {'host': 'foo'},
                  'system_metadata': {'original_image_ref': 'blah'}}
        instance = db.instance_create(ctxt, values)

        # Update the metadata
        values = {'metadata': {'host': 'bar'},
                  'system_metadata': {'original_image_ref': 'baz'}}
        db.instance_update(ctxt, instance['uuid'], values)

        # Retrieve the user-provided metadata to ensure it was successfully
        # updated
        instance_meta = db.instance_metadata_get(ctxt, instance.uuid)
        self.assertEqual('bar', instance_meta['host'])

        # Retrieve the system metadata to ensure it was successfully updated
        system_meta = db.instance_system_metadata_get(ctxt, instance.uuid)
        self.assertEqual('baz', system_meta['original_image_ref'])
Code example #23
    def test_launch_instance(self):

        instance_uuid = utils.create_instance(self.context)
        blessed_instance = self.cobalt_api.bless_instance(self.context, instance_uuid)
        blessed_instance_uuid = blessed_instance['uuid']

        launched_instance = self.cobalt_api.launch_instance(self.context, blessed_instance_uuid)

        launched_instance_uuid = launched_instance['uuid']
        metadata = db.instance_metadata_get(self.context, launched_instance['uuid'])
        self.assertTrue(metadata.has_key('launched_from'),
                        "The instance should have a 'launched from' metadata after being launched.")
        self.assertTrue(metadata['launched_from'] == '%s' % (blessed_instance_uuid),
            "The instance should have the 'launched from' metadata set to blessed instanced id after being launched. " \
          + "(value=%s)" % (metadata['launched_from']))

        system_metadata = db.instance_system_metadata_get(self.context, launched_instance['uuid'])
        self.assertTrue(system_metadata.has_key('launched_from'),
            "The instance should have a 'launched from' system_metadata after being launched.")
        self.assertTrue(system_metadata['launched_from'] == '%s' % (blessed_instance_uuid),
            "The instance should have the 'launched from' system_metadata set to blessed instanced id after being launched. "\
            + "(value=%s)" % (system_metadata['launched_from']))
Code example #24
    def _provision_resource(self, context, weighted_host, request_spec,
            filter_properties, requested_networks, injected_files,
            admin_password, is_first_time, instance_uuid=None):
        """Create the requested resource in this Zone."""
        # Add a retry entry for the selected compute host:
        self._add_retry_host(filter_properties, weighted_host.host_state.host)

        self._add_oversubscription_policy(filter_properties,
                weighted_host.host_state)

        payload = dict(request_spec=request_spec,
                       weighted_host=weighted_host.to_dict(),
                       instance_id=instance_uuid)
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.run_instance.scheduled', notifier.INFO,
                        payload)

        if weighted_host.host_state.nodename is not None:
            smd_dic = db.instance_system_metadata_get(context, instance_uuid)
            smd_dic['node'] = weighted_host.host_state.nodename
        else:
            # update is not needed
            smd_dic = None

        updated_instance = driver.instance_update_db(context,
                instance_uuid, weighted_host.host_state.host,
                system_metadata=smd_dic)
        # Ensure system_metadata is loaded and included in rpc payload
        updated_instance.get('system_metadata')

        self.compute_rpcapi.run_instance(context, instance=updated_instance,
                host=weighted_host.host_state.host,
                request_spec=request_spec, filter_properties=filter_properties,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password, is_first_time=is_first_time)
Code example #25
File: utils.py Project: bn-emailops/nova
def notify_usage_exists(context, instance_ref, current_period=False,
                        ignore_missing_network_data=True,
                        system_metadata=None, extra_usage_info=None):
    """Generates 'exists' notification for an instance for usage auditing
    purposes.

    :param current_period: if True, this will generate a usage for the
        current usage period; if False, this will generate a usage for the
        previous audit period.

    :param ignore_missing_network_data: if True, log any exceptions generated
        while getting network info; if False, raise the exception.
    :param system_metadata: system_metadata DB entries for the instance,
        if not None.  *NOTE*: Currently unused here in trunk, but needed for
        potential custom modifications.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification if not None.
    """

    admin_context = nova.context.get_admin_context(read_deleted='yes')
    begin, end = utils.last_completed_audit_period()
    bw = {}
    if current_period:
        audit_start = end
        audit_end = utils.utcnow()
    else:
        audit_start = begin
        audit_end = end

    if (instance_ref.get('info_cache') and
        instance_ref['info_cache'].get('network_info')):

        cached_info = instance_ref['info_cache']['network_info']
        nw_info = network_model.NetworkInfo.hydrate(cached_info)
    else:
        try:
            nw_info = network.API().get_instance_nw_info(admin_context,
                                                         instance_ref)
        except Exception:
            LOG.exception('Failed to get nw_info', instance=instance_ref)
            if ignore_missing_network_data:
                return
            raise

    macs = [vif['address'] for vif in nw_info]
    uuids = [instance_ref.uuid]

    bw_usages = db.bw_usage_get_by_uuids(admin_context, uuids, audit_start)
    bw_usages = [b for b in bw_usages if b.mac in macs]

    for b in bw_usages:
        label = 'net-name-not-found-%s' % b['mac']
        for vif in nw_info:
            if vif['address'] == b['mac']:
                label = vif['network']['label']
                break

        bw[label] = dict(bw_in=b.bw_in, bw_out=b.bw_out)

    if system_metadata is None:
        try:
            system_metadata = db.instance_system_metadata_get(
                    context, instance_ref.uuid)
        except exception.NotFound:
            system_metadata = {}

    # add image metadata to the notification:
    image_meta = {}
    for md_key, md_value in system_metadata.iteritems():
        if md_key.startswith('image_'):
            image_meta[md_key[6:]] = md_value

    extra_info = dict(audit_period_beginning=str(audit_start),
                      audit_period_ending=str(audit_end),
                      bandwidth=bw, image_meta=image_meta)

    if extra_usage_info:
        extra_info.update(extra_usage_info)

    notify_about_instance_usage(context, instance_ref, 'exists',
            system_metadata=system_metadata, extra_usage_info=extra_info)
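
A small worked example of the 'image_' prefix stripping in the loop above; the system_metadata keys shown are typical image properties but purely illustrative here:

    # Given system_metadata rows such as:
    #   {'image_min_ram': '512', 'image_disk_format': 'qcow2',
    #    'instance_type_memory_mb': '2048'}
    # only the image-prefixed keys survive, with the prefix removed:
    #   image_meta == {'min_ram': '512', 'disk_format': 'qcow2'}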
Code example #26
def info_from_instance(context, instance_ref, network_info,
                system_metadata, **kw):
    """Get detailed instance information for an instance which is common to all
    notifications.

    :param network_info: network_info provided if not None
    :param system_metadata: system_metadata DB entries for the instance,
    if not None.  *NOTE*: Currently unused here in trunk, but needed for
    potential custom modifications.
    """

    def null_safe_str(s):
        return str(s) if s else ''

    image_ref_url = utils.generate_image_url(instance_ref['image_ref'])

    instance_type_name = instance_ref.get('instance_type', {}).get('name', '')

    if system_metadata is None:
        try:
            system_metadata = db.instance_system_metadata_get(
                    context, instance_ref['uuid'])

        except exception.NotFound:
            system_metadata = {}

    instance_info = dict(
        # Owner properties
        tenant_id=instance_ref['project_id'],
        user_id=instance_ref['user_id'],

        # Identity properties
        instance_id=instance_ref['uuid'],
        display_name=instance_ref['display_name'],
        reservation_id=instance_ref['reservation_id'],

        # Type properties
        instance_type=instance_type_name,
        instance_type_id=instance_ref['instance_type_id'],
        architecture=instance_ref['architecture'],

        # Capacity properties
        memory_mb=instance_ref['memory_mb'],
        disk_gb=instance_ref['root_gb'] + instance_ref['ephemeral_gb'],
        vcpus=instance_ref['vcpus'],
        # Note(dhellmann): This makes the disk_gb value redundant, but
        # we are keeping it for backwards-compatibility with existing
        # users of notifications.
        root_gb=instance_ref['root_gb'],
        ephemeral_gb=instance_ref['ephemeral_gb'],

        # Location properties
        host=instance_ref['host'],
        availability_zone=instance_ref['availability_zone'],

        # Date properties
        created_at=str(instance_ref['created_at']),
        # Nova's deleted vs terminated instance terminology is confusing,
        # this should be when the instance was deleted (i.e. terminated_at),
        # not when the db record was deleted. (mdragon)
        deleted_at=null_safe_str(instance_ref.get('terminated_at')),
        launched_at=null_safe_str(instance_ref.get('launched_at')),

        # Image properties
        image_ref_url=image_ref_url,
        os_type=instance_ref['os_type'],
        kernel_id=instance_ref['kernel_id'],
        ramdisk_id=instance_ref['ramdisk_id'],

        # Status properties
        state=instance_ref['vm_state'],
        state_description=null_safe_str(instance_ref.get('task_state')),

        # accessIPs
        access_ip_v4=instance_ref['access_ip_v4'],
        access_ip_v6=instance_ref['access_ip_v6'],
        )

    if network_info is not None:
        fixed_ips = []
        for vif in network_info:
            for ip in vif.fixed_ips():
                ip["label"] = vif["network"]["label"]
                fixed_ips.append(ip)
        instance_info['fixed_ips'] = fixed_ips

    # add image metadata
    image_meta_props = image_meta(system_metadata)
    instance_info["image_meta"] = image_meta_props

    # add instance metadata
    instance_info['metadata'] = instance_ref['metadata']

    instance_info.update(kw)
    return instance_info
Code example #27
def notify_usage_exists(context,
                        instance_ref,
                        current_period=False,
                        ignore_missing_network_data=True,
                        system_metadata=None,
                        extra_usage_info=None):
    """Generates 'exists' notification for an instance for usage auditing
    purposes.

    :param current_period: if True, this will generate a usage for the
        current usage period; if False, this will generate a usage for the
        previous audit period.

    :param ignore_missing_network_data: if True, log any exceptions generated
        while getting network info; if False, raise the exception.
    :param system_metadata: system_metadata DB entries for the instance,
        if not None.  *NOTE*: Currently unused here in trunk, but needed for
        potential custom modifications.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification if not None.
    """

    admin_context = nova.context.get_admin_context(read_deleted='yes')
    begin, end = utils.last_completed_audit_period()
    bw = {}
    if current_period:
        audit_start = end
        audit_end = utils.utcnow()
    else:
        audit_start = begin
        audit_end = end

    if (instance_ref.get('info_cache')
            and instance_ref['info_cache'].get('network_info')):

        cached_info = instance_ref['info_cache']['network_info']
        nw_info = network_model.NetworkInfo.hydrate(cached_info)
    else:
        try:
            nw_info = network.API().get_instance_nw_info(
                admin_context, instance_ref)
        except Exception:
            LOG.exception('Failed to get nw_info', instance=instance_ref)
            if ignore_missing_network_data:
                return
            raise

    macs = [vif['address'] for vif in nw_info]
    uuids = [instance_ref.uuid]

    bw_usages = db.bw_usage_get_by_uuids(admin_context, uuids, audit_start)
    bw_usages = [b for b in bw_usages if b.mac in macs]

    for b in bw_usages:
        label = 'net-name-not-found-%s' % b['mac']
        for vif in nw_info:
            if vif['address'] == b['mac']:
                label = vif['network']['label']
                break

        bw[label] = dict(bw_in=b.bw_in, bw_out=b.bw_out)

    if system_metadata is None:
        try:
            system_metadata = db.instance_system_metadata_get(
                context, instance_ref.uuid)
        except exception.NotFound:
            system_metadata = {}

    # add image metadata to the notification:
    image_meta = {}
    for md_key, md_value in system_metadata.iteritems():
        if md_key.startswith('image_'):
            image_meta[md_key[6:]] = md_value

    extra_info = dict(audit_period_beginning=str(audit_start),
                      audit_period_ending=str(audit_end),
                      bandwidth=bw,
                      image_meta=image_meta)

    if extra_usage_info:
        extra_info.update(extra_usage_info)

    notify_about_instance_usage(context,
                                instance_ref,
                                'exists',
                                system_metadata=system_metadata,
                                extra_usage_info=extra_info)
Code example #28
    def _test_delete(self, delete_type, **attrs):
        inst = self._create_instance_obj()
        inst.update(attrs)
        inst._context = self.context
        delete_time = datetime.datetime(1955,
                                        11,
                                        5,
                                        9,
                                        30,
                                        tzinfo=iso8601.iso8601.Utc())
        timeutils.set_time_override(delete_time)
        task_state = (delete_type == 'soft_delete'
                      and task_states.SOFT_DELETING or task_states.DELETING)
        db_inst = obj_base.obj_to_primitive(inst)
        updates = {'progress': 0, 'task_state': task_state}
        if delete_type == 'soft_delete':
            updates['deleted_at'] = delete_time
        self.mox.StubOutWithMock(inst, 'save')
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
        self.mox.StubOutWithMock(self.compute_api.servicegroup_api,
                                 'service_is_up')
        self.mox.StubOutWithMock(db, 'migration_get_by_instance_and_status')
        self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(inst.info_cache, 'delete')
        self.mox.StubOutWithMock(self.compute_api.network_api,
                                 'deallocate_for_instance')
        self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
        self.mox.StubOutWithMock(db, 'instance_destroy')
        self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')

        db.block_device_mapping_get_all_by_instance(self.context,
                                                    inst.uuid).AndReturn([])
        inst.save()
        self.compute_api._create_reservations(
            self.context, inst, inst.instance_type_id, inst.project_id,
            inst.user_id).AndReturn('fake-resv')

        if inst.vm_state == vm_states.RESIZED:
            self._test_delete_resized_part(inst)

        self.context.elevated().MultipleTimes().AndReturn(self.context)
        db.service_get_by_compute_host(self.context,
                                       inst.host).AndReturn('fake-service')
        self.compute_api.servicegroup_api.service_is_up(
            'fake-service').AndReturn(inst.host != 'down-host')

        if self.is_cells:
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi

        self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
        self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance')

        if inst.host == 'down-host':
            inst.info_cache.delete()
            compute_utils.notify_about_instance_usage(self.context, inst,
                                                      '%s.start' % delete_type)
            self.compute_api.network_api.deallocate_for_instance(
                self.context, inst)
            db.instance_system_metadata_get(self.context,
                                            inst.uuid).AndReturn('sys-meta')
            state = ('soft' in delete_type and vm_states.SOFT_DELETED
                     or vm_states.DELETED)
            updates.update({
                'vm_state': state,
                'task_state': None,
                'terminated_at': delete_time
            })
            inst.save()
            if self.is_cells:
                if delete_type == 'soft_delete':
                    rpcapi.soft_delete_instance(self.context,
                                                inst,
                                                reservations=None)
                else:
                    rpcapi.terminate_instance(self.context,
                                              inst, [],
                                              reservations=None)
            db.instance_destroy(self.context, inst.uuid, constraint=None)
            compute_utils.notify_about_instance_usage(
                self.context,
                inst,
                '%s.end' % delete_type,
                system_metadata='sys-meta')

        if inst.host == 'down-host':
            quota.QUOTAS.commit(self.context,
                                'fake-resv',
                                project_id=inst.project_id,
                                user_id=inst.user_id)
        elif delete_type == 'soft_delete':
            self.compute_api._record_action_start(self.context, inst,
                                                  instance_actions.DELETE)
            rpcapi.soft_delete_instance(self.context,
                                        inst,
                                        reservations='fake-resv')
        elif delete_type in ['delete', 'force_delete']:
            self.compute_api._record_action_start(self.context, inst,
                                                  instance_actions.DELETE)
            rpcapi.terminate_instance(self.context,
                                      inst, [],
                                      reservations='fake-resv')

        self.mox.ReplayAll()

        getattr(self.compute_api, delete_type)(self.context, inst)
        for k, v in updates.items():
            self.assertEqual(inst[k], v)
Code example #29
File: test_compute_api.py Project: raidwang/nova
    def _test_delete(self, delete_type, **attrs):
        inst = self._create_instance_obj()
        inst.update(attrs)
        inst._context = self.context
        delete_time = datetime.datetime(1955, 11, 5, 9, 30,
                                        tzinfo=iso8601.iso8601.Utc())
        timeutils.set_time_override(delete_time)
        task_state = (delete_type == 'soft_delete' and
                      task_states.SOFT_DELETING or task_states.DELETING)
        db_inst = obj_base.obj_to_primitive(inst)
        updates = {'progress': 0, 'task_state': task_state}
        if delete_type == 'soft_delete':
            updates['deleted_at'] = delete_time
        self.mox.StubOutWithMock(inst, 'save')
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
        self.mox.StubOutWithMock(self.compute_api.servicegroup_api,
                                 'service_is_up')
        self.mox.StubOutWithMock(db, 'migration_get_by_instance_and_status')
        self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'instance_info_cache_delete')
        self.mox.StubOutWithMock(self.compute_api.network_api,
                                 'deallocate_for_instance')
        self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
        self.mox.StubOutWithMock(db, 'instance_destroy')
        self.mox.StubOutWithMock(compute_utils,
                                 'notify_about_instance_usage')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')

        db.block_device_mapping_get_all_by_instance(
            self.context, inst.uuid).AndReturn([])
        inst.save()
        self.compute_api._create_reservations(
            self.context, inst, inst.instance_type_id, inst.project_id,
            inst.user_id).AndReturn('fake-resv')

        if inst.vm_state == vm_states.RESIZED:
            self._test_delete_resized_part(inst)

        self.context.elevated().MultipleTimes().AndReturn(self.context)
        db.service_get_by_compute_host(self.context, inst.host).AndReturn(
            'fake-service')
        self.compute_api.servicegroup_api.service_is_up(
            'fake-service').AndReturn(inst.host != 'down-host')

        if self.is_cells:
            rpcapi = self.compute_api.cells_rpcapi
        else:
            rpcapi = self.compute_api.compute_rpcapi

        self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
        self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance')

        if inst.host == 'down-host':
            db.instance_info_cache_delete(self.context, inst.uuid)
            compute_utils.notify_about_instance_usage(self.context,
                                                      inst,
                                                      '%s.start' % delete_type)
            self.compute_api.network_api.deallocate_for_instance(
                self.context, inst)
            db.instance_system_metadata_get(self.context, inst.uuid
                                            ).AndReturn('sys-meta')
            state = ('soft' in delete_type and vm_states.SOFT_DELETED or
                     vm_states.DELETED)
            updates.update({'vm_state': state,
                            'task_state': None,
                            'terminated_at': delete_time})
            inst.save()
            if self.is_cells:
                if delete_type == 'soft_delete':
                    rpcapi.soft_delete_instance(self.context, inst,
                                                reservations=None)
                else:
                    rpcapi.terminate_instance(self.context, inst, [],
                                              reservations=None)
            db.instance_destroy(self.context, inst.uuid)
            compute_utils.notify_about_instance_usage(
                self.context, inst, '%s.end' % delete_type,
                system_metadata='sys-meta')

        if inst.host == 'down-host':
            quota.QUOTAS.commit(self.context, 'fake-resv',
                                project_id=inst.project_id,
                                user_id=inst.user_id)
        elif delete_type == 'soft_delete':
            self.compute_api._record_action_start(self.context, inst,
                                                  instance_actions.DELETE)
            rpcapi.soft_delete_instance(self.context, inst,
                                        reservations='fake-resv')
        elif delete_type in ['delete', 'force_delete']:
            self.compute_api._record_action_start(self.context, inst,
                                                  instance_actions.DELETE)
            rpcapi.terminate_instance(self.context, inst, [],
                                      reservations='fake-resv')

        self.mox.ReplayAll()

        getattr(self.compute_api, delete_type)(self.context, inst)
        for k, v in updates.items():
            self.assertEqual(inst[k], v)
Code example #30
File: test_compute_api.py Project: xqueralt/nova
    def _test_delete(self, delete_type, **attrs):
        inst = self._create_instance_obj()
        inst.update(attrs)
        delete_time = datetime.datetime(1955, 11, 5, 9, 30)
        timeutils.set_time_override(delete_time)
        task_state = (delete_type == 'soft_delete' and
                      task_states.SOFT_DELETING or task_states.DELETING)
        db_inst = obj_base.obj_to_primitive(inst)
        updates = {'progress': 0, 'task_state': task_state}
        if delete_type == 'soft_delete':
            updates['deleted_at'] = delete_time
        new_inst = dict(db_inst, **updates)
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
        self.mox.StubOutWithMock(self.compute_api.servicegroup_api,
                                 'service_is_up')
        self.mox.StubOutWithMock(db, 'migration_get_by_instance_and_status')
        self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'instance_info_cache_delete')
        self.mox.StubOutWithMock(self.compute_api.network_api,
                                 'deallocate_for_instance')
        self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
        self.mox.StubOutWithMock(db, 'instance_destroy')
        self.mox.StubOutWithMock(compute_utils,
                                 'notify_about_instance_usage')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'terminate_instance')
        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
                                 'soft_delete_instance')

        db.block_device_mapping_get_all_by_instance(
            self.context, inst.uuid).AndReturn([])
        db.instance_update_and_get_original(
            self.context, inst.uuid, updates).AndReturn((db_inst, new_inst))
        self.compute_api._create_reservations(
            self.context, db_inst, new_inst, inst.project_id, inst.user_id
            ).AndReturn('fake-resv')

        if inst.vm_state == vm_states.RESIZED:
            self._test_delete_resized_part(db_inst)

        self.context.elevated().MultipleTimes().AndReturn(self.context)
        db.service_get_by_compute_host(self.context, inst.host).AndReturn(
            'fake-service')
        self.compute_api.servicegroup_api.service_is_up(
            'fake-service').AndReturn(inst.host != 'down-host')

        if inst.host == 'down-host' and (
                not self.is_cells or not inst.cell_name):
            db.instance_info_cache_delete(self.context, inst.uuid)
            compute_utils.notify_about_instance_usage(self.context,
                                                      db_inst, 'delete.start')
            self.compute_api.network_api.deallocate_for_instance(
                self.context, db_inst)
            db.instance_system_metadata_get(self.context, inst.uuid
                                            ).AndReturn('sys-meta')
            updates = {'vm_state': vm_states.DELETED,
                       'task_state': None,
                       'terminated_at': delete_time}
            del_inst = dict(new_inst, **updates)
            db.instance_update_and_get_original(
                self.context, inst.uuid, updates
                ).AndReturn((db_inst, del_inst))
            db.instance_destroy(self.context, inst.uuid)
            compute_utils.notify_about_instance_usage(
                self.context, del_inst, 'delete.end',
                system_metadata='sys-meta')
        if inst.host == 'down-host':
            quota.QUOTAS.commit(self.context, 'fake-resv',
                                project_id=inst.project_id,
                                user_id=inst.user_id)
        elif delete_type == 'soft_delete':
            self.compute_api._record_action_start(self.context, db_inst,
                                                  instance_actions.DELETE)
            self.compute_api.compute_rpcapi.soft_delete_instance(
                self.context, db_inst, reservations='fake-resv')
        elif delete_type in ['delete', 'force_delete']:
            self.compute_api._record_action_start(self.context, db_inst,
                                                  instance_actions.DELETE)
            self.compute_api.compute_rpcapi.terminate_instance(
                self.context, db_inst, [], reservations='fake-resv')

        if self.is_cells:
            self.mox.StubOutWithMock(self.compute_api, '_cast_to_cells')
            self.compute_api._cast_to_cells(
                self.context, db_inst, delete_type)

        self.mox.ReplayAll()

        getattr(self.compute_api, delete_type)(self.context, db_inst)