Code Example #1
File: test_vm_utils.py Project: noorul/nova
    def test_zero_root_gb_disables_check(self):
        self.mox.StubOutWithMock(flavors, "extract_flavor")
        flavors.extract_flavor(self.instance).AndReturn(dict(root_gb=0))

        self.mox.ReplayAll()

        vm_utils._check_vdi_size(self.context, self.session, self.instance, self.vdi_uuid)
Code Example #2
    def test_find_destination_retry_with_invalid_livem_checks(self):
        self.flags(migrate_max_retries=1)
        self.mox.StubOutWithMock(self.task.image_service, 'show')
        self.mox.StubOutWithMock(flavors, 'extract_flavor')
        self.mox.StubOutWithMock(self.task.scheduler_rpcapi, 'select_hosts')
        self.mox.StubOutWithMock(self.task,
                '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        self.task.image_service.show(self.context,
                                     self.instance_image).AndReturn("image")
        flavors.extract_flavor(self.instance).AndReturn("inst_type")
        self.task.scheduler_rpcapi.select_hosts(self.context, mox.IgnoreArg(),
                mox.IgnoreArg()).AndReturn(["host1"])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")\
                .AndRaise(exception.Invalid)

        self.task.scheduler_rpcapi.select_hosts(self.context, mox.IgnoreArg(),
                mox.IgnoreArg()).AndReturn(["host2"])
        self.task._check_compatible_with_source_hypervisor("host2")
        self.task._call_livem_checks_on_host("host2")

        self.mox.ReplayAll()
        self.assertEqual("host2", self.task._find_destination())
Code Example #3
File: test_vm_utils.py Project: noorul/nova
    def test_not_too_large(self):
        self.mox.StubOutWithMock(flavors, "extract_flavor")
        flavors.extract_flavor(self.instance).AndReturn(dict(root_gb=1))

        self.mox.StubOutWithMock(vm_utils, "_get_vdi_chain_size")
        vm_utils._get_vdi_chain_size(self.session, self.vdi_uuid).AndReturn(1073741824)

        self.mox.ReplayAll()

        vm_utils._check_vdi_size(self.context, self.session, self.instance, self.vdi_uuid)
Code Example #4
File: test_scheduler_utils.py Project: pcaruana/nova
    def test_build_request_spec_without_image(self):
        image = None
        instance = {'uuid': 'fake-uuid'}
        instance_type = {'flavorid': 'fake-id'}

        self.mox.StubOutWithMock(flavors, 'extract_flavor')
        flavors.extract_flavor(mox.IgnoreArg()).AndReturn(instance_type)
        self.mox.ReplayAll()

        request_spec = scheduler_utils.build_request_spec(self.context, image,
                                                          [instance])
        self.assertEqual({}, request_spec['image'])
Code Example #5
    def test_build_request_spec_without_image(self, mock_get):
        image = None
        instance = {'uuid': 'fake-uuid'}
        instance_type = objects.Flavor(**test_flavor.fake_flavor)

        mock_get.return_value = objects.Flavor(extra_specs={})

        self.mox.StubOutWithMock(flavors, 'extract_flavor')
        flavors.extract_flavor(mox.IgnoreArg()).AndReturn(instance_type)
        self.mox.ReplayAll()

        request_spec = scheduler_utils.build_request_spec(self.context, image,
                                                          [instance])
        self.assertEqual({}, request_spec['image'])
Code Example #6
File: test_vm_utils.py Project: DavidYan/nova
    def test_too_large(self):
        self.mox.StubOutWithMock(flavors, 'extract_flavor')
        flavors.extract_flavor(self.instance).AndReturn(
                dict(root_gb=1))

        self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
        vm_utils._get_vdi_chain_size(self.session,
                self.vdi_uuid).AndReturn(1073741825)

        self.mox.ReplayAll()

        self.assertRaises(exception.InstanceTypeDiskTooSmall,
                vm_utils._check_vdi_size, self.context, self.session,
                self.instance, self.vdi_uuid)
Code Example #7
    def test_find_destination_works(self):
        self.mox.StubOutWithMock(self.task.image_service, 'show')
        self.mox.StubOutWithMock(flavors, 'extract_flavor')
        self.mox.StubOutWithMock(self.task,
                '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        self.task.image_service.show(self.context,
                                     self.instance_image).AndReturn("image")
        flavors.extract_flavor(self.instance).AndReturn("inst_type")
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")

        self.mox.ReplayAll()
        self.assertEqual("host1", self.task._find_destination())
Code Example #8
File: test_compute_api.py Project: wingo1990/nova
    def test_resize_quota_exceeds_fails(self):
        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        # Should never reach these.
        self.mox.StubOutWithMock(self.compute_api, 'update')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'migrate_server')

        fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
        current_flavor = flavors.extract_flavor(fake_inst)
        fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
                           disabled=False)
        flavors.get_flavor_by_flavor_id(
                'flavor-id', read_deleted='no').AndReturn(fake_flavor)
        deltas = dict(resource=0)
        self.compute_api._upsize_quota_delta(
                self.context, fake_flavor,
                current_flavor).AndReturn(deltas)
        usage = dict(in_use=0, reserved=0)
        over_quota_args = dict(quotas={'resource': 0},
                               usages={'resource': usage},
                               overs=['resource'])
        self.compute_api._reserve_quota_delta(self.context, deltas,
                project_id=fake_inst['project_id']).AndRaise(
                        exception.OverQuota(**over_quota_args))

        self.mox.ReplayAll()

        self.assertRaises(exception.TooManyInstances,
                          self.compute_api.resize, self.context,
                          fake_inst, flavor_id='flavor-id')
Code Example #9
File: utils.py Project: NxtCloud/nova
def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the same
    type.
    """
    instance = instances[0]
    if isinstance(instance, obj_base.NovaObject):
        instance = obj_base.obj_to_primitive(instance)

    if instance_type is None:
        instance_type = flavors.extract_flavor(instance)
        # NOTE(danms): This won't have extra_specs, so fill in the gaps
        _instance_type = objects.Flavor.get_by_flavor_id(
            ctxt, instance_type['flavorid'])
        instance_type.extra_specs = instance_type.get('extra_specs', {})
        instance_type.extra_specs.update(_instance_type.extra_specs)

    if isinstance(instance_type, objects.Flavor):
        instance_type = obj_base.obj_to_primitive(instance_type)

    request_spec = {
            'image': image or {},
            'instance_properties': instance,
            'instance_type': instance_type,
            'num_instances': len(instances)}
    return jsonutils.to_primitive(request_spec)
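
Note: for orientation, the dict returned by these build_request_spec variants has roughly the shape sketched below; every value is invented for illustration, not taken from a real deployment.

# Hypothetical request_spec shape; all values are made up for illustration.
example_request_spec = {
    'image': {'id': 'fake-image-id'},
    'instance_properties': {'uuid': 'fake-uuid',
                            'project_id': 'fake-project'},
    'instance_type': {'flavorid': 'm1.small', 'root_gb': 20,
                      'extra_specs': {}},
    'num_instances': 1,
}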
Code Example #10
File: api.py Project: bcwaldon/nova
    def allocate_for_instance(self, context, instance, vpn,
                              requested_networks, macs=None,
                              conductor_api=None, security_groups=None,
                              dhcp_options=None):
        """Allocates all network structures for an instance.

        TODO(someone): document the rest of these parameters.

        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
        :param dhcp_options: None or a set of key/value pairs that should
            determine the DHCP BOOTP response, eg. for PXE booting an instance
            configured with the baremetal hypervisor. It is expected that these
            are already formatted for the neutron v2 api.
            See nova/virt/driver.py:dhcp_options_for_instance for an example.
        :returns: network info as from get_instance_nw_info() below
        """
        # NOTE(vish): We can't do the floating ip allocation here because
        #             this is called from compute.manager which shouldn't
        #             have db access so we do it on the other side of the
        #             rpc.
        instance_type = flavors.extract_flavor(instance)
        args = {}
        args['vpn'] = vpn
        args['requested_networks'] = requested_networks
        args['instance_id'] = instance['uuid']
        args['project_id'] = instance['project_id']
        args['host'] = instance['host']
        args['rxtx_factor'] = instance_type['rxtx_factor']
        args['macs'] = macs
        args['dhcp_options'] = dhcp_options
        nw_info = self.network_rpcapi.allocate_for_instance(context, **args)

        return network_model.NetworkInfo.hydrate(nw_info)
Code Example #11
File: driver.py Project: ChaosCloud/nova
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        """Completes a resize, turning on the migrated instance

        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param image_meta: image object returned by nova.image.glance that
                           defines the image from which this instance
                           was created
        """
        lpar_obj = self._powervm._create_lpar_instance(instance, network_info)

        instance_type = flavors.extract_flavor(instance)
        new_lv_size = instance_type['root_gb']
        old_lv_size = disk_info['old_lv_size']
        if 'root_disk_file' in disk_info:
            disk_size = max(int(new_lv_size), int(old_lv_size))
            disk_size_bytes = disk_size * 1024 * 1024 * 1024
            self._powervm.deploy_from_migrated_file(
                    lpar_obj, disk_info['root_disk_file'], disk_size_bytes,
                    power_on)
        else:
            # this shouldn't get hit unless someone forgot to handle
            # a certain migration type
            raise exception.PowerVMUnrecognizedRootDevice(disk_info=disk_info)
Code Example #12
File: imagecache.py Project: cloudbau/nova
    def _get_root_vhd_size_gb(self, instance):
        try:
            # In case of resizes we need the old root disk size
            old_instance_type = flavors.extract_flavor(instance, prefix="old_")
            return old_instance_type["root_gb"]
        except KeyError:
            return instance["root_gb"]
Code Example #13
File: test_compute_api.py Project: raidwang/nova
    def test_resize(self):
        self.mox.StubOutWithMock(self.compute_api, 'update')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'migrate_server')

        if self.is_cells:
            self.mox.StubOutWithMock(self.compute_api.db, 'migration_create')
            self.compute_api.db.migration_create(mox.IgnoreArg(),
                                                 mox.IgnoreArg())

        inst = self._create_instance_obj()
        self.compute_api.update(self.context, inst, expected_task_state=None,
                                progress=0,
                                task_state='resize_prep').AndReturn(inst)
        self.compute_api._record_action_start(self.context, inst, 'resize')

        filter_properties = {'ignore_hosts': ['fake_host', 'fake_host']}
        scheduler_hint = {'filter_properties': filter_properties}
        flavor = flavors.extract_flavor(inst)
        self.compute_api.compute_task_api.migrate_server(
                self.context, inst, scheduler_hint=scheduler_hint,
                live=False, rebuild=False, flavor=flavor,
                block_migration=None, disk_over_commit=None,
                reservations=None)

        self.mox.ReplayAll()
        self.compute_api.resize(self.context, inst)
Code Example #14
File: utils.py Project: YankunLi/nova
def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the same
    type.
    """
    instance = instances[0]
    if isinstance(instance, obj_base.NovaObject):
        instance = obj_base.obj_to_primitive(instance)

    if instance_type is None:
        instance_type = flavors.extract_flavor(instance)
    # NOTE(comstud): This is a bit ugly, but will get cleaned up when
    # we're passing an InstanceType internal object.
    extra_specs = db.flavor_extra_specs_get(ctxt, instance_type['flavorid'])
    instance_type['extra_specs'] = extra_specs
    request_spec = {
            'image': image or {},
            'instance_properties': instance,
            'instance_type': instance_type,
            'num_instances': len(instances),
            # NOTE(alaski): This should be removed as logic moves from the
            # scheduler to conductor.  Provides backwards compatibility now.
            'instance_uuids': [inst['uuid'] for inst in instances]}
    return jsonutils.to_primitive(request_spec)
Code Example #15
File: api.py Project: ChaosCloud/nova
    def allocate_for_instance(self, context, instance, vpn,
                              requested_networks, macs=None,
                              conductor_api=None, security_groups=None):
        """Allocates all network structures for an instance.

        TODO(someone): document the rest of these parameters.

        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
        :returns: network info as from get_instance_nw_info() below
        """
        # NOTE(vish): We can't do the floating ip allocation here because
        #             this is called from compute.manager which shouldn't
        #             have db access so we do it on the other side of the
        #             rpc.
        instance_type = flavors.extract_flavor(instance)
        args = {}
        args['vpn'] = vpn
        args['requested_networks'] = requested_networks
        args['instance_id'] = instance['uuid']
        args['project_id'] = instance['project_id']
        args['host'] = instance['host']
        args['rxtx_factor'] = instance_type['rxtx_factor']
        args['macs'] = macs
        nw_info = self.network_rpcapi.allocate_for_instance(context, **args)

        return network_model.NetworkInfo.hydrate(nw_info)
Code Example #16
File: utils.py Project: Krylon360/nova
def get_image_metadata(context, image_api, image_id_or_uri, instance):
    image_system_meta = {}
    # In case of boot from volume, image_id_or_uri may be None
    if image_id_or_uri is not None:
        # If the base image is still available, get its metadata
        try:
            image = image_api.get(context, image_id_or_uri)
        except (exception.ImageNotAuthorized,
                exception.ImageNotFound,
                exception.Invalid) as e:
            LOG.warning(_LW("Can't access image %(image_id)s: %(error)s"),
                        {"image_id": image_id_or_uri, "error": e},
                        instance=instance)
        else:
            flavor = flavors.extract_flavor(instance)
            image_system_meta = utils.get_system_metadata_from_image(image,
                                                                     flavor)

    # Get the system metadata from the instance
    system_meta = utils.instance_sys_meta(instance)

    # Merge the metadata from the instance with the image's, if any
    system_meta.update(image_system_meta)

    # Convert the system metadata to image metadata
    return utils.get_image_from_system_metadata(system_meta)
Code Example #17
    def test_find_destination_retry_exceeds_max(self):
        self.flags(scheduler_max_attempts=1)
        self.mox.StubOutWithMock(self.task.image_service, 'show')
        self.mox.StubOutWithMock(flavors, 'extract_flavor')
        self.mox.StubOutWithMock(self.task,
                '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        self.task.image_service.show(self.context,
                                     self.instance_image).AndReturn("image")
        flavors.extract_flavor(self.instance).AndReturn("inst_type")
        self.task._check_compatible_with_source_hypervisor("host1")\
                .AndRaise(exception.DestinationHypervisorTooOld)

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
Code Example #18
    def _find_destination(self):
        #TODO(johngarbutt) this retry loop should be shared
        attempted_hosts = [self.source]
        image = None
        if self.instance.image_ref:
            image = compute_utils.get_image_metadata(self.context,
                                                     self.image_service,
                                                     self.instance.image_ref,
                                                     self.instance)
        instance_type = flavors.extract_flavor(self.instance)

        host = None
        while host is None:
            self._check_not_over_max_retries(attempted_hosts)

            host = self._get_candidate_destination(image,
                    instance_type, attempted_hosts)
            try:
                self._check_compatible_with_source_hypervisor(host)
                self._call_livem_checks_on_host(host)
            except exception.Invalid as e:
                LOG.debug(_("Skipping host: %(host)s because: %(e)s") %
                    {"host": host, "e": e})
                attempted_hosts.append(host)
                host = None
        return host
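
Note: the loop above is an instance of a generic retry-over-candidates pattern. A minimal self-contained sketch follows; select_host and run_checks are stand-ins, not nova APIs (the real code delegates host selection to the scheduler).

class NoValidHost(Exception):
    pass

def find_destination(select_host, run_checks, max_retries=3):
    # Try candidate hosts until one passes the checks or retries run out.
    attempted = []
    while len(attempted) < max_retries:
        host = select_host(attempted)   # pick a candidate not yet attempted
        try:
            run_checks(host)            # e.g. hypervisor compatibility checks
            return host
        except Exception:
            attempted.append(host)      # skip this host and try another
    raise NoValidHost('exceeded %d retries' % max_retries)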
Code Example #19
File: utils.py Project: Dynavisor/nova
def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the same
    type.
    """
    instance = instances[0]
    if instance_type is None:
        if isinstance(instance, objects.Instance):
            instance_type = instance.get_flavor()
        else:
            instance_type = flavors.extract_flavor(instance)

    if isinstance(instance, objects.Instance):
        instance = instance_obj.compat_instance(instance)

    if isinstance(instance_type, objects.Flavor):
        instance_type = obj_base.obj_to_primitive(instance_type)

    request_spec = {
            'image': image or {},
            'instance_properties': instance,
            'instance_type': instance_type,
            'num_instances': len(instances)}
    return jsonutils.to_primitive(request_spec)
Code Example #20
    def test_find_destination_when_runs_out_of_hosts(self):
        self.mox.StubOutWithMock(self.task.image_service, 'show')
        self.mox.StubOutWithMock(flavors, 'extract_flavor')
        self.mox.StubOutWithMock(self.task.scheduler_rpcapi, 'select_hosts')
        self.mox.StubOutWithMock(self.task,
                '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        self.task.image_service.show(self.context,
                                     self.instance_image).AndReturn("image")
        flavors.extract_flavor(self.instance).AndReturn("inst_type")
        self.task.scheduler_rpcapi.select_hosts(self.context, mox.IgnoreArg(),
                mox.IgnoreArg()).AndRaise(exception.NoValidHost(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
Code Example #21
File: utils.py Project: el3m3nt4ry/nova
def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the same
    type.
    """
    instance = instances[0]
    if isinstance(instance, obj_base.NovaObject):
        instance = obj_base.obj_to_primitive(instance)

    if instance_type is None:
        instance_type = flavors.extract_flavor(instance)

    if isinstance(instance_type, objects.Flavor):
        instance_type = obj_base.obj_to_primitive(instance_type)

    request_spec = {
            'image': image or {},
            'instance_properties': instance,
            'instance_type': instance_type,
            'num_instances': len(instances),
            # NOTE(alaski): This should be removed as logic moves from the
            # scheduler to conductor.  Provides backwards compatibility now.
            'instance_uuids': [inst['uuid'] for inst in instances]}
    return jsonutils.to_primitive(request_spec)
Code Example #22
    def _set_default_ephemeral_device(self, instance):
        flavor = flavors.extract_flavor(instance)
        if flavor['ephemeral_gb']:
            self.virtapi.instance_update(
                nova_context.get_admin_context(), instance['uuid'],
                {'default_ephemeral_device': '/dev/sda1'})
Code Example #23
    def setUp(self):
        super(SimpleTenantUsageControllerTest, self).setUp()
        self.controller = simple_tenant_usage.SimpleTenantUsageController()

        class FakeComputeAPI:
            def get_instance_type(self, context, flavor_type):
                if flavor_type == 1:
                    return flavors.get_default_flavor()
                else:
                    raise exception.InstanceTypeNotFound(flavor_type)

        self.compute_api = FakeComputeAPI()
        self.context = None

        now = timeutils.utcnow()
        self.baseinst = dict(display_name='foo',
                             launched_at=now - datetime.timedelta(1),
                             terminated_at=now,
                             instance_type_id=1,
                             vm_state='deleted',
                             deleted=0)
        basetype = flavors.get_default_flavor()
        sys_meta = utils.dict_to_metadata(
            flavors.save_flavor_info({}, basetype))
        self.baseinst['system_metadata'] = sys_meta
        self.basetype = flavors.extract_flavor(self.baseinst)
Code Example #24
def get_device_name_for_instance(context, instance, bdms, device):
    """Validates (or generates) a device name for instance.

    If device is not set, it will generate a unique device appropriate
    for the instance. It uses the block device mapping table to find
    valid device names. If the device name is valid but applicable to
    a different backend (for example /dev/vdc is specified but the
    backend uses /dev/xvdc), the device name will be converted to the
    appropriate format.
    """
    req_prefix = None
    req_letter = None

    if device:
        try:
            req_prefix, req_letter = block_device.match_device(device)
        except (TypeError, AttributeError, ValueError):
            raise exception.InvalidDevicePath(path=device)

    mappings = block_device.instance_block_mapping(instance, bdms)

    try:
        prefix = block_device.match_device(mappings['root'])[0]
    except (TypeError, AttributeError, ValueError):
        raise exception.InvalidDevicePath(path=mappings['root'])

    # NOTE(vish): remove this when xenapi is setting default_root_device
    if driver.compute_driver_matches('xenapi.XenAPIDriver'):
        prefix = '/dev/xvd'

    if req_prefix != prefix:
        LOG.debug(_("Using %(prefix)s instead of %(req_prefix)s"),
                  {'prefix': prefix, 'req_prefix': req_prefix})

    used_letters = set()
    for device_path in mappings.itervalues():
        letter = block_device.strip_prefix(device_path)
        # NOTE(vish): delete numbers in case we have something like
        #             /dev/sda1
        letter = re.sub(r"\d+", "", letter)
        used_letters.add(letter)

    # NOTE(vish): remove this when xenapi is properly setting
    #             default_ephemeral_device and default_swap_device
    if driver.compute_driver_matches('xenapi.XenAPIDriver'):
        instance_type = flavors.extract_flavor(instance)
        if instance_type['ephemeral_gb']:
            used_letters.add('b')

        if instance_type['swap']:
            used_letters.add('c')

    if not req_letter:
        req_letter = _get_unused_letter(used_letters)

    if req_letter in used_letters:
        raise exception.DevicePathInUse(path=device)

    device_name = prefix + req_letter
    return device_name
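
Note: the helper _get_unused_letter is not included in this example. A minimal sketch of what such a helper could look like follows; the real nova helper may differ, for instance by generating multi-letter names once 'a' through 'z' are exhausted.

import string

def _get_unused_letter(used_letters):
    # Hypothetical sketch: return the first drive letter not already in use.
    for letter in string.ascii_lowercase:
        if letter not in used_letters:
            return letter
    raise RuntimeError('no unused device letters left')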
Code Example #25
File: filter_scheduler.py Project: ChaosCloud/nova
    def _assert_compute_node_has_enough_memory(self, context,
                                              instance_ref, dest):
        """Checks if destination host has enough memory for live migration.


        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """
        compute = self._get_compute_info(context, dest)
        node = compute.get('hypervisor_hostname')
        host_state = self.host_manager.host_state_cls(dest, node)
        host_state.update_from_compute_node(compute)

        instance_type = flavors.extract_flavor(instance_ref)
        filter_properties = {'instance_type': instance_type}

        hosts = self.host_manager.get_filtered_hosts([host_state],
                                                     filter_properties,
                                                     'RamFilter')
        if not hosts:
            instance_uuid = instance_ref['uuid']
            reason = (_("Unable to migrate %(instance_uuid)s to %(dest)s: "
                        "Lack of memory")
                      % {'instance_uuid': instance_uuid,
                         'dest': dest})
            raise exception.MigrationPreCheckError(reason=reason)
Code Example #26
File: utils.py Project: nuubou/nova
def get_next_device_name(instance, device_name_list, root_device_name=None, device=None):
    """Validates (or generates) a device name for instance.

    If device is not set, it will generate a unique device appropriate
    for the instance. It uses the root_device_name (if provided) and
    the list of used devices to find valid device names. If the device
    name is valid but applicable to a different backend (for example
    /dev/vdc is specified but the backend uses /dev/xvdc), the device
    name will be converted to the appropriate format.
    """
    req_prefix = None
    req_letter = None

    if device:
        try:
            req_prefix, req_letter = block_device.match_device(device)
        except (TypeError, AttributeError, ValueError):
            raise exception.InvalidDevicePath(path=device)

    if not root_device_name:
        root_device_name = block_device.DEFAULT_ROOT_DEV_NAME

    try:
        prefix = block_device.match_device(root_device_name)[0]
    except (TypeError, AttributeError, ValueError):
        raise exception.InvalidDevicePath(path=root_device_name)

    # NOTE(vish): remove this when xenapi is setting default_root_device
    if driver.compute_driver_matches("xenapi.XenAPIDriver"):
        prefix = "/dev/xvd"

    if req_prefix != prefix:
        LOG.debug("Using %(prefix)s instead of %(req_prefix)s", {"prefix": prefix, "req_prefix": req_prefix})

    used_letters = set()
    for device_path in device_name_list:
        letter = block_device.strip_prefix(device_path)
        # NOTE(vish): delete numbers in case we have something like
        #             /dev/sda1
        letter = re.sub(r"\d+", "", letter)
        used_letters.add(letter)

    # NOTE(vish): remove this when xenapi is properly setting
    #             default_ephemeral_device and default_swap_device
    if driver.compute_driver_matches("xenapi.XenAPIDriver"):
        flavor = flavors.extract_flavor(instance)
        if flavor["ephemeral_gb"]:
            used_letters.add("b")

        if flavor["swap"]:
            used_letters.add("c")

    if not req_letter:
        req_letter = _get_unused_letter(used_letters)

    if req_letter in used_letters:
        raise exception.DevicePathInUse(path=device)

    return prefix + req_letter
Code Example #27
File: instance.py Project: bigloupe/nova
    def get_flavor(self, namespace=None):
        prefix = ('%s_' % namespace) if namespace is not None else ''

        db_flavor = flavors.extract_flavor(self, prefix)
        flavor = objects.Flavor(self._context)
        for key in flavors.system_metadata_flavor_props:
            flavor[key] = db_flavor[key]
        return flavor
Code Example #28
File: instance.py Project: jevonyeoh/nova-simulator
    def get_flavor(self, namespace=None):
        prefix = ("%s_" % namespace) if namespace is not None else ""

        db_flavor = flavors.extract_flavor(self, prefix)
        flavor = flavor_obj.Flavor()
        for key in flavors.system_metadata_flavor_props:
            flavor[key] = db_flavor[key]
        return flavor
Code Example #29
File: api.py Project: Redosen/nova
    def add_fixed_ip_to_instance(self, context, instance, network_id):
        """Adds a fixed ip to instance from specified network."""
        flavor = flavors.extract_flavor(instance)
        args = {'instance_id': instance['uuid'],
                'rxtx_factor': flavor['rxtx_factor'],
                'host': instance['host'],
                'network_id': network_id}
        self.network_rpcapi.add_fixed_ip_to_instance(context, **args)
Code Example #30
    def _get_root_vhd_size_gb(self, instance):
        try:
            # In case of resizes we need the old root disk size
            old_flavor = flavors.extract_flavor(
                instance, prefix='old_')
            return old_flavor['root_gb']
        except KeyError:
            return instance['root_gb']
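
Note: the prefixed lookup and KeyError fallback can be reproduced with plain dictionaries. The illustration below assumes nova's "<prefix>instance_type_<field>" stashing scheme for system_metadata keys.

# Illustrative only: with no 'old_'-prefixed flavor stashed in
# system_metadata, the lookup raises KeyError and the caller falls back to
# the instance's own root_gb.
instance = {'system_metadata': {}, 'root_gb': 20}
try:
    root_gb = instance['system_metadata']['old_instance_type_root_gb']
except KeyError:
    root_gb = instance['root_gb']
assert root_gb == 20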
Code Example #31
    def _get_instance_type(self,
                           context,
                           instance,
                           prefix,
                           instance_type_id=None):
        """Get the instance type from sys metadata if it's stashed.  If not,
        fall back to fetching it via the conductor API.

        See bug 1164110
        """
        if not instance_type_id:
            instance_type_id = instance['instance_type_id']

        try:
            return flavors.extract_flavor(instance, prefix)
        except KeyError:
            return self.conductor_api.instance_type_get(
                context, instance_type_id)
Code Example #32
    def test_flavor_numa_extras_are_saved(self):
        instance_type = flavors.get_default_flavor()
        instance_type['extra_specs'] = {
            'hw:numa_mem.0': '123',
            'hw:numa_cpus.0': '456',
            'hw:numa_mem.1': '789',
            'hw:numa_cpus.1': 'ABC',
            'foo': 'bar',
        }
        sysmeta = flavors.save_flavor_info({}, instance_type)
        _instance_type = flavors.extract_flavor({'system_metadata': sysmeta})
        expected_extra_specs = {
            'hw:numa_mem.0': '123',
            'hw:numa_cpus.0': '456',
            'hw:numa_mem.1': '789',
            'hw:numa_cpus.1': 'ABC',
        }
        self.assertEqual(expected_extra_specs, _instance_type['extra_specs'])
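
Note: the round-trip exercised by this test can be sketched in a self-contained way. The helpers below are simplified stand-ins for flavors.save_flavor_info and flavors.extract_flavor, assuming the prefixed-key scheme and ignoring type conversion and extra_specs handling.

PROPS = ('flavorid', 'root_gb', 'ephemeral_gb', 'swap', 'vcpus')

def save_flavor_info(sys_meta, flavor, prefix=''):
    # Stash each flavor field under a prefixed system_metadata key.
    for key in PROPS:
        sys_meta['%sinstance_type_%s' % (prefix, key)] = flavor[key]
    return sys_meta

def extract_flavor(instance, prefix=''):
    # Recover the stashed fields from the instance's system_metadata.
    sys_meta = instance['system_metadata']
    return {key: sys_meta['%sinstance_type_%s' % (prefix, key)]
            for key in PROPS}

meta = save_flavor_info({}, {'flavorid': '1', 'root_gb': 20,
                             'ephemeral_gb': 0, 'swap': 0, 'vcpus': 1})
assert extract_flavor({'system_metadata': meta})['root_gb'] == 20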
Code Example #33
    def migrate_instance_finish(self, context, instance, migration):
        """Finish migrating the network of an instance."""
        flavor = flavors.extract_flavor(instance)
        args = dict(
            instance_uuid=instance['uuid'],
            rxtx_factor=flavor['rxtx_factor'],
            project_id=instance['project_id'],
            source_compute=migration['source_compute'],
            dest_compute=migration['dest_compute'],
            floating_addresses=None,
        )

        if self._is_multi_host(context, instance):
            args['floating_addresses'] = \
                self._get_floating_ip_addresses(context, instance)
            args['host'] = migration['dest_compute']

        self.network_rpcapi.migrate_instance_finish(context, **args)
Code Example #34
    def _create_migration(self, context, instance, instance_type):
        """Create a migration record for the upcoming resize.  This should
        be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
        claim will not be lost if the audit process starts.
        """
        old_instance_type = flavors.extract_flavor(instance)
        migration = objects.Migration(context=context.elevated())
        migration.dest_compute = self.host
        migration.dest_node = self.nodename
        migration.dest_host = self.driver.get_host_ip_addr()
        migration.old_instance_type_id = old_instance_type['id']
        migration.new_instance_type_id = instance_type['id']
        migration.status = 'pre-migrating'
        migration.instance_uuid = instance['uuid']
        migration.source_compute = instance['host']
        migration.source_node = instance['node']
        migration.create()
        return migration
Code Example #35
    def allocate_for_instance(self,
                              context,
                              instance,
                              vpn,
                              requested_networks,
                              macs=None,
                              security_groups=None,
                              dhcp_options=None):
        """Allocates all network structures for an instance.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param vpn: A boolean, if True, indicate a vpn to access the instance.
        :param requested_networks: A dictionary of requested_networks,
            Optional value containing network_id, fixed_ip, and port_id.
        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
        :param security_groups: None or security groups to allocate for
            instance.
        :param dhcp_options: None or a set of key/value pairs that should
            determine the DHCP BOOTP response, eg. for PXE booting an instance
            configured with the baremetal hypervisor. It is expected that these
            are already formatted for the neutron v2 api.
            See nova/virt/driver.py:dhcp_options_for_instance for an example.
        :returns: network info as from get_instance_nw_info() below
        """
        # NOTE(vish): We can't do the floating ip allocation here because
        #             this is called from compute.manager which shouldn't
        #             have db access so we do it on the other side of the
        #             rpc.
        flavor = flavors.extract_flavor(instance)
        args = {}
        args['vpn'] = vpn
        args['requested_networks'] = requested_networks
        args['instance_id'] = instance.uuid
        args['project_id'] = instance.project_id
        args['host'] = instance.host
        args['rxtx_factor'] = flavor['rxtx_factor']
        args['macs'] = macs
        args['dhcp_options'] = dhcp_options
        nw_info = self.network_rpcapi.allocate_for_instance(context, **args)

        return network_model.NetworkInfo.hydrate(nw_info)
Code Example #36
def build_request_spec(ctxt, image, instances):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the same
    type.
    """
    instance = instances[0]
    instance_type = flavors.extract_flavor(instance)
    # NOTE(comstud): This is a bit ugly, but will get cleaned up when
    # we're passing an InstanceType internal object.
    extra_specs = db.instance_type_extra_specs_get(ctxt,
                                                   instance_type['flavorid'])
    instance_type['extra_specs'] = extra_specs
    request_spec = {
            'image': image,
            'instance_properties': instance,
            'instance_type': instance_type,
            'instance_uuids': [inst['uuid'] for inst in instances]}
    return jsonutils.to_primitive(request_spec)
Code Example #37
def get_image_metadata(context, image_service, image_id, instance):
    # If the base image is still available, get its metadata
    try:
        image = image_service.show(context, image_id)
    except Exception as e:
        LOG.warning(_("Can't access image %(image_id)s: %(error)s"),
                    {"image_id": image_id, "error": e}, instance=instance)
        image_system_meta = {}
    else:
        flavor = flavors.extract_flavor(instance)
        image_system_meta = utils.get_system_metadata_from_image(image, flavor)

    # Get the system metadata from the instance
    system_meta = utils.instance_sys_meta(instance)

    # Merge the metadata from the instance with the image's, if any
    system_meta.update(image_system_meta)

    # Convert the system metadata to image metadata
    return utils.get_image_from_system_metadata(system_meta)
Code Example #38
    def _get_instance_type(self,
                           context,
                           instance,
                           prefix,
                           instance_type_id=None):
        """Get the instance type from sys metadata if it's stashed.  If not,
        fall back to fetching it via the object API.

        See bug 1164110
        """
        try:
            extracted_flavor = flavors.extract_flavor(instance, prefix)
        except KeyError:
            if not instance_type_id:
                instance_type_id = instance['instance_type_id']
            return objects.Flavor.get_by_id(context, instance_type_id)
        # NOTE (ndipanov): Make sure we don't try to lazy-load extra_specs
        # from the object, if there were none stashed in system_metadata
        extracted_flavor.setdefault('extra_specs', {})
        return objects.Flavor(context, **extracted_flavor)
Code Example #39
    def _get_cpu_shares(self, instance):
        """Get allocated CPUs from configured flavor.

        Docker/lxc supports relative CPU allocation.

        cgroups specifies following:
         /sys/fs/cgroup/lxc/cpu.shares = 1024
         /sys/fs/cgroup/cpu.shares = 1024

        For that reason we use 1024 as multiplier.
        This multiplier allows to divide the CPU
        resources fair with containers started by
        the user (e.g. docker registry) which has
        the default CpuShares value of zero.
        """
        if isinstance(instance, objects.Instance):
            flavor = instance.get_flavor()
        else:
            flavor = flavors.extract_flavor(instance)
        return int(flavor['vcpus']) * 1024
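
Note: a quick arithmetic check of the multiplier described in the docstring.

# Illustrative only: with the 1024-per-vCPU multiplier, a 4-vCPU flavor
# receives 4096 relative CPU shares.
flavor = {'vcpus': 4}
assert int(flavor['vcpus']) * 1024 == 4096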
Code Example #40
def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the same
    type.
    """
    instance = instances[0]
    if instance_type is None:
        if isinstance(instance, obj_instance.Instance):
            instance_type = instance.get_flavor()
        else:
            instance_type = flavors.extract_flavor(instance)

    if isinstance(instance, obj_instance.Instance):
        instance = obj_base.obj_to_primitive(instance)
        # obj_to_primitive doesn't copy this enough, so be sure
        # to detach our metadata blob because we modify it below.
        instance['system_metadata'] = dict(instance.get('system_metadata', {}))

    if isinstance(instance_type, objects.Flavor):
        instance_type = obj_base.obj_to_primitive(instance_type)
        # NOTE(danms): Replicate this old behavior because the
        # scheduler RPC interface technically expects it to be
        # there. Remove this when we bump the scheduler RPC API to
        # v5.0
        try:
            flavors.save_flavor_info(instance.get('system_metadata', {}),
                                     instance_type)
        except KeyError:
            # If the flavor isn't complete (which is legit with a
            # flavor object), just don't put it in the request spec.
            pass

    request_spec = {
        'image': image or {},
        'instance_properties': instance,
        'instance_type': instance_type,
        'num_instances': len(instances)
    }
    return jsonutils.to_primitive(request_spec)
Code Example #41
def build_request_spec(ctxt, image, instances):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the same
    type.
    """
    instance = instances[0]
    instance_type = flavors.extract_flavor(instance)
    # NOTE(comstud): This is a bit ugly, but will get cleaned up when
    # we're passing an InstanceType internal object.
    extra_specs = db.flavor_extra_specs_get(ctxt,
                                            instance_type['flavorid'])
    instance_type['extra_specs'] = extra_specs
    request_spec = {
            'image': image,
            'instance_properties': instance,
            'instance_type': instance_type,
            'num_instances': len(instances),
            # NOTE(alaski): This should be removed as logic moves from the
            # scheduler to conductor.  Provides backwards compatibility now.
            'instance_uuids': [inst['uuid'] for inst in instances]}
    return jsonutils.to_primitive(request_spec)
Code Example #42
File: api.py Project: HybridCloud-dew/hws
    def _populate_neutron_extension_values(self, context, instance,
                                           pci_request_id, port_req_body):
        """Populate neutron extension values for the instance.

        If the extensions loaded contain QOS_QUEUE then pass the rxtx_factor.
        """
        self._refresh_neutron_extensions_cache(context)
        if constants.QOS_QUEUE in self.extensions:
            flavor = flavors.extract_flavor(instance)
            rxtx_factor = flavor.get('rxtx_factor')
            port_req_body['port']['rxtx_factor'] = rxtx_factor
        if self._has_port_binding_extension(context):
            tmp_host = instance.get('host')
            LOG.info("to create network: orig_host=%s" % tmp_host)
            try:
                if CONF.host_postfix is not None:
                    tmp_host = tmp_host.rstrip(CONF.host_postfix)
            except Exception, e:
                tmp_host = instance.get('host')
            LOG.info("to create network: after_host=%s" % tmp_host)
            port_req_body['port']['binding:host_id'] = tmp_host
            self._populate_neutron_binding_profile(instance, pci_request_id,
                                                   port_req_body)
Code Example #43
File: test_compute_api.py Project: itpudge/nova
    def test_resize_same_flavor_fails(self):
        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        # Should never reach these.
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, 'update')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'migrate_server')

        fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
        fake_flavor = flavors.extract_flavor(fake_inst)

        flavors.get_flavor_by_flavor_id(
                fake_flavor['flavorid'],
                read_deleted='no').AndReturn(fake_flavor)

        self.mox.ReplayAll()

        # Pass in flavor_id.. same as current flavor.
        self.assertRaises(exception.CannotResizeToSameFlavor,
                          self.compute_api.resize, self.context,
                          fake_inst, flavor_id=fake_flavor['flavorid'])
Code Example #44
    def test_resize_quota_exceeds_fails(self):
        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        # Should never reach these.
        self.mox.StubOutWithMock(self.compute_api, 'update')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'migrate_server')

        fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
        current_flavor = flavors.extract_flavor(fake_inst)
        fake_flavor = dict(id=200,
                           flavorid='flavor-id',
                           name='foo',
                           disabled=False)
        flavors.get_flavor_by_flavor_id(
            'flavor-id', read_deleted='no').AndReturn(fake_flavor)
        deltas = dict(resource=0)
        self.compute_api._upsize_quota_delta(self.context, fake_flavor,
                                             current_flavor).AndReturn(deltas)
        usage = dict(in_use=0, reserved=0)
        over_quota_args = dict(quotas={'resource': 0},
                               usages={'resource': usage},
                               overs=['resource'])
        self.compute_api._reserve_quota_delta(
            self.context, deltas, project_id=fake_inst['project_id']).AndRaise(
                exception.OverQuota(**over_quota_args))

        self.mox.ReplayAll()

        self.assertRaises(exception.TooManyInstances,
                          self.compute_api.resize,
                          self.context,
                          fake_inst,
                          flavor_id='flavor-id')
Code Example #45
def get_image_metadata(context, image_api, image_id_or_uri, instance):
    # If the base image is still available, get its metadata
    try:
        image = image_api.get(context, image_id_or_uri)
    except (exception.ImageNotAuthorized, exception.ImageNotFound,
            exception.Invalid) as e:
        LOG.warning(_LW("Can't access image %(image_id)s: %(error)s"), {
            "image_id": image_id_or_uri,
            "error": e
        },
                    instance=instance)
        image_system_meta = {}
    else:
        flavor = flavors.extract_flavor(instance)
        image_system_meta = utils.get_system_metadata_from_image(image, flavor)

    # Get the system metadata from the instance
    system_meta = utils.instance_sys_meta(instance)

    # Merge the metadata from the instance with the image's, if any
    system_meta.update(image_system_meta)

    # Convert the system metadata to image metadata
    return utils.get_image_from_system_metadata(system_meta)
Code Example #46
    def _get_flavor(self, context, compute_api, instance, flavors_cache):
        """Get flavor information from the instance's system_metadata,
        allowing a fallback to lookup by-id for deleted instances only.
        """
        try:
            return flavors.extract_flavor(instance)
        except KeyError:
            if not instance['deleted']:
                # Only support the fallback mechanism for deleted instances
                # that would have been skipped by migration #153
                raise

        flavor_type = instance['instance_type_id']
        if flavor_type in flavors_cache:
            return flavors_cache[flavor_type]

        try:
            it_ref = compute_api.get_instance_type(context, flavor_type)
            flavors_cache[flavor_type] = it_ref
        except exception.FlavorNotFound:
            # can't bill if there is no instance type
            it_ref = None

        return it_ref
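
Note: the flavors_cache argument exists to avoid repeated by-id lookups when many deleted instances share a flavor. A generic sketch of that memoization follows; lookup_flavor is a stand-in for compute_api.get_instance_type, not a real nova call.

def get_cached_flavor(flavor_id, cache, lookup_flavor):
    # Return the cached flavor if present; otherwise look it up and cache it.
    if flavor_id in cache:
        return cache[flavor_id]
    flavor = lookup_flavor(flavor_id)
    cache[flavor_id] = flavor
    return flavor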
Code Example #47
File: driver.py Project: bopopescu/nova-scheduler-1
    def _set_default_ephemeral_device(self, instance):
        flavor = flavors.extract_flavor(instance)
        if flavor['ephemeral_gb']:
            self.virtapi.instance_update(
                nova_context.get_admin_context(), instance['uuid'],
                {'default_ephemeral_device': '/dev/sda1'})
Code Example #48
File: blockinfo.py Project: ykwon8651/project-e
def get_disk_mapping(virt_type, instance,
                     disk_bus, cdrom_bus,
                     block_device_info=None,
                     image_meta=None, rescue=False):
    """Determine how to map default disks to the virtual machine.

       This is about figuring out whether the default 'disk',
       'disk.local', 'disk.swap' and 'disk.config' images have
       been overridden by the block device mapping.

       Returns the guest disk mapping for the devices.
    """

    inst_type = flavors.extract_flavor(instance)

    mapping = {}

    pre_assigned_device_names = [
        block_device.strip_dev(get_device_name(bdm))
        for bdm in itertools.chain(
            driver.block_device_info_get_ephemerals(block_device_info),
            [driver.block_device_info_get_swap(block_device_info)],
            driver.block_device_info_get_mapping(block_device_info))
        if get_device_name(bdm)]

    if virt_type == "lxc":
        # NOTE(zul): This information is not used by the libvirt driver
        # however we need to populate mapping so the image can be
        # created when the instance is started. This can
        # be removed when we convert LXC to use block devices.
        root_disk_bus = disk_bus
        root_device_type = 'disk'

        root_info = get_next_disk_info(mapping,
                                       root_disk_bus,
                                       root_device_type,
                                       boot_index=1)
        mapping['root'] = root_info
        mapping['disk'] = root_info

        return mapping

    if rescue:
        rescue_info = get_next_disk_info(mapping,
                                         disk_bus, boot_index=1)
        mapping['disk.rescue'] = rescue_info
        mapping['root'] = rescue_info

        os_info = get_next_disk_info(mapping,
                                     disk_bus)
        mapping['disk'] = os_info

        return mapping

    # NOTE (ndipanov): root_bdm can be None when we boot from image
    # as there is no driver representation of local targeted images
    # and they will not be in block_device_info list.
    root_bdm = block_device.get_root_bdm(
        driver.block_device_info_get_mapping(block_device_info))

    root_device_name = block_device.strip_dev(
        driver.block_device_info_get_root(block_device_info))
    root_info = get_root_info(virt_type, image_meta, root_bdm,
                              disk_bus, cdrom_bus, root_device_name)

    mapping['root'] = root_info
    # NOTE (ndipanov): This implicitly relies on image->local BDMs not
    #                  being considered in the driver layer - so missing
    #                  bdm with boot_index 0 means - use image, unless it was
    #                  overridden. This can happen when using legacy syntax and
    #                  no root_device_name is set on the instance.
    if not root_bdm and not block_device.volume_in_mapping(root_info['dev'],
                                                           block_device_info):
        mapping['disk'] = root_info

    default_eph = has_default_ephemeral(instance, disk_bus, block_device_info,
                                        mapping)
    if default_eph:
        mapping['disk.local'] = default_eph

    for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
            block_device_info)):
        eph_info = get_info_from_bdm(
            virt_type, eph, mapping, disk_bus,
            assigned_devices=pre_assigned_device_names)
        mapping[get_eph_disk(idx)] = eph_info
        update_bdm(eph, eph_info)

    swap = driver.block_device_info_get_swap(block_device_info)
    if swap and swap.get('swap_size', 0) > 0:
        swap_info = get_info_from_bdm(virt_type, swap, mapping, disk_bus)
        mapping['disk.swap'] = swap_info
        update_bdm(swap, swap_info)
    elif inst_type['swap'] > 0:
        swap_info = get_next_disk_info(mapping,
                                       disk_bus)
        if not block_device.volume_in_mapping(swap_info['dev'],
                                              block_device_info):
            mapping['disk.swap'] = swap_info

    block_device_mapping = driver.block_device_info_get_mapping(
        block_device_info)

    for vol in block_device_mapping:
        vol_info = get_info_from_bdm(
            virt_type, vol, mapping,
            assigned_devices=pre_assigned_device_names)
        mapping[block_device.prepend_dev(vol_info['dev'])] = vol_info
        update_bdm(vol, vol_info)

    if configdrive.required_by(instance):
        device_type = get_config_drive_type()
        disk_bus = get_disk_bus_for_device_type(virt_type,
                                                image_meta,
                                                device_type)
        config_info = get_next_disk_info(mapping,
                                         disk_bus,
                                         device_type,
                                         last_device=True)
        mapping['disk.config'] = config_info

    return mapping
Code Example #49
    def test_extract_flavor_no_sysmeta(self):
        instance = {}
        prefix = ''
        result = flavors.extract_flavor(instance, prefix)

        self.assertIsNone(result)
Code Example #50
File: utils.py Project: yulinz888888/nova
def get_next_device_name(instance,
                         device_name_list,
                         root_device_name=None,
                         device=None):
    """Validates (or generates) a device name for instance.

    If device is not set, it will generate a unique device appropriate
    for the instance. It uses the root_device_name (if provided) and
    the list of used devices to find valid device names. If the device
    name is valid but applicable to a different backend (for example
    /dev/vdc is specified but the backend uses /dev/xvdc), the device
    name will be converted to the appropriate format.
    """
    req_prefix = None
    req_letter = None

    if device:
        try:
            req_prefix, req_letter = block_device.match_device(device)
        except (TypeError, AttributeError, ValueError):
            raise exception.InvalidDevicePath(path=device)

    if not root_device_name:
        root_device_name = block_device.DEFAULT_ROOT_DEV_NAME

    try:
        prefix = block_device.match_device(root_device_name)[0]
    except (TypeError, AttributeError, ValueError):
        raise exception.InvalidDevicePath(path=root_device_name)

    # NOTE(vish): remove this when xenapi is setting default_root_device
    if driver.compute_driver_matches('xenapi.XenAPIDriver'):
        prefix = '/dev/xvd'

    if req_prefix != prefix:
        LOG.debug(_("Using %(prefix)s instead of %(req_prefix)s"), {
            'prefix': prefix,
            'req_prefix': req_prefix
        })

    used_letters = set()
    for device_path in device_name_list:
        letter = block_device.strip_prefix(device_path)
        # NOTE(vish): delete numbers in case we have something like
        #             /dev/sda1
        letter = re.sub(r"\d+", "", letter)
        used_letters.add(letter)

    # NOTE(vish): remove this when xenapi is properly setting
    #             default_ephemeral_device and default_swap_device
    if driver.compute_driver_matches('xenapi.XenAPIDriver'):
        flavor = flavors.extract_flavor(instance)
        if flavor['ephemeral_gb']:
            used_letters.add('b')

        if flavor['swap']:
            used_letters.add('c')

    if not req_letter:
        req_letter = _get_unused_letter(used_letters)

    if req_letter in used_letters:
        raise exception.DevicePathInUse(path=device)

    return prefix + req_letter
Code Example #51
File: blockinfo.py Project: noorul/nova
def get_disk_mapping(virt_type,
                     instance,
                     disk_bus,
                     cdrom_bus,
                     block_device_info=None,
                     image_meta=None,
                     rescue=False):
    """Determine how to map default disks to the virtual machine.

       This is about figuring out whether the default 'disk',
       'disk.local', 'disk.swap' and 'disk.config' images have
       been overridden by the block device mapping.

       Returns the guest disk mapping for the devices.
    """

    inst_type = flavors.extract_flavor(instance)

    mapping = {}

    if virt_type == "lxc":
        # NOTE(zul): This information is not used by the libvirt driver;
        # however, we need to populate the mapping so the image can be
        # created when the instance is started. This can be removed when
        # we convert LXC to use block devices.
        root_disk_bus = disk_bus
        root_device_type = 'disk'

        root_info = get_next_disk_info(mapping, root_disk_bus,
                                       root_device_type)
        mapping['root'] = root_info
        mapping['disk'] = root_info

        return mapping

    if rescue:
        rescue_info = get_next_disk_info(mapping, disk_bus)
        mapping['disk.rescue'] = rescue_info
        mapping['root'] = rescue_info

        os_info = get_next_disk_info(mapping, disk_bus)
        mapping['disk'] = os_info

        return mapping

    if image_meta and image_meta.get('disk_format') == 'iso':
        root_disk_bus = cdrom_bus
        root_device_type = 'cdrom'
    else:
        root_disk_bus = disk_bus
        root_device_type = 'disk'

    root_device_name = driver.block_device_info_get_root(block_device_info)
    if root_device_name is not None:
        root_device = block_device.strip_dev(root_device_name)
        root_info = {
            'bus': get_disk_bus_for_disk_dev(virt_type, root_device),
            'dev': root_device,
            'type': root_device_type
        }
    else:
        root_info = get_next_disk_info(mapping, root_disk_bus,
                                       root_device_type)
    mapping['root'] = root_info
    if not block_device.volume_in_mapping(root_info['dev'], block_device_info):
        mapping['disk'] = root_info

    eph_info = get_next_disk_info(mapping, disk_bus)
    ephemeral_device = False
    if not (block_device.volume_in_mapping(eph_info['dev'], block_device_info)
            or 0 in [
                eph['num'] for eph in driver.block_device_info_get_ephemerals(
                    block_device_info)
            ]):
        if instance['ephemeral_gb'] > 0:
            ephemeral_device = True

    if ephemeral_device:
        mapping['disk.local'] = eph_info

    for eph in driver.block_device_info_get_ephemerals(block_device_info):
        disk_dev = block_device.strip_dev(eph['device_name'])
        disk_bus = get_disk_bus_for_disk_dev(virt_type, disk_dev)

        mapping[get_eph_disk(eph)] = {
            'bus': disk_bus,
            'dev': disk_dev,
            'type': 'disk'
        }

    swap = driver.block_device_info_get_swap(block_device_info)
    if driver.swap_is_usable(swap):
        disk_dev = block_device.strip_dev(swap['device_name'])
        disk_bus = get_disk_bus_for_disk_dev(virt_type, disk_dev)

        mapping['disk.swap'] = {
            'bus': disk_bus,
            'dev': disk_dev,
            'type': 'disk'
        }
    elif inst_type['swap'] > 0:
        swap_info = get_next_disk_info(mapping, disk_bus)
        if not block_device.volume_in_mapping(swap_info['dev'],
                                              block_device_info):
            mapping['disk.swap'] = swap_info

    block_device_mapping = driver.block_device_info_get_mapping(
        block_device_info)

    for vol in block_device_mapping:
        disk_dev = vol['mount_device'].rpartition("/")[2]
        disk_bus = get_disk_bus_for_disk_dev(virt_type, disk_dev)

        mapping[vol['mount_device']] = {
            'bus': disk_bus,
            'dev': disk_dev,
            'type': 'disk'
        }

    if configdrive.required_by(instance):
        device_type = get_config_drive_type()
        disk_bus = get_disk_bus_for_device_type(virt_type, image_meta,
                                                device_type)
        config_info = get_next_disk_info(mapping,
                                         disk_bus,
                                         device_type,
                                         last_device=True)
        mapping['disk.config'] = config_info

    return mapping
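A sketch of the returned mapping for a plain KVM guest with a flavor ephemeral disk and a config drive; the shape follows the code above, but the bus and dev values are illustrative only:

# Illustrative only -- actual assignments come from
# get_disk_bus_for_disk_dev and get_next_disk_info.
# {
#     'root':        {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
#     'disk':        {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
#     'disk.local':  {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
#     'disk.config': {'bus': 'ide',    'dev': 'hdd', 'type': 'cdrom'},
# }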
Code example #52
File: utils.py Project: ychen2u/stx-nova
def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the same
    type.
    """
    instance = instances[0]
    if instance_type is None:
        if isinstance(instance, obj_instance.Instance):
            instance_type = instance.get_flavor()
        else:
            instance_type = flavors.extract_flavor(instance)

    # WRS: The request_spec requires an updated requested numa_topology,
    # otherwise we use numa_topology from when instance was first created.
    # The required numa topology changes when we do resize.
    requested_topology = None
    if isinstance(instance, obj_instance.Instance):
        instance = obj_base.obj_to_primitive(instance)
        # obj_to_primitive doesn't copy this enough, so be sure
        # to detach our metadata blob because we modify it below.
        instance['system_metadata'] = dict(instance.get('system_metadata', {}))

        if isinstance(instance_type, objects.Flavor):
            if isinstance(image, dict) and 'properties' in image:
                image_meta = objects.ImageMeta.from_dict(image)
            else:
                image_meta = objects.ImageMeta.from_dict(
                    utils.get_image_from_system_metadata(
                        instance['system_metadata']))
            try:
                requested_topology = hardware.numa_get_constraints(
                    instance_type, image_meta)
                instance['numa_topology'] = requested_topology
            except Exception as ex:
                LOG.error("Cannot get numa constraints, error=%(err)r",
                          {'err': ex})

    if isinstance(instance_type, objects.Flavor):
        instance_type = obj_base.obj_to_primitive(instance_type)
        # NOTE(danms): Replicate this old behavior because the
        # scheduler RPC interface technically expects it to be
        # there. Remove this when we bump the scheduler RPC API to
        # v5.0
        try:
            flavors.save_flavor_info(instance.get('system_metadata', {}),
                                     instance_type)
        except KeyError:
            # If the flavor isn't complete (which is legitimate with a
            # flavor object), just don't put it in the request spec.
            pass

    request_spec = {
        'image': image or {},
        'instance_properties': instance,
        'instance_type': instance_type,
        'num_instances': len(instances)
    }

    # WRS: Update requested numa topology, needed for resize.
    if requested_topology is not None:
        request_spec.update({'numa_topology': requested_topology})

    return jsonutils.to_primitive(request_spec)
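The resulting dict, sketched; the contents depend on the primitives built above:

# Illustrative shape only.
# {
#     'image': {...},                  # or {} when no image was given
#     'instance_properties': {...},    # instance primitive
#     'instance_type': {...},          # flavor primitive
#     'num_instances': 1,
#     'numa_topology': ...,            # WRS: only present on resize
# }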
Code example #53
    def _set_default_ephemeral_device(self, instance):
        flavor = flavors.extract_flavor(instance)
        if flavor['ephemeral_gb']:
            instance.default_ephemeral_device = '/dev/sda1'
            instance.save()
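A minimal sketch (flavor values assumed) of the two branches:

# flavor == {'ephemeral_gb': 10} -> default_ephemeral_device is set to
#                                   '/dev/sda1' and the instance saved.
# flavor == {'ephemeral_gb': 0}  -> nothing is set and nothing is saved.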
Code example #54
    def _test_resize(self,
                     flavor_id_passed=True,
                     same_host=False,
                     allow_same_host=False,
                     allow_mig_same_host=False,
                     project_id=None,
                     extra_kwargs=None):
        if extra_kwargs is None:
            extra_kwargs = {}

        self.flags(allow_resize_to_same_host=allow_same_host,
                   allow_migrate_to_same_host=allow_mig_same_host)

        params = {}
        if project_id is not None:
            # To test instance w/ different project id than context (admin)
            params['project_id'] = project_id
        fake_inst = obj_base.obj_to_primitive(
            self._create_instance_obj(params=params))

        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
        self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
        self.mox.StubOutWithMock(self.compute_api, 'update')
        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
                                 'migrate_server')

        current_flavor = flavors.extract_flavor(fake_inst)
        if flavor_id_passed:
            new_flavor = dict(id=200,
                              flavorid='new-flavor-id',
                              name='new_flavor',
                              disabled=False)
            flavors.get_flavor_by_flavor_id(
                'new-flavor-id', read_deleted='no').AndReturn(new_flavor)
        else:
            new_flavor = current_flavor

        resvs = ['resvs']

        self.compute_api._upsize_quota_delta(
            self.context, new_flavor, current_flavor).AndReturn('deltas')
        self.compute_api._reserve_quota_delta(
            self.context, 'deltas',
            project_id=fake_inst['project_id']).AndReturn(resvs)
        self.compute_api.update(self.context,
                                fake_inst,
                                task_state=task_states.RESIZE_PREP,
                                expected_task_state=None,
                                progress=0,
                                **extra_kwargs).AndReturn(fake_inst)

        if allow_same_host:
            filter_properties = {'ignore_hosts': []}
        else:
            filter_properties = {'ignore_hosts': [fake_inst['host']]}

        if not flavor_id_passed and not allow_mig_same_host:
            filter_properties['ignore_hosts'].append(fake_inst['host'])

        if self.is_cells:
            quota.QUOTAS.commit(self.context,
                                resvs,
                                project_id=fake_inst['project_id'])
            resvs = []

        self.compute_api._record_action_start(self.context, fake_inst,
                                              'resize')

        scheduler_hint = {'filter_properties': filter_properties}

        self.compute_api.compute_task_api.migrate_server(
            self.context,
            fake_inst,
            scheduler_hint=scheduler_hint,
            live=False,
            rebuild=False,
            flavor=new_flavor,
            block_migration=None,
            disk_over_commit=None,
            reservations=resvs)

        if self.is_cells:
            self.mox.StubOutWithMock(self.context, 'elevated')
            self.mox.StubOutWithMock(self.compute_api.db, 'migration_create')
            self.mox.StubOutWithMock(self.compute_api, '_cast_to_cells')
            if flavor_id_passed:
                flavors.get_flavor_by_flavor_id(
                    'new-flavor-id', read_deleted='no').AndReturn(new_flavor)
            self.context.elevated().AndReturn(self.context)
            self.compute_api.db.migration_create(
                self.context,
                dict(instance_uuid=fake_inst['uuid'],
                     old_instance_type_id=current_flavor['id'],
                     new_instance_type_id=new_flavor['id'],
                     status='finished'))
            self.compute_api._cast_to_cells(self.context,
                                            fake_inst,
                                            'resize',
                                            flavor_id=new_flavor['flavorid'],
                                            **extra_kwargs)

        self.mox.ReplayAll()

        if flavor_id_passed:
            self.compute_api.resize(self.context,
                                    fake_inst,
                                    flavor_id='new-flavor-id',
                                    **extra_kwargs)
        else:
            self.compute_api.resize(self.context, fake_inst, **extra_kwargs)
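The flow this test pins down, summarized as a sketch; the call order is taken from the mocks above:

# resize(ctx, inst, flavor_id=...) roughly expands to:
#   new_flavor = flavors.get_flavor_by_flavor_id(flavor_id)  # if one passed
#   deltas     = _upsize_quota_delta(ctx, new_flavor, current_flavor)
#   resvs      = _reserve_quota_delta(ctx, deltas, project_id=...)
#   update(ctx, inst, task_state=RESIZE_PREP, progress=0, ...)
#   _record_action_start(ctx, inst, 'resize')
#   compute_task_api.migrate_server(ctx, inst, scheduler_hint=...,
#                                   live=False, rebuild=False,
#                                   flavor=new_flavor, reservations=resvs)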
Code example #55
File: notifications.py Project: tdp100/nova
def info_from_instance(context, instance_ref, network_info, system_metadata,
                       **kw):
    """Get detailed instance information for an instance which is common to all
    notifications.

    :param:network_info: network_info provided if not None
    :param:system_metadata: system_metadata DB entries for the instance,
    if not None

    .. note::

        Currently unused here in trunk, but needed for potential custom
        modifications.

    """
    def null_safe_str(s):
        return str(s) if s else ''

    def null_safe_isotime(s):
        if isinstance(s, datetime.datetime):
            return timeutils.strtime(s)
        else:
            return str(s) if s else ''

    image_ref_url = glance.generate_image_url(instance_ref['image_ref'])

    instance_type = flavors.extract_flavor(instance_ref)
    instance_type_name = instance_type.get('name', '')
    instance_flavorid = instance_type.get('flavorid', '')

    if system_metadata is None:
        system_metadata = utils.instance_sys_meta(instance_ref)

    instance_info = dict(
        # Owner properties
        tenant_id=instance_ref['project_id'],
        user_id=instance_ref['user_id'],

        # Identity properties
        instance_id=instance_ref['uuid'],
        display_name=instance_ref['display_name'],
        reservation_id=instance_ref['reservation_id'],
        hostname=instance_ref['hostname'],

        # Type properties
        instance_type=instance_type_name,
        instance_type_id=instance_ref['instance_type_id'],
        instance_flavor_id=instance_flavorid,
        architecture=instance_ref['architecture'],

        # Capacity properties
        memory_mb=instance_ref['memory_mb'],
        disk_gb=instance_ref['root_gb'] + instance_ref['ephemeral_gb'],
        vcpus=instance_ref['vcpus'],
        # Note(dhellmann): This makes the disk_gb value redundant, but
        # we are keeping it for backwards-compatibility with existing
        # users of notifications.
        root_gb=instance_ref['root_gb'],
        ephemeral_gb=instance_ref['ephemeral_gb'],

        # Location properties
        host=instance_ref['host'],
        node=instance_ref['node'],
        availability_zone=instance_ref['availability_zone'],

        # Date properties
        created_at=str(instance_ref['created_at']),
        # Terminated and Deleted are slightly different (although being
        # terminated and not deleted is a transient state), so include
        # both and let the recipient decide which they want to use.
        terminated_at=null_safe_isotime(instance_ref.get('terminated_at')),
        deleted_at=null_safe_isotime(instance_ref.get('deleted_at')),
        launched_at=null_safe_isotime(instance_ref.get('launched_at')),

        # Image properties
        image_ref_url=image_ref_url,
        os_type=instance_ref['os_type'],
        kernel_id=instance_ref['kernel_id'],
        ramdisk_id=instance_ref['ramdisk_id'],

        # Status properties
        state=instance_ref['vm_state'],
        state_description=null_safe_str(instance_ref.get('task_state')),

        # accessIPs
        access_ip_v4=instance_ref['access_ip_v4'],
        access_ip_v6=instance_ref['access_ip_v6'],
    )

    if network_info is not None:
        fixed_ips = []
        for vif in network_info:
            for ip in vif.fixed_ips():
                ip["label"] = vif["network"]["label"]
                ip["vif_mac"] = vif["address"]
                fixed_ips.append(ip)
        instance_info['fixed_ips'] = fixed_ips

    # add image metadata
    image_meta_props = image_meta(system_metadata)
    instance_info["image_meta"] = image_meta_props

    # add instance metadata
    instance_info['metadata'] = utils.instance_meta(instance_ref)

    instance_info.update(kw)
    return instance_info
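A hedged usage sketch (argument values assumed): extra keyword arguments ride through **kw into the payload, so callers can append event-specific fields:

# Hypothetical call; 'image_name' is an arbitrary extra field.
payload = info_from_instance(context, instance_ref, network_info=None,
                             system_metadata=None, image_name='cirros')
# payload carries the common fields plus image_name; with
# network_info=None, no 'fixed_ips' key is added.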
Code example #56
    def get_ec2_metadata(self, version):
        if version == "latest":
            version = VERSIONS[-1]

        if version not in VERSIONS:
            raise InvalidMetadataVersion(version)

        hostname = self._get_hostname()

        floating_ips = self.ip_info['floating_ips']
        floating_ip = floating_ips and floating_ips[0] or ''

        fixed_ips = self.ip_info['fixed_ips']
        fixed_ip = fixed_ips and fixed_ips[0] or ''

        fmt_sgroups = [x['name'] for x in self.security_groups]

        meta_data = {
            'ami-id': self.ec2_ids['ami-id'],
            'ami-launch-index': self.instance['launch_index'],
            'ami-manifest-path': 'FIXME',
            'instance-id': self.ec2_ids['instance-id'],
            'hostname': hostname,
            'local-ipv4': self.address or fixed_ip,
            'reservation-id': self.instance['reservation_id'],
            'security-groups': fmt_sgroups
        }

        # public keys are strangely rendered in ec2 metadata service
        #  meta-data/public-keys/ returns '0=keyname' (with no trailing /)
        # and only if there is a public key given.
        # '0=keyname' means there is a normally rendered dict at
        #  meta-data/public-keys/0
        #
        # meta-data/public-keys/ : '0=%s' % keyname
        # meta-data/public-keys/0/ : 'openssh-key'
        # meta-data/public-keys/0/openssh-key : '%s' % publickey
        if self.instance['key_name']:
            meta_data['public-keys'] = {
                '0': {
                    '_name': "0=" + self.instance['key_name'],
                    'openssh-key': self.instance['key_data']
                }
            }

        if self._check_version('2007-01-19', version):
            meta_data['local-hostname'] = hostname
            meta_data['public-hostname'] = hostname
            meta_data['public-ipv4'] = floating_ip

        if False and self._check_version('2007-03-01', version):
            # TODO(vish): store product codes
            meta_data['product-codes'] = []

        if self._check_version('2007-08-29', version):
            instance_type = flavors.extract_flavor(self.instance)
            meta_data['instance-type'] = instance_type['name']

        if False and self._check_version('2007-10-10', version):
            # TODO(vish): store ancestor ids
            meta_data['ancestor-ami-ids'] = []

        if self._check_version('2007-12-15', version):
            meta_data['block-device-mapping'] = self.mappings
            if 'kernel-id' in self.ec2_ids:
                meta_data['kernel-id'] = self.ec2_ids['kernel-id']
            if 'ramdisk-id' in self.ec2_ids:
                meta_data['ramdisk-id'] = self.ec2_ids['ramdisk-id']

        if self._check_version('2008-02-01', version):
            meta_data['placement'] = {
                'availability-zone': self.availability_zone
            }

        if self._check_version('2008-09-01', version):
            meta_data['instance-action'] = 'none'

        data = {'meta-data': meta_data}
        if self.userdata_raw is not None:
            data['user-data'] = self.userdata_raw

        return data
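For orientation, the tree this builds renders roughly as below (paths follow the EC2 convention; values illustrative):

# meta-data/instance-id                -> self.ec2_ids['instance-id']
# meta-data/instance-type              -> flavor name (2007-08-29 and later)
# meta-data/public-keys/               -> '0=<key_name>', only if a key is set
# meta-data/public-keys/0/openssh-key  -> the public key material
# user-data                            -> self.userdata_raw, if supplied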
Code example #57
def info_from_instance(context, instance_ref, network_info, system_metadata,
                       **kw):
    """Get detailed instance information for an instance which is common to all
    notifications.

    :param network_info: network_info provided if not None
    :param system_metadata: system_metadata DB entries for the instance,
    if not None.  *NOTE*: Currently unused here in trunk, but needed for
    potential custom modifications.
    """
    def null_safe_str(s):
        return str(s) if s else ''

    image_ref_url = glance.generate_image_url(instance_ref['image_ref'])

    instance_type = flavors.extract_flavor(instance_ref)
    instance_type_name = instance_type.get('name', '')

    if system_metadata is None:
        system_metadata = utils.instance_sys_meta(instance_ref)

    instance_info = dict(
        # Owner properties
        tenant_id=instance_ref['project_id'],
        user_id=instance_ref['user_id'],

        # Identity properties
        instance_id=instance_ref['uuid'],
        display_name=instance_ref['display_name'],
        reservation_id=instance_ref['reservation_id'],
        hostname=instance_ref['hostname'],

        # Type properties
        instance_type=instance_type_name,
        instance_type_id=instance_ref['instance_type_id'],
        architecture=instance_ref['architecture'],

        # Capacity properties
        memory_mb=instance_ref['memory_mb'],
        disk_gb=instance_ref['root_gb'] + instance_ref['ephemeral_gb'],
        vcpus=instance_ref['vcpus'],
        # Note(dhellmann): This makes the disk_gb value redundant, but
        # we are keeping it for backwards-compatibility with existing
        # users of notifications.
        root_gb=instance_ref['root_gb'],
        ephemeral_gb=instance_ref['ephemeral_gb'],

        # Location properties
        host=instance_ref['host'],
        node=instance_ref['node'],
        availability_zone=instance_ref['availability_zone'],

        # Date properties
        created_at=str(instance_ref['created_at']),
        # Nova's deleted vs terminated instance terminology is confusing,
        # this should be when the instance was deleted (i.e. terminated_at),
        # not when the db record was deleted. (mdragon)
        deleted_at=null_safe_str(instance_ref.get('terminated_at')),
        launched_at=null_safe_str(instance_ref.get('launched_at')),

        # Image properties
        image_ref_url=image_ref_url,
        os_type=instance_ref['os_type'],
        kernel_id=instance_ref['kernel_id'],
        ramdisk_id=instance_ref['ramdisk_id'],

        # Status properties
        state=instance_ref['vm_state'],
        state_description=null_safe_str(instance_ref.get('task_state')),

        # accessIPs
        access_ip_v4=instance_ref['access_ip_v4'],
        access_ip_v6=instance_ref['access_ip_v6'],
    )

    if network_info is not None:
        fixed_ips = []
        for vif in network_info:
            for ip in vif.fixed_ips():
                ip["label"] = vif["network"]["label"]
                fixed_ips.append(ip)
        instance_info['fixed_ips'] = fixed_ips

    # add image metadata
    image_meta_props = image_meta(system_metadata)
    instance_info["image_meta"] = image_meta_props

    # add instance metadata
    instance_info['metadata'] = instance_ref['metadata']

    instance_info.update(kw)
    return instance_info