コード例 #1
0
    def test_live_migration_dest_check_auto_set_host(self):
        """Scheduler should pick a destination host when none is given.

        _live_migration_dest_check() is called with dest=None; it is
        expected to delegate host selection to select_hosts() and return
        the chosen host.
        """
        instance = self._live_migration_instance()

        # Confirm dest is picked by scheduler if not set.
        self.mox.StubOutWithMock(self.driver, 'select_hosts')
        self.mox.StubOutWithMock(instance_types, 'extract_instance_type')

        # Request spec the driver is expected to build internally before
        # delegating to select_hosts().
        request_spec = {
            'instance_properties': instance,
            'instance_type': {},
            'instance_uuids': [instance['uuid']],
            'image': self.image_service.show(self.context,
                                             instance['image_ref'])
        }
        # The source host must be excluded from candidate destinations.
        ignore_hosts = [instance['host']]
        filter_properties = {'ignore_hosts': ignore_hosts}

        # Record the expected mox calls: flavor extraction, then scheduling.
        instance_types.extract_instance_type(instance).AndReturn({})
        self.driver.select_hosts(self.context, request_spec,
                                 filter_properties).AndReturn(['fake_host2'])

        self.mox.ReplayAll()
        result = self.driver._live_migration_dest_check(
            self.context, instance, None, ignore_hosts)
        self.assertEqual('fake_host2', result)
コード例 #2
0
    def finish_migration(self,
                         context,
                         migration,
                         instance,
                         disk_info,
                         network_info,
                         image_meta,
                         resize_instance,
                         block_device_info=None):
        """Completes a resize, turning on the migrated instance

        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param image_meta: image object returned by nova.image.glance that
                           defines the image from which this instance
                           was created
        """
        # Build the LPAR definition for the migrated instance up front.
        lpar_obj = self._powervm._create_lpar_instance(instance, network_info)

        # The root volume must fit both the new flavor's root disk and
        # the logical volume that was migrated over.
        flavor = instance_types.extract_instance_type(instance)
        new_gb = flavor['root_gb']
        old_gb = disk_info['old_lv_size']
        if 'root_disk_file' not in disk_info:
            # this shouldn't get hit unless someone forgot to handle
            # a certain migration type
            raise Exception(
                _('Unrecognized root disk information: %s') % disk_info)

        size_bytes = max(int(new_gb), int(old_gb)) * 1024 * 1024 * 1024
        self._powervm.deploy_from_migrated_file(
            lpar_obj, disk_info['root_disk_file'], size_bytes)
コード例 #3
0
ファイル: filter_scheduler.py プロジェクト: k-i-t-e/nova
    def _assert_compute_node_has_enough_memory(self, context,
                                              instance_ref, dest):
        """Raise MigrationError unless *dest* can fit the instance's RAM.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """
        # Build a HostState for the destination and run it through the
        # RamFilter alone; an empty result means not enough free memory.
        compute = self._get_compute_info(context, dest)
        host_state = self.host_manager.host_state_cls(
            dest, compute.get('hypervisor_hostname'))
        host_state.update_from_compute_node(compute)

        filter_properties = {
            'instance_type': instance_types.extract_instance_type(
                instance_ref),
        }
        passed = self.host_manager.get_filtered_hosts(
            [host_state], filter_properties, 'RamFilter')
        if not passed:
            # instance_uuid and dest are interpolated via locals() below.
            instance_uuid = instance_ref['uuid']
            reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                       "Lack of memory")
            raise exception.MigrationError(reason=reason % locals())
コード例 #4
0
ファイル: driver.py プロジェクト: amalaba/nova
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None):
        """Completes a resize, turning on the migrated instance

        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param image_meta: image object returned by nova.image.glance that
                           defines the image from which this instance
                           was created
        """
        lpar_obj = self._powervm._create_lpar_instance(instance)

        # Size the root disk to whichever is larger: the new flavor's
        # root_gb or the logical volume that was migrated over.
        flavor = instance_types.extract_instance_type(instance)
        sizes = (int(flavor['root_gb']), int(disk_info['old_lv_size']))
        if 'root_disk_file' in disk_info:
            disk_size_bytes = max(sizes) * 1024 * 1024 * 1024
            self._powervm.deploy_from_migrated_file(
                    lpar_obj, disk_info['root_disk_file'], disk_size_bytes)
        else:
            # this shouldn't get hit unless someone forgot to handle
            # a certain migration type
            raise Exception(
                    _('Unrecognized root disk information: %s') %
                    disk_info)
コード例 #5
0
ファイル: test_scheduler.py プロジェクト: AnyBucket/nova
    def test_live_migration_dest_check_auto_set_host(self):
        """Scheduler should pick a destination host when none is given.

        _live_migration_dest_check() is called with dest=None and is
        expected to delegate host selection to select_hosts().
        """
        instance = self._live_migration_instance()

        # Confirm dest is picked by scheduler if not set.
        self.mox.StubOutWithMock(self.driver, 'select_hosts')
        self.mox.StubOutWithMock(db, 'instance_type_get')

        # Request spec the driver is expected to build before delegating
        # to select_hosts().
        instance_type = instance_types.extract_instance_type(instance)
        request_spec = {'instance_properties': instance,
                        'instance_type': instance_type,
                        'instance_uuids': [instance['uuid']],
                        'image': self.image_service.show(self.context,
                                                         instance['image_ref'])
                        }
        # The source host must not be offered as a destination.
        ignore_hosts = [instance['host']]
        filter_properties = {'ignore_hosts': ignore_hosts}

        # Record expected mox calls: flavor lookup, then scheduling.
        db.instance_type_get(self.context, instance_type['id']).AndReturn(
            instance_type)
        self.driver.select_hosts(self.context, request_spec,
                                 filter_properties).AndReturn(['fake_host2'])

        self.mox.ReplayAll()
        result = self.driver._live_migration_dest_check(self.context, instance,
                                                        None, ignore_hosts)
        self.assertEqual('fake_host2', result)
コード例 #6
0
    def setUp(self):
        """Build the controller, a fake compute API and a base instance."""
        super(SimpleTenantUsageControllerTest, self).setUp()
        self.controller = simple_tenant_usage.SimpleTenantUsageController()

        class FakeComputeAPI:
            # Only flavor id 1 exists; any other id raises, mirroring
            # the real API's lookup-failure path.
            def get_instance_type(self, context, flavor_type):
                if flavor_type != 1:
                    raise exception.InstanceTypeNotFound(flavor_type)
                return instance_types.get_default_instance_type()

        self.compute_api = FakeComputeAPI()
        self.context = None

        # A deleted instance that ran for exactly one day.
        now = datetime.datetime.now()
        self.baseinst = dict(display_name='foo',
                             launched_at=now - datetime.timedelta(1),
                             terminated_at=now,
                             instance_type_id=1,
                             vm_state='deleted',
                             deleted=0)
        # Embed the default flavor in system_metadata so that
        # extract_instance_type() can recover it below.
        basetype = instance_types.get_default_instance_type()
        self.baseinst['system_metadata'] = utils.dict_to_metadata(
            instance_types.save_instance_type_info({}, basetype))
        self.basetype = instance_types.extract_instance_type(self.baseinst)
コード例 #7
0
    def allocate_for_instance(self,
                              context,
                              instance,
                              vpn,
                              requested_networks,
                              macs=None,
                              conductor_api=None,
                              security_groups=None,
                              **kwargs):
        """Allocates all network structures for an instance.

        TODO(someone): document the rest of these parameters.

        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
            NB: macs is ignored by nova-network.
        :returns: network info as from get_instance_nw_info() below
        """
        # rxtx_factor comes from the instance's flavor.
        instance_type = instance_types.extract_instance_type(instance)
        args = {
            'vpn': vpn,
            'requested_networks': requested_networks,
            'instance_id': instance['uuid'],
            'project_id': instance['project_id'],
            'host': instance['host'],
            'rxtx_factor': instance_type['rxtx_factor'],
        }
        nw_info = self.network_rpcapi.allocate_for_instance(context, **args)
        return network_model.NetworkInfo.hydrate(nw_info)
コード例 #8
0
    def allocate_for_instance(self, context, instance, vpn,
                              requested_networks, macs=None,
                              conductor_api=None, security_groups=None,
                              **kwargs):
        """Allocates all network structures for an instance.

        TODO(someone): document the rest of these parameters.

        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
            NB: macs is ignored by nova-network.
        :returns: network info as from get_instance_nw_info() below
        """
        # The network RPC needs the flavor's rxtx_factor for bandwidth
        # settings.
        rxtx_factor = instance_types.extract_instance_type(
            instance)['rxtx_factor']
        nw_info = self.network_rpcapi.allocate_for_instance(
            context,
            vpn=vpn,
            requested_networks=requested_networks,
            instance_id=instance['uuid'],
            project_id=instance['project_id'],
            host=instance['host'],
            rxtx_factor=rxtx_factor)
        return network_model.NetworkInfo.hydrate(nw_info)
コード例 #9
0
    def setUp(self):
        """Set up the controller, a fake compute API and a base instance."""
        super(SimpleTenantUsageControllerTest, self).setUp()
        self.controller = simple_tenant_usage.SimpleTenantUsageController()

        class FakeComputeAPI:
            # Only flavor id 1 is known; any other id raises, like the
            # real API's lookup-failure path.
            def get_instance_type(self, context, flavor_type):
                if flavor_type == 1:
                    return instance_types.get_default_instance_type()
                else:
                    raise exception.InstanceTypeNotFound(flavor_type)

        self.compute_api = FakeComputeAPI()
        self.context = None

        # A deleted instance that ran for exactly one day.
        now = datetime.datetime.now()
        self.baseinst = dict(display_name='foo',
                             launched_at=now - datetime.timedelta(1),
                             terminated_at=now,
                             instance_type_id=1,
                             vm_state='deleted',
                             deleted=0)
        # Store the default flavor in system_metadata so
        # extract_instance_type() can recover it below.
        basetype = instance_types.get_default_instance_type()
        sys_meta = utils.dict_to_metadata(
            instance_types.save_instance_type_info({}, basetype))
        self.baseinst['system_metadata'] = sys_meta
        self.basetype = instance_types.extract_instance_type(self.baseinst)
コード例 #10
0
ファイル: api.py プロジェクト: Balakrishnan-Vivek/nova
    def allocate_for_instance(self, context, instance, vpn,
                              requested_networks, macs=None,
                              conductor_api=None, security_groups=None):
        """Allocates all network structures for an instance.

        TODO(someone): document the rest of these parameters.

        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
        :returns: network info as from get_instance_nw_info() below
        """
        # NOTE(vish): We can't do the floating ip allocation here because
        #             this is called from compute.manager which shouldn't
        #             have db access so we do it on the other side of the
        #             rpc.
        # rxtx_factor is taken from the instance's flavor.
        rxtx_factor = instance_types.extract_instance_type(
            instance)['rxtx_factor']
        nw_info = self.network_rpcapi.allocate_for_instance(
            context,
            vpn=vpn,
            requested_networks=requested_networks,
            instance_id=instance['uuid'],
            project_id=instance['project_id'],
            host=instance['host'],
            rxtx_factor=rxtx_factor,
            macs=macs)
        return network_model.NetworkInfo.hydrate(nw_info)
コード例 #11
0
    def _assert_compute_node_has_enough_memory(self, context, instance_ref,
                                               dest):
        """Checks if destination host has enough memory for live migration.


        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :raises exception.MigrationError: if *dest* fails the RamFilter

        """
        # Build a HostState for the destination from its compute node
        # record so the scheduler filters can evaluate it.
        compute = self._get_compute_info(context, dest)
        node = compute.get('hypervisor_hostname')
        host_state = self.host_manager.host_state_cls(dest, node)
        host_state.update_from_compute_node(compute)

        instance_type = instance_types.extract_instance_type(instance_ref)
        filter_properties = {'instance_type': instance_type}

        # Run only the RamFilter; an empty result means the host cannot
        # fit the instance's memory.
        hosts = self.host_manager.get_filtered_hosts([host_state],
                                                     filter_properties,
                                                     'RamFilter')
        if not hosts:
            # instance_uuid and dest are interpolated via locals() below.
            instance_uuid = instance_ref['uuid']
            reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                       "Lack of memory")
            raise exception.MigrationError(reason=reason % locals())
コード例 #12
0
ファイル: api.py プロジェクト: zestrada/nova-cs498cc
    def allocate_for_instance(self, context, instance, vpn,
                              requested_networks, macs=None,
                              conductor_api=None, security_groups=None):
        """Allocates all network structures for an instance.

        TODO(someone): document the rest of these parameters.

        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
        :returns: network info as from get_instance_nw_info() below
        """
        # NOTE(vish): We can't do the floating ip allocation here because
        #             this is called from compute.manager which shouldn't
        #             have db access so we do it on the other side of the
        #             rpc.
        # rxtx_factor comes from the instance's flavor and is needed by
        # the network service for bandwidth settings.
        instance_type = instance_types.extract_instance_type(instance)
        args = {}
        args['vpn'] = vpn
        args['requested_networks'] = requested_networks
        args['instance_id'] = instance['uuid']
        args['project_id'] = instance['project_id']
        args['host'] = instance['host']
        args['rxtx_factor'] = instance_type['rxtx_factor']
        args['macs'] = macs
        nw_info = self.network_rpcapi.allocate_for_instance(context, **args)

        # Convert the raw RPC payload into a NetworkInfo model.
        return network_model.NetworkInfo.hydrate(nw_info)
コード例 #13
0
ファイル: utils.py プロジェクト: AnyBucket/nova
def get_device_name_for_instance(context, instance, bdms, device):
    """Validates (or generates) a device name for instance.

    If device is not set, it will generate a unique device appropriate
    for the instance. It uses the block device mapping table to find
    valid device names. If the device name is valid but applicable to
    a different backend (for example /dev/vdc is specified but the
    backend uses /dev/xvdc), the device name will be converted to the
    appropriate format.

    :raises exception.InvalidDevicePath: if *device* or the root mapping
        cannot be parsed as a device path
    :raises exception.DevicePathInUse: if the requested letter is taken
    """
    req_prefix = None
    req_letter = None

    if device:
        try:
            req_prefix, req_letter = block_device.match_device(device)
        except (TypeError, AttributeError, ValueError):
            raise exception.InvalidDevicePath(path=device)

    mappings = block_device.instance_block_mapping(instance, bdms)

    # The root mapping determines the backend's device prefix.
    try:
        prefix = block_device.match_device(mappings['root'])[0]
    except (TypeError, AttributeError, ValueError):
        raise exception.InvalidDevicePath(path=mappings['root'])

    # NOTE(vish): remove this when xenapi is setting default_root_device
    if driver.compute_driver_matches('xenapi.XenAPIDriver'):
        prefix = '/dev/xvd'

    if req_prefix != prefix:
        LOG.debug(_("Using %(prefix)s instead of %(req_prefix)s") % locals())

    # Collect every letter already used by an existing mapping.
    used_letters = set()
    for device_path in mappings.itervalues():
        letter = block_device.strip_prefix(device_path)
        # NOTE(vish): delete numbers in case we have something like
        #             /dev/sda1
        # Use a raw string for the regex so "\d" is a regex digit class,
        # not a (deprecated/invalid) string escape sequence.
        letter = re.sub(r"\d+", "", letter)
        used_letters.add(letter)

    # NOTE(vish): remove this when xenapi is properly setting
    #             default_ephemeral_device and default_swap_device
    if driver.compute_driver_matches('xenapi.XenAPIDriver'):
        instance_type = instance_types.extract_instance_type(instance)
        if instance_type['ephemeral_gb']:
            used_letters.add('b')

        if instance_type['swap']:
            used_letters.add('c')

    if not req_letter:
        req_letter = _get_unused_letter(used_letters)

    if req_letter in used_letters:
        raise exception.DevicePathInUse(path=device)

    device_name = prefix + req_letter
    return device_name
コード例 #14
0
def get_device_name_for_instance(context, instance, bdms, device):
    """Validates (or generates) a device name for instance.

    If device is not set, it will generate a unique device appropriate
    for the instance. It uses the block device mapping table to find
    valid device names. If the device name is valid but applicable to
    a different backend (for example /dev/vdc is specified but the
    backend uses /dev/xvdc), the device name will be converted to the
    appropriate format.

    :raises exception.InvalidDevicePath: if *device* or the root mapping
        cannot be parsed as a device path
    :raises exception.DevicePathInUse: if the requested letter is taken
    """
    req_prefix = None
    req_letter = None

    if device:
        try:
            req_prefix, req_letter = block_device.match_device(device)
        except (TypeError, AttributeError, ValueError):
            raise exception.InvalidDevicePath(path=device)

    mappings = block_device.instance_block_mapping(instance, bdms)

    # The root mapping determines the backend's device prefix.
    try:
        prefix = block_device.match_device(mappings['root'])[0]
    except (TypeError, AttributeError, ValueError):
        raise exception.InvalidDevicePath(path=mappings['root'])

    # NOTE(vish): remove this when xenapi is setting default_root_device
    if driver.compute_driver_matches('xenapi.XenAPIDriver'):
        prefix = '/dev/xvd'

    if req_prefix != prefix:
        LOG.debug(_("Using %(prefix)s instead of %(req_prefix)s") % locals())

    # Collect every letter already used by an existing mapping.
    used_letters = set()
    for device_path in mappings.itervalues():
        letter = block_device.strip_prefix(device_path)
        # NOTE(vish): delete numbers in case we have something like
        #             /dev/sda1
        letter = re.sub("\d+", "", letter)
        used_letters.add(letter)

    # NOTE(vish): remove this when xenapi is properly setting
    #             default_ephemeral_device and default_swap_device
    if driver.compute_driver_matches('xenapi.XenAPIDriver'):
        instance_type = instance_types.extract_instance_type(instance)
        if instance_type['ephemeral_gb']:
            used_letters.add('b')

        if instance_type['swap']:
            used_letters.add('c')

    if not req_letter:
        req_letter = _get_unused_letter(used_letters)

    if req_letter in used_letters:
        raise exception.DevicePathInUse(path=device)

    device_name = prefix + req_letter
    return device_name
コード例 #15
0
ファイル: imagecache.py プロジェクト: AnyBucket/nova
 def _get_root_vhd_size_gb(self, instance):
     """Return the root disk size in GB for *instance*.

     During a resize the pre-resize ('old_') flavor's root disk size is
     wanted; when no old flavor data is stored, fall back to the
     instance record's own root_gb value.
     """
     try:
         # In case of resizes we need the old root disk size
         return instance_types.extract_instance_type(
             instance, prefix='old_')['root_gb']
     except KeyError:
         return instance['root_gb']
コード例 #16
0
 def _get_root_vhd_size_gb(self, instance):
     """Return the root disk size in GB for *instance*.

     Prefers the pre-resize ('old_') flavor's root_gb; falls back to
     the instance record when no old flavor data is stored.
     """
     try:
         # In case of resizes we need the old root disk size
         old_instance_type = instance_types.extract_instance_type(
             instance, prefix='old_')
         return old_instance_type['root_gb']
     except KeyError:
         return instance['root_gb']
コード例 #17
0
ファイル: api.py プロジェクト: Balakrishnan-Vivek/nova
 def add_fixed_ip_to_instance(self, context, instance, network_id,
                              conductor_api=None):
     """Adds a fixed ip to instance from specified network."""
     # The RPC call needs the flavor's rxtx_factor for bandwidth settings.
     rxtx_factor = instance_types.extract_instance_type(
         instance)['rxtx_factor']
     self.network_rpcapi.add_fixed_ip_to_instance(
         context,
         instance_id=instance['uuid'],
         rxtx_factor=rxtx_factor,
         host=instance['host'],
         network_id=network_id)
コード例 #18
0
ファイル: api.py プロジェクト: zestrada/nova-cs498cc
 def add_fixed_ip_to_instance(self, context, instance, network_id,
                              conductor_api=None):
     """Adds a fixed ip to instance from specified network."""
     # rxtx_factor comes from the instance's flavor and is passed so
     # the network service can apply bandwidth settings.
     instance_type = instance_types.extract_instance_type(instance)
     args = {'instance_id': instance['uuid'],
             'rxtx_factor': instance_type['rxtx_factor'],
             'host': instance['host'],
             'network_id': network_id}
     self.network_rpcapi.add_fixed_ip_to_instance(context, **args)
コード例 #19
0
ファイル: tilera.py プロジェクト: CiscoAS/nova
def get_partition_sizes(instance):
    """Return (root_mb, swap_mb) partition sizes for *instance*'s flavor.

    Swap is clamped to a minimum of 1 MB so a swap partition always
    exists.
    """
    flavor = instance_types.extract_instance_type(instance)
    root_mb = flavor['root_gb'] * 1024
    swap_mb = max(flavor['swap'], 1)
    return (root_mb, swap_mb)
コード例 #20
0
ファイル: tilera.py プロジェクト: zestrada/nova-cs498cc
def get_partition_sizes(instance):
    """Return (root_mb, swap_mb) partition sizes taken from the flavor."""
    instance_type = instance_types.extract_instance_type(instance)
    # Flavor stores the root size in GB; partitioning works in MB.
    root_mb = instance_type['root_gb'] * 1024
    swap_mb = instance_type['swap']

    # Always provision at least 1MB of swap so a swap partition exists.
    if swap_mb < 1:
        swap_mb = 1

    return (root_mb, swap_mb)
コード例 #21
0
    def _populate_quantum_extension_values(self, instance, port_req_body):
        """Populate quantum extension values for the instance.

        If the extension contains nvp-qos then get the rxtx_factor.
        """
        # Refresh the cached extension list before checking membership.
        self._refresh_quantum_extensions_cache()
        if 'nvp-qos' not in self.extensions:
            return
        flavor = instance_types.extract_instance_type(instance)
        port_req_body['port']['rxtx_factor'] = flavor.get('rxtx_factor')
コード例 #22
0
ファイル: api.py プロジェクト: Balakrishnan-Vivek/nova
    def _get_instance_nw_info(self, context, instance):
        """Returns all network info related to an instance."""
        # rxtx_factor is taken from the instance's flavor.
        rxtx_factor = instance_types.extract_instance_type(
            instance)['rxtx_factor']
        nw_info = self.network_rpcapi.get_instance_nw_info(
            context,
            instance_id=instance['uuid'],
            rxtx_factor=rxtx_factor,
            host=instance['host'],
            project_id=instance['project_id'])
        return network_model.NetworkInfo.hydrate(nw_info)
コード例 #23
0
ファイル: api.py プロジェクト: ashkasugai/nova
    def _populate_quantum_extension_values(self, instance, port_req_body):
        """Populate quantum extension values for the instance.

        If the extension contains nvp-qos then get the rxtx_factor.
        """
        # Make sure the cached extension list is current before checking.
        self._refresh_quantum_extensions_cache()
        if 'nvp-qos' in self.extensions:
            # rxtx_factor comes from the instance's flavor; .get() means
            # a missing value is passed through as None.
            instance_type = instance_types.extract_instance_type(instance)
            rxtx_factor = instance_type.get('rxtx_factor')
            port_req_body['port']['rxtx_factor'] = rxtx_factor
コード例 #24
0
ファイル: api.py プロジェクト: zestrada/nova-cs498cc
    def _get_instance_nw_info(self, context, instance):
        """Returns all network info related to an instance."""
        # rxtx_factor comes from the instance's flavor.
        instance_type = instance_types.extract_instance_type(instance)
        args = {'instance_id': instance['uuid'],
                'rxtx_factor': instance_type['rxtx_factor'],
                'host': instance['host'],
                'project_id': instance['project_id']}
        nw_info = self.network_rpcapi.get_instance_nw_info(context, **args)

        # Convert the raw RPC payload into a NetworkInfo model.
        return network_model.NetworkInfo.hydrate(nw_info)
コード例 #25
0
ファイル: api.py プロジェクト: zestrada/nova-cs498cc
    def remove_fixed_ip_from_instance(self, context, instance, address,
                                      conductor_api=None):
        """Removes a fixed ip from instance from specified network."""
        # The network RPC needs the flavor's rxtx_factor to update
        # bandwidth settings.
        rxtx_factor = instance_types.extract_instance_type(
            instance)['rxtx_factor']
        self.network_rpcapi.remove_fixed_ip_from_instance(
            context,
            instance_id=instance['uuid'],
            rxtx_factor=rxtx_factor,
            host=instance['host'],
            address=address)
コード例 #26
0
ファイル: api.py プロジェクト: Balakrishnan-Vivek/nova
    def remove_fixed_ip_from_instance(self, context, instance, address,
                                      conductor_api=None):
        """Removes a fixed ip from instance from specified network."""

        # rxtx_factor comes from the instance's flavor and is passed so
        # the network service can update bandwidth settings.
        instance_type = instance_types.extract_instance_type(instance)
        args = {'instance_id': instance['uuid'],
                'rxtx_factor': instance_type['rxtx_factor'],
                'host': instance['host'],
                'address': address}
        self.network_rpcapi.remove_fixed_ip_from_instance(context, **args)
コード例 #27
0
ファイル: cells_api.py プロジェクト: tr3buchet/nova
    def resize(self, context, instance, *args, **kwargs):
        """Resize (ie, migrate) a running instance.

        If flavor_id is None, the process is considered a migration, keeping
        the original flavor_id. If flavor_id is not None, the instance should
        be migrated to a new host and resized to the new flavor_id.
        """
        super(ComputeCellsAPI, self).resize(context, instance, *args, **kwargs)

        # NOTE(johannes): If we get to this point, then we know the
        # specified flavor_id is valid and exists. We'll need to load
        # it again, but that should be safe.

        old_instance_type = instance_types.extract_instance_type(instance)

        if kwargs.get("flavor_id"):
            new_instance_type = instance_types.extract_instance_type(
                instance, "new_")
        else:
            # Plain migration: the flavor does not change.
            new_instance_type = old_instance_type

        # NOTE(johannes): Later, when the resize is confirmed or reverted,
        # the superclass implementations of those methods will need access
        # to a local migration record for quota reasons. We don't need
        # source and/or destination information, just the old and new
        # instance_types. Status is set to 'finished' since nothing else
        # will update the status along the way.
        self.db.migration_create(
            context.elevated(),
            {
                "instance_uuid": instance["uuid"],
                "old_instance_type_id": old_instance_type["id"],
                "new_instance_type_id": new_instance_type["id"],
                "status": "finished",
            },
        )

        # FIXME(comstud): pass new instance_type object down to a method
        # that'll unfold it
        self._cast_to_cells(context, instance, "resize", *args, **kwargs)
コード例 #28
0
def get_partition_sizes(instance):
    """Return (root_mb, swap_mb) partition sizes for *instance*'s flavor."""
    flavor = instance_types.extract_instance_type(instance)
    root_mb = flavor['root_gb'] * 1024

    # NOTE(deva): For simpler code paths on the deployment side,
    #             we always create a swap partition. If the flavor
    #             does not specify any swap, we default to 1MB
    swap_mb = max(flavor['swap'], 1)

    return (root_mb, swap_mb)
コード例 #29
0
ファイル: pxe.py プロジェクト: CiscoAS/nova
def get_partition_sizes(instance):
    """Return the (root_mb, swap_mb) partition sizes for an instance.

    Values come from the instance's flavor; root_gb is converted to MB.
    """
    instance_type = instance_types.extract_instance_type(instance)
    root_mb = instance_type['root_gb'] * 1024
    swap_mb = instance_type['swap']

    # NOTE(deva): For simpler code paths on the deployment side,
    #             we always create a swap partition. If the flavor
    #             does not specify any swap, we default to 1MB
    if swap_mb < 1:
        swap_mb = 1

    return (root_mb, swap_mb)
コード例 #30
0
    def resize(self, context, instance, *args, **kwargs):
        """Resize (ie, migrate) a running instance.

        If flavor_id is None, the process is considered a migration, keeping
        the original flavor_id. If flavor_id is not None, the instance should
        be migrated to a new host and resized to the new flavor_id.
        """
        super(ComputeCellsAPI, self).resize(context, instance, *args, **kwargs)

        # NOTE(johannes): If we get to this point, then we know the
        # specified flavor_id is valid and exists. We'll need to load
        # it again, but that should be safe.
        old_type = instance_types.extract_instance_type(instance)

        if kwargs.get('flavor_id'):
            new_type = instance_types.extract_instance_type(instance, 'new_')
        else:
            # Pure migration: the flavor does not change.
            new_type = old_type

        # NOTE(johannes): Later, when the resize is confirmed or reverted,
        # the superclass implementations of those methods will need access
        # to a local migration record for quota reasons. We don't need
        # source and/or destination information, just the old and new
        # instance_types. Status is set to 'finished' since nothing else
        # will update the status along the way.
        migration_values = {'instance_uuid': instance['uuid'],
                            'old_instance_type_id': old_type['id'],
                            'new_instance_type_id': new_type['id'],
                            'status': 'finished'}
        self.db.migration_create(context.elevated(), migration_values)

        # FIXME(comstud): pass new instance_type object down to a method
        # that'll unfold it
        self._cast_to_cells(context, instance, 'resize', *args, **kwargs)
コード例 #31
0
ファイル: resource_tracker.py プロジェクト: emagana/nova
    def _update_usage_from_migration(self, instance, resources, migration):
        """Record resource usage contributed by a single migration.

        The migration may be incoming to this node, outbound from it, or
        a same-node resize (both at once).  Depending on direction and on
        whether the instance is already tracked here, usage is held for
        either the old or the new flavor.
        """
        uuid = migration['instance_uuid']
        LOG.audit(_("Updating from migration %s") % uuid)

        is_dest = (migration['dest_compute'] == self.host and
                   migration['dest_node'] == self.nodename)
        is_source = (migration['source_compute'] == self.host and
                     migration['source_node'] == self.nodename)

        tracked = self.tracked_instances.get(uuid, None)

        itype = None
        if is_dest and is_source:
            # Same-node resize: hold usage for whichever instance type
            # the instance is *not* currently in.
            if (instance['instance_type_id'] ==
                    migration['old_instance_type_id']):
                itype = instance_types.extract_instance_type(instance)
            else:
                # The instance record already carries the new flavor;
                # hold space for a possible revert to the old type.
                itype = instance_types.extract_instance_type(instance, 'old_')
        elif is_dest and not tracked:
            # Instance has not yet migrated here: claim the new flavor.
            itype = instance_types.extract_instance_type(instance, 'new_')
        elif is_source and not tracked:
            # Instance already left, but keep usage for a possible revert.
            itype = instance_types.extract_instance_type(instance, 'old_')

        if itype:
            self.stats.update_stats_for_migration(itype)
            self._update_usage(resources, itype)
            resources['stats'] = self.stats
            self.tracked_migrations[uuid] = (migration, itype)
コード例 #32
0
    def _update_usage_from_migration(self, instance, resources, migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.

        Usage for the relevant flavor (old or new, depending on the
        migration's direction relative to this node) is folded into
        *resources* and the migration is remembered in
        self.tracked_migrations.
        """
        uuid = migration['instance_uuid']
        LOG.audit(_("Updating from migration %s") % uuid)

        # Direction of the migration relative to this host/node.
        incoming = (migration['dest_compute'] == self.host
                    and migration['dest_node'] == self.nodename)
        outbound = (migration['source_compute'] == self.host
                    and migration['source_node'] == self.nodename)
        same_node = (incoming and outbound)

        # Whether this instance's usage is already tracked on this node.
        record = self.tracked_instances.get(uuid, None)
        itype = None

        if same_node:
            # same node resize. record usage for whichever instance type the
            # instance is *not* in:
            if (instance['instance_type_id'] ==
                    migration['old_instance_type_id']):
                itype = instance_types.extract_instance_type(instance)
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = instance_types.extract_instance_type(instance, 'old_')

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = instance_types.extract_instance_type(instance, 'new_')

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = instance_types.extract_instance_type(instance, 'old_')

        if itype:
            self.stats.update_stats_for_migration(itype)
            self._update_usage(resources, itype)
            resources['stats'] = self.stats
            self.tracked_migrations[uuid] = (migration, itype)
コード例 #33
0
ファイル: resource_tracker.py プロジェクト: mygoda/openstack
    def _get_instance_type(self, context, instance, prefix, instance_type_id=None):
        """Return the instance type, preferring stashed system metadata.

        Falls back to a conductor API lookup by id when the flavor is not
        stashed in the instance's system_metadata (see bug 1164110).
        """
        type_id = instance_type_id or instance["instance_type_id"]

        try:
            return instance_types.extract_instance_type(instance, prefix)
        except KeyError:
            return self.conductor_api.instance_type_get(context, type_id)
コード例 #34
0
ファイル: blockdev.py プロジェクト: AnyBucket/nova
    def create_volume_from_image(self, context, instance, image_id):
        """Creates a Logical Volume and copies the specified image to it

        :param context: nova context used to retrieve image from glance
        :param instance: instance to create the volume for
        :param image_id: image_id reference used to locate image in glance
        :returns: dictionary with the name of the created
                  Logical Volume device in 'device_name' key
        """

        file_name = '.'.join([image_id, 'gz'])
        file_path = os.path.join(CONF.powervm_img_local_path,
                                 file_name)

        # Fetch from glance only when the image is not already cached
        # locally.
        if not os.path.isfile(file_path):
            LOG.debug(_("Fetching image '%s' from glance") % image_id)
            images.fetch(context, image_id, file_path,
                        instance['user_id'],
                        instance['project_id'])
        else:
            LOG.debug((_("Using image found at '%s'") % file_path))

        LOG.debug(_("Ensuring image '%s' exists on IVM") % file_path)
        remote_path = CONF.powervm_img_remote_path
        remote_file_name, size = self._copy_image_file(file_path, remote_path)

        # calculate root device size in bytes
        # we respect the minimum root device size in constants
        instance_type = instance_types.extract_instance_type(instance)
        size_gb = max(instance_type['root_gb'], constants.POWERVM_MIN_ROOT_GB)
        size = size_gb * 1024 * 1024 * 1024

        # Pre-initialize so the cleanup path below cannot raise a
        # NameError when _create_logical_volume itself fails before
        # disk_name is ever assigned.
        disk_name = None
        try:
            LOG.debug(_("Creating logical volume of size %s bytes") % size)
            disk_name = self._create_logical_volume(size)

            LOG.debug(_("Copying image to the device '%s'") % disk_name)
            self._copy_file_to_device(remote_file_name, disk_name)
        except Exception:
            LOG.error(_("Error while creating logical volume from image. "
                        "Will attempt cleanup."))
            # attempt cleanup of logical volume before re-raising exception
            with excutils.save_and_reraise_exception():
                # Only attempt deletion when the volume was actually
                # created; otherwise there is nothing to clean up.
                if disk_name is not None:
                    try:
                        self.delete_volume(disk_name)
                    except Exception:
                        msg = _('Error while attempting cleanup of failed '
                                'deploy to logical volume.')
                        LOG.exception(msg)

        return {'device_name': disk_name}
コード例 #35
0
ファイル: test_instance_types.py プロジェクト: blahRus/nova
    def _test_extract_instance_type(self, prefix):
        """Round-trip the default instance type through system metadata
        and verify extract_instance_type() recovers the stashed props.
        """
        instance_type = instance_types.get_default_instance_type()

        metadata = {}
        instance_types.save_instance_type_info(metadata, instance_type, prefix)
        instance = {'system_metadata': self._dict_to_metadata(metadata)}
        _instance_type = instance_types.extract_instance_type(instance, prefix)

        # Only stashed properties survive the round trip; drop the rest
        # before comparing.  Iterate over a snapshot of the keys so the
        # dict can be mutated safely (required on Python 3, where
        # deleting during .keys() iteration raises RuntimeError).
        props = instance_types.system_metadata_instance_type_props.keys()
        for key in list(instance_type.keys()):
            if key not in props:
                del instance_type[key]

        self.assertEqual(instance_type, _instance_type)
コード例 #36
0
    def _create_migration(self, context, instance, instance_type):
        """Create a migration record for the upcoming resize.  This should
        be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
        claim will not be lost if the audit process starts.
        """
        old_instance_type = instance_types.extract_instance_type(instance)
        values = {'dest_compute': self.host,
                  'dest_node': self.nodename,
                  'dest_host': self.driver.get_host_ip_addr(),
                  'old_instance_type_id': old_instance_type['id'],
                  'new_instance_type_id': instance_type['id'],
                  'status': 'pre-migrating'}
        return self.conductor_api.migration_create(context, instance, values)
コード例 #37
0
    def create_volume_from_image(self, context, instance, image_id):
        """Creates a Logical Volume and copies the specified image to it

        :param context: nova context used to retrieve image from glance
        :param instance: instance to create the volume for
        :param image_id: image_id reference used to locate image in glance
        :returns: dictionary with the name of the created
                  Logical Volume device in 'device_name' key
        """

        file_name = '.'.join([image_id, 'gz'])
        file_path = os.path.join(CONF.powervm_img_local_path, file_name)

        # Fetch from glance only if the image is not already cached
        # locally.
        if not os.path.isfile(file_path):
            LOG.debug(_("Fetching image '%s' from glance") % image_id)
            images.fetch(context, image_id, file_path, instance['user_id'],
                         instance['project_id'])
        else:
            LOG.debug((_("Using image found at '%s'") % file_path))

        LOG.debug(_("Ensuring image '%s' exists on IVM") % file_path)
        remote_path = CONF.powervm_img_remote_path
        remote_file_name, size = self._copy_image_file(file_path, remote_path)

        # calculate root device size in bytes
        # we respect the minimum root device size in constants
        instance_type = instance_types.extract_instance_type(instance)
        size_gb = max(instance_type['root_gb'], constants.POWERVM_MIN_ROOT_GB)
        size = size_gb * 1024 * 1024 * 1024

        try:
            LOG.debug(_("Creating logical volume of size %s bytes") % size)
            disk_name = self._create_logical_volume(size)

            LOG.debug(_("Copying image to the device '%s'") % disk_name)
            self._copy_file_to_device(remote_file_name, disk_name)
        except Exception:
            LOG.error(
                _("Error while creating logical volume from image. "
                  "Will attempt cleanup."))
            # attempt cleanup of logical volume before re-raising exception
            # NOTE(review): if _create_logical_volume itself raised,
            # disk_name is unbound here and delete_volume() hits a
            # NameError (swallowed by the inner except) — confirm and
            # consider guarding with disk_name = None.
            with excutils.save_and_reraise_exception():
                try:
                    self.delete_volume(disk_name)
                except Exception:
                    msg = _('Error while attempting cleanup of failed '
                            'deploy to logical volume.')
                    LOG.exception(msg)

        return {'device_name': disk_name}
    def _test_extract_instance_type(self, prefix):
        """Round-trip the default instance type through system metadata
        and check extract_instance_type() returns the stashed props.
        """
        instance_type = instance_types.get_default_instance_type()

        metadata = {}
        instance_types.save_instance_type_info(metadata, instance_type, prefix)
        instance = {"system_metadata": self._dict_to_metadata(metadata)}
        _instance_type = instance_types.extract_instance_type(instance, prefix)

        # Only stashed properties survive the round trip; drop the rest.
        # NOTE(review): deleting while iterating .keys() relies on the
        # Python 2 list-returning behavior; on Python 3 this raises
        # RuntimeError — confirm target interpreter.
        props = instance_types.system_metadata_instance_type_props.keys()
        for key in instance_type.keys():
            if key not in props:
                del instance_type[key]

        self.assertEqual(instance_type, _instance_type)
コード例 #39
0
ファイル: resource_tracker.py プロジェクト: AnyBucket/nova
    def _create_migration(self, context, instance, instance_type):
        """Create a migration record for the upcoming resize.  This should
        be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
        claim will not be lost if the audit process starts.

        :param instance_type: the flavor the instance is resizing to;
                              the instance's current flavor is recorded
                              as old_instance_type_id.
        :returns: the created migration record (status 'pre-migrating')
        """
        old_instance_type = instance_types.extract_instance_type(instance)

        return self.conductor_api.migration_create(context, instance,
                {'dest_compute': self.host,
                 'dest_node': self.nodename,
                 'dest_host': self.driver.get_host_ip_addr(),
                 'old_instance_type_id': old_instance_type['id'],
                 'new_instance_type_id': instance_type['id'],
                 'status': 'pre-migrating'})
コード例 #40
0
    def _live_migration_dest_check(self, context, instance_ref, dest,
                                   ignore_hosts=None):
        """Live migration check routine (for destination host).

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param ignore_hosts: hosts that should be avoided as dest host
        """

        # If dest is not specified, have scheduler pick one.
        if dest is None:
            request_spec = {
                'instance_properties': instance_ref,
                'instance_type':
                    instance_types.extract_instance_type(instance_ref),
                'instance_uuids': [instance_ref['uuid']],
                'image': self.image_service.show(context,
                                                 instance_ref['image_ref']),
            }
            hosts = self.select_hosts(context, request_spec,
                                      {'ignore_hosts': ignore_hosts})
            return hosts[0]

        # Migrating onto the host the instance already runs on is
        # meaningless.
        if dest == instance_ref['host']:
            raise exception.UnableToMigrateToSelf(
                instance_id=instance_ref['uuid'], host=dest)

        # The destination must be a known compute service...
        try:
            dservice_ref = db.service_get_by_compute_host(context, dest)
        except exception.NotFound:
            raise exception.ComputeServiceUnavailable(host=dest)

        # ...and that service must currently be alive.
        if not self.servicegroup_api.service_is_up(dservice_ref):
            raise exception.ComputeServiceUnavailable(host=dest)

        # Check memory requirements
        self._assert_compute_node_has_enough_memory(context, instance_ref,
                                                    dest)

        return dest
コード例 #41
0
    def _get_instance_type(self, context, instance, prefix,
            instance_type_id=None):
        """Get the instance type from sys metadata if it's stashed.  If not,
        fall back to fetching it via the conductor API.

        See bug 1164110

        :param prefix: system_metadata key prefix ('', 'old_' or 'new_')
        :param instance_type_id: explicit flavor id for the fallback
                                 lookup; defaults to the instance's own
                                 instance_type_id.
        """
        if not instance_type_id:
            instance_type_id = instance['instance_type_id']

        try:
            return instance_types.extract_instance_type(instance, prefix)
        except KeyError:
            # Flavor info was not stashed in system_metadata.
            return self.conductor_api.instance_type_get(context,
                    instance_type_id)
コード例 #42
0
ファイル: servers.py プロジェクト: zhangheng1442/openstack
 def _get_flavor(self, request, instance):
     """Build the flavor view (id plus bookmark link) for an instance;
     returns an empty dict when the flavor is missing from the DB.
     """
     instance_type = instance_types.extract_instance_type(instance)
     if not instance_type:
         LOG.warn(_("Instance has had its instance_type removed "
                 "from the DB"), instance=instance)
         return {}

     flavor_id = instance_type["flavorid"]
     bookmark = self._flavor_builder._get_bookmark_link(request,
                                                        flavor_id,
                                                        "flavors")
     return {"id": str(flavor_id),
             "links": [{"rel": "bookmark",
                        "href": bookmark}]}
コード例 #43
0
ファイル: servers.py プロジェクト: zestrada/nova-cs498cc
 def _get_flavor(self, request, instance):
     """Return the flavor view (id + bookmark link) for an instance.

     Returns an empty dict when the instance's flavor is no longer
     present in the DB.
     """
     instance_type = instance_types.extract_instance_type(instance)
     if not instance_type:
         LOG.warn(_("Instance has had its instance_type removed "
                 "from the DB"), instance=instance)
         return {}
     flavor_id = instance_type["flavorid"]
     flavor_bookmark = self._flavor_builder._get_bookmark_link(request,
                                                               flavor_id,
                                                               "flavors")
     return {
         "id": str(flavor_id),
         "links": [{
             "rel": "bookmark",
             "href": flavor_bookmark,
         }],
     }
コード例 #44
0
ファイル: api.py プロジェクト: Balakrishnan-Vivek/nova
    def migrate_instance_finish(self, context, instance, migration):
        """Finish migrating the network of an instance."""
        flavor = instance_types.extract_instance_type(instance)
        args = {'instance_uuid': instance['uuid'],
                'rxtx_factor': flavor['rxtx_factor'],
                'project_id': instance['project_id'],
                'source_compute': migration['source_compute'],
                'dest_compute': migration['dest_compute'],
                'floating_addresses': None}

        if self._is_multi_host(context, instance):
            # Multi-host networks carry the floating IPs along and
            # direct the RPC at the destination compute host.
            args['floating_addresses'] = self._get_floating_ip_addresses(
                context, instance)
            args['host'] = migration['dest_compute']

        self.network_rpcapi.migrate_instance_finish(context, **args)
コード例 #45
0
ファイル: api.py プロジェクト: zestrada/nova-cs498cc
    def migrate_instance_finish(self, context, instance, migration):
        """Finish migrating the network of an instance.

        Forwards the migration details (including the flavor's
        rxtx_factor) to the network service over RPC; for multi-host
        networks, floating addresses and the destination host are
        included as well.
        """
        instance_type = instance_types.extract_instance_type(instance)
        args = dict(
            instance_uuid=instance['uuid'],
            rxtx_factor=instance_type['rxtx_factor'],
            project_id=instance['project_id'],
            source_compute=migration['source_compute'],
            dest_compute=migration['dest_compute'],
            floating_addresses=None,
        )

        if self._is_multi_host(context, instance):
            args['floating_addresses'] = \
                self._get_floating_ip_addresses(context, instance)
            args['host'] = migration['dest_compute']

        self.network_rpcapi.migrate_instance_finish(context, **args)
コード例 #46
0
ファイル: driver.py プロジェクト: CiscoAS/nova
    def _live_migration_dest_check(self, context, instance_ref, dest,
                                   ignore_hosts=None):
        """Live migration check routine (for destination host).

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param ignore_hosts: hosts that should be avoided as dest host
        :returns: the validated (or scheduler-selected) destination host
        """

        # If dest is not specified, have scheduler pick one.
        if dest is None:
            instance_type = instance_types.extract_instance_type(instance_ref)
            image = self.image_service.show(context, instance_ref['image_ref'])
            request_spec = {'instance_properties': instance_ref,
                            'instance_type': instance_type,
                            'instance_uuids': [instance_ref['uuid']],
                            'image': image}
            filter_properties = {'ignore_hosts': ignore_hosts}
            return self.select_hosts(context, request_spec,
                                     filter_properties)[0]

        # Check that the host the instance is running on and dest are
        # not the same.
        src = instance_ref['host']
        if dest == src:
            raise exception.UnableToMigrateToSelf(
                    instance_id=instance_ref['uuid'], host=dest)

        # Check that dest exists and is a compute node.
        try:
            dservice_ref = db.service_get_by_compute_host(context, dest)
        except exception.NotFound:
            raise exception.ComputeServiceUnavailable(host=dest)

        # Check that the dest host is alive.
        if not self.servicegroup_api.service_is_up(dservice_ref):
            raise exception.ComputeServiceUnavailable(host=dest)

        # Check memory requirements
        self._assert_compute_node_has_enough_memory(context,
                                                   instance_ref, dest)

        return dest
コード例 #47
0
    def list(self, host=None):
        """Show a list of all instances."""
        header_fmt = ("%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
                      "  %-10s %-10s %-10s %-5s")
        print (header_fmt % (_('instance'), _('node'), _('type'),
                             _('state'), _('launched'), _('image'),
                             _('kernel'), _('ramdisk'), _('project'),
                             _('user'), _('zone'), _('index')))

        ctxt = context.get_admin_context()
        if host is None:
            instances = db.instance_get_all(ctxt)
        else:
            instances = db.instance_get_all_by_host(ctxt, host)

        row_fmt = ("%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
                   " %-10s %-10s %-10s %-5d")
        for inst in instances:
            inst_type = instance_types.extract_instance_type(inst)
            print (row_fmt % (inst['display_name'], inst['host'],
                              inst_type['name'], inst['vm_state'],
                              inst['launched_at'], inst['image_ref'],
                              inst['kernel_id'], inst['ramdisk_id'],
                              inst['project_id'], inst['user_id'],
                              inst['availability_zone'],
                              inst['launch_index']))
コード例 #48
0
    def _get_flavor(self, context, compute_api, instance, flavors_cache):
        """Return flavor info for *instance*, preferring system_metadata.

        For deleted instances only (those that migration #153 would have
        skipped), fall back to a cached by-id lookup; returns None when
        the instance type no longer exists.
        """
        try:
            return instance_types.extract_instance_type(instance)
        except KeyError:
            if not instance['deleted']:
                # Live instances must have stashed flavor metadata, so
                # propagate the error.
                raise

        type_id = instance['instance_type_id']
        if type_id in flavors_cache:
            return flavors_cache[type_id]

        try:
            it_ref = compute_api.get_instance_type(context, type_id)
            flavors_cache[type_id] = it_ref
        except exception.InstanceTypeNotFound:
            # can't bill if there is no instance type
            it_ref = None

        return it_ref
コード例 #49
0
    def _get_flavor(self, context, compute_api, instance, flavors_cache):
        """Get flavor information from the instance's system_metadata,
        allowing a fallback to lookup by-id for deleted instances only.

        Returns None when the fallback lookup finds no instance type.
        """
        try:
            return instance_types.extract_instance_type(instance)
        except KeyError:
            if not instance['deleted']:
                # Only support the fallback mechanism for deleted instances
                # that would have been skipped by migration #153
                raise

        flavor_type = instance['instance_type_id']
        if flavor_type in flavors_cache:
            return flavors_cache[flavor_type]

        try:
            it_ref = compute_api.get_instance_type(context, flavor_type)
            flavors_cache[flavor_type] = it_ref
        except exception.InstanceTypeNotFound:
            # can't bill if there is no instance type
            it_ref = None

        return it_ref
コード例 #50
0
ファイル: api.py プロジェクト: bopopescu/novatest
 def _populate_quantum_extension_values(self, instance, port_req_body):
     """Add extension-specific values (NVP QoS rxtx_factor) to a port
     request body when the 'nvp-qos' extension is available.
     """
     self._refresh_quantum_extensions_cache()
     if 'nvp-qos' not in self.extensions:
         return
     flavor = instance_types.extract_instance_type(instance)
     port_req_body['port']['rxtx_factor'] = flavor.get('rxtx_factor')
コード例 #51
0
ファイル: base.py プロジェクト: Guoweiwei1130/openstack
    def get_ec2_metadata(self, version):
        """Build the EC2-style metadata dict for the requested API version.

        :param version: EC2 metadata version string, or 'latest'
        :raises InvalidMetadataVersion: if the version is unknown
        :returns: dict with a 'meta-data' key and, when raw user data is
                  present, a 'user-data' key
        """
        if version == "latest":
            version = VERSIONS[-1]

        if version not in VERSIONS:
            raise InvalidMetadataVersion(version)

        hostname = self._get_hostname()

        # Use the first floating IP (or '') as the public address.
        floating_ips = self.ip_info['floating_ips']
        floating_ip = floating_ips and floating_ips[0] or ''

        fmt_sgroups = [x['name'] for x in self.security_groups]

        # Base metadata present in every supported version.
        meta_data = {
            'ami-id': self.ec2_ids['ami-id'],
            'ami-launch-index': self.instance['launch_index'],
            'ami-manifest-path': 'FIXME',
            'instance-id': self.ec2_ids['instance-id'],
            'hostname': hostname,
            'local-ipv4': self.address,
            'reservation-id': self.instance['reservation_id'],
            'security-groups': fmt_sgroups}

        # public keys are strangely rendered in ec2 metadata service
        #  meta-data/public-keys/ returns '0=keyname' (with no trailing /)
        # and only if there is a public key given.
        # '0=keyname' means there is a normally rendered dict at
        #  meta-data/public-keys/0
        #
        # meta-data/public-keys/ : '0=%s' % keyname
        # meta-data/public-keys/0/ : 'openssh-key'
        # meta-data/public-keys/0/openssh-key : '%s' % publickey
        if self.instance['key_name']:
            meta_data['public-keys'] = {
                '0': {'_name': "0=" + self.instance['key_name'],
                      'openssh-key': self.instance['key_data']}}

        # Each _check_version gate below adds the fields introduced by
        # that EC2 metadata revision.
        if self._check_version('2007-01-19', version):
            meta_data['local-hostname'] = hostname
            meta_data['public-hostname'] = hostname
            meta_data['public-ipv4'] = floating_ip

        # Deliberately disabled ('if False') until product codes are stored.
        if False and self._check_version('2007-03-01', version):
            # TODO(vish): store product codes
            meta_data['product-codes'] = []

        if self._check_version('2007-08-29', version):
            instance_type = instance_types.extract_instance_type(self.instance)
            meta_data['instance-type'] = instance_type['name']

        # Deliberately disabled ('if False') until ancestor ids are stored.
        if False and self._check_version('2007-10-10', version):
            # TODO(vish): store ancestor ids
            meta_data['ancestor-ami-ids'] = []

        if self._check_version('2007-12-15', version):
            meta_data['block-device-mapping'] = self.mappings
            if 'kernel-id' in self.ec2_ids:
                meta_data['kernel-id'] = self.ec2_ids['kernel-id']
            if 'ramdisk-id' in self.ec2_ids:
                meta_data['ramdisk-id'] = self.ec2_ids['ramdisk-id']

        if self._check_version('2008-02-01', version):
            meta_data['placement'] = {'availability-zone':
                                      self.availability_zone}

        if self._check_version('2008-09-01', version):
            meta_data['instance-action'] = 'none'

        data = {'meta-data': meta_data}
        if self.userdata_raw is not None:
            data['user-data'] = self.userdata_raw

        return data
コード例 #52
0
def get_disk_mapping(virt_type, instance,
                     disk_bus, cdrom_bus,
                     block_device_info=None,
                     image_meta=None, rescue=False):
    """Determine how to map default disks to the virtual machine.

       This is about figuring out whether the default 'disk',
       'disk.local', 'disk.swap' and 'disk.config' images have
       been overriden by the block device mapping.

       :param virt_type: hypervisor type (e.g. 'kvm', 'lxc', 'xen')
       :param instance: the instance whose disk layout is being built
       :param disk_bus: default bus to use for disk devices
       :param cdrom_bus: default bus to use for cdrom devices
       :param block_device_info: driver block device info, may be None
       :param image_meta: image metadata dict, may be None
       :param rescue: whether to build the rescue disk layout

       Returns the guest disk mapping for the devices."""

    inst_type = instance_types.extract_instance_type(instance)

    mapping = {}

    if virt_type == "lxc":
        # NOTE(zul): This information is not used by the libvirt driver
        # however we need to populate mapping so the image can be
        # created when the instance is started. This can
        # be removed when we convert LXC to use block devices.
        root_disk_bus = disk_bus
        root_device_type = 'disk'

        root_info = get_next_disk_info(mapping,
                                       root_disk_bus,
                                       root_device_type)
        mapping['root'] = root_info
        mapping['disk'] = root_info

        return mapping

    if rescue:
        # Rescue mode: the rescue image boots first, the original OS
        # disk is attached as the second device.
        rescue_info = get_next_disk_info(mapping,
                                         disk_bus)
        mapping['disk.rescue'] = rescue_info
        mapping['root'] = rescue_info

        os_info = get_next_disk_info(mapping,
                                     disk_bus)
        mapping['disk'] = os_info

        return mapping

    # Booting from an ISO requires a cdrom device for the root disk.
    if image_meta and image_meta.get('disk_format') == 'iso':
        root_disk_bus = cdrom_bus
        root_device_type = 'cdrom'
    else:
        root_disk_bus = disk_bus
        root_device_type = 'disk'

    root_device_name = driver.block_device_info_get_root(block_device_info)
    if root_device_name is not None:
        root_device = block_device.strip_dev(root_device_name)
        root_info = {'bus': get_disk_bus_for_disk_dev(virt_type,
                                                      root_device),
                     'dev': root_device,
                     'type': root_device_type}
    else:
        root_info = get_next_disk_info(mapping,
                                       root_disk_bus,
                                       root_device_type)
    mapping['root'] = root_info
    # Only expose the image-backed 'disk' when no volume claims the
    # root device.
    if not block_device.volume_in_mapping(root_info['dev'],
                                          block_device_info):
        mapping['disk'] = root_info

    eph_info = get_next_disk_info(mapping,
                                  disk_bus)
    ephemeral_device = False
    if not (block_device.volume_in_mapping(eph_info['dev'],
                                           block_device_info) or
            0 in [eph['num'] for eph in
                  driver.block_device_info_get_ephemerals(
                block_device_info)]):
        if instance['ephemeral_gb'] > 0:
            ephemeral_device = True

    if ephemeral_device:
        mapping['disk.local'] = eph_info

    # NOTE: the loops below deliberately use loop-local names instead of
    # rebinding the 'disk_bus' parameter; rebinding it would make the
    # default bus used for the flavor swap disk and the config drive
    # silently become the bus of whatever device was processed last.
    for eph in driver.block_device_info_get_ephemerals(
        block_device_info):
        eph_dev = block_device.strip_dev(eph['device_name'])
        eph_bus = get_disk_bus_for_disk_dev(virt_type, eph_dev)

        mapping[get_eph_disk(eph)] = {'bus': eph_bus,
                                      'dev': eph_dev,
                                      'type': 'disk'}

    swap = driver.block_device_info_get_swap(block_device_info)
    if driver.swap_is_usable(swap):
        swap_dev = block_device.strip_dev(swap['device_name'])
        swap_bus = get_disk_bus_for_disk_dev(virt_type, swap_dev)

        mapping['disk.swap'] = {'bus': swap_bus,
                                'dev': swap_dev,
                                'type': 'disk'}
    elif inst_type['swap'] > 0:
        swap_info = get_next_disk_info(mapping,
                                       disk_bus)
        if not block_device.volume_in_mapping(swap_info['dev'],
                                              block_device_info):
            mapping['disk.swap'] = swap_info

    block_device_mapping = driver.block_device_info_get_mapping(
        block_device_info)

    for vol in block_device_mapping:
        vol_dev = vol['mount_device'].rpartition("/")[2]
        vol_bus = get_disk_bus_for_disk_dev(virt_type, vol_dev)

        mapping[vol['mount_device']] = {'bus': vol_bus,
                                        'dev': vol_dev,
                                        'type': 'disk'}

    if configdrive.enabled_for(instance):
        config_info = get_next_disk_info(mapping,
                                         disk_bus,
                                         last_device=True)
        mapping['disk.config'] = config_info

    return mapping
コード例 #53
0
    def _tenant_usages_for_period(self, context, period_start,
                                  period_stop, tenant_id=None, detailed=True):
        """Build per-tenant usage summaries for instances active in a window.

        :param context: request context used for the compute API query
        :param period_start: start of the accounting window (datetime)
        :param period_stop: end of the accounting window (datetime)
        :param tenant_id: restrict to a single tenant, or None for all
        :param detailed: include per-server usage entries when True
        :returns: list of per-tenant usage summary dicts
        """
        compute_api = api.API()
        instances = compute_api.get_active_by_window(context,
                                                     period_start,
                                                     period_stop,
                                                     tenant_id)
        rval = {}

        for instance in instances:
            info = {}
            info['hours'] = self._hours_for(instance,
                                            period_start,
                                            period_stop)
            flavor = instance_types.extract_instance_type(instance)

            info['instance_id'] = instance['uuid']
            info['name'] = instance['display_name']

            info['memory_mb'] = flavor['memory_mb']
            info['local_gb'] = flavor['root_gb'] + flavor['ephemeral_gb']
            info['vcpus'] = flavor['vcpus']

            info['tenant_id'] = instance['project_id']

            info['flavor'] = flavor['name']

            info['started_at'] = instance['launched_at']

            info['ended_at'] = instance['terminated_at']

            # A terminated_at timestamp means the instance no longer exists.
            if info['ended_at']:
                info['state'] = 'terminated'
            else:
                info['state'] = instance['vm_state']

            now = timeutils.utcnow()

            # Uptime runs until termination, or until now if still alive.
            if info['state'] == 'terminated':
                delta = info['ended_at'] - info['started_at']
            else:
                delta = now - info['started_at']

            info['uptime'] = delta.days * 24 * 3600 + delta.seconds

            # Lazily create the per-tenant summary on first sighting.
            if info['tenant_id'] not in rval:
                summary = {}
                summary['tenant_id'] = info['tenant_id']
                if detailed:
                    summary['server_usages'] = []
                summary['total_local_gb_usage'] = 0
                summary['total_vcpus_usage'] = 0
                summary['total_memory_mb_usage'] = 0
                summary['total_hours'] = 0
                summary['start'] = period_start
                summary['stop'] = period_stop
                rval[info['tenant_id']] = summary

            summary = rval[info['tenant_id']]
            summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
            summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
            summary['total_memory_mb_usage'] += (info['memory_mb'] *
                                                 info['hours'])

            summary['total_hours'] += info['hours']
            if detailed:
                summary['server_usages'].append(info)

        return rval.values()
コード例 #54
0
 def _populate_quantum_extension_values(self, instance, port_req_body):
     """Add extension-specific values to a Quantum port request body.

     When the 'nvp-qos' extension is available, copy the flavor's
     rxtx_factor into the port request so QoS can be applied.
     """
     self._refresh_quantum_extensions_cache()
     if 'nvp-qos' not in self.extensions:
         return
     flavor = instance_types.extract_instance_type(instance)
     port_req_body['port']['rxtx_factor'] = flavor.get('rxtx_factor')
コード例 #55
0
ファイル: base.py プロジェクト: bopopescu/NovaOrc
    def get_ec2_metadata(self, version):
        """Return the EC2-style metadata dict for the requested API version.

        Baseline keys are always present; each successive metadata API
        version layers additional entries on top of them.
        """
        if version == "latest":
            version = VERSIONS[-1]
        if version not in VERSIONS:
            raise InvalidMetadataVersion(version)

        hostname = self._get_hostname()
        floating_ips = self.ip_info['floating_ips']
        floating_ip = floating_ips and floating_ips[0] or ''
        sgroup_names = [group['name'] for group in self.security_groups]

        # Keys present in every metadata version.
        meta_data = {}
        meta_data['ami-id'] = self.ec2_ids['ami-id']
        meta_data['ami-launch-index'] = self.instance['launch_index']
        meta_data['ami-manifest-path'] = 'FIXME'
        meta_data['instance-id'] = self.ec2_ids['instance-id']
        meta_data['hostname'] = hostname
        meta_data['local-ipv4'] = self.address
        meta_data['reservation-id'] = self.instance['reservation_id']
        meta_data['security-groups'] = sgroup_names

        # public keys are strangely rendered in ec2 metadata service
        #  meta-data/public-keys/ returns '0=keyname' (with no trailing /)
        # and only if there is a public key given.
        # '0=keyname' means there is a normally rendered dict at
        #  meta-data/public-keys/0
        #
        # meta-data/public-keys/ : '0=%s' % keyname
        # meta-data/public-keys/0/ : 'openssh-key'
        # meta-data/public-keys/0/openssh-key : '%s' % publickey
        if self.instance['key_name']:
            key_dict = {'_name': "0=" + self.instance['key_name'],
                        'openssh-key': self.instance['key_data']}
            meta_data['public-keys'] = {'0': key_dict}

        if self._check_version('2007-01-19', version):
            meta_data.update({'local-hostname': hostname,
                              'public-hostname': hostname,
                              'public-ipv4': floating_ip})

        if False and self._check_version('2007-03-01', version):
            # TODO(vish): store product codes
            meta_data['product-codes'] = []

        if self._check_version('2007-08-29', version):
            instance_type = instance_types.extract_instance_type(self.instance)
            meta_data['instance-type'] = instance_type['name']

        if False and self._check_version('2007-10-10', version):
            # TODO(vish): store ancestor ids
            meta_data['ancestor-ami-ids'] = []

        if self._check_version('2007-12-15', version):
            meta_data['block-device-mapping'] = self.mappings
            # kernel/ramdisk ids are only present for some image types.
            for ec2_key in ('kernel-id', 'ramdisk-id'):
                if ec2_key in self.ec2_ids:
                    meta_data[ec2_key] = self.ec2_ids[ec2_key]

        if self._check_version('2008-02-01', version):
            meta_data['placement'] = {
                'availability-zone': self.availability_zone}

        if self._check_version('2008-09-01', version):
            meta_data['instance-action'] = 'none'

        data = {'meta-data': meta_data}
        if self.userdata_raw is not None:
            data['user-data'] = self.userdata_raw
        return data
コード例 #56
0
ファイル: test_scheduler.py プロジェクト: achbarou/nova
    def test_live_migration_auto_set_dest(self):
        """Scheduler keeps trying destination hosts until one passes.

        With no explicit dest, the scheduler selects a host; the first
        candidate fails the common check, the second fails the remote
        destination check over RPC, and the third passes, so the live
        migration is finally issued to it.  Each failed host is added to
        'ignore_hosts' for the next selection.  NOTE: mox replays these
        expectations in recording order, so the call sequence is part of
        the assertion.
        """
        instance = self._live_migration_instance()

        # Confirm scheduler picks target host if none given.
        self.mox.StubOutWithMock(instance_types, 'extract_instance_type')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, 'select_hosts')
        self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(self.driver.compute_rpcapi, 'live_migration')

        dest = None
        block_migration = False
        disk_over_commit = False
        request_spec = {'instance_properties': instance,
                        'instance_type': {},
                        'instance_uuids': [instance['uuid']],
                        'image': self.image_service.show(self.context,
                                                         instance['image_ref'])
                        }

        self.driver._live_migration_src_check(self.context, instance)

        instance_types.extract_instance_type(
                instance).MultipleTimes().AndReturn({})

        # First selected host raises exception.InvalidHypervisorType
        self.driver.select_hosts(self.context, request_spec,
                {'ignore_hosts': [instance['host']]}).AndReturn(['fake_host2'])
        self.driver._live_migration_common_check(self.context, instance,
                'fake_host2').AndRaise(exception.InvalidHypervisorType())

        # Second selected host raises exception.InvalidCPUInfo
        self.driver.select_hosts(self.context, request_spec,
                {'ignore_hosts': [instance['host'],
                                  'fake_host2']}).AndReturn(['fake_host3'])
        self.driver._live_migration_common_check(self.context, instance,
                                                 'fake_host3')
        rpc.call(self.context, "compute.fake_host3",
                   {"method": 'check_can_live_migrate_destination',
                    "namespace": None,
                    "args": {'instance': instance,
                             'block_migration': block_migration,
                             'disk_over_commit': disk_over_commit},
                    "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
                 None).AndRaise(exception.InvalidCPUInfo(reason=""))

        # Third selected host pass all checks
        self.driver.select_hosts(self.context, request_spec,
                {'ignore_hosts': [instance['host'],
                                  'fake_host2',
                                  'fake_host3']}).AndReturn(['fake_host4'])
        self.driver._live_migration_common_check(self.context, instance,
                                                 'fake_host4')
        rpc.call(self.context, "compute.fake_host4",
                   {"method": 'check_can_live_migrate_destination',
                    "namespace": None,
                    "args": {'instance': instance,
                             'block_migration': block_migration,
                             'disk_over_commit': disk_over_commit},
                    "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
                 None).AndReturn({})
        # Migration is ultimately issued against the surviving candidate.
        self.driver.compute_rpcapi.live_migration(self.context,
                host=instance['host'], instance=instance, dest='fake_host4',
                block_migration=block_migration, migrate_data={})

        self.mox.ReplayAll()
        result = self.driver.schedule_live_migration(self.context,
                instance=instance, dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
        self.assertEqual(result, None)
コード例 #57
0
    def _tenant_usages_for_period(self,
                                  context,
                                  period_start,
                                  period_stop,
                                  tenant_id=None,
                                  detailed=True):
        """Build per-tenant usage summaries for instances active in a window.

        :param context: request context used for the compute API query
        :param period_start: start of the accounting window (datetime)
        :param period_stop: end of the accounting window (datetime)
        :param tenant_id: restrict to a single tenant, or None for all
        :param detailed: include per-server usage entries when True
        :returns: list of per-tenant usage summary dicts
        """
        compute_api = api.API()
        instances = compute_api.get_active_by_window(context, period_start,
                                                     period_stop, tenant_id)
        rval = {}

        for instance in instances:
            info = {}
            info['hours'] = self._hours_for(instance, period_start,
                                            period_stop)
            flavor = instance_types.extract_instance_type(instance)

            info['instance_id'] = instance['uuid']
            info['name'] = instance['display_name']

            info['memory_mb'] = flavor['memory_mb']
            info['local_gb'] = flavor['root_gb'] + flavor['ephemeral_gb']
            info['vcpus'] = flavor['vcpus']

            info['tenant_id'] = instance['project_id']

            info['flavor'] = flavor['name']

            info['started_at'] = instance['launched_at']

            info['ended_at'] = instance['terminated_at']

            # A terminated_at timestamp means the instance no longer exists.
            if info['ended_at']:
                info['state'] = 'terminated'
            else:
                info['state'] = instance['vm_state']

            now = timeutils.utcnow()

            # Uptime runs until termination, or until now if still alive.
            if info['state'] == 'terminated':
                delta = info['ended_at'] - info['started_at']
            else:
                delta = now - info['started_at']

            info['uptime'] = delta.days * 24 * 3600 + delta.seconds

            # Lazily create the per-tenant summary on first sighting.
            if info['tenant_id'] not in rval:
                summary = {}
                summary['tenant_id'] = info['tenant_id']
                if detailed:
                    summary['server_usages'] = []
                summary['total_local_gb_usage'] = 0
                summary['total_vcpus_usage'] = 0
                summary['total_memory_mb_usage'] = 0
                summary['total_hours'] = 0
                summary['start'] = period_start
                summary['stop'] = period_stop
                rval[info['tenant_id']] = summary

            summary = rval[info['tenant_id']]
            summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
            summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
            summary['total_memory_mb_usage'] += (info['memory_mb'] *
                                                 info['hours'])

            summary['total_hours'] += info['hours']
            if detailed:
                summary['server_usages'].append(info)

        return rval.values()
コード例 #58
0
def info_from_instance(context, instance_ref, network_info, system_metadata,
                       **kw):
    """Get detailed instance information for an instance which is common to all
    notifications.

    :param network_info: network_info provided if not None
    :param system_metadata: system_metadata DB entries for the instance,
    if not None.  *NOTE*: Currently unused here in trunk, but needed for
    potential custom modifications.
    """
    def null_safe_str(s):
        return str(s) if s else ''

    instance_type = instance_types.extract_instance_type(instance_ref)

    if system_metadata is None:
        system_metadata = utils.metadata_to_dict(
            instance_ref['system_metadata'])

    instance_info = {}

    # Owner properties
    instance_info['tenant_id'] = instance_ref['project_id']
    instance_info['user_id'] = instance_ref['user_id']

    # Identity properties
    instance_info['instance_id'] = instance_ref['uuid']
    instance_info['display_name'] = instance_ref['display_name']
    instance_info['reservation_id'] = instance_ref['reservation_id']
    instance_info['hostname'] = instance_ref['hostname']

    # Type properties
    instance_info['instance_type'] = instance_type.get('name', '')
    instance_info['instance_type_id'] = instance_ref['instance_type_id']
    instance_info['architecture'] = instance_ref['architecture']

    # Capacity properties
    instance_info['memory_mb'] = instance_ref['memory_mb']
    instance_info['disk_gb'] = (instance_ref['root_gb'] +
                                instance_ref['ephemeral_gb'])
    instance_info['vcpus'] = instance_ref['vcpus']
    # Note(dhellmann): This makes the disk_gb value redundant, but
    # we are keeping it for backwards-compatibility with existing
    # users of notifications.
    instance_info['root_gb'] = instance_ref['root_gb']
    instance_info['ephemeral_gb'] = instance_ref['ephemeral_gb']

    # Location properties
    instance_info['host'] = instance_ref['host']
    instance_info['availability_zone'] = instance_ref['availability_zone']

    # Date properties
    instance_info['created_at'] = str(instance_ref['created_at'])
    # Nova's deleted vs terminated instance terminology is confusing,
    # this should be when the instance was deleted (i.e. terminated_at),
    # not when the db record was deleted. (mdragon)
    instance_info['deleted_at'] = null_safe_str(
        instance_ref.get('terminated_at'))
    instance_info['launched_at'] = null_safe_str(
        instance_ref.get('launched_at'))

    # Image properties
    instance_info['image_ref_url'] = glance.generate_image_url(
        instance_ref['image_ref'])
    instance_info['os_type'] = instance_ref['os_type']
    instance_info['kernel_id'] = instance_ref['kernel_id']
    instance_info['ramdisk_id'] = instance_ref['ramdisk_id']

    # Status properties
    instance_info['state'] = instance_ref['vm_state']
    instance_info['state_description'] = null_safe_str(
        instance_ref.get('task_state'))

    # accessIPs
    instance_info['access_ip_v4'] = instance_ref['access_ip_v4']
    instance_info['access_ip_v6'] = instance_ref['access_ip_v6']

    if network_info is not None:
        # Tag each fixed IP with its network label before flattening.
        fixed_ips = []
        for vif in network_info:
            label = vif["network"]["label"]
            for ip in vif.fixed_ips():
                ip["label"] = label
                fixed_ips.append(ip)
        instance_info['fixed_ips'] = fixed_ips

    # add image metadata
    instance_info["image_meta"] = image_meta(system_metadata)

    # add instance metadata
    instance_info['metadata'] = instance_ref['metadata']

    instance_info.update(kw)
    return instance_info