def test_config_drive_required_by_image_property(self):
    """Image property 'img_config_drive' drives required_by():
    'mandatory' requires a config drive, 'optional' does not.
    """
    instance = fake_instance.fake_instance_obj(context.get_admin_context())
    instance.config_drive = ''
    prop_key = utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive'
    for prop_value, required in (('mandatory', True), ('optional', False)):
        instance.system_metadata = {prop_key: prop_value}
        if required:
            self.assertTrue(configdrive.required_by(instance))
        else:
            self.assertFalse(configdrive.required_by(instance))
def test_config_drive_required_by_image_property(self):
    """A 'mandatory' img_config_drive image property forces a config
    drive; an 'optional' one leaves it off.
    """
    admin_ctxt = context.get_admin_context()
    instance = fake_instance.fake_instance_obj(admin_ctxt)
    instance.config_drive = ''

    key = utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive'

    instance.system_metadata = {key: 'mandatory'}
    self.assertTrue(configdrive.required_by(instance))

    instance.system_metadata = {key: 'optional'}
    self.assertFalse(configdrive.required_by(instance))
def _check_and_attach_config_drive(self, instance, vm_gen):
    """Attach the instance's existing config drive when one is required.

    Looks up the config drive file for the instance and attaches it to
    the VM. Raises HyperVException when a config drive is required but
    no config drive file can be found.
    """
    if not configdrive.required_by(instance):
        return

    drive_path = self._pathutils.lookup_configdrive_path(instance.name)
    if not drive_path:
        raise vmutils.HyperVException(
            _("Config drive is required by instance: %s, "
              "but it does not exist.") % instance.name)

    self._vmops.attach_config_drive(instance, drive_path, vm_gen)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info, block_device_info=None):
    """Create a new VM and start it."""
    LOG.info(_LI("Spawning new instance"), instance=instance)

    name = instance.name
    if self._vmutils.vm_exists(name):
        raise exception.InstanceExists(name=name)

    # Make sure we're starting with a clean slate.
    self._delete_disk_files(name)

    # When booting from a volume there is no local root disk to create.
    boot_from_volume = self._volumeops.ebs_root_in_block_devices(
        block_device_info)
    root_vhd = (None if boot_from_volume
                else self._create_root_vhd(context, instance))
    eph_vhd = self.create_ephemeral_vhd(instance)
    vm_gen = self.get_image_vm_generation(root_vhd, image_meta)

    try:
        self.create_instance(instance, network_info, block_device_info,
                             root_vhd, eph_vhd, vm_gen)

        if configdrive.required_by(instance):
            cdrive_path = self._create_config_drive(
                instance, injected_files, admin_password, network_info)
            self.attach_config_drive(instance, cdrive_path, vm_gen)

        self.power_on(instance)
    except Exception:
        # Tear down whatever was partially created, then re-raise.
        with excutils.save_and_reraise_exception():
            self.destroy(instance)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info, block_device_info=None):
    """Create a new VM and start it."""
    LOG.info(_LI("Spawning new instance"), instance=instance)

    vm_name = instance.name
    if self._vmutils.vm_exists(vm_name):
        raise exception.InstanceExists(name=vm_name)

    # Make sure we're starting with a clean slate.
    self._delete_disk_files(vm_name)

    if not self._volumeops.ebs_root_in_block_devices(block_device_info):
        root_path = self._create_root_vhd(context, instance)
    else:
        # Root device is a volume; no local root VHD is needed.
        root_path = None

    ephemeral_path = self.create_ephemeral_vhd(instance)
    generation = self.get_image_vm_generation(root_path, image_meta)

    try:
        self.create_instance(instance, network_info, block_device_info,
                             root_path, ephemeral_path, generation)

        if configdrive.required_by(instance):
            drive_path = self._create_config_drive(instance,
                                                   injected_files,
                                                   admin_password,
                                                   network_info)
            self.attach_config_drive(instance, drive_path, generation)

        self.power_on(instance)
    except Exception:
        # Clean up the partially-built VM before propagating the error.
        with excutils.save_and_reraise_exception():
            self.destroy(instance)
def get_disk_mapping(virt_type, instance, disk_bus, cdrom_bus, image_meta, block_device_info=None, rescue=False):
    """Determine how to map default disks to the virtual machine.

    This is about figuring out whether the default 'disk', 'disk.local',
    'disk.swap' and 'disk.config' images have been overridden by the
    block device mapping.

    Returns the guest disk mapping for the devices.
    """
    inst_type = instance.get_flavor()

    # Built up in-place; the order of insertions matters because the
    # get_next_disk_info helpers pick device names based on what is
    # already present in the mapping.
    mapping = {}

    # Device names already claimed by explicit BDMs (ephemerals, swap,
    # volumes), so the helpers do not hand any of them out twice.
    pre_assigned_device_names = \
        [block_device.strip_dev(get_device_name(bdm)) for bdm in
         itertools.chain(
             driver.block_device_info_get_ephemerals(block_device_info),
             [driver.block_device_info_get_swap(block_device_info)],
             driver.block_device_info_get_mapping(block_device_info))
         if get_device_name(bdm)]

    if rescue:
        # Rescue mode maps only two disks: the rescue image boots first,
        # followed by the instance's original os disk.
        rescue_info = get_next_disk_info(mapping, disk_bus, boot_index=1)
        mapping['disk.rescue'] = rescue_info
        mapping['root'] = rescue_info

        os_info = get_next_disk_info(mapping, disk_bus)
        mapping['disk'] = os_info

        return mapping

    # NOTE (ndipanov): root_bdm can be None when we boot from image
    # as there is no driver represenation of local targeted images
    # and they will not be in block_device_info list.
    root_bdm = block_device.get_root_bdm(
        driver.block_device_info_get_mapping(block_device_info))

    root_device_name = block_device.strip_dev(
        driver.block_device_info_get_root(block_device_info))
    root_info = get_root_info(virt_type, image_meta, root_bdm, disk_bus,
                              cdrom_bus, root_device_name)

    mapping['root'] = root_info
    # NOTE (ndipanov): This implicitly relies on image->local BDMs not
    #                  being considered in the driver layer - so missing
    #                  bdm with boot_index 0 means - use image, unless it
    #                  was overridden. This can happen when using legacy
    #                  syntax and no root_device_name is set on the
    #                  instance.
    if not root_bdm and not block_device.volume_in_mapping(
            root_info['dev'], block_device_info):
        mapping['disk'] = root_info

    # Flavor-defined default ephemeral disk, unless a BDM overrides it.
    default_eph = has_default_ephemeral(instance, disk_bus,
                                        block_device_info, mapping)
    if default_eph:
        mapping['disk.local'] = default_eph

    for idx, eph in enumerate(
            driver.block_device_info_get_ephemerals(block_device_info)):
        eph_info = get_info_from_bdm(
            virt_type, image_meta, eph, mapping, disk_bus,
            assigned_devices=pre_assigned_device_names)
        mapping[get_eph_disk(idx)] = eph_info
        update_bdm(eph, eph_info)

    # Swap: prefer an explicit swap BDM; otherwise fall back to the
    # flavor's swap size, skipping devices already used by a volume.
    swap = driver.block_device_info_get_swap(block_device_info)
    if swap and swap.get('swap_size', 0) > 0:
        swap_info = get_info_from_bdm(virt_type, image_meta, swap,
                                      mapping, disk_bus)
        mapping['disk.swap'] = swap_info
        update_bdm(swap, swap_info)
    elif inst_type['swap'] > 0:
        swap_info = get_next_disk_info(mapping, disk_bus)
        if not block_device.volume_in_mapping(swap_info['dev'],
                                              block_device_info):
            mapping['disk.swap'] = swap_info

    # Attached volumes, keyed by their (dev-prefixed) device name.
    block_device_mapping = driver.block_device_info_get_mapping(
        block_device_info)

    for vol in block_device_mapping:
        vol_info = get_info_from_bdm(
            virt_type, image_meta, vol, mapping,
            assigned_devices=pre_assigned_device_names)
        mapping[block_device.prepend_dev(vol_info['dev'])] = vol_info
        update_bdm(vol, vol_info)

    # Config drive goes last on the bus appropriate for its device type.
    if configdrive.required_by(instance):
        device_type = get_config_drive_type()
        disk_bus = get_disk_bus_for_device_type(virt_type, image_meta,
                                                device_type,
                                                instance=instance)
        config_info = get_next_disk_info(mapping, disk_bus, device_type,
                                         last_device=True)
        mapping['disk.config'] = config_info

    return mapping
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None):
    """Deploy an instance.

    :param context: The security context.
    :param instance: The instance object.
    :param image_meta: Image dict returned by patron.image.glance
        that defines the image from which to boot this instance.
    :param injected_files: User files to inject into instance. Ignored
        by this driver.
    :param admin_password: Administrator password to set in instance.
        Ignored by this driver.
    :param network_info: Instance network information.
    :param block_device_info: Instance block device
        information. Ignored by this driver.
    """
    # The compute manager is meant to know the node uuid, so missing uuid
    # is a significant issue. It may mean we've been passed the wrong data.
    node_uuid = instance.get('node')
    if not node_uuid:
        raise ironic.exc.BadRequest(
            _("Ironic node uuid not supplied to "
              "driver for instance %s.") % instance.uuid)

    node = self.ironicclient.call("node.get", node_uuid)
    flavor = instance.flavor

    self._add_driver_fields(node, instance, image_meta, flavor)

    # NOTE(Shrews): The default ephemeral device needs to be set for
    # services (like cloud-init) that depend on it being returned by the
    # metadata server. Addresses bug https://launchpad.net/bugs/1324286.
    if flavor.ephemeral_gb:
        instance.default_ephemeral_device = '/dev/sda1'
        instance.save()

    # validate we are ready to do the deploy
    validate_chk = self.ironicclient.call("node.validate", node_uuid)
    if not validate_chk.deploy or not validate_chk.power:
        # something is wrong. undo what we have done
        self._cleanup_deploy(context, node, instance, network_info,
                             flavor=flavor)
        raise exception.ValidationError(
            _("Ironic node: %(id)s failed to validate."
              " (deploy: %(deploy)s, power: %(power)s)")
            % {'id': node.uuid,
               'deploy': validate_chk.deploy,
               'power': validate_chk.power})

    # prepare for the deploy
    try:
        self._plug_vifs(node, instance, network_info)
        self._start_firewall(instance, network_info)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error preparing deploy for instance "
                          "%(instance)s on baremetal node %(node)s."),
                      {'instance': instance.uuid,
                       'node': node_uuid})
            self._cleanup_deploy(context, node, instance, network_info,
                                 flavor=flavor)

    # Config drive
    configdrive_value = None
    if configdrive.required_by(instance):
        extra_md = {}
        if admin_password:
            extra_md['admin_pass'] = admin_password

        configdrive_value = self._generate_configdrive(
            instance, node, network_info, extra_md=extra_md)

        LOG.info(_LI("Config drive for instance %(instance)s on "
                     "baremetal node %(node)s created."),
                 {'instance': instance['uuid'], 'node': node_uuid})

    # trigger the node deploy
    try:
        self.ironicclient.call("node.set_provision_state", node_uuid,
                               ironic_states.ACTIVE,
                               configdrive=configdrive_value)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            # FIX: the original built a (format, args) tuple and passed
            # the tuple itself as the log message, so %(inst)s/%(reason)s
            # were never interpolated. Pass the format string and its
            # arguments to LOG.error directly.
            LOG.error(_LE("Failed to request Ironic to provision instance "
                          "%(inst)s: %(reason)s"),
                      {'inst': instance.uuid,
                       'reason': six.text_type(e)})
            self._cleanup_deploy(context, node, instance, network_info,
                                 flavor=flavor)

    timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
                                                 self.ironicclient,
                                                 instance)
    try:
        # Poll Ironic until the node reaches ACTIVE (or errors out).
        timer.start(interval=CONF.ironic.api_retry_interval).wait()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error deploying instance %(instance)s on "
                          "baremetal node %(node)s."),
                      {'instance': instance.uuid,
                       'node': node_uuid})
            self.destroy(context, instance, network_info)
def get_disk_mapping(virt_type, instance, disk_bus, cdrom_bus, image_meta, block_device_info=None, rescue=False):
    """Determine how to map default disks to the virtual machine.

    This is about figuring out whether the default 'disk', 'disk.local',
    'disk.swap' and 'disk.config' images have been overridden by the
    block device mapping.

    Returns the guest disk mapping for the devices.
    """
    inst_type = instance.get_flavor()

    # Insertion order matters: get_next_disk_info assigns device names
    # based on the entries already present in this mapping.
    mapping = {}

    # Device names claimed by explicit BDMs, so the disk-info helpers
    # never assign the same name twice.
    pre_assigned_device_names = \
        [block_device.strip_dev(get_device_name(bdm)) for bdm in
         itertools.chain(
             driver.block_device_info_get_ephemerals(block_device_info),
             [driver.block_device_info_get_swap(block_device_info)],
             driver.block_device_info_get_mapping(block_device_info))
         if get_device_name(bdm)]

    if rescue:
        # Rescue boots the rescue image first, then the original disk.
        rescue_info = get_next_disk_info(mapping, disk_bus, boot_index=1)
        mapping['disk.rescue'] = rescue_info
        mapping['root'] = rescue_info

        os_info = get_next_disk_info(mapping, disk_bus)
        mapping['disk'] = os_info

        return mapping

    # NOTE (ndipanov): root_bdm can be None when we boot from image
    # as there is no driver represenation of local targeted images
    # and they will not be in block_device_info list.
    root_bdm = block_device.get_root_bdm(
        driver.block_device_info_get_mapping(block_device_info))

    root_device_name = block_device.strip_dev(
        driver.block_device_info_get_root(block_device_info))
    root_info = get_root_info(virt_type, image_meta, root_bdm, disk_bus,
                              cdrom_bus, root_device_name)

    mapping['root'] = root_info
    # NOTE (ndipanov): This implicitly relies on image->local BDMs not
    #                  being considered in the driver layer - so missing
    #                  bdm with boot_index 0 means - use image, unless it
    #                  was overridden. This can happen when using legacy
    #                  syntax and no root_device_name is set on the
    #                  instance.
    if not root_bdm and not block_device.volume_in_mapping(
            root_info['dev'], block_device_info):
        mapping['disk'] = root_info

    # Flavor's default ephemeral disk (unless a BDM overrides it).
    default_eph = has_default_ephemeral(instance, disk_bus,
                                        block_device_info, mapping)
    if default_eph:
        mapping['disk.local'] = default_eph

    for idx, eph in enumerate(
            driver.block_device_info_get_ephemerals(block_device_info)):
        eph_info = get_info_from_bdm(
            virt_type, image_meta, eph, mapping, disk_bus,
            assigned_devices=pre_assigned_device_names)
        mapping[get_eph_disk(idx)] = eph_info
        update_bdm(eph, eph_info)

    # Explicit swap BDM wins; otherwise fall back to the flavor's swap,
    # skipping any device name already taken by a volume.
    swap = driver.block_device_info_get_swap(block_device_info)
    if swap and swap.get('swap_size', 0) > 0:
        swap_info = get_info_from_bdm(virt_type, image_meta, swap,
                                      mapping, disk_bus)
        mapping['disk.swap'] = swap_info
        update_bdm(swap, swap_info)
    elif inst_type['swap'] > 0:
        swap_info = get_next_disk_info(mapping, disk_bus)
        if not block_device.volume_in_mapping(swap_info['dev'],
                                              block_device_info):
            mapping['disk.swap'] = swap_info

    # Attached volumes, keyed by their dev-prefixed device name.
    block_device_mapping = driver.block_device_info_get_mapping(
        block_device_info)

    for vol in block_device_mapping:
        vol_info = get_info_from_bdm(
            virt_type, image_meta, vol, mapping,
            assigned_devices=pre_assigned_device_names)
        mapping[block_device.prepend_dev(vol_info['dev'])] = vol_info
        update_bdm(vol, vol_info)

    # Config drive, if required, is placed on the last device slot.
    if configdrive.required_by(instance):
        device_type = get_config_drive_type()
        disk_bus = get_disk_bus_for_device_type(virt_type, image_meta,
                                                device_type,
                                                instance=instance)
        config_info = get_next_disk_info(mapping, disk_bus, device_type,
                                         last_device=True)
        mapping['disk.config'] = config_info

    return mapping
def test_invalid_string_values(self):
    """Falsey or unrecognized force_config_drive values never require
    a config drive.
    """
    invalid_values = strutils.FALSE_STRINGS + ('foo',)
    for invalid in invalid_values:
        self.flags(force_config_drive=invalid)
        self.assertFalse(configdrive.required_by({}))
def test_valid_string_values(self):
    """Truthy force_config_drive values (and 'always') always require
    a config drive.
    """
    valid_values = strutils.TRUE_STRINGS + ('always',)
    for valid in valid_values:
        self.flags(force_config_drive=valid)
        self.assertTrue(configdrive.required_by({}))
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None):
    """Deploy an instance.

    :param context: The security context.
    :param instance: The instance object.
    :param image_meta: Image dict returned by patron.image.glance
        that defines the image from which to boot this instance.
    :param injected_files: User files to inject into instance. Ignored
        by this driver.
    :param admin_password: Administrator password to set in instance.
        Ignored by this driver.
    :param network_info: Instance network information.
    :param block_device_info: Instance block device
        information. Ignored by this driver.
    """
    # The compute manager is meant to know the node uuid, so missing uuid
    # is a significant issue. It may mean we've been passed the wrong data.
    node_uuid = instance.get('node')
    if not node_uuid:
        raise ironic.exc.BadRequest(
            _("Ironic node uuid not supplied to "
              "driver for instance %s.") % instance.uuid)

    node = self.ironicclient.call("node.get", node_uuid)
    flavor = instance.flavor

    self._add_driver_fields(node, instance, image_meta, flavor)

    # NOTE(Shrews): The default ephemeral device needs to be set for
    # services (like cloud-init) that depend on it being returned by the
    # metadata server. Addresses bug https://launchpad.net/bugs/1324286.
    if flavor.ephemeral_gb:
        instance.default_ephemeral_device = '/dev/sda1'
        instance.save()

    # validate we are ready to do the deploy
    validate_chk = self.ironicclient.call("node.validate", node_uuid)
    if not validate_chk.deploy or not validate_chk.power:
        # something is wrong. undo what we have done
        self._cleanup_deploy(context, node, instance, network_info,
                             flavor=flavor)
        raise exception.ValidationError(_(
            "Ironic node: %(id)s failed to validate."
            " (deploy: %(deploy)s, power: %(power)s)")
            % {'id': node.uuid,
               'deploy': validate_chk.deploy,
               'power': validate_chk.power})

    # prepare for the deploy
    try:
        self._plug_vifs(node, instance, network_info)
        self._start_firewall(instance, network_info)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error preparing deploy for instance "
                          "%(instance)s on baremetal node %(node)s."),
                      {'instance': instance.uuid,
                       'node': node_uuid})
            self._cleanup_deploy(context, node, instance, network_info,
                                 flavor=flavor)

    # Config drive
    configdrive_value = None
    if configdrive.required_by(instance):
        extra_md = {}
        if admin_password:
            extra_md['admin_pass'] = admin_password

        configdrive_value = self._generate_configdrive(
            instance, node, network_info, extra_md=extra_md)

        LOG.info(_LI("Config drive for instance %(instance)s on "
                     "baremetal node %(node)s created."),
                 {'instance': instance['uuid'], 'node': node_uuid})

    # trigger the node deploy
    try:
        self.ironicclient.call("node.set_provision_state", node_uuid,
                               ironic_states.ACTIVE,
                               configdrive=configdrive_value)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            # FIX: the original stored a (format, args) tuple in `msg`
            # and then called LOG.error(msg), which logs the raw tuple
            # and never interpolates %(inst)s / %(reason)s. Pass the
            # format string and its arguments to LOG.error directly.
            LOG.error(_LE("Failed to request Ironic to provision instance "
                          "%(inst)s: %(reason)s"),
                      {'inst': instance.uuid,
                       'reason': six.text_type(e)})
            self._cleanup_deploy(context, node, instance, network_info,
                                 flavor=flavor)

    timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
                                                 self.ironicclient,
                                                 instance)
    try:
        # Poll Ironic until the node becomes ACTIVE (or the wait fails).
        timer.start(interval=CONF.ironic.api_retry_interval).wait()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error deploying instance %(instance)s on "
                          "baremetal node %(node)s."),
                      {'instance': instance.uuid,
                       'node': node_uuid})
            self.destroy(context, instance, network_info)