    def test_config_drive_required_by_image_property(self):
        inst = fake_instance.fake_instance_obj(context.get_admin_context())
        inst.config_drive = ""
        inst.system_metadata = {utils.SM_IMAGE_PROP_PREFIX + "img_config_drive": "mandatory"}
        self.assertTrue(configdrive.required_by(inst))

        inst.system_metadata = {utils.SM_IMAGE_PROP_PREFIX + "img_config_drive": "optional"}
        self.assertFalse(configdrive.required_by(inst))
Example #2
    def test_config_drive_required_by_image_property(self):
        inst = fake_instance.fake_instance_obj(context.get_admin_context())
        inst.config_drive = ''
        inst.system_metadata = {
            utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive': 'mandatory'}
        self.assertTrue(configdrive.required_by(inst))

        inst.system_metadata = {
            utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive': 'optional'}
        self.assertFalse(configdrive.required_by(inst))
Example #3
def create_config_drive_iso(instance, injected_files, admin_password, network_info):
    """"
    If a config drive is required by the instance it will create
    a config drive ISO file and returns the path to the file.  Otherwise
    it will return None
    @param instance: the VM instance
    @param injected_files: files specified to be injected on the VM spawn
                            method
    @param admin_password: Admin password specified on the VM spawn call
    @param network_info: network_info passed to the VM spawn call
    """

    if configdrive.required_by(instance):

        LOG.info(_("Using config drive"), instance=instance)
        extra_md = {}
        if admin_password:
            extra_md["admin_pass"] = admin_password

        inst_md = instance_metadata.InstanceMetadata(
            instance, content=injected_files, extra_md=extra_md, network_info=network_info
        )

        local_img_dir = CONF.powervm_img_local_path
        base_name = "%s_config.iso" % instance["name"]
        configdrive_path = os.path.join(local_img_dir, base_name)
        with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
            LOG.info(_("Creating config drive at %(path)s"), {"path": configdrive_path}, instance=instance)

            try:
                cdb.make_drive(configdrive_path)
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Creating config drive failed " "with error: %s"), e, instance=instance)
        return configdrive_path
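The helper above only does work when configdrive.required_by says so, which keeps callers simple. A minimal sketch of how a spawn path might consume it; _attach_iso_to_vm is a hypothetical driver helper, not part of the example above:

    # Hedged usage sketch: build the ISO during spawn and attach it only
    # when one was actually created. _attach_iso_to_vm is hypothetical.
    iso_path = create_config_drive_iso(instance, injected_files,
                                       admin_password, network_info)
    if iso_path is not None:
        _attach_iso_to_vm(instance, iso_path)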
Example #4
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        """Create a new VM and start it."""
        LOG.info(_LI("Spawning new instance"), instance=instance)

        instance_name = instance['name']
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)

        if self._volumeops.ebs_root_in_block_devices(block_device_info):
            root_vhd_path = None
        else:
            root_vhd_path = self._create_root_vhd(context, instance)

        eph_vhd_path = self.create_ephemeral_vhd(instance)
        vm_gen = self.get_image_vm_generation(root_vhd_path, image_meta)

        try:
            self.create_instance(instance, network_info, block_device_info,
                                 root_vhd_path, eph_vhd_path, vm_gen)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(instance,
                                                             injected_files,
                                                             admin_password)
                self.attach_config_drive(instance, configdrive_path, vm_gen)

            self.power_on(instance)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance)
Example #5
def _get_rescue_disk_mapping(virt_type, instance, disk_bus, image_meta):
    """Build disk mapping for a legacy instance rescue

    This legacy method of rescue requires that the rescue device is attached
    first, ahead of the original root disk and optional config drive.

    :param virt_type: Virt type used by libvirt.
    :param instance: nova.objects.instance.Instance object
    :param disk_bus: Disk bus to use within the mapping
    :param image_meta: objects.image_meta.ImageMeta for the instance

    :returns: Disk mapping for the given instance
    """
    mapping = {}
    rescue_info = get_next_disk_info(mapping,
                                     disk_bus, boot_index=1)
    mapping['disk.rescue'] = rescue_info
    mapping['root'] = rescue_info

    os_info = get_next_disk_info(mapping,
                                 disk_bus)
    mapping['disk'] = os_info

    if configdrive.required_by(instance):
        device_type = get_config_drive_type()
        disk_bus = get_disk_bus_for_device_type(instance,
                                                virt_type,
                                                image_meta,
                                                device_type)
        config_info = get_next_disk_info(mapping,
                                         disk_bus,
                                         device_type)
        mapping['disk.config.rescue'] = config_info

    return mapping
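For orientation, here is a hedged illustration of the mapping returned for a virtio guest that needs a config drive; the per-disk dict shape and device names come from get_next_disk_info and are assumptions based on nova's blockinfo conventions, not part of this function:

    # Illustration only; the 'dev' assignments are assumptions.
    {
        'disk.rescue': {'bus': 'virtio', 'dev': 'vda',
                        'type': 'disk', 'boot_index': '1'},
        'root': {'bus': 'virtio', 'dev': 'vda',
                 'type': 'disk', 'boot_index': '1'},  # same info as disk.rescue
        'disk': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
        'disk.config.rescue': {'bus': 'ide', 'dev': 'hda', 'type': 'cdrom'},
    }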
Example #6
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        """Create a new VM and start it."""
        LOG.info(_("Spawning new instance"), instance=instance)

        instance_name = instance['name']
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)

        if self._volumeops.ebs_root_in_block_devices(block_device_info):
            root_vhd_path = None
        else:
            root_vhd_path = self._create_root_vhd(context, instance)

        eph_vhd_path = self.create_ephemeral_vhd(instance)

        try:
            self.create_instance(instance, network_info, block_device_info,
                                 root_vhd_path, eph_vhd_path)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(instance,
                                                             injected_files,
                                                             admin_password)
                self.attach_config_drive(instance, configdrive_path)

            self.power_on(instance)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance)
Example #7
    def live_migration(self, context, instance_ref, dest, post_method,
                       recover_method, block_migration=False,
                       migrate_data=None):
        LOG.debug("live_migration called", instance=instance_ref)
        instance_name = instance_ref["name"]

        try:
            self._vmops.copy_vm_console_logs(instance_name, dest)
            if (configdrive.required_by(instance_ref) and
                    CONF.hyperv.config_drive_cdrom):
                self._pathutils.copy_configdrive(instance_name, dest)

            iscsi_targets = self._livemigrutils.live_migrate_vm(instance_name,
                                                                dest)
            for (target_iqn, target_lun) in iscsi_targets:
                self._volumeops.logout_storage_target(target_iqn)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.debug("Calling live migration recover_method "
                          "for instance: %s", instance_name)
                recover_method(context, instance_ref, dest, block_migration)

        LOG.debug("Calling live migration post_method for instance: %s",
                  instance_name)
        post_method(context, instance_ref, dest, block_migration)
Example #8
    def spawn(
        self, context, instance, image_meta, injected_files, admin_password, network_info, block_device_info=None
    ):
        """Create a new VM and start it."""
        LOG.info(_("Spawning new instance"), instance=instance)

        instance_name = instance["name"]
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)

        if self._volumeops.ebs_root_in_block_devices(block_device_info):
            root_vhd_path = None
        else:
            root_vhd_path = self._create_root_vhd(context, instance)

        try:
            self.create_instance(instance, network_info, block_device_info, root_vhd_path)

            if configdrive.required_by(instance):
                self._create_config_drive(instance, injected_files, admin_password)

            self.power_on(instance)
        except Exception as ex:
            LOG.exception(ex)
            self.destroy(instance)
            raise vmutils.HyperVException(_("Spawn instance failed"))
Example #9
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        """Create a new VM and start it."""
        LOG.info(_("Spawning new instance"), instance=instance)

        instance_name = instance['name']
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)

        if self._volumeops.ebs_root_in_block_devices(block_device_info):
            root_vhd_path = None
        else:
            root_vhd_path = self._create_root_vhd(context, instance)

        eph_vhd_path = self.create_ephemeral_vhd(instance)

        try:
            self.create_instance(instance, network_info, block_device_info,
                                 root_vhd_path, eph_vhd_path)

            if configdrive.required_by(instance):
                self._create_config_drive(instance, injected_files,
                                          admin_password)

            self.power_on(instance)
        except Exception as ex:
            LOG.exception(ex)
            self.destroy(instance)
            raise vmutils.HyperVException(_('Spawn instance failed'))
Example #10
    def live_migration(self,
                       context,
                       instance_ref,
                       dest,
                       post_method,
                       recover_method,
                       block_migration=False,
                       migrate_data=None):
        LOG.debug("live_migration called", instance=instance_ref)
        instance_name = instance_ref["name"]

        try:
            self._vmops.copy_vm_console_logs(instance_name, dest)
            if (configdrive.required_by(instance_ref)
                    and CONF.hyperv.config_drive_cdrom):
                self._pathutils.copy_configdrive(instance_name, dest)

            self._livemigrutils.live_migrate_vm(instance_name, dest)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.debug(
                    "Calling live migration recover_method "
                    "for instance: %s", instance_name)
                recover_method(context, instance_ref, dest, block_migration)

        LOG.debug("Calling live migration post_method for instance: %s",
                  instance_name)
        post_method(context, instance_ref, dest, block_migration)
Example #11
File: vmops.py Project: pnavarro/nova
    def spawn(self, context, instance, image_meta, injected_files,
        admin_password, network_info, block_device_info=None):
        """ Create a new VM and start it."""
        instance_name = instance["name"]
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is not None:
            raise exception.InstanceExists(name=instance_name)

        ebs_root = self._volumeops.volume_in_mapping(
            self._volumeops.get_default_root_device(),
            block_device_info)

        # If this is not a boot-from-volume spawn
        if not ebs_root:
            # Fetch the file, assume it is a VHD file.
            vhdfile = self._vmutils.get_vhd_path(instance_name)
            try:
                self._cache_image(fn=self._vmutils.fetch_image,
                  context=context,
                  target=vhdfile,
                  fname=instance['image_ref'],
                  image_id=instance['image_ref'],
                  user=instance['user_id'],
                  project=instance['project_id'],
                  cow=CONF.use_cow_images)
            except Exception as exn:
                LOG.exception(_('cache image failed: %s'), exn)
                self.destroy(instance)
                raise

        try:
            self._create_vm(instance)

            if not ebs_root:
                self._attach_ide_drive(instance['name'], vhdfile, 0, 0,
                    constants.IDE_DISK)
            else:
                self._volumeops.attach_boot_volume(block_device_info,
                                             instance_name)

            # A SCSI controller for volume connections is created
            self._create_scsi_controller(instance['name'])

            for (network, mapping) in network_info:
                nic = self.vif_driver.plug(instance, (network, mapping))
                self._add_nic(instance['name'], nic)

            if configdrive.required_by(instance):
                self._create_config_drive(instance, injected_files,
                    admin_password)

            LOG.debug(_('Starting VM %s'), instance_name)
            self._set_vm_state(instance['name'], 'Enabled')
            LOG.info(_('Started VM %s'), instance_name)
        except Exception as exn:
            LOG.exception(_('spawn vm failed: %s'), exn)
            self.destroy(instance)
            raise
Example #12
    def test_invalid_string_values(self):
        instance = objects.Instance(
            config_drive=None,
            system_metadata={}
        )

        for value in (strutils.FALSE_STRINGS + ('foo',)):
            self.flags(force_config_drive=value)
            self.assertFalse(configdrive.required_by(instance))
Example #13
    def test_no_config_drive(self):
        self.flags(force_config_drive=False)

        instance = objects.Instance(config_drive=None,
                                    system_metadata={
                                        "image_img_config_drive": "optional",
                                    })

        self.assertFalse(configdrive.required_by(instance))
Example #14
    def _check_and_attach_config_drive(self, instance, vm_gen):
        if configdrive.required_by(instance):
            configdrive_path = self._pathutils.lookup_configdrive_path(
                instance.name)
            if configdrive_path:
                self._vmops.attach_config_drive(instance, configdrive_path,
                                                vm_gen)
            else:
                raise vmutils.HyperVException(
                    _("Config drive is required by instance: %s, "
                      "but it does not exist.") % instance.name)
Example #15
    def test_image_meta_force(self):
        self.flags(force_config_drive=False)

        instance = objects.Instance(config_drive=None,
                                    system_metadata={
                                        "image_img_config_drive": "mandatory",
                                    })

        self.assertTrue(configdrive.required_by(instance))
Example #16
    def spawn(self, context, instance, image_meta, injected_files,
        admin_password, network_info, block_device_info=None):
        """Create a new VM and start it."""
        vm = self._vmutils.lookup(self._conn, instance['name'])
        if vm is not None:
            raise exception.InstanceExists(name=instance['name'])

        ebs_root = self._volumeops.volume_in_mapping(
            self._volumeops.get_default_root_device(),
            block_device_info)

        # If this is not a boot-from-volume spawn
        if not ebs_root:
            # Fetch the file, assume it is a VHD file.
            vhdfile = self._vmutils.get_vhd_path(instance['name'])
            try:
                self._cache_image(fn=self._vmutils.fetch_image,
                  context=context,
                  target=vhdfile,
                  fname=instance['image_ref'],
                  image_id=instance['image_ref'],
                  user=instance['user_id'],
                  project=instance['project_id'],
                  cow=CONF.use_cow_images)
            except Exception as exn:
                LOG.exception(_('cache image failed: %s'), exn)
                self.destroy(instance)
                raise

        try:
            self._create_vm(instance)

            if not ebs_root:
                self._attach_ide_drive(instance['name'], vhdfile, 0, 0,
                    constants.IDE_DISK)
            else:
                self._volumeops.attach_boot_volume(block_device_info,
                                             instance['name'])

            # A SCSI controller for volume connections is created
            self._create_scsi_controller(instance['name'])

            for vif in network_info:
                self._create_nic(instance['name'], vif)
                self._vif_driver.plug(instance, vif)

            if configdrive.required_by(instance):
                self._create_config_drive(instance, injected_files,
                    admin_password)

            LOG.debug(_('Starting VM %s'), instance['name'])
            self._set_vm_state(instance['name'], 'Enabled')
            LOG.info(_('Started VM %s'), instance['name'])
        except Exception as exn:
            LOG.exception(_('spawn vm failed: %s'), exn)
            self.destroy(instance)
            raise
Example #17
    def _check_and_attach_config_drive(self, instance, vm_gen):
        if configdrive.required_by(instance):
            configdrive_path = self._pathutils.lookup_configdrive_path(
                instance.name)
            if configdrive_path:
                self._vmops.attach_config_drive(instance, configdrive_path,
                                                vm_gen)
            else:
                raise exception.ConfigDriveNotFound(
                    instance_uuid=instance.uuid)
Example #18
    def _check_and_attach_config_drive(self, instance, vm_gen):
        if configdrive.required_by(instance):
            configdrive_path = self._pathutils.lookup_configdrive_path(
                instance.name)
            if configdrive_path:
                self._vmops.attach_config_drive(instance, configdrive_path,
                                                vm_gen)
            else:
                raise exception.ConfigDriveNotFound(
                    instance_uuid=instance.uuid)
Example #19
    def test_config_flag_force_for_existing_vms(self):
        self.flags(force_config_drive=True)

        instance = objects.Instance(config_drive=None,
                                    launched_at='2019-05-17T00:00:00.000000',
                                    system_metadata={
                                        "image_img_config_drive": "optional",
                                    })

        self.assertFalse(configdrive.required_by(instance))
Example #20
    def _check_and_attach_config_drive(self, instance):
        if configdrive.required_by(instance):
            configdrive_path = self._pathutils.lookup_configdrive_path(
                instance.name)
            if configdrive_path:
                self._vmops.attach_config_drive(instance, configdrive_path)
            else:
                raise vmutils.HyperVException(
                    _("Config drive is required by instance: %s, "
                      "but it does not exist.") % instance.name)
Example #21
    def test_config_flag_force_for_new_vms(self):
        self.flags(force_config_drive=True)

        instance = objects.Instance(config_drive=None,
                                    launched_at=None,
                                    system_metadata={
                                        "image_img_config_drive": "optional",
                                    })

        self.assertTrue(configdrive.required_by(instance))
Example #22
    def test_no_config_drive(self):
        self.flags(force_config_drive=False)

        instance = objects.Instance(
            config_drive=None,
            system_metadata={
                "image_img_config_drive": "optional",
            }
        )

        self.assertFalse(configdrive.required_by(instance))
Example #23
    def test_image_meta_force(self):
        self.flags(force_config_drive=False)

        instance = objects.Instance(
            config_drive=None,
            system_metadata={
                "image_img_config_drive": "mandatory",
            }
        )

        self.assertTrue(configdrive.required_by(instance))
Example #24
File: driver.py Project: y00187570/nova
        def _setup_flow_and_run():
            # Define the flow
            flow = tf_lf.Flow("destroy")

            # Power Off the LPAR. If its disks are about to be deleted, issue a
            # hard shutdown.
            flow.add(
                tf_vm.PowerOff(self.adapter,
                               instance,
                               force_immediate=destroy_disks))

            # The FeedTask accumulates storage disconnection tasks to be run in
            # parallel.
            stg_ftsk = pvm_par.build_active_vio_feed_task(
                self.adapter, xag=[pvm_const.XAG.VIO_SMAP])

            # Call the unplug VIFs task.  While CNAs get removed from the LPAR
            # directly on the destroy, this clears up the I/O Host side.
            flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))

            # Add the disconnect/deletion of the vOpt to the transaction
            # manager.
            if configdrive.required_by(instance):
                flow.add(
                    tf_stg.DeleteVOpt(self.adapter,
                                      instance,
                                      stg_ftsk=stg_ftsk))

            # Extract the block devices.
            bdms = driver.block_device_info_get_mapping(block_device_info)

            # Determine if there are volumes to detach.  If so, remove each
            # volume (within the transaction manager)
            for bdm, vol_drv in self._vol_drv_iter(context,
                                                   instance,
                                                   bdms,
                                                   stg_ftsk=stg_ftsk):
                flow.add(tf_stg.DetachVolume(vol_drv))

            # Detach the disk storage adapters
            flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))

            # Accumulated storage disconnection tasks next
            flow.add(stg_ftsk)

            # Delete the storage disks
            if destroy_disks:
                flow.add(tf_stg.DeleteDisk(self.disk_dvr))

            # TODO(thorst, efried) Add LPAR id based scsi map clean up task
            flow.add(tf_vm.Delete(self.adapter, instance))

            # Build the engine & run!
            tf_base.run(flow, instance=instance)
Example #25
    def test_config_flag_force_for_existing_vms(self):
        self.flags(force_config_drive=True)

        instance = objects.Instance(
            config_drive=None,
            launched_at='2019-05-17T00:00:00.000000',
            system_metadata={
                "image_img_config_drive": "optional",
            }
        )

        self.assertFalse(configdrive.required_by(instance))
Example #26
    def test_config_flag_force_for_new_vms(self):
        self.flags(force_config_drive=True)

        instance = objects.Instance(
            config_drive=None,
            launched_at=None,
            system_metadata={
                "image_img_config_drive": "optional",
            }
        )

        self.assertTrue(configdrive.required_by(instance))
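Taken together, the tests above pin down the decision rule: a config drive is required when the instance already has one, when the image property img_config_drive is 'mandatory', or when force_config_drive is set and the instance has never launched. The following is a minimal sketch consistent with those tests, not nova's actual implementation; the CONF and strutils usage here is an assumption:

    # Hedged sketch of the semantics the tests above exercise.
    def required_by(instance):
        image_prop = instance.system_metadata.get(
            'image_img_config_drive', 'optional')
        if instance.config_drive:
            # The instance explicitly requested a config drive at boot.
            return True
        if image_prop == 'mandatory':
            # The image insists on a config drive.
            return True
        # The force flag only affects instances that have never launched
        # (launched_at is None); 'foo' and false-y strings parse as False.
        return (strutils.bool_from_string(CONF.force_config_drive) and
                instance.launched_at is None)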
Example #27
File: vmops.py Project: luisgarcc/nova
    def spawn(
        self, context, instance, image_meta, injected_files, admin_password, network_info, block_device_info=None
    ):
        """Create a new VM and start it."""

        instance_name = instance["name"]
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        ebs_root = self._volumeops.volume_in_mapping(self._volumeops.get_default_root_device(), block_device_info)

        # If this is not a boot-from-volume spawn
        if not ebs_root:
            # Fetch the file, assume it is a VHD file.
            vhdfile = self._pathutils.get_vhd_path(instance_name)
            try:
                self._cache_image(
                    fn=self._fetch_image,
                    context=context,
                    target=vhdfile,
                    fname=instance["image_ref"],
                    image_id=instance["image_ref"],
                    user=instance["user_id"],
                    project=instance["project_id"],
                    cow=CONF.use_cow_images,
                )
            except Exception as exn:
                LOG.exception(_("cache image failed: %s"), exn)
                raise

        try:
            self._vmutils.create_vm(instance_name, instance["memory_mb"], instance["vcpus"], CONF.limit_cpu_features)

            if not ebs_root:
                self._vmutils.attach_ide_drive(instance_name, vhdfile, 0, 0, constants.IDE_DISK)
            else:
                self._volumeops.attach_boot_volume(block_device_info, instance_name)

            self._vmutils.create_scsi_controller(instance_name)

            for vif in network_info:
                LOG.debug(_("Creating nic for instance: %s"), instance_name)
                self._vmutils.create_nic(instance_name, vif["id"], vif["address"])
                self._vif_driver.plug(instance, vif)

            if configdrive.required_by(instance):
                self._create_config_drive(instance, injected_files, admin_password)

            self._set_vm_state(instance_name, constants.HYPERV_VM_STATE_ENABLED)
        except Exception as ex:
            LOG.exception(ex)
            self.destroy(instance)
            raise vmutils.HyperVException(_("Spawn instance failed"))
Example #28
    def spawn(self,
              context,
              instance,
              image_meta,
              injected_files,
              admin_password,
              network_info,
              block_device_info=None):
        """Create a new VM and start it."""
        LOG.info("Spawning new instance", instance=instance)

        instance_name = instance.name
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)

        vm_gen = self.get_image_vm_generation(instance.uuid, image_meta)

        self._block_dev_man.validate_and_update_bdi(instance, image_meta,
                                                    vm_gen, block_device_info)
        root_device = block_device_info['root_disk']
        self._create_root_device(context, instance, root_device, vm_gen)
        self._create_ephemerals(instance, block_device_info['ephemerals'])

        try:
            with self.wait_vif_plug_events(instance, network_info):
                # waiting will occur after the instance is created.
                self.create_instance(instance, network_info, root_device,
                                     block_device_info, vm_gen, image_meta)
                # This is supported starting from OVS version 2.5
                self.plug_vifs(instance, network_info)

            self._save_device_metadata(context, instance, block_device_info)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(
                    context, instance, injected_files, admin_password,
                    network_info)

                self.attach_config_drive(instance, configdrive_path, vm_gen)
            self.set_boot_order(instance.name, vm_gen, block_device_info)
            # vifs are already plugged in at this point. We waited on the vif
            # plug event previously when we created the instance. Skip the
            # plug vifs during power on in this case
            self.power_on(instance,
                          network_info=network_info,
                          should_plug_vifs=False)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance, network_info, block_device_info)
Example #29
    def create_container(self, instance, injected_files,
                         block_device_info, rescue):
        LOG.debug('Creating container config')

        # Ensure the directory exists and is writable
        fileutils.ensure_tree(
            self.container_dir.get_instance_dir(instance.name))

        # Check to see if we are using swap.
        swap = driver.block_device_info_get_swap(block_device_info)
        if driver.swap_is_usable(swap):
            msg = _('Swap space is not supported by LXD.')
            raise exception.NovaException(msg)

        # Check to see if ephemeral block devices exist.
        ephemeral_gb = instance.ephemeral_gb
        if ephemeral_gb > 0:
            msg = _('Ephemeral block devices is not supported.')
            raise exception.NovaException(msg)

        container_config = self._init_container_config()
        container_config = self.add_config(container_config, 'name',
                                           instance.name)
        container_config = self.add_config(container_config, 'profiles',
                                           [str(CONF.lxd.default_profile)])
        container_config = self.configure_container_config(container_config,
                                                           instance)

        # Create an LXD image
        container_config = (
            self.add_config(container_config, 'source',
                            self.configure_lxd_image(container_config,
                                                     instance)))

        if configdrive.required_by(instance):
            container_configdrive = (
                self.configure_container_configdrive(
                    container_config,
                    instance,
                    injected_files))
            LOG.debug(pprint.pformat(container_configdrive))

        if rescue:
            container_rescue_devices = (
                self.configure_container_rescuedisk(
                    container_config,
                    instance))
            LOG.debug(pprint.pformat(container_rescue_devices))

        return container_config
Example #30
    def _initialize_controller_slot_counter(self, instance, vm_gen):
        # we have 2 IDE controllers, for a total of 4 slots
        free_slots_by_device_type = {
            constants.CTRL_TYPE_IDE:
                [os_win_const.IDE_CONTROLLER_SLOTS_NUMBER] * 2,
            constants.CTRL_TYPE_SCSI:
                [os_win_const.SCSI_CONTROLLER_SLOTS_NUMBER]
        }
        if configdrive.required_by(instance):
            if vm_gen == constants.VM_GEN_1:
                # reserve one slot for the config drive on the second
                # controller in case of generation 1 virtual machines
                free_slots_by_device_type[constants.CTRL_TYPE_IDE][1] -= 1
        return free_slots_by_device_type
Example #31
    def _initialize_controller_slot_counter(self, instance, vm_gen):
        # we have 2 IDE controllers, for a total of 4 slots
        free_slots_by_device_type = {
            constants.CTRL_TYPE_IDE: [
                os_win_const.IDE_CONTROLLER_SLOTS_NUMBER] * 2,
            constants.CTRL_TYPE_SCSI: [
                os_win_const.SCSI_CONTROLLER_SLOTS_NUMBER]
        }
        if configdrive.required_by(instance):
            if vm_gen == constants.VM_GEN_1:
                # reserve one slot for the config drive on the second
                # controller in case of generation 1 virtual machines
                free_slots_by_device_type[constants.CTRL_TYPE_IDE][1] -= 1
        return free_slots_by_device_type
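The comments above fix the arithmetic: two IDE controllers, with one slot reserved on the second controller when a Generation 1 VM needs a config drive. A hedged sketch of what a caller would see; the concrete constant values (IDE_CONTROLLER_SLOTS_NUMBER == 2, SCSI_CONTROLLER_SLOTS_NUMBER == 64) are assumptions about os_win's defaults:

    # Hedged sketch; the constant values are assumptions.
    slot_map = self._initialize_controller_slot_counter(
        instance, constants.VM_GEN_1)
    # Config drive required:     {CTRL_TYPE_IDE: [2, 1], CTRL_TYPE_SCSI: [64]}
    # Config drive not required: {CTRL_TYPE_IDE: [2, 2], CTRL_TYPE_SCSI: [64]}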
Example #32
    def _add_driver_fields(self, node, instance, image_meta, flavor,
                           admin_pass=None, files=None, network_info=None,
                           preserve_ephemeral=None):
        icli = client_wrapper.IronicClientWrapper()
        patch = patcher.create(node).get_deploy_patch(instance,
                                                      image_meta,
                                                      flavor,
                                                      preserve_ephemeral)

        # Associate the node with an instance
        patch.append({'path': '/instance_uuid', 'op': 'add',
                      'value': instance['uuid']})

        if configdrive.required_by(instance):
            LOG.info(_('Using config drive'), instance=instance)
            extra_md = {}
            if admin_pass:
                extra_md['admin_pass'] = admin_pass

            inst_md = instance_metadata.InstanceMetadata(instance,
                content=files, extra_md=extra_md, network_info=network_info)

            fd, configdrive_path = tempfile.mkstemp()
            os.close(fd)

            with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
                try:
                    cdb.make_drive(configdrive_path)
                except processutils.ProcessExecutionError as e:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_('Creating config drive failed '
                                  'with error: %s'),
                                  e, instance=instance)
            # Compress the config drive with zlib and base64-encode it.
            with open(configdrive_path, "rb") as configdrive_fh:
                configdrive_payload = base64.b64encode(
                    zlib.compress(configdrive_fh.read()))
            os.remove(configdrive_path)
            patch.append({'path': '/instance_info/config_drive', 'op': 'add',
                          'value': configdrive_payload})

        try:
            icli.call('node.update', node.uuid, patch)
        except ironic_exception.BadRequest:
            msg = (_("Failed to add deploy parameters on node %(node)s "
                     "when provisioning the instance %(instance)s")
                   % {'node': node.uuid, 'instance': instance['uuid']})
            LOG.error(msg)
            raise exception.InstanceDeployFailure(msg)
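Since the payload patched into instance_info/config_drive is just zlib-compressed, base64-encoded ISO bytes, it can be reversed with the standard library alone; a minimal sketch, with illustrative variable names:

    # Hedged sketch: invert the encoding performed above.
    import base64
    import zlib

    iso_bytes = zlib.decompress(base64.b64decode(configdrive_payload))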
Example #33
    def _rescue_instance(self, context, instance, network_info, image_meta,
                         rescue_password):
        rescue_image_id = image_meta.id or instance.image_ref
        rescue_vhd_path = self._create_root_vhd(
            context, instance, rescue_image_id=rescue_image_id)

        rescue_vm_gen = self.get_image_vm_generation(instance.uuid,
                                                     image_meta)
        vm_gen = self._vmutils.get_vm_generation(instance.name)
        if rescue_vm_gen != vm_gen:
            err_msg = _('The requested rescue image requires a different VM '
                        'generation than the actual rescued instance. '
                        'Rescue image VM generation: %(rescue_vm_gen)s. '
                        'Rescued instance VM generation: %(vm_gen)s.') % dict(
                            rescue_vm_gen=rescue_vm_gen,
                            vm_gen=vm_gen)
            raise exception.ImageUnacceptable(reason=err_msg,
                                              image_id=rescue_image_id)

        root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
        if not root_vhd_path:
            err_msg = _('Instance root disk image could not be found. '
                        'Rescuing instances booted from volume is '
                        'not supported.')
            raise exception.InstanceNotRescuable(reason=err_msg,
                                                 instance_id=instance.uuid)

        controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

        self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
                                     is_physical=False)
        self._attach_drive(instance.name, rescue_vhd_path, 0,
                           self._ROOT_DISK_CTRL_ADDR, controller_type)
        self._vmutils.attach_scsi_drive(instance.name, root_vhd_path,
                                        drive_type=constants.DISK)

        if configdrive.required_by(instance):
            self._detach_config_drive(instance.name)
            rescue_configdrive_path = self._create_config_drive(
                context,
                instance,
                injected_files=None,
                admin_password=rescue_password,
                network_info=network_info,
                rescue=True)
            self.attach_config_drive(instance, rescue_configdrive_path,
                                     vm_gen)

        self.power_on(instance)
Example #34
    def validate_and_update_bdi(self, instance, image_meta, vm_gen,
                                block_device_info):
        slot_map = self._initialize_controller_slot_counter(instance, vm_gen)
        self._check_and_update_root_device(vm_gen, image_meta,
                                           block_device_info, slot_map)
        self._check_and_update_ephemerals(vm_gen, block_device_info, slot_map)
        self._check_and_update_volumes(vm_gen, block_device_info, slot_map)

        if vm_gen == constants.VM_GEN_2 and configdrive.required_by(instance):
            # for Generation 2 VMs, the configdrive is attached to the SCSI
            # controller. Check that there is still a slot available for it.
            if slot_map[constants.CTRL_TYPE_SCSI][0] == 0:
                msg = _("There are no more free slots on controller %s for "
                        "configdrive.") % constants.CTRL_TYPE_SCSI
                raise exception.InvalidBDMFormat(details=msg)
Example #35
    def validate_and_update_bdi(self, instance, image_meta, vm_gen,
                                block_device_info):
        slot_map = self._initialize_controller_slot_counter(instance, vm_gen)
        self._check_and_update_root_device(vm_gen, image_meta,
                                           block_device_info, slot_map)
        self._check_and_update_ephemerals(vm_gen, block_device_info, slot_map)
        self._check_and_update_volumes(vm_gen, block_device_info, slot_map)

        if vm_gen == constants.VM_GEN_2 and configdrive.required_by(instance):
            # for Generation 2 VMs, the configdrive is attached to the SCSI
            # controller. Check that there is still a slot available for it.
            if slot_map[constants.CTRL_TYPE_SCSI][0] == 0:
                msg = _("There are no more free slots on controller %s for "
                        "configdrive.") % constants.CTRL_TYPE_SCSI
                raise exception.InvalidBDMFormat(details=msg)
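This check consumes the slot map produced by _initialize_controller_slot_counter (Examples #30 and #31). A hedged sketch of the failing case:

    # Hedged sketch; slot_map shape follows Examples #30/#31.
    slot_map = {constants.CTRL_TYPE_SCSI: [0]}  # every SCSI slot consumed
    # For a Generation 2 instance that still needs a config drive,
    # validate_and_update_bdi raises InvalidBDMFormat at this point.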
Example #36
        def _setup_flow_and_run():
            # Define the flow
            flow = tf_lf.Flow("destroy")

            # Power Off the LPAR. If its disks are about to be deleted, issue a
            # hard shutdown.
            flow.add(tf_vm.PowerOff(self.adapter, instance,
                                    force_immediate=destroy_disks))

            # The FeedTask accumulates storage disconnection tasks to be run in
            # parallel.
            stg_ftsk = pvm_par.build_active_vio_feed_task(
                self.adapter, xag=[pvm_const.XAG.VIO_SMAP])

            # Call the unplug VIFs task.  While CNAs get removed from the LPAR
            # directly on the destroy, this clears up the I/O Host side.
            flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))

            # Add the disconnect/deletion of the vOpt to the transaction
            # manager.
            if configdrive.required_by(instance):
                flow.add(tf_stg.DeleteVOpt(
                    self.adapter, instance, stg_ftsk=stg_ftsk))

            # Extract the block devices.
            bdms = driver.block_device_info_get_mapping(block_device_info)

            # Determine if there are volumes to detach.  If so, remove each
            # volume (within the transaction manager)
            for bdm, vol_drv in self._vol_drv_iter(
                     context, instance, bdms, stg_ftsk=stg_ftsk):
                flow.add(tf_stg.DetachVolume(vol_drv))

            # Detach the disk storage adapters
            flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))

            # Accumulated storage disconnection tasks next
            flow.add(stg_ftsk)

            # Delete the storage disks
            if destroy_disks:
                flow.add(tf_stg.DeleteDisk(self.disk_dvr))

            # TODO(thorst, efried) Add LPAR id based scsi map clean up task
            flow.add(tf_vm.Delete(self.adapter, instance))

            # Build the engine & run!
            tf_base.run(flow, instance=instance)
Example #37
File: vmops.py Project: arbrandes/nova
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        """Create a new VM and start it."""
        LOG.info("Spawning new instance", instance=instance)

        instance_name = instance.name
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)

        vm_gen = self.get_image_vm_generation(instance.uuid, image_meta)

        self._block_dev_man.validate_and_update_bdi(
            instance, image_meta, vm_gen, block_device_info)
        root_device = block_device_info['root_disk']
        self._create_root_device(context, instance, root_device, vm_gen)
        self._create_ephemerals(instance, block_device_info['ephemerals'])

        try:
            with self.wait_vif_plug_events(instance, network_info):
                # waiting will occur after the instance is created.
                self.create_instance(instance, network_info, root_device,
                                     block_device_info, vm_gen, image_meta)
                # This is supported starting from OVS version 2.5
                self.plug_vifs(instance, network_info)

            self._save_device_metadata(context, instance, block_device_info)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(context,
                                                             instance,
                                                             injected_files,
                                                             admin_password,
                                                             network_info)

                self.attach_config_drive(instance, configdrive_path, vm_gen)
            self.set_boot_order(instance.name, vm_gen, block_device_info)
            # vifs are already plugged in at this point. We waited on the vif
            # plug event previously when we created the instance. Skip the
            # plug vifs during power on in this case
            self.power_on(instance,
                          network_info=network_info,
                          should_plug_vifs=False)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance, network_info, block_device_info)
Example #38
    def spawn(self,
              context,
              instance,
              image_meta,
              injected_files,
              admin_password,
              network_info,
              block_device_info=None):
        """Create a new VM and start it."""
        LOG.info(_LI("Spawning new instance"), instance=instance)

        instance_name = instance.name
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)
        if ('properties' in image_meta and
                'mtwilson_trustpolicy_location' in image_meta['properties']):
            instance['metadata']['mtwilson_trustpolicy_location'] = (
                image_meta['properties']['mtwilson_trustpolicy_location'])
            instance.save()

        if self._volumeops.ebs_root_in_block_devices(block_device_info):
            root_vhd_path = None
        else:
            root_vhd_path = self._create_root_vhd(context, instance)

        eph_vhd_path = self.create_ephemeral_vhd(instance)
        # TODO(lpetrut): move this to the create_instance method.
        vm_gen = self.get_image_vm_generation(root_vhd_path, image_meta)

        try:
            self.create_instance(instance, network_info, block_device_info,
                                 root_vhd_path, eph_vhd_path, vm_gen,
                                 image_meta)
            LOG.info(instance)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(
                    instance, injected_files, admin_password, network_info)

                self.attach_config_drive(instance, configdrive_path, vm_gen)

            self.power_on(instance, network_info=network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance)
Example #39
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance=False,
                         block_device_info=None, power_on=True):
        LOG.debug("finish_migration called", instance=instance)

        if self.session.container_defined(instance.name, instance):
            return

        try:
            # Ensure that the instance directory exists
            instance_dir = \
                self.container_dir.get_instance_dir(instance.name)
            if not os.path.exists(instance_dir):
                fileutils.ensure_tree(instance_dir)

            if configdrive.required_by(instance):
                configdrive_dir = \
                    self.container_dir.get_container_configdrive(
                        instance.name)
                fileutils.ensure_tree(configdrive_dir)

            # Step 1 - Setup the profile on the dest host
            container_profile = self.config.create_profile(instance,
                                                           network_info)
            self.session.profile_create(container_profile, instance)

            # Step 2 - Open a websocket on the source host and
            #          generate the container config
            src_host = self._get_hostname(
                migration['source_compute'], instance)
            (state, data) = (self.session.container_migrate(instance.name,
                                                            src_host,
                                                            instance))
            container_config = self.config.create_container(instance)
            container_config['source'] = \
                self.config.get_container_migrate(
                    data, migration, src_host, instance)
            self.session.container_init(container_config, instance)

            # Step 3 - Start the network and container
            self.operations.plug_vifs(instance, network_info)
            self.session.container_start(instance.name, instance)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Migration failed for %(instance)s: '
                                  '%(ex)s'),
                              {'instance': instance.name,
                               'ex': ex}, instance=instance)
Example #40
File: utils.py Project: arbrandes/nova
def generate_configdrive(context, instance, injected_files,
                         network_info, admin_password):
    # Create network configuration files
    LOG.debug('Creating config drive configuration files '
              'for instance: %s', instance.name, instance=instance)

    instance_path = _get_instance_path(instance.uuid)

    transportfiles = None
    if configdrive.required_by(instance):
        transportfiles = _create_config_drive(context, instance_path,
                                              instance,
                                              injected_files,
                                              network_info,
                                              admin_password)
    return transportfiles
Example #41
def generate_configdrive(context, instance, injected_files, network_info,
                         admin_password):
    # Create network configuration files
    LOG.debug('Creating config drive configuration files '
              'for instance: %s',
              instance.name,
              instance=instance)

    instance_path = _get_instance_path(instance.uuid)

    transportfiles = None
    if configdrive.required_by(instance):
        transportfiles = _create_config_drive(context, instance_path, instance,
                                              injected_files, network_info,
                                              admin_password)
    return transportfiles
Example #42
    def generate_configdrive(self, context, instance, injected_files,
                             admin_password):
        # Create network configuration files
        LOG.debug('Creating network configuration files '
                  'for instance: %s',
                  instance['name'],
                  instance=instance)

        instance_path = self._pathutils.get_instance_path(instance['uuid'])

        transportfiles = None
        if configdrive.required_by(instance):
            transportfiles = self._create_config_drive(context, instance_path,
                                                       instance,
                                                       injected_files,
                                                       admin_password)
        return transportfiles
Example #43
File: config.py Project: gyurco/nova-lxd
    def get_container_config(self, instance, rescue):
        """Translate the nova instance object into an LXD configuration
           dictionary.

           :param instance: nova instance object
           :param rescue: boolean to create rescue containers or not.
        """
        LOG.debug('get_container_config called for instance',
                  instance=instance)
        try:
            instance_name = instance.name
            if rescue:
                instance_name = '%s-rescue' % instance.name

            container_config = {
                'name': instance_name,
                'profiles': [str(instance_name)],
                'source': self._get_container_source(instance),
                'devices': {}}
            # if a config drive is required setup the mount point for the
            # container
            if configdrive.required_by(instance):
                configdrive_dir = \
                    self.container_dir.get_container_configdrive(
                        instance_name)
                config = self._configure_disk_path(configdrive_dir, 'mnt',
                                                   'configdrive', instance)
                container_config['devices'].update(config)

            # if a rescue container is required, set up the mount point
            # for the container
            if rescue:
                rescue_dir = self.container_dir.get_container_rescue(
                    instance_name)
                config = self._configure_disk_path(rescue_dir, 'mnt',
                                                   'rescue', instance)
                container_config['devices'].update(config)

            return container_config
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to translate nova object '
                              'for %(instance)s: %(ex)s'),
                          {'instance': instance_name, 'ex': ex},
                          instance=instance)
Example #44
    def create_container(self, instance):
        """Create a LXD container dictionary so that we can
           use it to initialize a container

           :param instance: nova instance object
        """
        LOG.debug('create_container called for instance', instance=instance)

        instance_name = instance.name
        try:

            # Fetch the container configuration from the current nova
            # instance object
            container_config = {
                'name': instance_name,
                'profiles': [str(instance.name)],
                'source': self.get_container_source(instance),
                'devices': {}
            }

            # if a configdrive is required, setup the mount point for
            # the container
            if configdrive.required_by(instance):
                configdrive_dir = \
                    self.container_dir.get_container_configdrive(
                        instance.name)
                config = self.configure_disk_path(configdrive_dir,
                                                  'var/lib/cloud/data',
                                                  'configdrive', instance)
                container_config['devices'].update(config)

            return container_config
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    'Failed to get container configuration'
                    ' %(instance)s: %(ex)s', {
                        'instance': instance_name,
                        'ex': ex
                    },
                    instance=instance)
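For a config-drive instance, the dictionary returned above would have roughly the following shape; the configdrive device entry produced by configure_disk_path, and the paths shown, are assumptions based on LXD disk-device conventions:

    # Hedged illustration only; paths and device shape are assumptions.
    {
        'name': 'instance-00000001',
        'profiles': ['instance-00000001'],
        'source': {...},  # whatever get_container_source(instance) returned
        'devices': {
            'configdrive': {
                'type': 'disk',
                'path': 'var/lib/cloud/data',
                'source': '/var/lib/lxd/containers/.../configdrive',
            },
        },
    }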
Example #45
    def spawn(self,
              context,
              instance,
              image_meta,
              injected_files,
              admin_password,
              network_info,
              block_device_info=None):
        """Create a new VM and start it."""
        LOG.info("Spawning new instance", instance=instance)

        instance_name = instance.name
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)

        vm_gen = self.get_image_vm_generation(instance.uuid, image_meta)

        self._block_dev_man.validate_and_update_bdi(instance, image_meta,
                                                    vm_gen, block_device_info)
        root_device = block_device_info['root_disk']
        self._create_root_device(context, instance, root_device, vm_gen)
        self._create_ephemerals(instance, block_device_info['ephemerals'])

        try:
            with self.wait_vif_plug_events(instance, network_info):
                # waiting will occur after the instance is created.
                self.create_instance(instance, network_info, root_device,
                                     block_device_info, vm_gen, image_meta)

            self._save_device_metadata(context, instance, block_device_info)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(
                    context, instance, injected_files, admin_password,
                    network_info)

                self.attach_config_drive(instance, configdrive_path, vm_gen)
            self.set_boot_order(instance.name, vm_gen, block_device_info)
            self.power_on(instance, network_info=network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance, network_info, block_device_info)
Example #46
        def _setup_flow_and_run():
            # Define the flow
            flow = tf_lf.Flow("destroy")

            # Power Off the LPAR. If its disks are about to be deleted, issue a
            # hard shutdown.
            flow.add(
                tf_vm.PowerOff(self.adapter,
                               instance,
                               force_immediate=destroy_disks))
            # TODO(thorst, efried) Add unplug vifs task

            # The FeedTask accumulates storage disconnection tasks to be run in
            # parallel.
            stg_ftsk = pvm_par.build_active_vio_feed_task(
                self.adapter, xag=[pvm_const.XAG.VIO_SMAP])

            # Add the disconnect/deletion of the vOpt to the transaction
            # manager.
            if configdrive.required_by(instance):
                flow.add(
                    tf_stg.DeleteVOpt(self.adapter,
                                      instance,
                                      stg_ftsk=stg_ftsk))

            # TODO(thorst, efried) Add volume disconnect tasks

            # Detach the disk storage adapters
            flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))

            # Accumulated storage disconnection tasks next
            flow.add(stg_ftsk)

            # Delete the storage disks
            if destroy_disks:
                flow.add(tf_stg.DeleteDisk(self.disk_dvr))

            # TODO(thorst, efried) Add LPAR id based scsi map clean up task
            flow.add(tf_vm.Delete(self.adapter, instance))

            # Build the engine & run!
            tf_base.run(flow, instance=instance)
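Example #46 assembles its teardown as a TaskFlow flow; the tf_* aliases presumably wrap the taskflow library. A stripped-down sketch of the same pattern using plain taskflow (the task classes here are illustrative, not the PowerVM driver's):

from taskflow import engines, task
from taskflow.patterns import linear_flow


class PowerOff(task.Task):
    def execute(self):
        print("powering off the LPAR")


class DeleteDisk(task.Task):
    def execute(self):
        print("deleting the instance disks")


# Tasks in a linear flow run strictly in order; on failure, the
# revert() methods of already-executed tasks are invoked.
flow = linear_flow.Flow("destroy")
flow.add(PowerOff(), DeleteDisk())
engines.run(flow)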
Example #47
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        """Create a new VM and start it."""
        LOG.info(_LI("Spawning new instance"), instance=instance)

        instance_name = instance.name
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)
        if ('properties' in image_meta and
                'mtwilson_trustpolicy_location' in image_meta['properties']):
            instance['metadata']['mtwilson_trustpolicy_location'] = (
                image_meta['properties']['mtwilson_trustpolicy_location'])
            instance.save()

        if self._volumeops.ebs_root_in_block_devices(block_device_info):
            root_vhd_path = None
        else:
            root_vhd_path = self._create_root_vhd(context, instance)

        eph_vhd_path = self.create_ephemeral_vhd(instance)
        # TODO(lpetrut): move this to the create_instance method.
        vm_gen = self.get_image_vm_generation(root_vhd_path, image_meta)

        try:
            self.create_instance(instance, network_info, block_device_info,
                                 root_vhd_path, eph_vhd_path,
                                 vm_gen, image_meta)
            LOG.info("Instance created", instance=instance)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(instance,
                                                             injected_files,
                                                             admin_password,
                                                             network_info)

                self.attach_config_drive(instance, configdrive_path, vm_gen)

            self.power_on(instance, network_info=network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance)
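For reference, the block_device_info consulted by ebs_root_in_block_devices above (and by get_disk_mapping further down) conventionally has the following shape across Nova drivers; the concrete values are illustrative:

# Illustrative block_device_info structure. Drivers detect
# boot-from-volume by looking for the root device in
# 'block_device_mapping'.
block_device_info = {
    'root_device_name': '/dev/sda',
    'ephemerals': [],
    'swap': None,
    'block_device_mapping': [
        {'mount_device': '/dev/sda',
         'connection_info': {'driver_volume_type': 'iscsi'},
         'boot_index': 0},
    ],
}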
Example #48
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        """Create a new VM and start it."""
        LOG.debug("Spawning new instance", instance=instance)

        instance_name = instance.name
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)
        if ('properties' in image_meta and
                'mtwilson_trustpolicy_location' in image_meta['properties']):
            instance['metadata']['mtwilson_trustpolicy_location'] = (
                image_meta['properties']['mtwilson_trustpolicy_location'])
            instance.save()
        LOG.debug("Inside spawn")
        vm_gen = self.get_image_vm_generation(instance.uuid, image_meta)

        self._block_device_manager.validate_and_update_bdi(
            instance, image_meta, vm_gen, block_device_info)
        root_device = block_device_info['root_disk']
        self._create_root_device(context, instance, root_device, vm_gen)
        self._create_ephemerals(instance, block_device_info['ephemerals'])

        try:
            with self.wait_vif_plug_events(instance, network_info):
                self.create_instance(instance, network_info,
                                     root_device, block_device_info,
                                     vm_gen, image_meta)
                LOG.info("Instance created", instance=instance)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(instance,
                                                             injected_files,
                                                             admin_password,
                                                             network_info)

                self.attach_config_drive(instance, configdrive_path, vm_gen)
            self.set_boot_order(vm_gen, block_device_info, instance.name)
            self.power_on(instance, network_info=network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance)
Example #49
    def create_container(self, instance):
        """Create a LXD container dictionary so that we can
           use it to initialize a container

           :param instance: nova instance object
        """
        LOG.debug('create_container called for instance', instance=instance)

        instance_name = instance.name
        try:

            # Fetch the container configuration from the current nova
            # instance object
            container_config = {
                'name': instance_name,
                'profiles': [str(instance.name)],
                'source': self.get_container_source(instance),
                'devices': {}
            }

            # If a config drive is required, set up the mount point for
            # the container.
            if configdrive.required_by(instance):
                configdrive_dir = \
                    self.container_dir.get_container_configdrive(
                        instance.name)
                config = self.configure_disk_path(configdrive_dir,
                                                  'var/lib/cloud/data',
                                                  'configdrive', instance)
                container_config['devices'].update(config)

            if container_config is None:
                msg = _('Failed to get container configuration for %s') \
                    % instance_name
                raise exception.NovaException(msg)
            return container_config
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                LOG.error('Failed to get container configuration'
                          ' %(instance)s: %(ex)s',
                          {'instance': instance_name, 'ex': ex},
                          instance=instance)
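The devices entry that configure_disk_path contributes in these LXD examples plausibly follows LXD's disk-device schema. A hypothetical illustration of the dict create_container ends up returning (key names assumed from LXD's schema, not taken from this code):

# Hypothetical shape of the returned container configuration; the
# 'configdrive' device mounts a host directory at var/lib/cloud/data
# inside the container, per LXD's disk-device schema.
container_config = {
    'name': 'instance-00000001',
    'profiles': ['instance-00000001'],
    'source': {'type': 'image', 'alias': 'ubuntu/xenial'},
    'devices': {
        'configdrive': {
            'type': 'disk',
            'source': '/var/lib/nova/instances/instance-00000001/configdrive',
            'path': 'var/lib/cloud/data',
        },
    },
}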
Example #50
File: vmops.py Project: andymcc/nova
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        """Create a new VM and start it."""
        LOG.info(_LI("Spawning new instance"), instance=instance)

        instance_name = instance.name
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)

        vm_gen = self.get_image_vm_generation(instance.uuid, image_meta)

        self._block_dev_man.validate_and_update_bdi(
            instance, image_meta, vm_gen, block_device_info)
        root_device = block_device_info['root_disk']
        self._create_root_device(context, instance, root_device, vm_gen)
        self._create_ephemerals(instance, block_device_info['ephemerals'])

        try:
            with self.wait_vif_plug_events(instance, network_info):
                # waiting will occur after the instance is created.
                self.create_instance(instance, network_info, root_device,
                                     block_device_info, vm_gen, image_meta)

            self._save_device_metadata(context, instance, block_device_info)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(context,
                                                             instance,
                                                             injected_files,
                                                             admin_password,
                                                             network_info)

                self.attach_config_drive(instance, configdrive_path, vm_gen)
            self.set_boot_order(instance.name, vm_gen, block_device_info)
            self.power_on(instance, network_info=network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance)
Example #51
    def create_container(self, instance):
        """Create a LXD container dictionary so that we can
           use it to initialize a container

           :param instance: nova instance object
        """
        LOG.debug("create_container called for instance", instance=instance)

        instance_name = instance.name
        try:

            # Fetch the container configuration from the current nova
            # instance object
            container_config = {
                "name": instance_name,
                "profiles": [str(instance.name)],
                "source": self.get_container_source(instance),
                "devices": {},
            }

            # If a config drive is required, set up the mount point for
            # the container.
            if configdrive.required_by(instance):
                configdrive_dir = self.container_dir.get_container_configdrive(instance.name)
                config = self.configure_disk_path(configdrive_dir, "var/lib/cloud/data", "configdrive", instance)
                container_config["devices"].update(config)

            if container_config is None:
                msg = _("Failed to get container configuration for %s") % instance_name
                raise exception.NovaException(msg)
            return container_config
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    "Failed to get container configuration" " %(instance)s: %(ex)s",
                    {"instance": instance_name, "ex": ex},
                    instance=instance,
                )
Example #52
    def rescue_instance(self, context, instance, network_info, image_meta, rescue_password):
        rescue_image_id = image_meta.get("id") or instance.image_ref
        rescue_vhd_path = self._create_root_vhd(context, instance, rescue_image_id=rescue_image_id)

        rescue_vm_gen = self.get_image_vm_generation(rescue_vhd_path, image_meta)
        vm_gen = self._vmutils.get_vm_gen(instance.name)
        if rescue_vm_gen != vm_gen:
            err_msg = _(
                "The requested rescue image requires a different VM "
                "generation than the actual rescued instance. "
                "Rescue image VM generation: %(rescue_vm_gen)s. "
                "Rescued instance VM generation: %(vm_gen)s."
            )
            raise vmutils.HyperVException(err_msg % {"rescue_vm_gen": rescue_vm_gen, "vm_gen": vm_gen})

        root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
        if not root_vhd_path:
            err_msg = _(
                "Instance root disk image could not be found. "
                "Rescuing instances booted from volume is "
                "not supported."
            )
            raise vmutils.HyperVException(err_msg)

        controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

        self._vmutils.detach_vm_disk(instance.name, root_vhd_path, is_physical=False)
        self._attach_drive(instance.name, rescue_vhd_path, 0, self._ROOT_DISK_CTRL_ADDR, controller_type)
        self._vmutils.attach_scsi_drive(instance.name, root_vhd_path, drive_type=constants.DISK)

        if configdrive.required_by(instance):
            self._detach_config_drive(instance.name)
            rescue_configdrive_path = self._create_config_drive(
                instance, injected_files=None, admin_password=rescue_password, network_info=network_info, rescue=True
            )
            self.attach_config_drive(instance, rescue_configdrive_path, vm_gen)

        self.power_on(instance)
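VM_GENERATIONS_CONTROLLER_TYPES above maps a Hyper-V VM generation to the controller the root disk attaches to. Its shape is plausibly the following (names and values assumed from Hyper-V conventions: Generation 1 VMs boot from IDE, Generation 2 from SCSI):

# Assumed lookup table (constant names hypothetical): Gen1 VMs boot
# from an IDE controller, Gen2 VMs from SCSI.
VM_GEN_1, VM_GEN_2 = 1, 2
IDE, SCSI = 'IDE', 'SCSI'

VM_GENERATIONS_CONTROLLER_TYPES = {
    VM_GEN_1: IDE,
    VM_GEN_2: SCSI,
}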
Example #53
    def generate_configdrive(self, context, instance, os_version, network_info,
                             injected_files, admin_password):
        # Create network configuration files
        LOG.debug('Creating network configuration files for instance: %s',
                  instance['name'], instance=instance)

        linuxdist = self._dist_manager.get_linux_dist(os_version)()
        instance_path = self._pathutils.get_instance_path(instance['uuid'])

        files_and_cmds = linuxdist.create_network_configuration_files(
            instance_path, network_info)
        (net_conf_files, net_conf_cmds) = files_and_cmds
        # Add the network configuration files to injected_files
        if net_conf_files:
            injected_files.extend(net_conf_files)

        transportfiles = None
        if configdrive.required_by(instance):
            transportfiles = self._create_config_drive(
                context, instance_path, instance, injected_files,
                admin_password, net_conf_cmds, linuxdist)
        return transportfiles
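Throughout these drivers injected_files is conventionally a list of (guest_path, contents) pairs, which is why the generated network configuration files can simply be appended. A tiny illustration with made-up paths and contents:

# injected_files entries are (guest_path, file_contents) tuples.
injected_files = [('/etc/motd', b'provisioned by nova')]
net_conf_files = [
    ('/etc/sysconfig/network-scripts/ifcfg-eth0', b'BOOTPROTO=static\n'),
]
if net_conf_files:
    injected_files.extend(net_conf_files)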
Example #54
def get_disk_mapping(virt_type, instance,
                     disk_bus, cdrom_bus,
                     image_meta,
                     block_device_info=None,
                     rescue=False):
    """Determine how to map default disks to the virtual machine.

       This is about figuring out whether the default 'disk',
       'disk.local', 'disk.swap' and 'disk.config' images have
       been overridden by the block device mapping.

       Returns the guest disk mapping for the devices.
    """

    mapping = {}

    if rescue:
        rescue_info = get_next_disk_info(mapping,
                                         disk_bus, boot_index=1)
        mapping['disk.rescue'] = rescue_info
        mapping['root'] = rescue_info

        os_info = get_next_disk_info(mapping,
                                     disk_bus)
        mapping['disk'] = os_info

        return mapping

    inst_type = instance.get_flavor()

    pre_assigned_device_names = [
        block_device.strip_dev(get_device_name(bdm))
        for bdm in itertools.chain(
            driver.block_device_info_get_ephemerals(block_device_info),
            [driver.block_device_info_get_swap(block_device_info)],
            driver.block_device_info_get_mapping(block_device_info))
        if get_device_name(bdm)]

    # NOTE (ndipanov): root_bdm can be None when we boot from image
    # as there is no driver representation of locally targeted images
    # and they will not be in the block_device_info list.
    root_bdm = block_device.get_root_bdm(
        driver.block_device_info_get_mapping(block_device_info))

    root_device_name = block_device.strip_dev(
        driver.block_device_info_get_root(block_device_info))
    root_info = get_root_info(
        instance, virt_type, image_meta, root_bdm,
        disk_bus, cdrom_bus, root_device_name)

    mapping['root'] = root_info
    # NOTE (ndipanov): This implicitly relies on image->local BDMs not
    #                  being considered in the driver layer - so a missing
    #                  bdm with boot_index 0 means "use the image", unless it
    #                  was overridden. This can happen when using legacy
    #                  syntax and no root_device_name is set on the instance.
    if not root_bdm and not block_device.volume_in_mapping(root_info['dev'],
                                                           block_device_info):
        mapping['disk'] = root_info
    elif root_bdm:
    # NOTE (ft): If the device name is not set in the root bdm, root_info
    # carries a generated one. Copy it back to the root bdm so the name is
    # not generated a second time in the loop over the bdms below. If the
    # device name is already set, nothing changes.
        update_bdm(root_bdm, root_info)

    default_eph = has_default_ephemeral(instance, disk_bus, block_device_info,
                                        mapping)
    if default_eph:
        mapping['disk.local'] = default_eph

    for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
            block_device_info)):
        eph_info = get_info_from_bdm(
            instance, virt_type, image_meta, eph, mapping, disk_bus,
            assigned_devices=pre_assigned_device_names)
        mapping[get_eph_disk(idx)] = eph_info
        update_bdm(eph, eph_info)

    swap = driver.block_device_info_get_swap(block_device_info)
    if swap and swap.get('swap_size', 0) > 0:
        swap_info = get_info_from_bdm(
            instance, virt_type, image_meta,
            swap, mapping, disk_bus)
        mapping['disk.swap'] = swap_info
        update_bdm(swap, swap_info)
    elif inst_type['swap'] > 0:
        swap_info = get_next_disk_info(mapping, disk_bus,
            assigned_devices=pre_assigned_device_names)
        if not block_device.volume_in_mapping(swap_info['dev'],
                                              block_device_info):
            mapping['disk.swap'] = swap_info

    block_device_mapping = driver.block_device_info_get_mapping(
        block_device_info)

    for vol in block_device_mapping:
        vol_info = get_info_from_bdm(
            instance, virt_type, image_meta, vol, mapping,
            assigned_devices=pre_assigned_device_names)
        mapping[block_device.prepend_dev(vol_info['dev'])] = vol_info
        update_bdm(vol, vol_info)

    if configdrive.required_by(instance):
        device_type = get_config_drive_type()
        disk_bus = get_disk_bus_for_device_type(instance,
                                                virt_type,
                                                image_meta,
                                                device_type)
        config_info = get_next_disk_info(mapping,
                                         disk_bus,
                                         device_type,
                                         last_device=True)
        mapping['disk.config'] = config_info

    return mapping
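To make the return shape concrete, here is a hypothetical result of get_disk_mapping for an image-booted guest with a swap disk and a config drive; the bus and device values are illustrative, not computed:

# Hypothetical mapping for virt_type='kvm': 'root' and 'disk' alias the
# same image-backed root disk, and 'disk.config' lands on the last
# device slot as a cdrom.
mapping = {
    'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk',
             'boot_index': '1'},
    'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk',
             'boot_index': '1'},
    'disk.swap': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
    'disk.config': {'bus': 'ide', 'dev': 'hdd', 'type': 'cdrom'},
}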
Example #55
File: vmops.py Project: blahRus/nova
    def spawn(self,
              context,
              instance,
              image_meta,
              injected_files,
              admin_password,
              network_info,
              block_device_info=None):
        """Create a new VM and start it."""

        instance_name = instance['name']
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        ebs_root = self._volumeops.volume_in_mapping(
            self._volumeops.get_default_root_device(), block_device_info)

        # If this is not a boot-from-volume spawn
        if not ebs_root:
            # Fetch the file, assuming it is a VHD file.
            vhdfile = self._pathutils.get_vhd_path(instance_name)
            try:
                self._cache_image(fn=self._fetch_image,
                                  context=context,
                                  target=vhdfile,
                                  fname=instance['image_ref'],
                                  image_id=instance['image_ref'],
                                  user=instance['user_id'],
                                  project=instance['project_id'],
                                  cow=CONF.use_cow_images)
            except Exception as exn:
                LOG.exception(_('cache image failed: %s'), exn)
                raise

        try:
            self._vmutils.create_vm(instance_name, instance['memory_mb'],
                                    instance['vcpus'], CONF.limit_cpu_features)

            if not ebs_root:
                self._vmutils.attach_ide_drive(instance_name, vhdfile, 0, 0,
                                               constants.IDE_DISK)
            else:
                self._volumeops.attach_boot_volume(block_device_info,
                                                   instance_name)

            self._vmutils.create_scsi_controller(instance_name)

            for vif in network_info:
                LOG.debug(_('Creating nic for instance: %s'), instance_name)
                self._vmutils.create_nic(instance_name, vif['id'],
                                         vif['address'])
                self._vif_driver.plug(instance, vif)

            if configdrive.required_by(instance):
                self._create_config_drive(instance, injected_files,
                                          admin_password)

            self._set_vm_state(instance_name,
                               constants.HYPERV_VM_STATE_ENABLED)
        except Exception as ex:
            LOG.exception(ex)
            self.destroy(instance)
            raise vmutils.HyperVException(_('Spawn instance failed'))
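The _cache_image call in Example #55 implements the usual fetch-once, copy-per-instance image cache. A minimal sketch of that idea follows; the helper name, signature and path layout are hypothetical, not the Hyper-V driver's actual code:

import os
import shutil


def cache_image(fetch_fn, context, target, fname,
                cache_dir='/var/cache/nova/images', **fetch_kwargs):
    """Fetch the base image once, then copy it for each new instance."""
    base = os.path.join(cache_dir, fname)
    if not os.path.exists(base):
        os.makedirs(cache_dir, exist_ok=True)
        fetch_fn(context=context, target=base, **fetch_kwargs)
    # A real driver would create a copy-on-write child instead of a
    # full copy when use_cow_images is enabled.
    shutil.copyfile(base, target)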
Example #56
File: driver.py Project: joker946/nova
    def spawn(self,
              context,
              instance,
              image_meta,
              injected_files,
              admin_password,
              network_info=None,
              block_device_info=None):
        """Deploy an instance.

        :param context: The security context.
        :param instance: The instance object.
        :param image_meta: Image dict returned by nova.image.glance
            that defines the image from which to boot this instance.
        :param injected_files: User files to inject into instance.
        :param admin_password: Administrator password to set in
            instance.
        :param network_info: Instance network information.
        :param block_device_info: Instance block device
            information. Ignored by this driver.
        """
        # The compute manager is meant to know the node uuid, so missing uuid
        # is a significant issue. It may mean we've been passed the wrong data.
        node_uuid = instance.get('node')
        if not node_uuid:
            raise ironic.exc.BadRequest(
                _("Ironic node uuid not supplied to "
                  "driver for instance %s.") % instance.uuid)

        node = self.ironicclient.call("node.get", node_uuid)
        flavor = instance.flavor

        self._add_driver_fields(node, instance, image_meta, flavor)

        # NOTE(Shrews): The default ephemeral device needs to be set for
        # services (like cloud-init) that depend on it being returned by the
        # metadata server. Addresses bug https://launchpad.net/bugs/1324286.
        if flavor.ephemeral_gb:
            instance.default_ephemeral_device = '/dev/sda1'
            instance.save()

        # validate we are ready to do the deploy
        validate_chk = self.ironicclient.call("node.validate", node_uuid)
        if not validate_chk.deploy or not validate_chk.power:
            # something is wrong. undo what we have done
            self._cleanup_deploy(context,
                                 node,
                                 instance,
                                 network_info,
                                 flavor=flavor)
            raise exception.ValidationError(
                _("Ironic node: %(id)s failed to validate."
                  " (deploy: %(deploy)s, power: %(power)s)") % {
                      'id': node.uuid,
                      'deploy': validate_chk.deploy,
                      'power': validate_chk.power
                  })

        # prepare for the deploy
        try:
            self._plug_vifs(node, instance, network_info)
            self._start_firewall(instance, network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Error preparing deploy for instance "
                        "%(instance)s on baremetal node %(node)s."), {
                            'instance': instance.uuid,
                            'node': node_uuid
                        })
                self._cleanup_deploy(context,
                                     node,
                                     instance,
                                     network_info,
                                     flavor=flavor)

        # Config drive
        configdrive_value = None
        if configdrive.required_by(instance):
            extra_md = {}
            if admin_password:
                extra_md['admin_pass'] = admin_password

            configdrive_value = self._generate_configdrive(
                instance,
                node,
                network_info,
                extra_md=extra_md,
                files=injected_files)

            LOG.info(
                _LI("Config drive for instance %(instance)s on "
                    "baremetal node %(node)s created."), {
                        'instance': instance['uuid'],
                        'node': node_uuid
                    })

        # trigger the node deploy
        try:
            self.ironicclient.call("node.set_provision_state",
                                   node_uuid,
                                   ironic_states.ACTIVE,
                                   configdrive=configdrive_value)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to request Ironic to provision "
                              "instance %(inst)s: %(reason)s"),
                          {'inst': instance.uuid,
                           'reason': six.text_type(e)})
                self._cleanup_deploy(context,
                                     node,
                                     instance,
                                     network_info,
                                     flavor=flavor)

        timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
                                                     self.ironicclient,
                                                     instance)
        try:
            timer.start(interval=CONF.ironic.api_retry_interval).wait()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Error deploying instance %(instance)s on "
                        "baremetal node %(node)s."), {
                            'instance': instance.uuid,
                            'node': node_uuid
                        })
                self.destroy(context, instance, network_info)
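Finally, the _wait_for_active polling in Example #56 relies on oslo.service's FixedIntervalLoopingCall, which re-invokes a callback at a fixed interval until it raises LoopingCallDone. A minimal self-contained illustration:

from oslo_service import loopingcall

state = {'polls': 0}


def _poll():
    state['polls'] += 1
    print('poll #%d' % state['polls'])
    if state['polls'] >= 3:
        # Stop the loop and unblock the .wait() below.
        raise loopingcall.LoopingCallDone()


timer = loopingcall.FixedIntervalLoopingCall(_poll)
timer.start(interval=1).wait()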