Example #1
    def detach_volume(self, context, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach the volume attached to the instance.

        :param context: security context
        :param connection_info: Volume connection information from the block
                                device mapping
        :param instance: nova.objects.instance.Instance
        :param mountpoint: Unused
        :param encryption: Unused
        """
        self._log_operation('detach_volume', instance)

        # Define the flow
        flow = tf_lf.Flow("detach_volume")

        # Get a volume adapter for this volume
        vol_drv = volume.build_volume_driver(self.adapter, instance,
                                             connection_info)

        # Add a task to detach the volume
        flow.add(tf_stg.DetachVolume(vol_drv))

        # Run the flow
        tf_base.run(flow, instance=instance)
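
Note: every example on this page follows the same TaskFlow recipe: build a linear flow, add task objects, run the flow. Below is a minimal, self-contained sketch of that pattern; DetachVolumeSketch and FakeVolumeDriver are illustrative stand-ins, not nova-powervm's actual tf_stg.DetachVolume.

import taskflow.engines
from taskflow import task
from taskflow.patterns import linear_flow as tf_lf


class DetachVolumeSketch(task.Task):
    """Illustrative stand-in for tf_stg.DetachVolume."""

    def __init__(self, vol_drv):
        super(DetachVolumeSketch, self).__init__(name='detach_volume')
        self.vol_drv = vol_drv

    def execute(self):
        # The engine calls execute() when the flow reaches this task.
        self.vol_drv.detach_volume()


class FakeVolumeDriver(object):
    def detach_volume(self):
        print('detaching volume')


flow = tf_lf.Flow('detach_volume')
flow.add(DetachVolumeSketch(FakeVolumeDriver()))
taskflow.engines.run(flow)  # what tf_base.run presumably wraps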
Example #2
    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        self._log_operation('plug_vifs', instance)

        # Define the flow
        flow = tf_lf.Flow("plug_vifs")

        # Get the LPAR Wrapper
        flow.add(tf_vm.Get(self.adapter, instance))

        # Run the attach
        flow.add(
            tf_net.PlugVifs(self.virtapi, self.adapter, instance,
                            network_info))

        # Run the flow
        try:
            tf_base.run(flow, instance=instance)
        except exc.InstanceNotFound:
            raise exc.VirtualInterfacePlugException(
                _("Plug vif failed because instance %s was not found.") %
                instance.name)
        except Exception:
            LOG.exception("PowerVM error plugging vifs.", instance=instance)
            raise exc.VirtualInterfacePlugException(
                _("Plug vif failed because of an unexpected error."))
Example #3
    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        self._log_operation('plug_vifs', instance)

        # Define the flow
        flow = tf_lf.Flow("plug_vifs")

        # Get the LPAR Wrapper
        flow.add(tf_vm.Get(self.adapter, instance))

        # Run the attach
        flow.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance,
                                 network_info))

        # Run the flow
        try:
            tf_base.run(flow, instance=instance)
        except exc.InstanceNotFound:
            raise exc.VirtualInterfacePlugException(
                _("Plug vif failed because instance %s was not found.")
                % instance.name)
        except Exception:
            LOG.exception("PowerVM error plugging vifs.", instance=instance)
            raise exc.VirtualInterfacePlugException(
                _("Plug vif failed because of an unexpected error."))
Example #4
File: driver.py Project: udayraghavan/nova
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Create a new instance/VM/domain on the virtualization platform.

        Once this successfully completes, the instance should be
        running (power_state.RUNNING).

        If this fails, any partial instance should be completely
        cleaned up, and the virtualization platform should be in the state
        that it was before this call began.

        :param context: security context
        :param instance: nova.objects.instance.Instance
                         This function should use the data there to guide
                         the creation of the new instance.
        :param nova.objects.ImageMeta image_meta:
            The metadata of the image of the instance.
        :param injected_files: User files to inject into instance.
        :param admin_password: Administrator password to set in instance.
        :param network_info: instance network information
        :param block_device_info: Information about block devices to be
                                  attached to the instance.
        """
        self._log_operation('spawn', instance)
        # Define the flow
        flow_spawn = tf_lf.Flow("spawn")
        flow_spawn.add(tf_vm.Create(self.adapter, self.host_wrapper, instance))
        # TODO(thorst, efried) Plug the VIFs
        # TODO(thorst, efried) Create/Connect the disk
        # TODO(thorst, efried) Add the config drive
        # Last step is to power on the system.
        flow_spawn.add(tf_vm.PowerOn(self.adapter, instance))

        # Run the flow.
        tf_base.run(flow_spawn, instance=instance)
Example #5
File: driver.py Project: bopopescu/nova-2
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach the volume to the instance using the connection_info.

        :param context: security context
        :param connection_info: Volume connection information from the block
                                device mapping
        :param instance: nova.objects.instance.Instance
        :param mountpoint: Unused
        :param disk_bus: Unused
        :param device_type: Unused
        :param encryption: Unused
        """
        self._log_operation('attach_volume', instance)

        # Define the flow
        flow = tf_lf.Flow("attach_volume")

        # Build the driver
        vol_drv = volume.build_volume_driver(self.adapter, instance,
                                             connection_info)

        # Add the volume attach to the flow.
        flow.add(tf_stg.AttachVolume(vol_drv))

        # Run the flow
        tf_base.run(flow, instance=instance)

        # The volume connector may have updated the system metadata.  Save
        # the instance to persist the data.  Spawn/destroy auto-saves the
        # instance, but attach does not.  Detach does not need this save, as
        # the detach flows do not (currently) modify system metadata.  May
        # need to revise in the future as volume connectors evolve.
        instance.save()
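
Note: the closing comment is the subtle part. A volume driver task may mutate instance.system_metadata during the flow, and that change lives only in memory until saved. An illustrative sketch (the 'volume_udid' key is hypothetical, not necessarily what nova-powervm stores):

def attach_sketch(instance, udid):
    # A PowerVM volume driver might record the device UDID so a later
    # detach can locate the mapping. In-memory only at this point:
    instance.system_metadata['volume_udid'] = udid
    # Without the save() after the flow, the update would never be
    # persisted to the nova database.
    instance.save()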
Example #6
        def _setup_flow_and_run():
            # Define the flow
            flow = tf_lf.Flow("destroy")

            # Power Off the LPAR. If its disks are about to be deleted, issue a
            # hard shutdown.
            flow.add(
                tf_vm.PowerOff(self.adapter,
                               instance,
                               force_immediate=destroy_disks))
            # TODO(thorst, efried) Add unplug vifs task
            # TODO(thorst, efried) Add config drive tasks
            # TODO(thorst, efried) Add volume disconnect tasks

            # Detach the disk storage adapters
            flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))

            # Delete the storage disks
            if destroy_disks:
                flow.add(tf_stg.DeleteDisk(self.disk_dvr))

            # TODO(thorst, efried) Add LPAR id based scsi map clean up task
            flow.add(tf_vm.Delete(self.adapter, instance))

            # Build the engine & run!
            tf_base.run(flow, instance=instance)
Example #7
File: driver.py Project: y00187570/nova
    def detach_volume(self,
                      context,
                      connection_info,
                      instance,
                      mountpoint,
                      encryption=None):
        """Detach the volume attached to the instance.

        :param context: security context
        :param connection_info: Volume connection information from the block
                                device mapping
        :param instance: nova.objects.instance.Instance
        :param mountpoint: Unused
        :param encryption: Unused
        """
        self._log_operation('detach_volume', instance)

        # Define the flow
        flow = tf_lf.Flow("detach_volume")

        # Get a volume adapter for this volume
        vol_drv = volume.build_volume_driver(self.adapter, instance,
                                             connection_info)

        # Add a task to detach the volume
        flow.add(tf_stg.DetachVolume(vol_drv))

        # Run the flow
        tf_base.run(flow, instance=instance)
Example #8
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach the volume to the instance using the connection_info.

        :param context: security context
        :param connection_info: Volume connection information from the block
                                device mapping
        :param instance: nova.objects.instance.Instance
        :param mountpoint: Unused
        :param disk_bus: Unused
        :param device_type: Unused
        :param encryption: Unused
        """
        self._log_operation('attach_volume', instance)

        # Define the flow
        flow = tf_lf.Flow("attach_volume")

        # Build the driver
        vol_drv = volume.build_volume_driver(self.adapter, instance,
                                             connection_info)

        # Add the volume attach to the flow.
        flow.add(tf_stg.AttachVolume(vol_drv))

        # Run the flow
        tf_base.run(flow, instance=instance)

        # The volume connector may have updated the system metadata.  Save
        # the instance to persist the data.  Spawn/destroy auto-saves the
        # instance, but attach does not.  Detach does not need this save, as
        # the detach flows do not (currently) modify system metadata.  May
        # need to revise in the future as volume connectors evolve.
        instance.save()
Example #9
File: driver.py Project: RCmerci/nova
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Create a new instance/VM/domain on the virtualization platform.

        Once this successfully completes, the instance should be
        running (power_state.RUNNING).

        If this fails, any partial instance should be completely
        cleaned up, and the virtualization platform should be in the state
        that it was before this call began.

        :param context: security context
        :param instance: nova.objects.instance.Instance
                         This function should use the data there to guide
                         the creation of the new instance.
        :param nova.objects.ImageMeta image_meta:
            The metadata of the image of the instance.
        :param injected_files: User files to inject into instance.
        :param admin_password: Administrator password to set in instance.
        :param network_info: instance network information
        :param block_device_info: Information about block devices to be
                                  attached to the instance.
        """
        self._log_operation('spawn', instance)
        # Define the flow
        flow_spawn = tf_lf.Flow("spawn")

        # This FeedTask accumulates VIOS storage connection operations to be
        # run in parallel. Include both SCSI and fibre channel mappings for
        # the scrubber.
        stg_ftsk = pvm_par.build_active_vio_feed_task(
            self.adapter, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})

        flow_spawn.add(tf_vm.Create(
            self.adapter, self.host_wrapper, instance, stg_ftsk))

        # TODO(thorst, efried) Plug the VIFs

        # Create the boot image.
        flow_spawn.add(tf_stg.CreateDiskForImg(
            self.disk_dvr, context, instance, image_meta))
        # Connect the disk to the LPAR
        flow_spawn.add(tf_stg.AttachDisk(
            self.disk_dvr, instance, stg_ftsk=stg_ftsk))

        # TODO(thorst, efried) Add the config drive

        # Add the transaction manager flow at the end of the 'I/O
        # connection' tasks. This will run all the connections in parallel.
        flow_spawn.add(stg_ftsk)

        # Last step is to power on the system.
        flow_spawn.add(tf_vm.PowerOn(self.adapter, instance))

        # Run the flow.
        tf_base.run(flow_spawn, instance=instance)
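
Note: flow_spawn.add(stg_ftsk) works because pypowervm's FeedTask is usable as an ordinary flow step: earlier tasks queue per-VIOS storage updates on it, and nothing is sent until the flow reaches the FeedTask itself. Below is a self-contained TaskFlow analogue of that accumulate-then-run-in-parallel idea; it fakes the FeedTask with an unordered flow and is not pypowervm code.

import taskflow.engines
from taskflow import task
from taskflow.patterns import linear_flow, unordered_flow


class Step(task.Task):
    def execute(self):
        print('running %s' % self.name)


# Stands in for stg_ftsk: independent I/O updates batched together.
io_batch = unordered_flow.Flow('vios_updates')
io_batch.add(Step('scsi_mapping'), Step('fc_mapping'))

spawn = linear_flow.Flow('spawn')
spawn.add(Step('create_lpar'), io_batch, Step('power_on'))

# The 'parallel' engine runs unordered-flow members concurrently.
taskflow.engines.run(spawn, engine='parallel')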
Example #10
File: driver.py Project: andymcc/nova
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Create a new instance/VM/domain on the virtualization platform.

        Once this successfully completes, the instance should be
        running (power_state.RUNNING).

        If this fails, any partial instance should be completely
        cleaned up, and the virtualization platform should be in the state
        that it was before this call began.

        :param context: security context
        :param instance: nova.objects.instance.Instance
                         This function should use the data there to guide
                         the creation of the new instance.
        :param nova.objects.ImageMeta image_meta:
            The metadata of the image of the instance.
        :param injected_files: User files to inject into instance.
        :param admin_password: Administrator password to set in instance.
        :param network_info: instance network information
        :param block_device_info: Information about block devices to be
                                  attached to the instance.
        """
        self._log_operation('spawn', instance)
        # Define the flow
        flow_spawn = tf_lf.Flow("spawn")

        # This FeedTask accumulates VIOS storage connection operations to be
        # run in parallel. Include both SCSI and fibre channel mappings for
        # the scrubber.
        stg_ftsk = pvm_par.build_active_vio_feed_task(
            self.adapter, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})

        flow_spawn.add(tf_vm.Create(
            self.adapter, self.host_wrapper, instance, stg_ftsk))

        # TODO(thorst, efried) Plug the VIFs

        # Create the boot image.
        flow_spawn.add(tf_stg.CreateDiskForImg(
            self.disk_dvr, context, instance, image_meta))
        # Connect the disk to the LPAR
        flow_spawn.add(tf_stg.AttachDisk(
            self.disk_dvr, instance, stg_ftsk=stg_ftsk))

        # TODO(thorst, efried) Add the config drive

        # Add the transaction manager flow at the end of the 'I/O
        # connection' tasks. This will run all the connections in parallel.
        flow_spawn.add(stg_ftsk)

        # Last step is to power on the system.
        flow_spawn.add(tf_vm.PowerOn(self.adapter, instance))

        # Run the flow.
        tf_base.run(flow_spawn, instance=instance)
Example #11
File: driver.py Project: y00187570/nova
        def _setup_flow_and_run():
            # Define the flow
            flow = tf_lf.Flow("destroy")

            # Power Off the LPAR. If its disks are about to be deleted, issue a
            # hard shutdown.
            flow.add(
                tf_vm.PowerOff(self.adapter,
                               instance,
                               force_immediate=destroy_disks))

            # The FeedTask accumulates storage disconnection tasks to be run in
            # parallel.
            stg_ftsk = pvm_par.build_active_vio_feed_task(
                self.adapter, xag=[pvm_const.XAG.VIO_SMAP])

            # Call the unplug VIFs task.  While CNAs get removed from the LPAR
            # directly on the destroy, this clears up the I/O Host side.
            flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))

            # Add the disconnect/deletion of the vOpt to the transaction
            # manager.
            if configdrive.required_by(instance):
                flow.add(
                    tf_stg.DeleteVOpt(self.adapter,
                                      instance,
                                      stg_ftsk=stg_ftsk))

            # Extract the block devices.
            bdms = driver.block_device_info_get_mapping(block_device_info)

            # Determine if there are volumes to detach.  If so, remove each
            # volume (within the transaction manager)
            for bdm, vol_drv in self._vol_drv_iter(context,
                                                   instance,
                                                   bdms,
                                                   stg_ftsk=stg_ftsk):
                flow.add(tf_stg.DetachVolume(vol_drv))

            # Detach the disk storage adapters
            flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))

            # Accumulated storage disconnection tasks next
            flow.add(stg_ftsk)

            # Delete the storage disks
            if destroy_disks:
                flow.add(tf_stg.DeleteDisk(self.disk_dvr))

            # TODO(thorst, efried) Add LPAR id based scsi map clean up task
            flow.add(tf_vm.Delete(self.adapter, instance))

            # Build the engine & run!
            tf_base.run(flow, instance=instance)
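
Note: driver.block_device_info_get_mapping (from nova.virt.driver) simply extracts the 'block_device_mapping' list, whose entries carry the connection_info each volume driver needs. An illustrative shape (field values are made up; the real contents depend on the volume backend):

from nova.virt import driver

block_device_info = {
    'block_device_mapping': [
        {'connection_info': {'driver_volume_type': 'fibre_channel',
                             'data': {'volume_id': 'vol-1'}},
         'mount_device': '/dev/vdb'},
    ],
}
bdms = driver.block_device_info_get_mapping(block_device_info)
assert bdms[0]['mount_device'] == '/dev/vdb'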
Example #12
File: driver.py Project: y00187570/nova
    def snapshot(self, context, instance, image_id, update_task_state):
        """Snapshots the specified instance.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :param image_id: Reference to a pre-created image that will hold the
                         snapshot.
        :param update_task_state: Callback function to update the task_state
            on the instance while the snapshot operation progresses. The
            function takes a task_state argument and an optional
            expected_task_state kwarg which defaults to
            nova.compute.task_states.IMAGE_SNAPSHOT. See
            nova.objects.instance.Instance.save for expected_task_state usage.
        """

        if not self.disk_dvr.capabilities.get('snapshot'):
            raise exc.NotSupportedWithOption(
                message=_("The snapshot operation is not supported in "
                          "conjunction with a [powervm]/disk_driver setting "
                          "of %s.") % CONF.powervm.disk_driver)

        self._log_operation('snapshot', instance)

        # Define the flow.
        flow = tf_lf.Flow("snapshot")

        # Notify that we're starting the process.
        flow.add(
            tf_img.UpdateTaskState(update_task_state,
                                   task_states.IMAGE_PENDING_UPLOAD))

        # Connect the instance's boot disk to the management partition, scan
        # the SCSI bus, and bring the device into the management partition.
        flow.add(tf_stg.InstanceDiskToMgmt(self.disk_dvr, instance))

        # Notify that the upload is in progress.
        flow.add(
            tf_img.UpdateTaskState(
                update_task_state,
                task_states.IMAGE_UPLOADING,
                expected_state=task_states.IMAGE_PENDING_UPLOAD))

        # Stream the disk to glance.
        flow.add(
            tf_img.StreamToGlance(context, self.image_api, image_id, instance))

        # Disconnect the boot disk from the management partition and delete the
        # device.
        flow.add(tf_stg.RemoveInstanceDiskFromMgmt(self.disk_dvr, instance))

        # Run the flow.
        tf_base.run(flow, instance=instance)
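
Note: the docstring above pins down the callback contract. Here is a sketch of an update_task_state consistent with it; nova's compute manager supplies the real callback.

from nova.compute import task_states


def make_update_task_state(instance):
    def update_task_state(task_state,
                          expected_task_state=task_states.IMAGE_SNAPSHOT):
        instance.task_state = task_state
        # Instance.save() raises if the current task_state does not
        # match expected_task_state, guarding against races.
        instance.save(expected_task_state=expected_task_state)
    return update_task_state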
Example #13
    def snapshot(self, context, instance, image_id, update_task_state):
        """Snapshots the specified instance.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :param image_id: Reference to a pre-created image that will hold the
                         snapshot.
        :param update_task_state: Callback function to update the task_state
            on the instance while the snapshot operation progresses. The
            function takes a task_state argument and an optional
            expected_task_state kwarg which defaults to
            nova.compute.task_states.IMAGE_SNAPSHOT. See
            nova.objects.instance.Instance.save for expected_task_state usage.
        """

        if not self.disk_dvr.capabilities.get('snapshot'):
            raise exc.NotSupportedWithOption(
                message=_("The snapshot operation is not supported in "
                          "conjunction with a [powervm]/disk_driver setting "
                          "of %s.") % CONF.powervm.disk_driver)

        self._log_operation('snapshot', instance)

        # Define the flow.
        flow = tf_lf.Flow("snapshot")

        # Notify that we're starting the process.
        flow.add(tf_img.UpdateTaskState(update_task_state,
                                        task_states.IMAGE_PENDING_UPLOAD))

        # Connect the instance's boot disk to the management partition, scan
        # the SCSI bus, and bring the device into the management partition.
        flow.add(tf_stg.InstanceDiskToMgmt(self.disk_dvr, instance))

        # Notify that the upload is in progress.
        flow.add(tf_img.UpdateTaskState(
            update_task_state, task_states.IMAGE_UPLOADING,
            expected_state=task_states.IMAGE_PENDING_UPLOAD))

        # Stream the disk to glance.
        flow.add(tf_img.StreamToGlance(context, self.image_api, image_id,
                                       instance))

        # Disconnect the boot disk from the management partition and delete the
        # device.
        flow.add(tf_stg.RemoveInstanceDiskFromMgmt(self.disk_dvr, instance))

        # Run the flow.
        tf_base.run(flow, instance=instance)
Example #14
        def _setup_flow_and_run():
            # Define the flow
            flow = tf_lf.Flow("destroy")

            # Power Off the LPAR. If its disks are about to be deleted, issue a
            # hard shutdown.
            flow.add(tf_vm.PowerOff(self.adapter, instance,
                                    force_immediate=destroy_disks))

            # The FeedTask accumulates storage disconnection tasks to be run in
            # parallel.
            stg_ftsk = pvm_par.build_active_vio_feed_task(
                self.adapter, xag=[pvm_const.XAG.VIO_SMAP])

            # Call the unplug VIFs task.  While CNAs get removed from the LPAR
            # directly on the destroy, this clears up the I/O Host side.
            flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))

            # Add the disconnect/deletion of the vOpt to the transaction
            # manager.
            if configdrive.required_by(instance):
                flow.add(tf_stg.DeleteVOpt(
                    self.adapter, instance, stg_ftsk=stg_ftsk))

            # Extract the block devices.
            bdms = driver.block_device_info_get_mapping(block_device_info)

            # Determine if there are volumes to detach.  If so, remove each
            # volume (within the transaction manager)
            for bdm, vol_drv in self._vol_drv_iter(
                     context, instance, bdms, stg_ftsk=stg_ftsk):
                flow.add(tf_stg.DetachVolume(vol_drv))

            # Detach the disk storage adapters
            flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))

            # Accumulated storage disconnection tasks next
            flow.add(stg_ftsk)

            # Delete the storage disks
            if destroy_disks:
                flow.add(tf_stg.DeleteDisk(self.disk_dvr))

            # TODO(thorst, efried) Add LPAR id based scsi map clean up task
            flow.add(tf_vm.Delete(self.adapter, instance))

            # Build the engine & run!
            tf_base.run(flow, instance=instance)
Example #15
    def snapshot(self, context, instance, image_id, update_task_state):
        """Snapshots the specified instance.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :param image_id: Reference to a pre-created image that will hold the
                         snapshot.
        :param update_task_state: Callback function to update the task_state
            on the instance while the snapshot operation progresses. The
            function takes a task_state argument and an optional
            expected_task_state kwarg which defaults to
            nova.compute.task_states.IMAGE_SNAPSHOT. See
            nova.objects.instance.Instance.save for expected_task_state usage.
        """
        # TODO(esberglu) Add check for disk driver snapshot capability when
        # additional disk drivers are implemented.
        self._log_operation('snapshot', instance)

        # Define the flow.
        flow = tf_lf.Flow("snapshot")

        # Notify that we're starting the process.
        flow.add(
            tf_img.UpdateTaskState(update_task_state,
                                   task_states.IMAGE_PENDING_UPLOAD))

        # Connect the instance's boot disk to the management partition, scan
        # the SCSI bus, and bring the device into the management partition.
        flow.add(tf_stg.InstanceDiskToMgmt(self.disk_dvr, instance))

        # Notify that the upload is in progress.
        flow.add(
            tf_img.UpdateTaskState(
                update_task_state,
                task_states.IMAGE_UPLOADING,
                expected_state=task_states.IMAGE_PENDING_UPLOAD))

        # Stream the disk to glance.
        flow.add(
            tf_img.StreamToGlance(context, self.image_api, image_id, instance))

        # Disconnect the boot disk from the management partition and delete the
        # device.
        flow.add(tf_stg.RemoveInstanceDiskFromMgmt(self.disk_dvr, instance))

        # Run the flow.
        tf_base.run(flow, instance=instance)
Example #16
        def _setup_flow_and_run():
            # Define the flow
            flow = tf_lf.Flow("destroy")

            # Power Off the LPAR. If its disks are about to be deleted, issue a
            # hard shutdown.
            flow.add(
                tf_vm.PowerOff(self.adapter,
                               instance,
                               force_immediate=destroy_disks))
            # TODO(thorst, efried) Add unplug vifs task

            # The FeedTask accumulates storage disconnection tasks to be run in
            # parallel.
            stg_ftsk = pvm_par.build_active_vio_feed_task(
                self.adapter, xag=[pvm_const.XAG.VIO_SMAP])

            # Add the disconnect/deletion of the vOpt to the transaction
            # manager.
            if configdrive.required_by(instance):
                flow.add(
                    tf_stg.DeleteVOpt(self.adapter,
                                      instance,
                                      stg_ftsk=stg_ftsk))

            # TODO(thorst, efried) Add volume disconnect tasks

            # Detach the disk storage adapters
            flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))

            # Accumulated storage disconnection tasks next
            flow.add(stg_ftsk)

            # Delete the storage disks
            if destroy_disks:
                flow.add(tf_stg.DeleteDisk(self.disk_dvr))

            # TODO(thorst, efried) Add LPAR id based scsi map clean up task
            flow.add(tf_vm.Delete(self.adapter, instance))

            # Build the engine & run!
            tf_base.run(flow, instance=instance)
Example #17
File: driver.py Project: bopopescu/nova-2
    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        self._log_operation('unplug_vifs', instance)

        # Define the flow
        flow = tf_lf.Flow("unplug_vifs")

        # Run the detach
        flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))

        # Run the flow
        try:
            tf_base.run(flow, instance=instance)
        except exc.InstanceNotFound:
            LOG.warning('VM was not found during unplug operation; it may '
                        'already have been deleted.', instance=instance)
        except Exception:
            LOG.exception("PowerVM error trying to unplug vifs.",
                          instance=instance)
            raise exc.InterfaceDetachFailed(instance_uuid=instance.uuid)
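
Note the asymmetry in the error handling: a VM that has already vanished is only logged, while any other failure becomes InterfaceDetachFailed. From a caller's perspective (illustrative; drv, instance and network_info are assumed in scope):

from nova import exception as exc

try:
    drv.unplug_vifs(instance, network_info)
except exc.InterfaceDetachFailed:
    # A genuine PowerVM failure; InstanceNotFound never propagates here.
    raise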
Example #18
    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        self._log_operation('unplug_vifs', instance)

        # Define the flow
        flow = tf_lf.Flow("unplug_vifs")

        # Run the detach
        flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))

        # Run the flow
        try:
            tf_base.run(flow, instance=instance)
        except exc.InstanceNotFound:
            LOG.warning('VM was not found during unplug operation; it may '
                        'already have been deleted.', instance=instance)
        except Exception:
            LOG.exception("PowerVM error trying to unplug vifs.",
                          instance=instance)
            raise exc.InterfaceDetachFailed(instance_uuid=instance.uuid)
Example #19
File: driver.py Project: andymcc/nova
        def _setup_flow_and_run():
            # Define the flow
            flow = tf_lf.Flow("destroy")

            # Power Off the LPAR. If its disks are about to be deleted, issue a
            # hard shutdown.
            flow.add(tf_vm.PowerOff(self.adapter, instance,
                                    force_immediate=destroy_disks))
            # TODO(thorst, efried) Add unplug vifs task
            # TODO(thorst, efried) Add config drive tasks
            # TODO(thorst, efried) Add volume disconnect tasks

            # Detach the disk storage adapters
            flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))

            # Delete the storage disks
            if destroy_disks:
                flow.add(tf_stg.DeleteDisk(self.disk_dvr))

            # TODO(thorst, efried) Add LPAR id based scsi map clean up task
            flow.add(tf_vm.Delete(self.adapter, instance))

            # Build the engine & run!
            tf_base.run(flow, instance=instance)
Example #20
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, allocations, network_info=None,
              block_device_info=None):
        """Create a new instance/VM/domain on the virtualization platform.

        Once this successfully completes, the instance should be
        running (power_state.RUNNING).

        If this fails, any partial instance should be completely
        cleaned up, and the virtualization platform should be in the state
        that it was before this call began.

        :param context: security context
        :param instance: nova.objects.instance.Instance
                         This function should use the data there to guide
                         the creation of the new instance.
        :param nova.objects.ImageMeta image_meta:
            The metadata of the image of the instance.
        :param injected_files: User files to inject into instance.
        :param admin_password: Administrator password to set in instance.
        :param allocations: Information about resources allocated to the
                            instance via placement, of the form returned by
                            SchedulerReportClient.get_allocations_for_consumer.
        :param network_info: instance network information
        :param block_device_info: Information about block devices to be
                                  attached to the instance.
        """
        self._log_operation('spawn', instance)
        # Define the flow
        flow_spawn = tf_lf.Flow("spawn")

        # This FeedTask accumulates VIOS storage connection operations to be
        # run in parallel. Include both SCSI and fibre channel mappings for
        # the scrubber.
        stg_ftsk = pvm_par.build_active_vio_feed_task(
            self.adapter, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})

        flow_spawn.add(tf_vm.Create(
            self.adapter, self.host_wrapper, instance, stg_ftsk))

        # Create a flow for the IO
        flow_spawn.add(tf_net.PlugVifs(
            self.virtapi, self.adapter, instance, network_info))
        flow_spawn.add(tf_net.PlugMgmtVif(
            self.adapter, instance))

        # Create the boot image.
        flow_spawn.add(tf_stg.CreateDiskForImg(
            self.disk_dvr, context, instance, image_meta))
        # Connect the disk to the LPAR
        flow_spawn.add(tf_stg.AttachDisk(
            self.disk_dvr, instance, stg_ftsk=stg_ftsk))

        # Extract the block devices.
        bdms = driver.block_device_info_get_mapping(block_device_info)

        # Determine if there are volumes to connect.  If so, add a connection
        # for each type.
        for bdm, vol_drv in self._vol_drv_iter(context, instance, bdms,
                                               stg_ftsk=stg_ftsk):
            # Connect the volume.  This will update the connection_info.
            flow_spawn.add(tf_stg.AttachVolume(vol_drv))

        # If the config drive is needed, add those steps.  Should be done
        # after all the other I/O.
        if configdrive.required_by(instance):
            flow_spawn.add(tf_stg.CreateAndConnectCfgDrive(
                self.adapter, instance, injected_files, network_info,
                stg_ftsk, admin_pass=admin_password))

        # Add the transaction manager flow at the end of the 'I/O
        # connection' tasks. This will run all the connections in parallel.
        flow_spawn.add(stg_ftsk)

        # Last step is to power on the system.
        flow_spawn.add(tf_vm.PowerOn(self.adapter, instance))

        # Run the flow.
        tf_base.run(flow_spawn, instance=instance)
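
Note: composing spawn as a single linear flow is what backs the docstring's promise that a failed spawn leaves no partial instance: when a late task fails, TaskFlow reverts the already-completed tasks in reverse order. A self-contained demonstration (task names are illustrative, not nova-powervm's):

import taskflow.engines
from taskflow import task
from taskflow.patterns import linear_flow as tf_lf


class CreateLpar(task.Task):
    def execute(self):
        print('create LPAR')

    def revert(self, *args, **kwargs):
        print('revert: delete the partial LPAR')


class FailingPowerOn(task.Task):
    def execute(self):
        raise RuntimeError('power-on failed')


flow = tf_lf.Flow('spawn')
flow.add(CreateLpar('create'), FailingPowerOn('power_on'))
try:
    taskflow.engines.run(flow)
except RuntimeError:
    pass  # CreateLpar.revert() has already run by this point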
Example #21
File: driver.py Project: y00187570/nova
    def spawn(self,
              context,
              instance,
              image_meta,
              injected_files,
              admin_password,
              allocations,
              network_info=None,
              block_device_info=None,
              power_on=True,
              accel_info=None):
        """Create a new instance/VM/domain on the virtualization platform.

        Once this successfully completes, the instance should be
        running (power_state.RUNNING).

        If this fails, any partial instance should be completely
        cleaned up, and the virtualization platform should be in the state
        that it was before this call began.

        :param context: security context
        :param instance: nova.objects.instance.Instance
                         This function should use the data there to guide
                         the creation of the new instance.
        :param nova.objects.ImageMeta image_meta:
            The metadata of the image of the instance.
        :param injected_files: User files to inject into instance.
        :param admin_password: Administrator password to set in instance.
        :param allocations: Information about resources allocated to the
                            instance via placement, of the form returned by
                            SchedulerReportClient.get_allocations_for_consumer.
        :param network_info: instance network information
        :param block_device_info: Information about block devices to be
                                  attached to the instance.
        :param power_on: True if the instance should be powered on, False
                         otherwise
        """
        self._log_operation('spawn', instance)
        # Define the flow
        flow_spawn = tf_lf.Flow("spawn")

        # This FeedTask accumulates VIOS storage connection operations to be
        # run in parallel. Include both SCSI and fibre channel mappings for
        # the scrubber.
        stg_ftsk = pvm_par.build_active_vio_feed_task(
            self.adapter, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})

        flow_spawn.add(
            tf_vm.Create(self.adapter, self.host_wrapper, instance, stg_ftsk))

        # Create a flow for the IO
        flow_spawn.add(
            tf_net.PlugVifs(self.virtapi, self.adapter, instance,
                            network_info))
        flow_spawn.add(tf_net.PlugMgmtVif(self.adapter, instance))

        # Create the boot image.
        flow_spawn.add(
            tf_stg.CreateDiskForImg(self.disk_dvr, context, instance,
                                    image_meta))
        # Connect the disk to the LPAR
        flow_spawn.add(
            tf_stg.AttachDisk(self.disk_dvr, instance, stg_ftsk=stg_ftsk))

        # Extract the block devices.
        bdms = driver.block_device_info_get_mapping(block_device_info)

        # Determine if there are volumes to connect.  If so, add a connection
        # for each type.
        for bdm, vol_drv in self._vol_drv_iter(context,
                                               instance,
                                               bdms,
                                               stg_ftsk=stg_ftsk):
            # Connect the volume.  This will update the connection_info.
            flow_spawn.add(tf_stg.AttachVolume(vol_drv))

        # If the config drive is needed, add those steps.  Should be done
        # after all the other I/O.
        if configdrive.required_by(instance):
            flow_spawn.add(
                tf_stg.CreateAndConnectCfgDrive(self.adapter,
                                                instance,
                                                injected_files,
                                                network_info,
                                                stg_ftsk,
                                                admin_pass=admin_password))

        # Add the transaction manager flow at the end of the 'I/O
        # connection' tasks. This will run all the connections in parallel.
        flow_spawn.add(stg_ftsk)

        # Last step is to power on the system.
        flow_spawn.add(tf_vm.PowerOn(self.adapter, instance))

        # Run the flow.
        tf_base.run(flow_spawn, instance=instance)