Example no. 1
0
    def execute_impl(self, stg_elem, vios_wrap, disk_path):
        """Unmap and remove an instance's boot disk from the mgmt partition.

        Input parameters ('requires') provided by InstanceDiskToMgmt task.

        :param stg_elem: The storage element wrapper (pypowervm LU, PV, etc.)
                         to be disconnected.
        :param vios_wrap: The Virtual I/O Server wrapper from which the
                          mapping is to be removed.
        :param disk_path: The local path to the disk device to be removed, e.g.
                          '/dev/sde'
        """
        # stg_elem is None if boot disk was not mapped to management partition
        if stg_elem is None:
            return
        LOG.info(
            _LI("Unmapping boot disk %(disk_name)s of instance "
                "%(instance_name)s from management partition via Virtual "
                "I/O Server %(vios_name)s."), {
                    'disk_name': stg_elem.name,
                    'instance_name': self.instance.name,
                    'vios_name': vios_wrap.name
                })
        self.disk_dvr.disconnect_disk_from_mgmt(vios_wrap.uuid, stg_elem.name)
        LOG.info(
            _LI("Removing disk %(disk_path)s from the management "
                "partition."), {'disk_path': disk_path})
        mgmt.remove_block_dev(disk_path)
Example no. 2
0
    def _get_or_upload_image_lu(self, context, image_meta):
        """Ensures our SSP has an LU containing the specified image.

        If an LU of type IMAGE corresponding to the input image metadata
        already exists in our SSP, return it.  Otherwise, create it, prime it
        with the image contents from glance, and return it.

        :param context: nova context used to retrieve image from glance
        :param nova.objects.ImageMeta image_meta:
            The metadata of the image of the instance.
        :return: A pypowervm LU ElementWrapper representing the image.
        """
        # Key off of the name to see whether we already have the image
        luname = self._get_image_name(image_meta)
        lu = self._find_lu(luname, pvm_stg.LUType.IMAGE)
        if lu:
            LOG.info(_LI('SSP: Using already-uploaded image LU %s.'), luname)
            return lu

        # We don't have it yet.  Create it and upload the glance image to it.
        # Make the image LU only as big as the image.
        stream = self._get_image_upload(context, image_meta)
        LOG.info(_LI('SSP: Uploading new image LU %s.'), luname)
        lu, f_wrap = tsk_stg.upload_new_lu(
            self._any_vios_uuid(), self._ssp, stream, luname, image_meta.size)
        return lu
Example no. 3
0
    def _get_or_upload_image_lu(self, context, image_meta):
        """Ensures our SSP has an LU containing the specified image.

        If an LU of type IMAGE corresponding to the input image metadata
        already exists in our SSP, return it.  Otherwise, create it, prime it
        with the image contents from glance, and return it.

        :param context: nova context used to retrieve image from glance
        :param nova.objects.ImageMeta image_meta:
            The metadata of the image of the instance.
        :return: A pypowervm LU ElementWrapper representing the image.
        """
        # Key off of the name to see whether we already have the image
        luname = self._get_image_name(image_meta)
        lu = self._find_lu(luname, pvm_stg.LUType.IMAGE)
        if lu:
            LOG.info(_LI('SSP: Using already-uploaded image LU %s.'), luname)
            return lu

        # We don't have it yet.  Create it and upload the glance image to it.
        # Make the image LU only as big as the image.
        stream = self._get_image_upload(context, image_meta)
        LOG.info(_LI('SSP: Uploading new image LU %s.'), luname)
        lu, f_wrap = tsk_stg.upload_new_lu(self._any_vios_uuid(), self._ssp,
                                           stream, luname, image_meta.size)
        return lu
Example no. 4
0
def dlt_lpar(adapter, lpar_uuid):
    """Delete an LPAR

    :param adapter: The adapter for the pypowervm API
    :param lpar_uuid: The lpar to delete
    """
    # Attempt to delete the VM.
    try:
        LOG.info(_LI('Deleting virtual machine. LPARID: %s'), lpar_uuid)

        # Ensure any vterms are closed.  Will no-op otherwise.
        vterm.close_vterm(adapter, lpar_uuid)

        # Run the LPAR delete
        resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid)
        LOG.info(_LI('Virtual machine delete status: %d'), resp.status)
        return resp
    except pvm_exc.HttpError as e:
        if e.response and e.response.status == 404:
            LOG.warning(_LW('Virtual Machine not found LPAR_ID: %s'),
                        lpar_uuid)
        else:
            LOG.error(_LE('HttpError deleting virtual machine. LPARID: %s'),
                      lpar_uuid)
            raise
    except pvm_exc.Error:
        # Attempting to close vterm did not help so raise exception
        LOG.error(_LE('Virtual machine delete failed: LPARID=%s'), lpar_uuid)
        raise
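
A minimal usage sketch for dlt_lpar() above; 'adapter' and 'lpar_uuid' are assumed to already exist in the caller's scope (both hypothetical here), and the None return covers the already-deleted (404) case:

# Hypothetical call site for dlt_lpar(); not part of the original module.
resp = dlt_lpar(adapter, lpar_uuid)
if resp is None:
    LOG.info('LPAR %s was already gone.', lpar_uuid)
elif resp.status < 300:
    LOG.info('LPAR %s deleted, status %d.', lpar_uuid, resp.status)
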
Example no. 5
0
    def execute_impl(self):
        data = None
        if self.nvram_mgr is not None:
            LOG.info(_LI('Fetching NVRAM for instance %s.'),
                     self.instance.name, instance=self.instance)
            data = self.nvram_mgr.fetch(self.instance)
            LOG.debug('NVRAM data is: %s', data, instance=self.instance)

        wrap = vm.crt_lpar(self.adapter, self.host_wrapper, self.instance,
                           self.flavor, nvram=data, slot_mgr=self.slot_mgr)
        pvm_stg.add_lpar_storage_scrub_tasks([wrap.id], self.stg_ftsk,
                                             lpars_exist=True)
        # If the stg_ftsk passed in was None and we initialized a
        # 'create_scrubber' stg_ftsk then run it immediately. We do
        # this because we moved the LPAR storage scrub tasks out of the
        # build_map initialization. This was so that we could construct the
        # build map earlier in the spawn, just before the LPAR is created.
        # Only rebuilds should be passing in None for stg_ftsk.
        if self.stg_ftsk.name == 'create_scrubber':
            LOG.info(_LI('Scrubbing storage for instance %s as part of '
                         'rebuild.'), self.instance.name,
                     instance=self.instance)
            self.stg_ftsk.execute()

        return wrap
Example no. 6
0
    def execute(self, lpar_wrap):
        LOG.info(_LI('Unplugging the Network Interfaces from instance %s'),
                 self.instance.name)

        # If the state is not in an OK state for deleting, then throw an
        # error up front.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to remove VIFs from instance %(inst)s '
                          'because the system is not in a correct state.  '
                          'The reason reported by the system is: %(reason)s'),
                      {
                          'inst': self.instance.name,
                          'reason': reason
                      },
                      instance=self.instance)
            raise VirtualInterfaceUnplugException()

        # Get all the current Client Network Adapters (CNA) on the VM itself.
        cna_w_list = vm.get_cnas(self.adapter, self.instance, self.host_uuid)

        # Walk through the VIFs and delete the corresponding CNA on the VM.
        for vif in self.network_info:
            for cna_w in cna_w_list:
                # If the MAC address matched, attempt the delete.
                if vm.norm_mac(cna_w.mac) == vif['address']:
                    LOG.info(_LI('Deleting VIF with mac %(mac)s for instance '
                                 '%(inst)s.'), {
                                     'mac': vif['address'],
                                     'inst': self.instance.name
                                 },
                             instance=self.instance)
                    try:
                        cna_w.delete()
                    except Exception as e:
                        LOG.error(_LE('Unable to unplug VIF with mac %(mac)s '
                                      'for instance %(inst)s.'), {
                                          'mac': vif['address'],
                                          'inst': self.instance.name
                                      },
                                  instance=self.instance)
                        LOG.error(e)
                        raise VirtualInterfaceUnplugException()

                    # Break from the loop as we had a successful unplug.
                    # This prevents falling through to the 'else' clause.
                    break
            else:
                LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                                'instance %(inst)s.  The VIF was not found on '
                                'the instance.'), {
                                    'mac': vif['address'],
                                    'inst': self.instance.name
                                },
                            instance=self.instance)
        return cna_w_list
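
The MAC-matching logic above leans on Python's for/else construct; a self-contained sketch of the same pattern with placeholder data (no PowerVM calls involved):

# The 'else' branch of a for loop runs only when the loop finishes
# without hitting 'break', i.e. when no adapter matched the MAC.
wanted_macs = ['AABBCCDDEE01', 'AABBCCDDEE02']
adapter_macs = ['AABBCCDDEE01']
for mac in wanted_macs:
    for adpt_mac in adapter_macs:
        if adpt_mac == mac:
            print('unplugging %s' % mac)
            break
    else:
        print('no adapter found for %s' % mac)
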
Example no. 7
0
    def execute(self, lpar_wrap):
        LOG.info(_LI('Plugging the Network Interfaces to instance %s'),
                 self.instance.name)

        # Get the current adapters on the system
        cna_w_list = vm.get_cnas(self.adapter, self.instance, self.host_uuid)

        # Trim the VIFs down to the ones that haven't yet been created.
        crt_vifs = []
        for vif in self.network_info:
            for cna_w in cna_w_list:
                if vm.norm_mac(cna_w.mac) == vif['address']:
                    break
            else:
                crt_vifs.append(vif)

        # If there are no vifs to create, then just exit immediately.
        if len(crt_vifs) == 0:
            return []

        # Check to see if the LPAR is OK to add VIFs to.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to create VIF(s) for instance %(sys)s.  The '
                          'VM was in a state where VIF plugging is not '
                          'acceptable.  The reason from the system is: '
                          '%(reason)s'),
                      {'sys': self.instance.name, 'reason': reason},
                      instance=self.instance)
            raise exception.VirtualInterfaceCreateException()

        # For the VIFs, run the creates (and wait for the events back)
        try:
            with self.virt_api.wait_for_instance_event(
                    self.instance, self._get_vif_events(),
                    deadline=CONF.vif_plugging_timeout,
                    error_callback=self._vif_callback_failed):
                for vif in crt_vifs:
                    LOG.info(_LI('Creating VIF with mac %(mac)s for instance '
                                 '%(sys)s'),
                             {'mac': vif['address'],
                              'sys': self.instance.name},
                             instance=self.instance)
                    vm.crt_vif(self.adapter, self.instance, self.host_uuid,
                               vif)
        except eventlet.timeout.Timeout:
            LOG.error(_LE('Error waiting for VIF to be created for instance '
                          '%(sys)s'), {'sys': self.instance.name},
                      instance=self.instance)
            raise exception.VirtualInterfaceCreateException()

        # Return the list of created VIFs.
        return cna_w_list
Example no. 8
0
    def _create_cfg_dr_iso(self,
                           instance,
                           injected_files,
                           network_info,
                           admin_pass=None):
        """Creates an ISO file that contains the injected files.

        Used for config drive.

        :param instance: The VM instance from OpenStack.
        :param injected_files: A list of file paths that will be injected into
                               the ISO.
        :param network_info: The network_info from the nova spawn method.
        :param admin_pass: Optional password to inject for the VM.
        :return iso_path: The path to the ISO
        :return file_name: The file name for the ISO
        """
        LOG.info(_LI("Creating config drive for instance: %s"),
                 instance.name,
                 instance=instance)
        extra_md = {}
        if admin_pass is not None:
            extra_md['admin_pass'] = admin_pass

        # Sanitize the vifs for the network config
        network_info = self._sanitize_network_info(network_info)

        inst_md = instance_metadata.InstanceMetadata(instance,
                                                     content=injected_files,
                                                     extra_md=extra_md,
                                                     network_info=network_info)

        # Make sure the path exists.
        im_path = CONF.powervm.image_meta_local_path
        if not os.path.exists(im_path):
            os.mkdir(im_path)

        file_name = pvm_util.sanitize_file_name_for_api(
            instance.name,
            prefix=CFG_DRV_PREFIX,
            suffix=CFG_DRV_SUFFIX,
            max_len=pvm_const.MaxLen.VOPT_NAME)
        iso_path = os.path.join(im_path, file_name)
        with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
            LOG.info(_LI("Config drive ISO being built for instance %(inst)s "
                         "building to path %(iso_path)s."), {
                             'inst': instance.name,
                             'iso_path': iso_path
                         },
                     instance=instance)
            cdb.make_drive(iso_path)
            return iso_path, file_name
Example no. 9
0
    def execute(self, *args, **kwargs):
        LOG.info(_LI('Running task %(task)s.'), {'task': self.name},
                 instance=self.instance)
        start_time = time.time()

        ret = self.execute_impl(*args, **kwargs)

        run_time = time.time() - start_time
        LOG.info(_LI('Task %(task)s completed in %(seconds)d seconds.'), {
            'task': self.name,
            'seconds': run_time
        },
                 instance=self.instance)
        return ret
Example no. 10
0
    def execute(self, *args, **kwargs):
        LOG.info(_LI('Running task %(task)s for instance %(inst)s'),
                 {'task': self.name, 'inst': self.instance.name},
                 instance=self.instance)
        start_time = time.time()

        ret = self.execute_impl(*args, **kwargs)

        run_time = time.time() - start_time
        LOG.info(_LI('Task %(task)s completed in %(seconds)d seconds for '
                     'instance %(inst)s'),
                 {'task': self.name, 'inst': self.instance.name,
                  'seconds': run_time}, instance=self.instance)
        return ret
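
Both execute() wrappers above follow the same time-and-delegate pattern; a stripped-down, standalone sketch of that pattern (names are illustrative only):

import time

def timed_call(name, func, *args, **kwargs):
    # Run an arbitrary callable and report how long it took, mirroring
    # the execute()/execute_impl() split in the tasks above.
    start_time = time.time()
    ret = func(*args, **kwargs)
    print('%s completed in %d seconds' % (name, time.time() - start_time))
    return ret
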
Example no. 11
0
    def _create_disk_from_image(self,
                                context,
                                instance,
                                image_meta,
                                disk_size_gb,
                                image_type=disk_drv.DiskType.BOOT):
        """Creates a boot disk and links the specified image to it.

        If the specified image has not already been uploaded, an Image LU is
        created for it.  A Disk LU is then created for the instance and linked
        to the Image LU.

        :param context: nova context used to retrieve image from glance
        :param instance: instance to create the disk for.
        :param nova.objects.ImageMeta image_meta:
            The metadata of the image of the instance.
        :param disk_size_gb: The size of the disk to create in GB.  If smaller
                             than the image, it will be ignored (as the disk
                             must be at least as big as the image).  Must be an
                             int.
        :param image_type: The image type. See disk_drv.DiskType.
        :return: The backing pypowervm LU storage object that was created.
        """
        LOG.info(
            _LI('SSP: Create %(image_type)s disk from image %(image_id)s '
                'for instance %(instance_uuid)s.'),
            dict(image_type=image_type,
                 image_id=image_meta.id,
                 instance_uuid=instance.uuid))

        def upload(path):
            IMAGE_API.download(context, image_meta.id, dest_path=path)

        image_lu = tsk_cs.get_or_upload_image_lu(
            self._tier,
            self._get_image_name(image_meta),
            self._any_vios_uuid(),
            upload,
            image_meta.size,
            upload_type=tsk_stg.UploadType.FUNC)

        boot_lu_name = self._get_disk_name(image_type, instance)
        LOG.info(_LI('SSP: Disk name is %s'), boot_lu_name)

        return tsk_stg.crt_lu(self._tier,
                              boot_lu_name,
                              disk_size_gb,
                              typ=pvm_stg.LUType.DISK,
                              clone=image_lu)[1]
Example no. 12
0
    def execute(self, lpar_wrap):
        LOG.info(_LI('Unplugging the Network Interfaces from instance %s'),
                 self.instance.name)

        # If the state is not in an OK state for deleting, then throw an
        # error up front.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to remove VIFs from instance %(inst)s '
                          'because the system is not in a correct state.  '
                          'The reason reported by the system is: %(reason)s'),
                      {'inst': self.instance.name, 'reason': reason},
                      instance=self.instance)
            raise VirtualInterfaceUnplugException()

        # Get all the current Client Network Adapters (CNA) on the VM itself.
        cna_w_list = vm.get_cnas(self.adapter, self.instance, self.host_uuid)

        # Walk through the VIFs and delete the corresponding CNA on the VM.
        for vif in self.network_info:
            for cna_w in cna_w_list:
                # If the MAC address matched, attempt the delete.
                if vm.norm_mac(cna_w.mac) == vif['address']:
                    LOG.info(_LI('Deleting VIF with mac %(mac)s for instance '
                                 '%(inst)s.'), {'mac': vif['address'],
                                                'inst': self.instance.name},
                             instance=self.instance)
                    try:
                        cna_w.delete()
                    except Exception as e:
                        LOG.error(_LE('Unable to unplug VIF with mac %(mac)s '
                                      'for instance %(inst)s.'),
                                  {'mac': vif['address'],
                                   'inst': self.instance.name},
                                  instance=self.instance)
                        LOG.error(e)
                        raise VirtualInterfaceUnplugException()

                    # Break from the loop as we had a successful unplug.
                    # This prevents falling through to the 'else' clause.
                    break
            else:
                LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                                'instance %(inst)s.  The VIF was not found on '
                                'the instance.'),
                            {'mac': vif['address'],
                             'inst': self.instance.name},
                            instance=self.instance)
        return cna_w_list
Example no. 13
0
    def execute_impl(self):
        if self.nvram_mgr is None:
            LOG.info(_LI("No op for NVRAM delete."), instance=self.instance)
            return

        LOG.info(_LI('Deleting NVRAM for instance: %s'),
                 self.instance.name, instance=self.instance)
        try:
            self.nvram_mgr.remove(self.instance)
        except Exception as e:
            LOG.exception(_LE('Unable to delete NVRAM for instance '
                              '%(name)s. Exception: %(reason)s'),
                          {'name': self.instance.name,
                           'reason': six.text_type(e)},
                          instance=self.instance)
Example no. 14
0
 def execute(self, disk_path):
     metadata = image.snapshot_metadata(self.context, self.image_api,
                                        self.image_id, self.instance)
     LOG.info(_LI("Starting stream of boot device for instance %(inst)s "
                  "(local blockdev %(devpath)s) to glance image "
                  "%(img_id)s."), {'inst': self.instance.name,
                                   'devpath': disk_path,
                                   'img_id': self.image_id})
     image.stream_blockdev_to_glance(self.context, self.image_api,
                                     self.image_id, metadata, disk_path)
     LOG.info(_LI("Completed stream of boot device for instance %(inst)s "
                  "(local blockdev %(devpath)s) to glance image "
                  "%(img_id)s."), {'inst': self.instance.name,
                                   'devpath': disk_path,
                                   'img_id': self.image_id})
Example no. 15
0
 def execute(self, disk_path):
     metadata = image.snapshot_metadata(self.context, self.image_api,
                                        self.image_id, self.instance)
     LOG.info(_LI("Starting stream of boot device for instance %(inst)s "
                  "(local blockdev %(devpath)s) to glance image "
                  "%(img_id)s."), {'inst': self.instance.name,
                                   'devpath': disk_path,
                                   'img_id': self.image_id})
     image.stream_blockdev_to_glance(self.context, self.image_api,
                                     self.image_id, metadata, disk_path)
     LOG.info(_LI("Completed stream of boot device for instance %(inst)s "
                  "(local blockdev %(devpath)s) to glance image "
                  "%(img_id)s."), {'inst': self.instance.name,
                                   'devpath': disk_path,
                                   'img_id': self.image_id})
Example no. 16
0
    def execute_impl(self):
        if self.nvram_mgr is None:
            LOG.info(_LI("No op for NVRAM delete."), instance=self.instance)
            return

        LOG.info(_LI('Deleting NVRAM for instance: %s'),
                 self.instance.name, instance=self.instance)
        try:
            self.nvram_mgr.remove(self.instance)
        except Exception as e:
            LOG.exception(_LE('Unable to delete NVRAM for instance '
                              '%(name)s. Exception: %(reason)s'),
                          {'name': self.instance.name,
                           'reason': six.text_type(e)},
                          instance=self.instance)
Example no. 17
0
    def unplug(self, vif, cna_w_list=None):
        """Unplugs a virtual interface (network) from a VM.

        :param vif: The virtual interface to unplug from the instance.
        :param cna_w_list: (Optional, Default: None) The list of Client Network
                           Adapters from pypowervm.  Providing this input
                           allows for an improvement in operation speed.
        :return cna_w: The deleted Client Network Adapter.
        """
        # This is a default implementation that most implementations will
        # require.

        # Need to find the adapters if they were not provided
        if not cna_w_list:
            cna_w_list = vm.get_cnas(self.adapter, self.instance)

        cna_w = self._find_cna_for_vif(cna_w_list, vif)
        if not cna_w:
            LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                            'instance %(inst)s.  The VIF was not found on '
                            'the instance.'),
                        {'mac': vif['address'], 'inst': self.instance.name})
            return None

        LOG.info(_LI('Deleting VIF with mac %(mac)s for instance %(inst)s.'),
                 {'mac': vif['address'], 'inst': self.instance.name})
        try:
            cna_w.delete()
        except Exception as e:
            LOG.error(_LE('Unable to unplug VIF with mac %(mac)s for instance '
                          '%(inst)s.'), {'mac': vif['address'],
                                         'inst': self.instance.name})
            LOG.exception(e)
            raise VirtualInterfaceUnplugException()
        return cna_w
Example no. 18
0
 def execute(self):
     LOG.info(_LI('Renaming instance to name: %s'),
              self.name,
              instance=self.instance)
     wrap = vm.rename(self.adapter, self.host_uuid, self.instance,
                      self.vm_name)
     return wrap
Example no. 19
0
    def __init__(self, connection):
        super(LocalStorage, self).__init__(connection)

        # Query to get the Volume Group UUID
        self.vg_name = CONF.powervm.volume_group_name
        self._vios_uuid, self.vg_uuid = self._get_vg_uuid(self.vg_name)
        LOG.info(_LI("Local Storage driver initialized: volume group: '%s'"), self.vg_name)
Example no. 20
0
    def _add_remove_hdisk(self, vio_wrap, device_name, stg_ftsk=None):
        """Adds a post-mapping task to remove the hdisk from the VIOS.

        This removal is only done after the mapping updates have completed.
        This method is also used during migration to remove hdisks that remain
        on the source host after the VM is migrated to the destination.

        :param vio_wrap: The Virtual I/O Server wrapper to remove the disk
                         from.
        :param device_name: The hdisk name to remove.
        :param stg_ftsk: The feed task to add to. If None, then self.stg_ftsk
        """
        def rm_hdisk():
            LOG.info(_LI("Running remove for hdisk: '%s'"), device_name)
            try:
                # Attempt to remove the hDisk
                hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
                                   vio_wrap.uuid)
            except Exception as e:
                # If there is a failure, log it, but don't stop the process
                LOG.warning(
                    _LW("There was an error removing the hdisk "
                        "%(disk)s from the Virtual I/O Server."),
                    {'disk': device_name})
                LOG.warning(e)

        # Only schedule the hdisk removal if the device has no other host
        # mappings.
        if not self._check_host_mappings(vio_wrap, device_name):
            name = 'rm_hdisk_%s_%s' % (vio_wrap.name, device_name)
            stg_ftsk = stg_ftsk or self.stg_ftsk
            stg_ftsk.add_post_execute(task.FunctorTask(rm_hdisk, name=name))
        else:
            LOG.info(
                _LI("hdisk %(disk)s is not removed because it has "
                    "existing storage mappings"), {'disk': device_name})
Example no. 21
0
    def _remove_maps_for_fabric(self, fabric):
        """Removes the vFC storage mappings from the VM for a given fabric.

        :param fabric: The fabric to remove the mappings from.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        if not npiv_port_maps:
            # If no mappings exist, exit out of the method.
            return

        vios_wraps = self.stg_ftsk.feed

        for npiv_port_map in npiv_port_maps:
            ls = [LOG.info, _LI("Removing a NPIV mapping for instance "
                                "%(inst)s for fabric %(fabric)s."),
                  {'inst': self.instance.name, 'fabric': fabric}]
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)

            if vios_w is not None:
                # Add the subtask to remove the specific map
                task_wrapper = self.stg_ftsk.wrapper_tasks[vios_w.uuid]
                task_wrapper.add_functor_subtask(
                    pvm_vfcm.remove_maps, self.vm_uuid,
                    port_map=npiv_port_map, logspec=ls)
            else:
                LOG.warning(_LW("No storage connections found between the "
                                "Virtual I/O Servers and FC Fabric "
                                "%(fabric)s."), {'fabric': fabric})
Example no. 22
0
    def _remove_maps_for_fabric(self, fabric):
        """Removes the vFC storage mappings from the VM for a given fabric.

        :param fabric: The fabric to remove the mappings from.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        if not npiv_port_maps:
            # If no mappings exist, exit out of the method.
            return

        vios_wraps = self.stg_ftsk.feed

        for npiv_port_map in npiv_port_maps:
            ls = [LOG.info, _LI("Removing a NPIV mapping for instance "
                                "%(inst)s for fabric %(fabric)s."),
                  {'inst': self.instance.name, 'fabric': fabric}]
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)

            if vios_w is not None:
                # Add the subtask to remove the specific map
                task_wrapper = self.stg_ftsk.wrapper_tasks[vios_w.uuid]
                task_wrapper.add_functor_subtask(
                    pvm_vfcm.remove_maps, self.vm_uuid,
                    port_map=npiv_port_map, logspec=ls)
            else:
                LOG.warning(_LW("No storage connections found between the "
                                "Virtual I/O Servers and FC Fabric "
                                "%(fabric)s."), {'fabric': fabric},
                            instance=self.instance)
Example no. 23
0
    def execute(self, vm_cnas):
        LOG.info(_LI('Plugging the Management Network Interface to instance '
                     '%s'),
                 self.instance.name,
                 instance=self.instance)
        # Determine if we need to create the secure RMC VIF.  This should only
        # be needed if there is not a VIF on the secure RMC vSwitch
        vswitch_w = vm.get_secure_rmc_vswitch(self.adapter, self.host_uuid)
        if vswitch_w is None:
            LOG.debug(
                'No management VIF created for instance %s due to '
                'lack of Management Virtual Switch', self.instance.name)
            return None

        # This next check verifies that there are no existing NICs on the
        # vSwitch, so that the VM does not end up with multiple RMC VIFs.
        for cna_w in vm_cnas:
            if cna_w.vswitch_uri == vswitch_w.href:
                LOG.debug('Management VIF already created for instance %s',
                          self.instance.name)
                return None

        # Return the created management CNA
        return vm.crt_secure_rmc_vif(self.adapter, self.instance,
                                     self.host_uuid)
Example no. 24
0
    def execute_impl(self):
        """Map the instance's boot disk and discover it."""

        # Search for boot disk on the Novalink partition
        if self.disk_dvr.mp_uuid in self.disk_dvr.vios_uuids:
            dev_name = self.disk_dvr.boot_disk_path_for_instance(
                self.instance, self.disk_dvr.mp_uuid)
            if dev_name is not None:
                return None, None, dev_name

        self.stg_elem, self.vios_wrap = (
            self.disk_dvr.connect_instance_disk_to_mgmt(self.instance))
        new_maps = pvm_smap.find_maps(self.vios_wrap.scsi_mappings,
                                      client_lpar_id=self.disk_dvr.mp_uuid,
                                      stg_elem=self.stg_elem)
        if not new_maps:
            raise npvmex.NewMgmtMappingNotFoundException(
                stg_name=self.stg_elem.name, vios_name=self.vios_wrap.name)

        # new_maps should be length 1, but even if it's not - i.e. we somehow
        # matched more than one mapping of the same dev to the management
        # partition from the same VIOS - it is safe to use the first one.
        the_map = new_maps[0]
        # Scan the SCSI bus, discover the disk, find its canonical path.
        LOG.info(
            _LI("Discovering device and path for mapping of %(dev_name)s "
                "on the management partition."),
            {'dev_name': self.stg_elem.name})
        self.disk_path = mgmt.discover_vscsi_disk(the_map)
        return self.stg_elem, self.vios_wrap, self.disk_path
Example no. 25
0
 def execute_impl(self):
     LOG.info(_LI('Extending disk size of disk: %(disk)s size: %(size)s.'),
              {
                  'disk': self.disk_info['type'],
                  'size': self.size
              })
     self.disk_dvr.extend_disk(self.instance, self.disk_info, self.size)
Example no. 26
0
 def add_func(vios_w):
     LOG.info(_LI("Adding cfg drive mapping for instance %(inst)s for "
                  "Virtual I/O Server %(vios)s"),
              {'inst': instance.name, 'vios': vios_w.name})
     mapping = tsk_map.build_vscsi_mapping(self.host_uuid, vios_w,
                                           lpar_uuid, vopt)
     return tsk_map.add_map(vios_w, mapping)
Example no. 27
0
    def _cleanup_volume(self, udid=None, devname=None):
        """Cleanup the hdisk associated with this udid."""

        if not udid and not devname:
            LOG.warning(_LW('Could not remove hdisk for volume: %s'),
                        self.volume_id)
            return

        LOG.info(_LI('Removing hdisk for udid: %s'), udid)

        def find_hdisk_to_remove(vios_w):
            if devname is None:
                device_name = vios_w.hdisk_from_uuid(udid)
            else:
                device_name = devname
            if device_name is None:
                return
            LOG.info(_LI('Removing %(hdisk)s from VIOS %(vios)s'), {
                'hdisk': device_name,
                'vios': vios_w.name
            })
            self._add_remove_hdisk(vios_w,
                                   device_name,
                                   stg_ftsk=rmv_hdisk_ftsk)

        # Create a feed task to get the vios, find the hdisk and remove it.
        rmv_hdisk_ftsk = tx.FeedTask(
            'find_hdisk_to_remove',
            pvm_vios.VIOS.getter(self.adapter, xag=[pvm_const.XAG.VIO_STOR]))
        # Find vios hdisks for this udid to remove.
        rmv_hdisk_ftsk.add_functor_subtask(find_hdisk_to_remove,
                                           flag_update=False)
        rmv_hdisk_ftsk.execute()
Example no. 28
0
    def wwpns(self):
        """Builds the WWPNs of the adapters that will connect the ports."""
        # Refresh the instance.  It could have been updated by a concurrent
        # call from another thread to get the wwpns.
        self.instance.refresh()
        vios_wraps = self.stg_ftsk.feed
        resp_wwpns = []

        # If this is the first time to query the WWPNs for the instance, we
        # need to generate a set of valid WWPNs.  Loop through the configured
        # FC fabrics and determine if these are new, part of a migration, or
        # were already configured.
        for fabric in self._fabric_names():
            fc_state = self._get_fabric_state(fabric)
            LOG.info(_LI("NPIV wwpns fabric state=%(st)s for "
                         "instance %(inst)s") %
                     {'st': fc_state, 'inst': self.instance.name})

            if self._is_initial_wwpn(fc_state, fabric):
                # Get a set of WWPNs that are globally unique from the system.
                v_wwpns = pvm_vfcm.build_wwpn_pair(
                    self.adapter, self.host_uuid,
                    pair_count=self._ports_per_fabric())

                # Derive the virtual to physical port mapping
                port_maps = pvm_vfcm.derive_npiv_map(
                    vios_wraps, self._fabric_ports(fabric), v_wwpns)

                # Save the port maps to the fabric metadata (which records
                # which physical port the fabric is mapped to) and set the
                # fabric state.
                self._set_fabric_meta(fabric, port_maps)
                self._set_fabric_state(fabric, FS_UNMAPPED)
                self.instance.save()
            elif self._is_migration_wwpn(fc_state):
                # The migration process requires the 'second' wwpn from the
                # fabric to be used.
                port_maps = self._configure_wwpns_for_migration(fabric)
            else:
                # This specific fabric had been previously set.  Just pull
                # from the meta (as it is likely already mapped to the
                # instance)
                port_maps = self._get_fabric_meta(fabric)

            # Every loop through, we reverse the vios wrappers.  This is
            # done so that if Fabric A only has 1 port, it goes on the
            # first VIOS.  Then Fabric B would put its port on a different
            # VIOS.  This serves as a form of multipathing (so that your
            # paths are not restricted to a single VIOS).
            vios_wraps.reverse()

            # Port map is set by either conditional, but may be set to None.
            # If not None, then add the WWPNs to the response.
            if port_maps is not None:
                for mapping in port_maps:
                    # Only add the first WWPN.  That is the one that will be
                    # logged into the fabric.
                    resp_wwpns.append(mapping[1].split()[0])

        # The return object needs to be a list for the volume connector.
        return resp_wwpns
Example no. 29
0
    def _emit_event(self, pvm_state, inst, is_immed):
        if is_immed:
            # Cancel out any delayed events
            cancel_thread = self._delayed_event_threads.get(inst.uuid)
            if cancel_thread:
                cancel_thread.cancel()
                del self._delayed_event_threads[inst.uuid]
        else:
            # Make sure this delayed event is still expected.  If not (the
            # thread was started, but an immediate _emit_event already ran
            # the del), then just bail.
            inst_queue = self._delayed_event_threads.get(inst.uuid)
            if not inst_queue:
                return

        # See if it's really a change of state from what OpenStack knows
        transition = vm.translate_event(pvm_state, inst.power_state)
        if transition is None:
            return

        # Log as if normal event
        lce = event.LifecycleEvent(inst.uuid, transition)
        LOG.info(_LI('Sending life cycle event for instance state '
                     'change to: %s'),
                 pvm_state,
                 instance=inst)
        self._driver.emit_event(lce)

        if not is_immed:
            # Delete out the queue
            del self._delayed_event_threads[inst.uuid]
Example no. 30
0
    def _age_and_verify_cached_images(self, context, all_instances, base_dir):
        """Finds and removes unused images from the cache.

        :param context: nova context
        :param all_instances: List of all instances on the node
        :param base_dir: Volume group of cached images
        """
        # Use the 'used_images' key from nova imagecache to get a dict that
        # uses image_ids as keys.
        cache = self._scan_base_image(base_dir)
        running_inst = self._list_running_instances(context, all_instances)
        adjusted_ids = []
        for img_id in running_inst.get('used_images'):
            if img_id:
                adjusted_ids.append(
                    driver.DiskAdapter.get_name_by_uuid(driver.DiskType.IMAGE,
                                                        img_id,
                                                        short=True))
        # Compare base images with running instances to find the unused ones.
        unused = [image for image in cache if image.name not in adjusted_ids]
        # Remove unused
        if unused:
            for image in unused:
                LOG.info(_LI("Removing unused cache image: '%s'"), image.name)
            tsk_stg.rm_vg_storage(base_dir, vdisks=unused)
Example no. 31
0
    def post_live_migration_at_destination(self, network_info, vol_drvs):
        """Do post migration cleanup on destination host.

        :param network_info: instance network information
        :param vol_drvs: volume drivers for the attached volumes
        """
        # The LPAR should be on this host now.
        LOG.debug("Post live migration at destination.",
                  instance=self.instance)

        # An unbounded dictionary that each volume adapter can use to persist
        # data from one call to the next.
        mig_vol_stor = {}

        # For each volume, make sure it completes the migration
        for vol_drv in vol_drvs:
            LOG.info(_LI('Performing post migration for volume %(volume)s'),
                     dict(volume=vol_drv.volume_id))
            try:
                vol_drv.post_live_migration_at_destination(mig_vol_stor)
            except Exception as e:
                LOG.exception(e)
                # It failed.
                raise LiveMigrationVolume(
                    host=self.drvr.host_wrapper.system_name,
                    name=self.instance.name,
                    volume=vol_drv.volume_id)
Example no. 32
0
    def store(self, instance, data, force=True):
        """Store the NVRAM into the storage service.

        :param instance: instance object
        :param data: the NVRAM data base64 encoded string
        :param force: boolean whether an update should always be saved,
                      otherwise, check to see if it's changed.
        """
        exists = self._exists(instance.uuid)
        if not force and exists:
            # See if the entry exists and has not changed.
            results = self._run_operation('stat', options={'long': True},
                                          container=self.container,
                                          objects=[instance.uuid])
            result = results[0]
            if result['success']:
                existing_hash = result['headers']['etag']
                if six.PY3:
                    data = data.encode('ascii')
                md5 = hashlib.md5(data).hexdigest()
                if existing_hash == md5:
                    LOG.info(_LI('NVRAM has not changed for instance: %s'),
                             instance.name, instance=instance)
                    return

        self._store(instance.uuid, instance.name, data, exists=exists)
        LOG.debug('NVRAM updated for instance: %s', instance.name)
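
The change-detection step above compares the object store's etag with an MD5 of the new payload; a small standalone sketch of that check (the helper name is made up here):

import hashlib

def nvram_needs_update(existing_etag, data):
    # Hypothetical helper: push new NVRAM only when its MD5 differs from
    # the hash the object store already reports for the stored copy.
    if isinstance(data, str):
        data = data.encode('ascii')
    return hashlib.md5(data).hexdigest() != existing_etag
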
Example no. 33
0
    def _create_disk_from_image(self, context, instance, image_meta, disk_size,
                                image_type=disk_dvr.DiskType.BOOT):
        """Creates a disk and copies the specified image to it.

        Cleans up created disk if an error occurs.

        :param context: nova context used to retrieve image from glance
        :param instance: instance to create the disk for.
        :param nova.objects.ImageMeta image_meta:
            The metadata of the image of the instance.
        :param disk_size: The size of the disk to create in GB.  If smaller
                          than the image, it will be ignored (as the disk
                          must be at least as big as the image).  Must be an
                          int.
        :param image_type: the image type. See disk constants above.
        :return: The backing pypowervm storage object that was created.
        """
        LOG.info(_LI('Create disk.'), instance=instance)

        # Disk size to the API is in bytes.  Input to this method is in GB.
        disk_bytes = self._disk_gb_to_bytes(disk_size, floor=image_meta.size)
        vol_name = self._get_disk_name(image_type, instance, short=True)

        with self.cache_lock.read_lock():
            img_udid = self._get_or_upload_image(context, image_meta)
            # Transfer the image
            return tsk_stg.crt_copy_vdisk(
                self.adapter, self._vios_uuid, self.vg_uuid, img_udid,
                image_meta.size, vol_name, disk_bytes)
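
The disk sizing comment above converts a GB request to bytes but never below the image size; a sketch of that conversion under those assumptions (the helper is hypothetical, not the driver's actual _disk_gb_to_bytes):

def gb_to_bytes(size_gb, floor=0):
    # Convert a GB request to bytes, but never return less than 'floor'
    # (the image size in bytes), since the disk must hold the image.
    return max(size_gb * 1024 ** 3, floor)

# e.g. a 1 GB request against a 2.5 GiB image keeps the image size.
assert gb_to_bytes(1, floor=2684354560) == 2684354560
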
Example no. 34
0
 def add_func(vios_w):
     LOG.info(_LI("Adding logical volume disk connection between VM "
                  "%(vm)s and VIOS %(vios)s."),
              {'vm': instance.name, 'vios': vios_w.name})
     mapping = tsk_map.build_vscsi_mapping(
         self.host_uuid, vios_w, lpar_uuid, disk_info)
     return tsk_map.add_map(vios_w, mapping)
Example no. 35
0
 def rm_func(vios_w):
     LOG.info(
         _LI("Disconnecting instance %(inst)s from storage disks.") %
         {'inst': instance.name})
     return tsk_map.remove_maps(vios_w,
                                lpar_uuid,
                                match_func=match_func)
Example no. 36
0
    def _add_maps_for_fabric(self, fabric):
        """Adds the vFC storage mappings to the VM for a given fabric.

        :param fabric: The fabric to add the mappings to.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        vios_wraps = self.stg_ftsk.feed
        volume_id = self.connection_info['data']['volume_id']

        # This loop adds the maps from the appropriate VIOS to the client VM
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)

            if vios_w is None:
                LOG.error(_LE("Mappings were not able to find a proper VIOS. "
                              "The port mappings were %s."), npiv_port_maps)
                raise exc.VolumeAttachFailed(
                    volume_id=volume_id, instance_name=self.instance.name,
                    reason=_("Unable to find a Virtual I/O Server that "
                             "hosts the NPIV port map for the server."))

            ls = [LOG.info, _LI("Adding NPIV mapping for instance %(inst)s "
                                "for Virtual I/O Server %(vios)s."),
                  {'inst': self.instance.name, 'vios': vios_w.name}]

            # Add the subtask to add the specific map.
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map, self.host_uuid, self.vm_uuid, npiv_port_map,
                logspec=ls)

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)
        self.stg_ftsk.add_post_execute(task.FunctorTask(
            set_state, name='fab_%s_%s' % (fabric, volume_id)))
Example no. 37
0
 def add_func(vios_w):
     LOG.info(_LI("Adding logical volume disk connection between VM "
                  "%(vm)s and VIOS %(vios)s."),
              {'vm': instance.name, 'vios': vios_w.name})
     mapping = tsk_map.build_vscsi_mapping(
         self.host_uuid, vios_w, lpar_uuid, disk_info)
     return tsk_map.add_map(vios_w, mapping)
Example no. 38
0
    def _add_remove_hdisk(self, vio_wrap, device_name,
                          stg_ftsk=None):
        """Adds a post-mapping task to remove the hdisk from the VIOS.

        This removal is only done after the mapping updates have completed.
        This method is also used during migration to remove hdisks that remain
        on the source host after the VM is migrated to the destination.

        :param vio_wrap: The Virtual I/O Server wrapper to remove the disk
                         from.
        :param device_name: The hdisk name to remove.
        :param stg_ftsk: The feed task to add to. If None, then self.stg_ftsk
        """
        def rm_hdisk():
            LOG.info(_LI("Running remove for hdisk: '%s'") % device_name)
            try:
                # Attempt to remove the hDisk
                hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
                                   vio_wrap.uuid)
            except Exception as e:
                # If there is a failure, log it, but don't stop the process
                LOG.warning(_LW("There was an error removing the hdisk "
                            "%(disk)s from the Virtual I/O Server."),
                            {'disk': device_name})
                LOG.warning(e)

        # Only schedule the hdisk removal if the device has no other host
        # mappings.
        if not self._check_host_mappings(vio_wrap, device_name):
            name = 'rm_hdisk_%s_%s' % (vio_wrap.name, device_name)
            stg_ftsk = stg_ftsk or self.stg_ftsk
            stg_ftsk.add_post_execute(task.FunctorTask(rm_hdisk, name=name))
        else:
            LOG.info(_LI("hdisk %(disk)s is not removed because it has "
                         "existing storage mappings"), {'disk': device_name})
Example no. 39
0
    def _set_fabric_meta(self, fabric, port_map):
        """Sets the port map into the instance's system metadata.

        The system metadata will store per-fabric port maps that link the
        physical ports to the virtual ports.  This is needed for the async
        nature between the wwpns call (get_volume_connector) and the
        connect_volume (spawn).

        :param fabric: The name of the fabric.
        :param port_map: The port map (as defined via the derive_npiv_map
                         pypowervm method).
        """

        # We will store the metadata in comma-separated strings with up to
        # four three-token groups.  Each group comprises the Physical Port
        # WWPN followed by its two Virtual Port WWPNs:
        # Ex:
        # npiv_wwpn_adpt_A:
        #     "p_wwpn1,v_wwpn1,v_wwpn2,p_wwpn2,v_wwpn3,v_wwpn4,..."
        # npiv_wwpn_adpt_A_2:
        #     "p_wwpn5,v_wwpn9,vwwpn_10,p_wwpn6,..."

        meta_elems = []
        for p_wwpn, v_wwpn in port_map:
            meta_elems.append(p_wwpn)
            meta_elems.extend(v_wwpn.split())

        LOG.info(
            _LI("Fabric %(fabric)s wwpn metadata will be set to "
                "%(meta)s for instance %(inst)s"), {
                    'fabric': fabric,
                    'meta': ",".join(meta_elems),
                    'inst': self.instance.name
                })

        # Clear out the original metadata.  We may be reducing the number of
        # keys (ex. reschedule) so we need to just delete what we had before
        # we add something new.
        meta_key_root = self._sys_meta_fabric_key(fabric)
        for key in tuple(self.instance.system_metadata.keys()):
            if key.startswith(meta_key_root):
                del self.instance.system_metadata[key]

        # Build up the mapping for the new keys.
        fabric_id_iter = 1
        meta_key = meta_key_root
        key_len = len(meta_key)

        for key in range(self._get_num_keys(port_map)):
            start_elem = 12 * (fabric_id_iter - 1)
            meta_value = ",".join(meta_elems[start_elem:start_elem + 12])
            self.instance.system_metadata[meta_key] = meta_value
            # If this is not the first pass, replace the trailing suffix;
            # otherwise append a new one.
            if fabric_id_iter > 1:
                fabric_id_iter += 1
                meta_key = meta_key.replace(meta_key[key_len:],
                                            "_%s" % fabric_id_iter)
            else:
                fabric_id_iter += 1
                meta_key = meta_key + "_%s" % fabric_id_iter
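
A standalone sketch of the metadata chunking performed above, with placeholder WWPN strings: each (physical, 'virtual virtual') pair is flattened to three tokens and the tokens are stored in comma-joined groups of twelve per metadata key:

port_map = [('p_wwpn1', 'v_wwpn1 v_wwpn2'), ('p_wwpn2', 'v_wwpn3 v_wwpn4')]
meta_elems = []
for p_wwpn, v_wwpn in port_map:
    meta_elems.append(p_wwpn)
    meta_elems.extend(v_wwpn.split())
# Twelve tokens (four groups of three) fit in each system metadata value.
chunks = [','.join(meta_elems[i:i + 12])
          for i in range(0, len(meta_elems), 12)]
print(chunks)  # ['p_wwpn1,v_wwpn1,v_wwpn2,p_wwpn2,v_wwpn3,v_wwpn4']
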
Example no. 40
0
    def store(self, instance, data, force=True):
        """Store the NVRAM into the storage service.

        :param instance: instance object
        :param data: the NVRAM data base64 encoded string
        :param force: boolean whether an update should always be saved,
                      otherwise, check to see if it's changed.
        """
        exists = self._exists(instance.uuid)
        if not force and exists:
            # See if the entry exists and has not changed.
            results = self._run_operation('stat', options={'long': True},
                                          container=self.container,
                                          objects=[instance.uuid])
            result = results[0]
            if result['success']:
                existing_hash = result['headers']['etag']
                if six.PY3:
                    data = data.encode('ascii')
                md5 = hashlib.md5(data).hexdigest()
                if existing_hash == md5:
                    LOG.info(_LI('NVRAM has not changed for instance: %s'),
                             instance.name, instance=instance)
                    return

        self._store(instance.uuid, instance.name, data, exists=exists)
        LOG.debug('NVRAM updated for instance: %s', instance.name)
Example no. 41
0
 def execute(self):
     LOG.info(_LI('Resizing instance: %s'), self.instance.name,
              instance=self.instance)
     wrap = vm.update(self.adapter, self.host_wrapper,
                      self.instance, self.flavor, entry=None,
                      name=self.vm_name)
     return wrap
Example no. 42
0
    def execute_impl(self, vm_cnas):
        # If configured to not use RMC mgmt vifs, then return None.  We
        # return None because the Config Drive step (which may or may not
        # be used) requires the mgmt vif.
        if not CONF.powervm.use_rmc_mgmt_vif:
            LOG.debug('No management VIF created for instance %s as the conf '
                      'option use_rmc_mgmt_vif is set to False',
                      self.instance.name)
            return None

        LOG.info(_LI('Plugging the Management Network Interface to instance '
                     '%s'), self.instance.name, instance=self.instance)
        # Determine if we need to create the secure RMC VIF.  This should only
        # be needed if there is not a VIF on the secure RMC vSwitch
        vswitch_w = vif.get_secure_rmc_vswitch(self.adapter, self.host_uuid)
        if vswitch_w is None:
            LOG.debug('No management VIF created for instance %s due to '
                      'lack of Management Virtual Switch',
                      self.instance.name)
            return None

        # This next check verifies that there are no existing NICs on the
        # vSwitch, so that the VM does not end up with multiple RMC VIFs.
        for cna_w in vm_cnas:
            if cna_w.vswitch_uri == vswitch_w.href:
                LOG.debug('Management VIF already created for instance %s',
                          self.instance.name)
                return None

        # Return the created management CNA
        return vif.plug_secure_rmc_vif(
            self.adapter, self.instance, self.host_uuid, self.slot_mgr)
Example no. 43
0
def _push_vif_event(adapter, action, vif_w, instance, vif_type):
    """Push a custom event to the REST server for a vif action (plug/unplug).

    This event prompts the neutron agent to mark the port up or down.

    :param adapter: The pypowervm adapter.
    :param action: The action taken on the vif - either 'plug' or 'unplug'
    :param vif_w: The pypowervm wrapper of the affected vif (CNA, VNIC, etc.)
    :param instance: The nova instance for the event
    :param vif_type: The type of event source (pvm_sea, ovs, bridge,
                     pvm_sriov etc)
    """
    data = vif_w.href
    detail = jsonutils.dumps(
        dict(provider=EVENT_PROVIDER_ID,
             action=action,
             mac=vif_w.mac,
             type=vif_type))
    event = pvm_evt.Event.bld(adapter, data, detail)
    try:
        event = event.create()
        LOG.debug('Pushed custom event for consumption by neutron agent: '
                  '%s', str(event), instance=instance)
    except Exception:
        LOG.error(_LE('Custom VIF event push failed.  %s'),
                  str(event),
                  instance=instance)
        raise
Example no. 44
0
    def create_disk_from_image(self, context, instance, image, disk_size,
                               image_type=disk_dvr.DiskType.BOOT):
        """Creates a disk and copies the specified image to it.

        :param context: nova context used to retrieve image from glance
        :param instance: instance to create the disk for.
        :param image: image dict used to locate the image in glance
        :param disk_size: The size of the disk to create in GB.  If smaller
                          than the image, it will be ignored (as the disk
                          must be at least as big as the image).  Must be an
                          int.
        :param image_type: the image type. See disk constants above.
        :return: The backing pypowervm storage object that was created.
        """
        LOG.info(_LI('Create disk.'))

        # Transfer the image
        stream = self._get_image_upload(context, image)
        vol_name = self._get_disk_name(image_type, instance, short=True)

        # Disk size to the API is in bytes.  Input to this method is in GB.
        disk_bytes = self._disk_gb_to_bytes(disk_size, floor=image['size'])

        # This method will create a new disk at our specified size.  It will
        # then put the image in the disk.  If the disk is bigger than the
        # image, the user can resize the disk, create a new partition, etc.
        # If the image is bigger than the disk, the API should make the disk
        # big enough to hold the image (rounded up to a 1 GB boundary).
        vdisk, f_wrap = tsk_stg.upload_new_vdisk(
            self.adapter, self._vios_uuid, self.vg_uuid, stream, vol_name,
            image['size'], d_size=disk_bytes)

        return vdisk
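The size passed to the upload is simply the requested size in bytes, floored at the raw image size so the disk can never be smaller than the image. A hedged sketch of that conversion (a plausible standalone stand-in for _disk_gb_to_bytes, not necessarily the driver's exact helper):

def disk_gb_to_bytes(size_gb, floor=0):
    """Convert a GB disk size to bytes, never returning less than `floor`."""
    return max(size_gb * 1024 ** 3, floor)

# A 1 GB request against a 2.5 GiB image is bumped up to the image size.
image_size = int(2.5 * 1024 ** 3)
assert disk_gb_to_bytes(1, floor=image_size) == image_size
assert disk_gb_to_bytes(10, floor=image_size) == 10 * 1024 ** 3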
Example n. 45
0
    def post_live_migration_at_destination(self, network_infos, vol_drvs):
        """Do post migration cleanup on destination host.

        :param network_infos: instance network information
        :param vol_drvs: volume drivers for the attached volumes
        """
        # The LPAR should be on this host now.
        LOG.debug("Post live migration at destination.",
                  instance=self.instance)

        # Run the post live migration steps at the destination
        for network_info in network_infos:
            vif.post_live_migrate_at_destination(
                self.drvr.adapter, self.drvr.host_uuid, self.instance,
                network_info)

        # An unbounded dictionary that each volume adapter can use to persist
        # data from one call to the next.
        mig_vol_stor = {}

        # For each volume, make sure it completes the migration
        for vol_drv in vol_drvs:
            LOG.info(_LI('Performing post migration for volume %(volume)s'),
                     dict(volume=vol_drv.volume_id))
            try:
                vol_drv.post_live_migration_at_destination(mig_vol_stor)
            except Exception as e:
                LOG.exception(e)
                # It failed.
                raise LiveMigrationVolume(
                    host=self.drvr.host_wrapper.system_name,
                    name=self.instance.name, volume=vol_drv.volume_id)
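The mig_vol_stor dictionary is the only state shared between the per-volume calls; each volume driver may read and write whatever it likes in it. A toy illustration of the pattern (the driver class below is a made-up stand-in, not a nova-powervm volume adapter):

class FakeVolDriver(object):
    def __init__(self, volume_id):
        self.volume_id = volume_id

    def post_live_migration_at_destination(self, mig_vol_stor):
        # A real adapter could cache discovery results here so later
        # volumes on the same VIOS do not repeat the work.
        mig_vol_stor.setdefault('handled', []).append(self.volume_id)

mig_vol_stor = {}
for drv in (FakeVolDriver('vol-1'), FakeVolDriver('vol-2')):
    drv.post_live_migration_at_destination(mig_vol_stor)
assert mig_vol_stor['handled'] == ['vol-1', 'vol-2']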
Example n. 46
0
    def execute(self):
        LOG.info(_LI('Creating instance: %s'), self.instance.name)
        wrap = vm.crt_lpar(self.adapter, self.host_wrapper, self.instance,
                           self.flavor)
        pvm_stg.add_lpar_storage_scrub_tasks([wrap.id], self.stg_ftsk,
                                             lpars_exist=True)
        return wrap
Example n. 47
0
    def _discover_volume_on_vios(self, vios_w, volume_id):
        """Discovers an hdisk on a single vios for the volume.

        :param vios_w: VIOS wrapper to process
        :param volume_id: Volume to discover
        :returns: Status of the volume or None
        :returns: Device name or None
        :returns: UDID of the discovered hdisk or None
        """
        # Get the initiator WWPNs, target WWPNs and LUN for the given VIOS.
        vio_wwpns, t_wwpns, lun = self._get_hdisk_itls(vios_w)

        # Build the ITL map and discover the hdisks on the Virtual I/O
        # Server (if any).
        itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
        if len(itls) == 0:
            LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.',
                      {'vios': vios_w.name, 'volume_id': volume_id})
            return None, None, None

        status, device_name, udid = hdisk.discover_hdisk(self.adapter,
                                                         vios_w.uuid, itls)

        if hdisk.good_discovery(status, device_name):
            LOG.info(_LI('Discovered %(hdisk)s on vios %(vios)s for '
                     'volume %(volume_id)s. Status code: %(status)s.'),
                     {'hdisk': device_name, 'vios': vios_w.name,
                      'volume_id': volume_id, 'status': str(status)})
        elif status == hdisk.LUAStatus.DEVICE_IN_USE:
            LOG.warning(_LW('Discovered device %(dev)s for volume %(volume)s '
                            'on %(vios)s is in use. Error code: %(status)s.'),
                        {'dev': device_name, 'volume': volume_id,
                         'vios': vios_w.name, 'status': str(status)})

        return status, device_name, udid
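hdisk.build_itls pairs the VIOS initiator WWPNs with the target WWPNs at the given LUN, and an empty result means the VIOS has no path to the volume. A rough, self-contained approximation of that behaviour (this is not pypowervm's implementation, just the shape of it):

from itertools import product

def build_itls(initiator_wwpns, target_wwpns, lun):
    """Approximation: one (initiator, target, lun) tuple per I-T pair."""
    return [(i, t, lun) for i, t in product(initiator_wwpns, target_wwpns)]

itls = build_itls(['c05076000000000a'],
                  ['500507680b000001', '500507680b000002'], 5)
assert len(itls) == 2
# No initiators or no targets -> empty list -> the VIOS is skipped above.
assert build_itls([], ['500507680b000001'], 5) == []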
Example n. 48
0
    def _cleanup_volume(self, udid):
        """Cleanup the hdisk associated with this udid."""

        if not udid:
            LOG.warning(_LW('Could not remove hdisk for volume: %s'),
                        self.volume_id)
            return

        LOG.info(_LI('Removing hdisk for udid: %s'), udid)

        def find_hdisk_to_remove(vios_w):
            device_name = vios_w.hdisk_from_uuid(udid)
            if device_name is None:
                return
            LOG.info(_LI('Removing %(hdisk)s from VIOS %(vios)s'),
                     {'hdisk': device_name, 'vios': vios_w.name})
            self._add_remove_hdisk(vios_w, device_name,
                                   stg_ftsk=rmv_hdisk_ftsk)

        # Create a feed task to get the vios, find the hdisk and remove it.
        rmv_hdisk_ftsk = tx.FeedTask(
            'find_hdisk_to_remove', pvm_vios.VIOS.getter(
                self.adapter, xag=[pvm_const.XAG.VIO_STOR]))
        # Find vios hdisks for this udid to remove.
        rmv_hdisk_ftsk.add_functor_subtask(
            find_hdisk_to_remove, flag_update=False)
        rmv_hdisk_ftsk.execute()
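Stripped of pypowervm, the FeedTask / functor-subtask pattern is just "run this function against every wrapper in the feed, then flush any changes". A very rough stand-in showing only the control flow (the class below is illustrative; the real tx.FeedTask also parallelizes subtasks and POSTs updated wrappers):

class TinyFeedTask(object):
    """Illustrative stand-in for the pypowervm tx.FeedTask control flow."""

    def __init__(self, feed):
        self.feed = feed          # e.g. a list of VIOS wrappers
        self._functors = []

    def add_functor_subtask(self, func, flag_update=True):
        self._functors.append(func)

    def execute(self):
        for item in self.feed:
            for func in self._functors:
                func(item)

ftsk = TinyFeedTask(feed=['vios1', 'vios2'])
ftsk.add_functor_subtask(lambda vios_w: print('visiting %s' % vios_w),
                         flag_update=False)
ftsk.execute()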
Example n. 49
0
    def __init__(self, connection):
        super(LocalStorage, self).__init__(connection)

        # Query to get the Volume Group UUID
        self.vg_name = CONF.powervm.volume_group_name
        self._vios_uuid, self.vg_uuid = self._get_vg_uuid(self.vg_name)
        LOG.info(_LI("Local Storage driver initialized: volume group: '%s'"),
                 self.vg_name)
Example n. 50
0
    def add_func(vios_w):
        LOG.info(_LI("Adding vSCSI mapping to Physical Volume %(dev)s "
                     "to VM %(vm)s"),
                 {'dev': device_name, 'vm': self.vm_uuid})
        pv = pvm_stor.PV.bld(self.adapter, device_name)
        v_map = tsk_map.build_vscsi_mapping(self.host_uuid, vios_w,
                                            self.vm_uuid, pv)
        return tsk_map.add_map(vios_w, v_map)
Example n. 51
0
    def execute(self, lpar_wrap, mgmt_cna):
        LOG.info(_LI('Creating Config Drive for instance: %s'),
                 self.instance.name)
        self.mb = media.ConfigDrivePowerVM(self.adapter, self.host_uuid)
        self.mb.create_cfg_drv_vopt(self.instance, self.injected_files,
                                    self.network_info, lpar_wrap.uuid,
                                    admin_pass=self.ad_pass,
                                    mgmt_cna=mgmt_cna, stg_ftsk=self.stg_ftsk)
Example n. 52
0
    def rm_vopt():
        LOG.info(_LI("Removing virtual optical for VM with UUID %s."),
                 lpar_uuid)
        vg_wrap = pvm_stg.VG.get(self.adapter,
                                 uuid=self.vg_uuid,
                                 parent_type=pvm_vios.VIOS,
                                 parent_uuid=self.vios_uuid)
        tsk_stg.rm_vg_storage(vg_wrap, vopts=media_elems)
Example n. 53
0
    def find_hdisk_to_remove(vios_w):
        device_name = vios_w.hdisk_from_uuid(udid)
        if device_name is None:
            return
        LOG.info(_LI('Removing %(hdisk)s from VIOS %(vios)s'),
                 {'hdisk': device_name, 'vios': vios_w.name})
        self._add_remove_hdisk(vios_w, device_name,
                               stg_ftsk=rmv_hdisk_ftsk)
Example n. 54
0
    def rm_vopt():
        LOG.info(_LI("Removing virtual optical for VM with UUID %s."),
                 lpar_uuid)
        vg_rsp = self.adapter.read(pvm_vios.VIOS.schema_type,
                                   root_id=self.vios_uuid,
                                   child_type=pvm_stg.VG.schema_type,
                                   child_id=self.vg_uuid)
        tsk_stg.rm_vg_storage(pvm_stg.VG.wrap(vg_rsp), vopts=media_elems)
Example n. 55
0
    def add_func(vios_w):
        LOG.info(_LI("Adding vSCSI mapping to Physical Volume %(dev)s "
                     "to VM %(vm)s"),
                 {'dev': device_name, 'vm': self.vm_uuid})
        pv = pvm_stor.PV.bld(self.adapter, device_name)
        v_map = tsk_map.build_vscsi_mapping(
            self.host_uuid, vios_w, self.vm_uuid, pv,
            lpar_slot_num=lpar_slot_num, lua=lua)
        return tsk_map.add_map(vios_w, v_map)
Example n. 56
0
    def _set_fabric_meta(self, fabric, port_map):
        """Sets the port map into the instance's system metadata.

        The system metadata will store per-fabric port maps that link the
        physical ports to the virtual ports.  This is needed for the async
        nature between the wwpns call (get_volume_connector) and the
        connect_volume (spawn).

        :param fabric: The name of the fabric.
        :param port_map: The port map (as defined via the derive_npiv_map
                         pypowervm method).
        """

        # We will store the metadata in comma-separated strings, with up to
        # four groups of three tokens per key.  Each group of three is the
        # Physical Port WWPN followed by its two Virtual Port WWPNs:
        # Ex:
        # npiv_wwpn_adpt_A:
        #     "p_wwpn1,v_wwpn1,v_wwpn2,p_wwpn2,v_wwpn3,v_wwpn4,..."
        # npiv_wwpn_adpt_A_2:
        #     "p_wwpn5,v_wwpn9,v_wwpn10,p_wwpn6,..."

        meta_elems = []
        for p_wwpn, v_wwpn in port_map:
            meta_elems.append(p_wwpn)
            meta_elems.extend(v_wwpn.split())

        LOG.info(_LI("Fabric %(fabric)s wwpn metadata will be set to "
                     "%(meta)s for instance %(inst)s"),
                 {'fabric': fabric, 'meta': ",".join(meta_elems),
                  'inst': self.instance.name})

        # Clear out the original metadata.  We may be reducing the number of
        # keys (e.g. on a reschedule), so delete what we had before adding
        # anything new.
        meta_key_root = self._sys_meta_fabric_key(fabric)
        for key in tuple(self.instance.system_metadata.keys()):
            if key.startswith(meta_key_root):
                del self.instance.system_metadata[key]

        # Build up the mapping for the new keys.
        fabric_id_iter = 1
        meta_key = meta_key_root
        key_len = len(meta_key)

        for key in range(self._get_num_keys(port_map)):
            start_elem = 12 * (fabric_id_iter - 1)
            meta_value = ",".join(meta_elems[start_elem:start_elem + 12])
            self.instance.system_metadata[meta_key] = meta_value
            # On the first pass append a numeric suffix; afterwards replace it
            if fabric_id_iter > 1:
                fabric_id_iter += 1
                meta_key = meta_key.replace(meta_key[key_len:],
                                            "_%s" % fabric_id_iter)
            else:
                fabric_id_iter += 1
                meta_key = meta_key + "_%s" % fabric_id_iter
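Because every port-map entry contributes three tokens and a key holds at most four entries (12 tokens), the resulting metadata layout can be reproduced with a few lines of plain Python. The helper below is only a sketch of that chunking, with made-up WWPN values; it is not the driver's code:

def fabric_meta(meta_key_root, port_map):
    """Chunk (p_wwpn, 'v_wwpn1 v_wwpn2') pairs into 12-token values."""
    tokens = []
    for p_wwpn, v_wwpns in port_map:
        tokens.append(p_wwpn)
        tokens.extend(v_wwpns.split())
    meta = {}
    for i in range(0, len(tokens), 12):
        key = meta_key_root if i == 0 else '%s_%d' % (meta_key_root,
                                                      i // 12 + 1)
        meta[key] = ','.join(tokens[i:i + 12])
    return meta

pmap = [('p%d' % n, 'v%da v%db' % (n, n)) for n in range(5)]
meta = fabric_meta('npiv_wwpn_adpt_A', pmap)
assert set(meta) == {'npiv_wwpn_adpt_A', 'npiv_wwpn_adpt_A_2'}
assert meta['npiv_wwpn_adpt_A_2'] == 'p4,v4a,v4b'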
Example n. 57
0
    def rm_func(vios_w):
        LOG.info(_LI("Removing vSCSI mapping from Physical Volume %(dev)s "
                     "to VM %(vm)s"),
                 {'dev': device_name, 'vm': vm_uuid})
        removed_maps = tsk_map.remove_maps(
            vios_w, vm_uuid,
            tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
        for rm_map in removed_maps:
            slot_mgr.drop_vscsi_mapping(rm_map)
        return removed_maps
Example n. 58
0
    def revert(self, lpar_wrap, result, flow_failures):
        LOG.info(_LI('Powering off instance: %s'), self.instance.name)

        if isinstance(result, task_fail.Failure):
            # The power on itself failed...can't power off.
            LOG.debug('Power on failed.  Not performing power off.')
            return

        power.power_off(lpar_wrap, self.host_uuid, force_immediate=True)
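In TaskFlow, revert() receives whatever execute() returned, or a Failure object when execute() itself raised; the isinstance check above is what tells the two apart. A minimal sketch of that guard, assuming taskflow is installed and that task_fail refers to taskflow.types.failure (the helper name is illustrative):

from taskflow.types import failure as task_fail

def should_power_off(result):
    """Only power off if the power-on task actually produced a result."""
    return not isinstance(result, task_fail.Failure)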