Example #1
def _separate_mappings(vios_w, client_href):
    """Separates out the systems existing mappings into silos.

    :param vios_w: The pypowervm wrapper for the VIOS.
    :param client_href: The REST URI of the client to separate the mappings
                        for.  May be a ROOT or CHILD URI.
    :return: A dictionary where the key is the server adapter (which is
             bound to the client).  The value is the list of mappings that use
             the server adapter.
    """
    # The key is server_adapter.udid, the value is the list of applicable
    # mappings to the server adapter.
    resp = {}
    client_lpar_uuid = util.get_req_path_uuid(client_href)

    existing_mappings = vios_w.scsi_mappings
    for existing_map in existing_mappings:
        ex_lpar_uuid = util.get_req_path_uuid(existing_map.client_lpar_href
                                              or '')
        if (ex_lpar_uuid == client_lpar_uuid and
                # ignore orphaned mappings
                existing_map.client_adapter is not None):
            # Valid map to consider
            key = existing_map.server_adapter.udid
            if resp.get(key) is None:
                resp[key] = []
            resp[key].append(existing_map)

    return resp
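A minimal usage sketch, assuming vios_w and client_href were obtained from earlier pypowervm calls; it only shows how the returned dictionary, keyed by server adapter UDID, groups the mappings:

silos = _separate_mappings(vios_w, client_href)
for server_adapter_udid, maps in silos.items():
    # Each silo holds the mappings that share one server adapter.
    print('%s backs %d mapping(s)' % (server_adapter_udid, len(maps)))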
Example #2
def add_map(vios_w, scsi_mapping):
    """Will add the mapping to the VIOS wrapper, if not already included.

    This method has the logic in place to detect if the storage from the
    mapping is already part of a SCSI mapping.  If so, it will not re-add
    the mapping to the VIOS wrapper.

    The new mapping is added to the wrapper, but it is up to the invoker to
    call the update method on the wrapper.

    :param vios_w: The Virtual I/O Server wrapper to add the mapping to.
    :param scsi_mapping: The scsi mapping to include in the VIOS.
    :return: The scsi_mapping that was added.  None if the mapping was already
             on the vios_w.
    """
    # Check to see if the mapping is already in the system.
    lpar_uuid = util.get_req_path_uuid(scsi_mapping.client_lpar_href,
                                       preserve_case=True)
    existing_mappings = find_maps(vios_w.scsi_mappings,
                                  client_lpar_id=lpar_uuid,
                                  stg_elem=scsi_mapping.backing_storage)
    if len(existing_mappings) > 0:
        return None
    vios_w.scsi_mappings.append(scsi_mapping)
    return scsi_mapping
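The docstring notes that the caller must push the change itself; a short sketch of that flow, assuming vios_w (a retrieved VIOS wrapper) and new_map (a VSCSIMapping built elsewhere) already exist:

added = add_map(vios_w, new_map)
if added is not None:
    # add_map only changed the in-memory wrapper; persist it via the REST API.
    vios_w = vios_w.update()
# If added is None, the storage was already mapped and nothing needs saving.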
Example #3
    def _handle_event(self, pvm_event, details, inst=None):
        """Handle an individual event.

        :param pvm_event: PowerVM Event Wrapper
        :param details: Parsed Details from the event
        :param inst: (Optional, Default: None) The pypowervm wrapper object
                    that represents the VM instance.
                    If None we try to look it up based on UUID.
        :return: returns the instance object or None (when it's not an
                 instance event or action is not partition state change
                 or NVRAM change)
        """
        # See if this uri (from data) ends with a PowerVM UUID.
        if not pvm_util.is_instance_path(pvm_event.data):
            return None

        # If a vm event and one we handle, call the inst handler.
        pvm_uuid = pvm_util.get_req_path_uuid(pvm_event.data,
                                              preserve_case=True)
        if (pvm_event.data.endswith('LogicalPartition/' + pvm_uuid)
                and (self.inst_actions_handled & set(details))):
            if not inst:
                LOG.debug(
                    'PowerVM Nova Event Handler: Getting inst '
                    'for id %s', pvm_uuid)
                inst = vm.get_instance(ctx.get_admin_context(), pvm_uuid)
            if inst:
                LOG.debug(
                    'Handle action "%(action)s" event for instance: '
                    '%(inst)s', dict(action=details, inst=inst.name))
                self._handle_inst_event(inst, pvm_uuid, details)
                return inst
        return None
Example #4
def add_map(vios_w, scsi_mapping):
    """Will add the mapping to the VIOS wrapper, if not already included.

    This method has the logic in place to detect if the storage from the
    mapping is already part of a SCSI mapping.  If so, it will not re-add
    the mapping to the VIOS wrapper.

    The new mapping is added to the wrapper, but it is up to the invoker to
    call the update method on the wrapper.

    :param vios_w: The Virtual I/O Server wrapper to add the mapping to.
    :param scsi_mapping: The scsi mapping to include in the VIOS.
    :return: The scsi_mapping that was added.  None if the mapping was already
             on the vios_w.
    """
    # Check to see if the mapping is already in the system.
    lpar_uuid = util.get_req_path_uuid(scsi_mapping.client_lpar_href,
                                       preserve_case=True)
    existing_mappings = find_maps(vios_w.scsi_mappings,
                                  client_lpar_id=lpar_uuid,
                                  stg_elem=scsi_mapping.backing_storage)
    if len(existing_mappings) > 0:
        return None
    vios_w.scsi_mappings.append(scsi_mapping)
    return scsi_mapping
Example #5
    def disconnect_image_disk(self, context, instance, lpar_uuid,
                              disk_type=None):
        """Disconnects the storage adapters from the image disk.

        :param context: nova context for operation
        :param instance: instance to disconnect the image for.
        :param lpar_uuid: The UUID for the pypowervm LPAR element.
        :param disk_type: The list of disk types to remove, or None to remove
            all disks from the VM.
        :return: A list of all the backing storage elements that were
                 disconnected from the I/O Server and VM.
        """
        lpar_qps = vm.get_vm_qp(self.adapter, lpar_uuid)
        lpar_id = lpar_qps['PartitionID']
        host_uuid = pvm_u.get_req_path_uuid(
            lpar_qps['AssociatedManagedSystem'], preserve_case=True)
        lu_set = set()
        # The mappings will normally be the same on all VIOSes, unless a VIOS
        # was down when a disk was added.  So for the return value, we need to
        # collect the union of all relevant mappings from all VIOSes.
        for vios_uuid in self._vios_uuids(host_uuid=host_uuid):
            for lu in tsk_map.remove_lu_mapping(
                    self.adapter, vios_uuid, lpar_id, disk_prefixes=disk_type):
                lu_set.add(lu)
        return list(lu_set)
Example #6
    def vios_uuid(self):
        """The UUID of the Node (VIOS).

        This is only set if the VIOS is on this system!
        """
        uri = self.vios_uri
        if uri is not None:
            return u.get_req_path_uuid(uri, preserve_case=True)
Example #7
    def vios_uuid(self):
        """The UUID of the Node (VIOS).

        This is only set if the VIOS is on this system!
        """
        uri = self.vios_uri
        if uri is not None:
            return u.get_req_path_uuid(uri, preserve_case=True)
Example #8
    def for_event(cls, agent, event):
        """Factory method to produce a ProvisionRequest for an Event.

        :param agent: The neutron agent making the request.
        :param event: pypowervm.wrappers.event.Event to be processed.
        :return: A new ProvisionRequest.  Returns None if the event is not of
                 interest to the agent.  If the event indicates a PLUG, returns
                 None if a corresponding device can't be found in neutron.
        """
        # Today, we're only handling CUSTOM_CLIENT_EVENTS provided by
        # nova-powervm's vif driver.  In the future, if PowerVM provides
        # official events for VIF types (CNA, VNIC, etc.), this method can be
        # converted to use them.
        if event.etype != pvm_evt.EventType.CUSTOM_CLIENT_EVENT:
            return None
        try:
            edetail = jsonutils.loads(event.detail)
        except (ValueError, TypeError):
            # Not a custom event we recognize
            return None
        if edetail.get('provider') != EVENT_PROVIDER_NOVA_PVM_VIF:
            # Not provided by nova-powervm's vif driver
            return None

        # The actions in the event should match our PLUG/UNPLUG consts, but
        # account for mismatched future versions
        action = edetail['action']
        if action not in (PLUG, UNPLUG):
            LOG.debug("Ignoring event due to unhandled 'action' type.  %s",
                      str(event))
            return None

        device_detail = agent.get_device_details(edetail['mac'])
        if not utils.device_detail_valid(device_detail, edetail['mac']):
            # device_detail_valid logged why
            return None

        # The event data is the URI.  For this kind of event, it looks like:
        # .../LogicalPartition/<LPAR_UUID>/VirtualNICDedicated/<vnic_uuid>
        lpar_uuid = pvm_util.get_req_path_uuid(event.data,
                                               preserve_case=True,
                                               root=True)
        vif_type = edetail['type']
        if agent.vif_type != vif_type:
            return None

        LOG.info(
            "Creating event-based %(action)s ProvisionRequest for VIF "
            "%(uri)s with MAC %(mac)s associated with LPAR %(lpar_uuid)s "
            "and source %(vif_type)s.", {
                'action': edetail['action'],
                'uri': event.data,
                'mac': edetail['mac'],
                'lpar_uuid': lpar_uuid,
                'vif_type': vif_type
            })
        return cls(edetail['action'], device_detail, lpar_uuid, vif_type)
Example #9
def index_mappings(maps):
    """Create an index dict of SCSI mappings to facilitate reverse lookups.

    :param maps: Iterable of VSCSIMapping to index.
    :return: A dict of the form:
        { 'by-lpar-id': { str(lpar_id): [VSCSIMapping, ...], ... },
          'by-lpar-uuid': { lpar_uuid: [VSCSIMapping, ...], ... },
          'by-storage-udid': { storage_udid: [VSCSIMapping, ...], ... }
        }
        ...where:
        - lpar_id is the short integer ID (not UUID) of the LPAR, stringified.
        - lpar_uuid is the UUID of the LPAR.
        - storage_udid is the Unique Device Identifier (UDID) of the backing
            Storage element associated with the mapping.

        While the outermost dict is guaranteed to have all keys, the inner
        dicts may be empty.  However, if an inner dict has a member, its list
        of mappings is guaranteed to be nonempty.
    """
    ret = {'by-lpar-id': {}, 'by-lpar-uuid': {}, 'by-storage-udid': {}}

    def add(key, ident, smap):
        """Add a mapping to an index.

        :param key: The top-level key name ('by-lpar-uuid', etc.)
        :param ident: The lower-level key name (e.g. the lpar_uuid)
        :param smap: The mapping to add to the index.
        """
        # Skip when there is no identifier (e.g. no server adapter, so no
        # LPAR ID); otherwise None would be indexed under the key 'None'.
        if not ident:
            return
        # Stringify: the LPAR short ID arrives as an int.
        ident = str(ident)
        if ident not in ret[key]:
            ret[key][ident] = []
        ret[key][ident].append(smap)

    for smap in maps:
        clhref = smap.client_lpar_href
        if clhref:
            add('by-lpar-uuid',
                util.get_req_path_uuid(clhref, preserve_case=True), smap)

        clid = None
        # Mapping may not have a client adapter, but will always have a server
        # adapter - so get the LPAR ID from the server adapter.
        if smap.server_adapter:
            clid = smap.server_adapter.lpar_id
        add('by-lpar-id', clid, smap)

        stg = smap.backing_storage
        if stg:
            add('by-storage-udid', stg.udid, smap)

    return ret
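A short lookup sketch against the index described above; vios_w, lpar_uuid and disk_udid are assumed to be known already:

idx = index_mappings(vios_w.scsi_mappings)
# Keys of 'by-lpar-id' are stringified integers, per the docstring.
maps_for_lpar_2 = idx['by-lpar-id'].get('2', [])
# LPAR UUID keys keep their case (preserve_case=True above).
maps_for_lpar = idx['by-lpar-uuid'].get(lpar_uuid, [])
# Backing storage UDID lookup.
maps_for_disk = idx['by-storage-udid'].get(disk_udid, [])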
Example #10
def index_mappings(maps):
    """Create an index dict of SCSI mappings to facilitate reverse lookups.

    :param maps: Iterable of VSCSIMapping to index.
    :return: A dict of the form:
        { 'by-lpar-id': { str(lpar_id): [VSCSIMapping, ...], ... },
          'by-lpar-uuid': { lpar_uuid: [VSCSIMapping, ...], ... },
          'by-storage-udid': { storage_udid: [VSCSIMapping, ...], ... }
        }
        ...where:
        - lpar_id is the short integer ID (not UUID) of the LPAR, stringified.
        - lpar_uuid is the UUID of the LPAR.
        - storage_udid is the Unique Device Identifier (UDID) of the backing
            Storage element associated with the mapping.

        While the outermost dict is guaranteed to have all keys, the inner
        dicts may be empty.  However, if an inner dict has a member, its list
        of mappings is guaranteed to be nonempty.
    """
    ret = {'by-lpar-id': {}, 'by-lpar-uuid': {}, 'by-storage-udid': {}}

    def add(key, ident, smap):
        """Add a mapping to an index.

        :param key: The top-level key name ('by-lpar-uuid', etc.)
        :param ident: The lower-level key name (e.g. the lpar_uuid)
        :param smap: The mapping to add to the index.
        """
        # Skip when there is no identifier (e.g. no server adapter, so no
        # LPAR ID); otherwise None would be indexed under the key 'None'.
        if not ident:
            return
        # Stringify: the LPAR short ID arrives as an int.
        ident = str(ident)
        if ident not in ret[key]:
            ret[key][ident] = []
        ret[key][ident].append(smap)

    for smap in maps:
        clhref = smap.client_lpar_href
        if clhref:
            add('by-lpar-uuid',
                util.get_req_path_uuid(clhref, preserve_case=True), smap)

        clid = None
        # Mapping may not have a client adapter, but will always have a server
        # adapter - so get the LPAR ID from the server adapter.
        if smap.server_adapter:
            clid = smap.server_adapter.lpar_id
        add('by-lpar-id', clid, smap)

        stg = smap.backing_storage
        if stg:
            add('by-storage-udid', stg.udid, smap)

    return ret
Example #11
 def remove(self, key, delete=False):
     with self._lock:
         i_key = self._get_internal_key(key.split('?', 1)[0])
         keys_to_remove = []
         for k in self._keys:
             if k == i_key or k.startswith(i_key + '?'):
                 keys_to_remove.append(k)
         for k in keys_to_remove:
             LOG.debug("%s cache remove for %s via %s" %
                       (self.host, k, key))
             self._keys.remove(k)
             del self._data[k]
             entry_uuid = util.get_req_path_uuid(key)
             if delete and entry_uuid:
                 # Also drop the feed association for this entry's UUID.
                 del self._uuid_feeds_map[entry_uuid]
Example #12
    def pg83(self):
        encoded = self._get_val_str(_PV_PG83)
        # TODO(efried): Temporary workaround until VIOS supports pg83 in Events
        # >>>CUT HERE>>>
        if not encoded:
            # The PhysicalVolume XML doesn't contain the DescriptorPage83
            # property.  (This could be because the disk really doesn't have
            # this attribute; but if the caller is asking for pg83, they likely
            # expect that it should.)  More likely, it is because their VIOS is
            # running at a level which supplies this datum in a fresh inventory
            # query, but not in a PV ADD Event.  In that case, use the
            # LUARecovery Job to perform the fresh inventory query to retrieve
            # this value.  Since this is expensive, we cache the value.
            if not hasattr(self, '_pg83_encoded'):
                # Get the VIOS UUID from the parent_entry of this PV.  Raise if
                # it doesn't exist.
                if not hasattr(self, 'parent_entry') or not self.parent_entry:
                    raise ex.UnableToBuildPG83EncodingMissingParent(
                        dev_name=self.name)
                # The parent_entry is either a VG or a VIOS.  If a VG, it is a
                # child of the owning VIOS, so pull out the ROOT UUID of its
                # href. If a VIOS, we can't count on the href being a root URI,
                # so pull the target UUID regardless.
                use_root_uuid = isinstance(self.parent_entry, VG)
                vio_uuid = u.get_req_path_uuid(self.parent_entry.href,
                                               preserve_case=True,
                                               root=use_root_uuid)

                # Local import to prevent circular dependency
                from pypowervm.tasks import hdisk
                # Cache the encoded value for performance
                self._pg83_encoded = hdisk.get_pg83_via_job(
                    self.adapter, vio_uuid, self.udid)
            encoded = self._pg83_encoded
        # <<<CUT HERE<<<
        try:
            return base64.b64decode(encoded).decode(
                'utf-8') if encoded else None
        except (TypeError, binascii.Error) as te:
            LOG.warn(
                _('PV had encoded pg83 descriptor "%(pg83_raw)s", but it '
                  'failed to decode (%(type_error)s).'), {
                      'pg83_raw': encoded,
                      'type_error': te.args[0]
                  })
        return None
Example #13
    def process(self, events):
        """Process the event that comes back from PowerVM.

        :param events: The pypowervm Event wrapper.
        """
        inst_cache = {}
        for pvm_event in events:
            try:
                if pvm_event.etype in (pvm_evt.EventType.NEW_CLIENT,
                                       pvm_evt.EventType.CACHE_CLEARED):
                    # TODO(efried): Should we pull and check all the LPARs?
                    self._uuid_cache.clear()
                    continue
                # See if this uri (from data) ends with a PowerVM UUID.
                pvm_uuid = pvm_util.get_req_path_uuid(pvm_event.data,
                                                      preserve_case=True)
                if pvm_uuid is None:
                    continue
                # Is it an instance event?
                if not pvm_event.data.endswith('LogicalPartition/' + pvm_uuid):
                    continue

                # Is this a delete?  If so, clear the cache entry.
                if pvm_event.etype == pvm_evt.EventType.DELETE_URI:
                    try:
                        del self._uuid_cache[pvm_uuid]
                    except KeyError:
                        pass
                    continue
                # Pull all the pieces of the event.
                details = (pvm_event.detail.split(',')
                           if pvm_event.detail else [])
                # Is it one we care about?
                if not _INST_ACTIONS_HANDLED & set(details):
                    continue

                inst_cache[pvm_event.data] = self._handle_inst_event(
                    inst_cache.get(pvm_event.data), pvm_uuid, details)

            except Exception:
                # We deliberately keep this exception clause as broad as
                # possible - we don't want *any* error to stop us from
                # attempting to process the next event.
                LOG.exception('Unable to process PowerVM event %s',
                              str(pvm_event))
Example #14
    def _prov_reqs_for_uri(self, uri):
        """Returns set of ProvisionRequests for a URI.

        When the API indicates that a URI has been added or invalidated, this
        returns the list of ProvisionRequests for that URI.  If the URI is not
        valid for a ClientNetworkAdapter (CNA), an empty list is returned.
        """
        try:
            if not pvm_util.is_instance_path(uri):
                return []
        except Exception:
            LOG.warn(_LW('Unable to parse URI %s for provision request '
                         'assessment.'), uri)
            return []

        # The event queue will only return URIs for 'root like' objects.
        # This is essentially just the LogicalPartition; you can't get the
        # ClientNetworkAdapter.  So if we find an add/invalidate for the
        # LogicalPartition, we'll get all the CNAs.
        #
        # This check will throw out everything that doesn't end with the
        # LogicalPartition's UUID.
        uuid = pvm_util.get_req_path_uuid(uri, preserve_case=True)
        if not uri.endswith('LogicalPartition/' + uuid):
            return []

        # For the LPAR, get the CNAs.
        cna_wraps = utils.list_cnas(self.adapter, self.host_uuid, uuid)
        resp = []
        for cna_w in cna_wraps:
            # Build a provision request for each type
            device_mac = utils.norm_mac(cna_w.mac)
            device_detail = self.agent.get_device_details(device_mac)

            # A device detail will always come back...even if neutron has
            # no idea what the port is.  This WILL happen for PowerVM, maybe
            # an event for the mgmt partition or the secure RMC VIF.  We can
            # detect if Neutron has full device details by simply querying for
            # the mac from the device_detail
            if not device_detail.get('mac_address'):
                continue

            # Must be good!
            resp.append(agent_base.ProvisionRequest(device_detail, uuid))
        return resp
Example #15
    def _vios_uuids(self):
        """List the UUIDs of our cluster's VIOSes on this host.

        (If a VIOS is not on this host, we can't interact with it, even if its
        URI and therefore its UUID happen to be available in the pypowervm
        wrapper.)

        :return: A list of VIOS UUID strings.
        """
        ret = []
        for n in self._clust.nodes:
            # Skip any nodes that we don't have the vios uuid or uri
            if not (n.vios_uuid and n.vios_uri):
                continue
            if self._host_uuid == pvm_u.get_req_path_uuid(
                    n.vios_uri, preserve_case=True, root=True):
                ret.append(n.vios_uuid)
        return ret
Example #16
    def pg83(self):
        encoded = self._get_val_str(_PV_PG83)
        # TODO(efried): Temporary workaround until VIOS supports pg83 in Events
        # >>>CUT HERE>>>
        if not encoded:
            # The PhysicalVolume XML doesn't contain the DescriptorPage83
            # property.  (This could be because the disk really doesn't have
            # this attribute; but if the caller is asking for pg83, they likely
            # expect that it should.)  More likely, it is because their VIOS is
            # running at a level which supplies this datum in a fresh inventory
            # query, but not in a PV ADD Event.  In that case, use the
            # LUARecovery Job to perform the fresh inventory query to retrieve
            # this value.  Since this is expensive, we cache the value.
            if not hasattr(self, '_pg83_encoded'):
                # Get the VIOS UUID from the parent_entry of this PV.  Raise if
                # it doesn't exist.
                if not hasattr(self, 'parent_entry') or not self.parent_entry:
                    raise ex.UnableToBuildPG83EncodingMissingParent(
                        dev_name=self.name)
                # The parent_entry is either a VG or a VIOS.  If a VG, it is a
                # child of the owning VIOS, so pull out the ROOT UUID of its
                # href. If a VIOS, we can't count on the href being a root URI,
                # so pull the target UUID regardless.
                use_root_uuid = isinstance(self.parent_entry, VG)
                vio_uuid = u.get_req_path_uuid(
                    self.parent_entry.href, preserve_case=True,
                    root=use_root_uuid)

                # Local import to prevent circular dependency
                from pypowervm.tasks import hdisk
                # Cache the encoded value for performance
                self._pg83_encoded = hdisk.get_pg83_via_job(
                    self.adapter, vio_uuid, self.udid)
            encoded = self._pg83_encoded
        # <<<CUT HERE<<<
        try:
            return base64.b64decode(encoded).decode(
                'utf-8') if encoded else None
        except (TypeError, binascii.Error) as te:
            LOG.warn(_('PV had encoded pg83 descriptor "%(pg83_raw)s", but it '
                       'failed to decode (%(type_error)s).'),
                     {'pg83_raw': encoded, 'type_error': te.args[0]})
        return None
Example #17
    def _vios_uuids(self):
        """List the UUIDs of our cluster's VIOSes on this host.

        (If a VIOS is not on this host, we can't interact with it, even if its
        URI and therefore its UUID happen to be available in the pypowervm
        wrapper.)

        :return: A list of VIOS UUID strings.
        """
        ret = []
        for n in self._clust.nodes:
            # Skip any nodes that we don't have the VIOS uuid or uri
            if not (n.vios_uuid and n.vios_uri):
                continue
            if self._host_uuid == pvm_u.get_req_path_uuid(n.vios_uri,
                                                          preserve_case=True,
                                                          root=True):
                ret.append(n.vios_uuid)
        return ret
Example #18
    def connect_disk(self, context, instance, disk_info, lpar_uuid):
        """Connects the disk image to the Virtual Machine.

        :param context: nova context for the transaction.
        :param instance: nova instance to connect the disk to.
        :param disk_info: The pypowervm storage element returned from
                          create_disk_from_image.  Ex. VOptMedia, VDisk, LU,
                          or PV.
        :param lpar_uuid: The pypowervm UUID that corresponds to the VM.
        """
        # Create the LU structure
        lu = pvm_stg.LU.bld_ref(self.adapter, disk_info.name, disk_info.udid)

        # Add the mapping to *each* VIOS on the LPAR's host.
        # Note that the LPAR's host is likely to be the same as self.host_uuid,
        # but this is safer.
        host_href = vm.get_vm_qp(self.adapter, lpar_uuid,
                                 'AssociatedManagedSystem')
        host_uuid = pvm_u.get_req_path_uuid(host_href, preserve_case=True)
        for vios_uuid in self._vios_uuids(host_uuid=host_uuid):
            tsk_map.add_vscsi_mapping(host_uuid, vios_uuid, lpar_uuid, lu)
Example #19
    def _vios_uuids(self, host_uuid=None):
        """List the UUIDs of our cluster's VIOSes (on a specific host).

        (If a VIOS is not on this host, its URI and therefore its UUID will not
        be available in the pypowervm wrapper.)

        :param host_uuid: Restrict the response to VIOSes residing on the host
                          with the specified UUID.  If None/unspecified, VIOSes
                          on all hosts are included.
        :return: A list of VIOS UUID strings.
        """
        ret = []
        for n in self._cluster.nodes:
            # Skip any nodes that we don't have the vios uuid or uri
            if not (n.vios_uuid and n.vios_uri):
                continue
            if host_uuid:
                node_host_uuid = pvm_u.get_req_path_uuid(
                    n.vios_uri, preserve_case=True, root=True)
                if host_uuid != node_host_uuid:
                    continue
            ret.append(n.vios_uuid)
        return ret
Example #20
def find_maps(mapping_list, client_lpar_id=None, match_func=None,
              stg_elem=None, include_orphans=False):
    """Filter a list of scsi mappings by LPAR ID/UUID and a matching function.

    :param mapping_list: The mappings to filter.  Iterable of VSCSIMapping.
    :param client_lpar_id: Integer short ID or string UUID of the LPAR on the
                           client side of the mapping.  Note that the UUID form
                           relies on the presence of the client_lpar_href
                           field.  Some mappings lack this field, and would
                           therefore be ignored.  If client_lpar_id is not
                           passed, matching mappings are returned for all
                           LPAR IDs.
    :param match_func: Callable with the following specification:

        def match_func(storage_elem)
            param storage_elem: A backing storage element wrapper (VOpt, VDisk,
                                PV, or LU) to be analyzed.  May be None (some
                                mappings have no backing storage).
            return: True if the storage_elem's mapping should be included;
                    False otherwise.

                       If neither match_func nor stg_elem is specified, the
                       default is to match everything - that is, find_maps will
                       return all mappings for the specified client_lpar_id.
                       It is illegal to specify both match_func and stg_elem.
    :param stg_elem: Match mappings associated with a specific storage element.
                     Effectively, this generates a default match_func which
                     matches on the type and name of the storage element.
                     If neither match_func nor stg_elem is specified, the
                     default is to match everything - that is, find_maps will
                     return all mappings for the specified client_lpar_id.
                     It is illegal to specify both match_func and stg_elem.
    :param include_orphans: An "orphan" contains a server adapter but no client
                            adapter.  If this parameter is True, mappings with
                            no client adapter will still be considered for
                            inclusion.  If False, mappings with no client
                            adapter will be skipped entirely, regardless of any
                            other criteria.
    :return: A list comprising the subset of the input mapping_list whose
             client LPAR IDs match client_lpar_id and whose backing storage
             elements satisfy match_func.
    :raise ValueError: If both match_func and stg_elem are specified.
    """
    if match_func and stg_elem:
        raise ValueError(_("Must not specify both match_func and stg_elem."))
    if not match_func:
        # Default no filter
        match_func = lambda x: True
    if stg_elem:
        # Match storage element on type and name
        match_func = lambda stg_el: (
            stg_el is not None and
            stg_el.schema_type == stg_elem.schema_type and
            stg_el.name == stg_elem.name)

    is_uuid = False
    client_id = None
    if client_lpar_id:
        is_uuid, client_id = uuid.id_or_uuid(client_lpar_id)
    matching_maps = []
    for existing_scsi_map in mapping_list:
        # No client, continue on unless including orphans.
        if not include_orphans and existing_scsi_map.client_adapter is None:
            continue

        # If to a different VM, continue on.
        href = existing_scsi_map.client_lpar_href
        if is_uuid and (not href or client_id != util.get_req_path_uuid(
                href, preserve_case=True)):
            continue
        elif (client_lpar_id and not is_uuid and
                # Use the server adapter in case this is an orphan.
                existing_scsi_map.server_adapter.lpar_id != client_id):
            continue

        if match_func(existing_scsi_map.backing_storage):
            # Found a match!
            matching_maps.append(existing_scsi_map)
    return matching_maps
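Two illustrative calls, assuming vios_w, lpar_uuid and vdisk_w are already available; the first filters by a concrete storage element, the second by a custom match_func:

# Mappings for this LPAR that are backed by vdisk_w (matched on type + name).
disk_maps = find_maps(vios_w.scsi_mappings, client_lpar_id=lpar_uuid,
                      stg_elem=vdisk_w)

# Mappings whose backing storage name starts with an assumed prefix.
boot_maps = find_maps(
    vios_w.scsi_mappings, client_lpar_id=lpar_uuid,
    match_func=lambda stg: stg is not None and stg.name.startswith('boot_'))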
Example #21
def find_maps(mapping_list,
              client_lpar_id=None,
              match_func=None,
              stg_elem=None,
              include_orphans=False):
    """Filter a list of scsi mappings by LPAR ID/UUID and a matching function.

    :param mapping_list: The mappings to filter.  Iterable of VSCSIMapping.
    :param client_lpar_id: Integer short ID or string UUID of the LPAR on the
                           client side of the mapping.  Note that the UUID form
                           relies on the presence of the client_lpar_href
                           field.  Some mappings lack this field, and would
                           therefore be ignored.  If client_lpar_id is not
                           passed, matching mappings are returned for all
                           LPAR IDs.
    :param match_func: Callable with the following specification:

        def match_func(storage_elem)
            param storage_elem: A backing storage element wrapper (VOpt, VDisk,
                                PV, or LU) to be analyzed.  May be None (some
                                mappings have no backing storage).
            return: True if the storage_elem's mapping should be included;
                    False otherwise.

                       If neither match_func nor stg_elem is specified, the
                       default is to match everything - that is, find_maps will
                       return all mappings for the specified client_lpar_id.
                       It is illegal to specify both match_func and stg_elem.
    :param stg_elem: Match mappings associated with a specific storage element.
                     Effectively, this generates a default match_func which
                     matches on the type and name of the storage element.
                     If neither match_func nor stg_elem is specified, the
                     default is to match everything - that is, find_maps will
                     return all mappings for the specified client_lpar_id.
                     It is illegal to specify both match_func and stg_elem.
    :param include_orphans: An "orphan" contains a server adapter but no client
                            adapter.  If this parameter is True, mappings with
                            no client adapter will still be considered for
                            inclusion.  If False, mappings with no client
                            adapter will be skipped entirely, regardless of any
                            other criteria.
    :return: A list comprising the subset of the input mapping_list whose
             client LPAR IDs match client_lpar_id and whose backing storage
             elements satisfy match_func.
    :raise ValueError: If both match_func and stg_elem are specified.
    """
    if match_func and stg_elem:
        raise ValueError(_("Must not specify both match_func and stg_elem."))
    if not match_func:
        # Default no filter
        match_func = lambda x: True
    if stg_elem:
        # Match storage element on type and name
        match_func = lambda stg_el: (stg_el is not None and stg_el.schema_type
                                     == stg_elem.schema_type and stg_el.name ==
                                     stg_elem.name)

    is_uuid = False
    client_id = None
    if client_lpar_id:
        is_uuid, client_id = uuid.id_or_uuid(client_lpar_id)
    matching_maps = []
    for existing_scsi_map in mapping_list:
        # No client, continue on unless including orphans.
        if not include_orphans and existing_scsi_map.client_adapter is None:
            continue

        # If to a different VM, continue on.
        href = existing_scsi_map.client_lpar_href
        if is_uuid and (not href or client_id != util.get_req_path_uuid(
                href, preserve_case=True)):
            continue
        elif (client_lpar_id and not is_uuid and
              # Use the server adapter in case this is an orphan.
              existing_scsi_map.server_adapter.lpar_id != client_id):
            continue

        if match_func(existing_scsi_map.backing_storage):
            # Found a match!
            matching_maps.append(existing_scsi_map)
    return matching_maps
Example #22
 def test_get_req_path_uuid_and_is_instance_path(self):
     # Fail: no '/'
     path = dummyuuid1
     self.assertIsNone(util.get_req_path_uuid(path))
     self.assertRaises(IndexError, util.is_instance_path, path)
     path = '/' + dummyuuid1
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
     self.assertTrue(util.is_instance_path(path))
     path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
     self.assertTrue(util.is_instance_path(path))
     # Fail: last path element is not a UUID
     path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1 + '/Child'
     self.assertIsNone(util.get_req_path_uuid(path))
     self.assertFalse(util.is_instance_path(path))
     # Fail: last path element is not quiiiite a UUID
     path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1[1:]
     self.assertIsNone(util.get_req_path_uuid(path))
     self.assertFalse(util.is_instance_path(path))
     # Ignore query/fragment
     path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
             '?group=One,Two#frag')
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
     self.assertTrue(util.is_instance_path(path))
     # Fail: last path element (having removed query/fragment) is not a UUID
     path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
             '/Child?group=One,Two#frag')
     self.assertIsNone(util.get_req_path_uuid(path))
     self.assertFalse(util.is_instance_path(path))
     # Default case conversion
     path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1.upper()
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
     self.assertEqual(dummyuuid1,
                      util.get_req_path_uuid(path, preserve_case=False))
     self.assertTrue(util.is_instance_path(path))
     # Force no case conversion
     self.assertEqual(dummyuuid1.upper(),
                      util.get_req_path_uuid(path, preserve_case=True))
     # Child URI gets child UUID by default
     path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
             '/Child/' + dummyuuid2)
     self.assertEqual(dummyuuid2, util.get_req_path_uuid(path))
     self.assertTrue(util.is_instance_path(path))
     # Get root UUID from child URI
     path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
             '/Child/' + dummyuuid2)
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(path, root=True))
     self.assertTrue(util.is_instance_path(path))
     # root=True redundant on a root path
     path = '/' + dummyuuid1
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(path, root=True))
     path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(path, root=True))
Example #23
def find_maps(mapping_list, client_lpar_id, client_adpt=None, port_map=None):
    """Filter a list of VFC mappings by LPAR ID.

    This is based on scsi_mapper.find_maps, but does not yet provide all the
    same functionality.

    :param mapping_list: The mappings to filter.  Iterable of VFCMapping.
    :param client_lpar_id: Integer short ID or string UUID of the LPAR on the
                           client side of the mapping.  Note that the UUID form
                           relies on the presence of the client_lpar_href
                           field.  Some mappings lack this field, and would
                           therefore be ignored.
    :param client_adpt: (Optional, Default=None) If set, will only include the
                        mapping if the client adapter's WWPNs match as well.
    :param port_map: (Optional, Default=None) If set, will look for a matching
                     mapping based off the client WWPNs as specified by the
                     port mapping.  The format of this is defined by the
                     derive_npiv_map method.
    :return: A list comprising the subset of the input mapping_list whose
             client LPAR IDs match client_lpar_id.
    """
    is_uuid, client_id = uuid.id_or_uuid(client_lpar_id)
    matching_maps = []

    if port_map:
        v_wwpns = [u.sanitize_wwpn_for_api(x) for x in port_map[1].split()]

    for vfc_map in mapping_list:
        # If to a different VM, continue on.
        href = vfc_map.client_lpar_href
        if is_uuid and (not href or client_id != u.get_req_path_uuid(
                href, preserve_case=True)):
            continue
        elif (not is_uuid and
                # Use the server adapter in case this is an orphan.
                vfc_map.server_adapter.lpar_id != client_id):
            continue

        # If there is a client adapter, and it is not an 'ANY WWPN' adapter,
        # then check to see if the mappings match.
        if client_adpt and client_adpt.wwpns != {_ANY_WWPN}:
            # If they passed in a client adapter, but the map doesn't have
            # one, then we have to ignore
            if not vfc_map.client_adapter:
                continue

            # Check to make sure the WWPNs between the two match.  This should
            # be an order independence check (as this query shouldn't care...
            # but the API itself does care about order).
            if set(client_adpt.wwpns) != set(vfc_map.client_adapter.wwpns):
                continue

        # If the user passed a port map, check whether the virtual WWPNs from
        # that port map match the client adapter's WWPNs.
        if port_map:
            if vfc_map.client_adapter is None:
                continue

            # If it is a new mapping with generated WWPNs, then the client
            # adapter can't have WWPNs.
            if v_wwpns == [_ANY_WWPN, _ANY_WWPN]:
                if vfc_map.client_adapter.wwpns != []:
                    continue
            elif set(vfc_map.client_adapter.wwpns) != set(v_wwpns):
                continue

        # Found a match!
        matching_maps.append(vfc_map)

    return matching_maps
Example #24
 def ssp_uuid(self):
     """The UUID of this Tier's parent SharedStoragePool."""
     return u.get_req_path_uuid(self.get_href(_TIER_ASSOC_SSP,
                                              one_result=True))
Example #25
 def ssp_uuid(self):
     """The UUID of the SharedStoragePool associated with this Cluster."""
     uri = self.ssp_uri
     if uri is not None:
         return u.get_req_path_uuid(uri)
Example #26
 def assoc_sys_uuid(self):
     """UUID of the associated ManagedSystem."""
     href = self.get_href(_BP_ASSOC_SYSTEM, one_result=True)
     return u.get_req_path_uuid(href, preserve_case=True) if href else None
Example #27
def parse_sea_mappings(adapter, host_uuid, mapping):
    """This method will parse the sea mappings, and return a UUID map.

    The UUIDs of the NetworkBridges are required for modification of the
    VLANs that are bridged through the system (via the
    SharedEthernetAdapters). However, UUIDs are not user consumable.  This
    method will read in the string from the CONF file and return a mapping
    for the physical networks.

    Input:
     - <ph_network>:<sea>:<vios_name>,<next ph_network>:<sea2>:<vios_name>
     - Example: default:ent5:vios_lpar,speedy:ent6:vios_lpar

    Output:
    {
      'default': <Network Bridge UUID>, 'speedy': <Network Bridge 2 UUID>
    }

    :param adapter: The pypowervm adapter.
    :param host_uuid: The UUID for the host system.
    :param mapping: The mapping string as defined above to parse.
    :return: The output dictionary described above.
    """
    # Read all the network bridges.
    nb_wraps = list_bridges(adapter, host_uuid)

    if len(nb_wraps) == 0:
        raise np_exc.NoNetworkBridges()
    # Did the user specify the mapping?
    if mapping == '':
        return _parse_empty_bridge_mapping(nb_wraps)

    # Need to find a list of all the VIOSes names to hrefs
    vio_wraps = pvm_vios.VIOS.get(adapter, xag=[pvm_const.XAG.VIO_NET])

    # Response dictionary
    resp = {}

    # Parse the strings
    trios = mapping.split(',')
    for trio in trios:
        # Keys
        # 0 - physical network
        # 1 - SEA name
        # 2 - VIO name
        keys = trio.split(':')

        # Find the VIOS wrapper for the name
        vio_w = next(v for v in vio_wraps if v.name == keys[2])

        # For each network bridge, see if it maps to the SEA name/VIOS href
        matching_nb = None
        for nb_wrap in nb_wraps:
            for sea in nb_wrap.seas:
                sea_vio_uuid = pvm_util.get_req_path_uuid(sea.vio_uri,
                                                          preserve_case=True)
                if sea.dev_name == keys[1] and sea_vio_uuid == vio_w.uuid:
                    # Found the matching SEA.
                    matching_nb = nb_wrap
                    break

        # Assuming we found a matching SEA, add it to the dictionary
        if matching_nb is not None:
            resp[keys[0]] = matching_nb.uuid
        else:
            raise np_exc.DeviceNotFound(dev=keys[1],
                                        vios=keys[2],
                                        phys_net=keys[0])

    return resp
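A call sketch using the mapping string from the docstring; adapter and host_uuid are assumed to have been obtained from pypowervm earlier:

bridge_map = parse_sea_mappings(
    adapter, host_uuid, 'default:ent5:vios_lpar,speedy:ent6:vios_lpar')
# bridge_map now maps 'default' and 'speedy' to the UUIDs of the
# NetworkBridges whose SEA/VIOS pairs matched.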
Example #28
 def assoc_sys_uuid(self):
     """UUID of the associated ManagedSystem."""
     href = self.get_href(_BP_ASSOC_SYSTEM, one_result=True)
     return u.get_req_path_uuid(href, preserve_case=True) if href else None
Example #29
 def ssp_uuid(self):
     """The UUID of the SharedStoragePool associated with this Cluster."""
     uri = self.ssp_uri
     if uri is not None:
         return u.get_req_path_uuid(uri)
Example #30
 def test_get_req_path_uuid_and_is_instance_path(self):
     # Fail: no '/'
     path = dummyuuid1
     self.assertIsNone(util.get_req_path_uuid(path))
     self.assertRaises(IndexError, util.is_instance_path, path)
     path = '/' + dummyuuid1
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
     self.assertTrue(util.is_instance_path(path))
     path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
     self.assertTrue(util.is_instance_path(path))
     # Fail: last path element is not a UUID
     path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1 + '/Child'
     self.assertIsNone(util.get_req_path_uuid(path))
     self.assertFalse(util.is_instance_path(path))
     # Fail: last path element is not quiiiite a UUID
     path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1[1:]
     self.assertIsNone(util.get_req_path_uuid(path))
     self.assertFalse(util.is_instance_path(path))
     # Ignore query/fragment
     path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
             '?group=One,Two#frag')
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
     self.assertTrue(util.is_instance_path(path))
     # Fail: last path element (having removed query/fragment) is not a UUID
     path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
             '/Child?group=One,Two#frag')
     self.assertIsNone(util.get_req_path_uuid(path))
     self.assertFalse(util.is_instance_path(path))
     # Default case conversion
     path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1.upper()
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(
         path, preserve_case=False))
     self.assertTrue(util.is_instance_path(path))
     # Force no case conversion
     self.assertEqual(dummyuuid1.upper(), util.get_req_path_uuid(
         path, preserve_case=True))
     # Child URI gets child UUID by default
     path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
             '/Child/' + dummyuuid2)
     self.assertEqual(dummyuuid2, util.get_req_path_uuid(path))
     self.assertTrue(util.is_instance_path(path))
     # Get root UUID from child URI
     path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
             '/Child/' + dummyuuid2)
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(path, root=True))
     self.assertTrue(util.is_instance_path(path))
     # root=True redundant on a root path
     path = '/' + dummyuuid1
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(path, root=True))
     path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1
     self.assertEqual(dummyuuid1, util.get_req_path_uuid(path, root=True))