Example #1
    def _can_modify(self, dlpar_cap, cap_desc):
        """Checks to determine if the LPAR can be modified.

        :param dlpar_cap: The appropriate DLPAR attribute to validate.  Only
                          used if system is active.
        :param cap_desc: A translated string indicating the DLPAR capability.
        :return capable: True if HW can be added/removed.  False otherwise.
        :return reason: A translated message that will indicate why it was not
                        capable of modification.  If capable is True, the
                        reason will be None.
        """
        # If we are in the LPAR, we have access to the operating system type.
        # If it is an OS400 type, then we can add/remove HW no matter what.
        if self.env == bp.LPARType.OS400:
            return True, None

        # Next, check the not-activated state
        if self.state == bp.LPARState.NOT_ACTIVATED:
            return True, None

        if self.rmc_state != bp.RMCState.ACTIVE:
            return False, _('LPAR does not have an active RMC connection.')
        if not dlpar_cap:
            return False, _('LPAR does not have an active DLPAR capability '
                            'for %s.') % cap_desc
        return True, None
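
A standalone sketch of the (capable, reason) return contract used above, with the checks reduced to plain booleans; the names are illustrative only and are not pypowervm API.

def can_modify(rmc_active, dlpar_cap, cap_desc):
    """Minimal mirror of the checks in _can_modify above."""
    if not rmc_active:
        return False, 'LPAR does not have an active RMC connection.'
    if not dlpar_cap:
        return False, ('LPAR does not have an active DLPAR capability '
                       'for %s.' % cap_desc)
    return True, None

capable, reason = can_modify(rmc_active=True, dlpar_cap=False,
                             cap_desc='Processors')
if not capable:
    print(reason)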
Example #2
    def _validate_resize_common(self):
        """Validation rules common for both active and inactive resizes.

        Helper method to enforce validation rules that are common for
        both active and inactive resizes.
        """
        curr_has_dedicated = self.cur_lpar_w.proc_config.has_dedicated
        if curr_has_dedicated and not self.has_dedicated:
            # Resize from Dedicated Mode to Shared Mode
            if self.pool_id != 0:
                msg = (_("The shared processor pool for %s must be "
                         "DefaultPool.") % self.cur_lpar_w.name)
                raise ValidatorException(msg)

        if not self.has_dedicated and not curr_has_dedicated:
            curr_proc_pool_id = self.cur_lpar_w.proc_config.\
                shared_proc_cfg.pool_id
            if curr_proc_pool_id != self.pool_id:
                msg = (
                    _("The shared processor pool for %s cannot be changed.") %
                    self.cur_lpar_w.name)
                raise ValidatorException(msg)

        self._validate_host_has_available_res(self.delta_des_vcpus,
                                              self.procs_avail, self.res_name)
Example #3
def parent_spec(parent, parent_type, parent_uuid):
    """Produce a canonical parent type and UUID suitable for read().

    :param parent: EntryWrapper representing the parent.  If specified,
                   parent_type and parent_uuid are ignored.
    :param parent_type: EntryWrapper class or schema_type string representing
                        the schema type of the parent.
    :param parent_uuid: String UUID of the parent.
    :return parent_type: String schema type of the parent.  The parent_type and
                         parent_uuid returns are both None or both valid
                         strings.
    :return parent_uuid: String UUID of the parent.  The parent_type and
                         parent_uuid returns are both None or both valid
                         strings.
    :raise ValueError: If parent is None and parent_type xor parent_uuid is
                       specified.
    """
    if all(param is None for param in (parent, parent_type, parent_uuid)):
        return None, None
    if parent is not None:
        return parent.schema_type, parent.uuid
    if any(param is None for param in (parent_type, parent_uuid)):
        # parent_type xor parent_uuid specified
        raise ValueError(_("Developer error: partial parent specification."))
    # Allow either string or class for parent_type
    if hasattr(parent_type, 'schema_type'):
        parent_type = parent_type.schema_type
    elif type(parent_type) is not str:
        raise ValueError(_("Developer error: parent_type must be either a "
                           "string schema type or a Wrapper subclass."))
    return parent_type, parent_uuid
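
A hedged usage sketch of the contract above, assuming parent_spec (and its gettext helper) is importable; the schema type and UUID strings are illustrative only.

parent_spec(None, None, None)
# -> (None, None): no parent specified at all.
parent_spec(None, 'ManagedSystem', '0a1b2c3d-0000-0000-0000-000000000001')
# -> ('ManagedSystem', '0a1b2c3d-0000-0000-0000-000000000001')
parent_spec(None, 'ManagedSystem', None)
# -> raises ValueError: partial parent specification.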
Example #4
    def validate(self):
        super(BoundField, self).validate()
        # If value was not converted to the type, then don't validate bounds
        if self.typed_value is None:
            return
        if (self._min_bound is not None
                and self.typed_value < self._convert_value(self._min_bound)):
            values = dict(field=self.name,
                          value=self.typed_value,
                          minimum=self._min_bound)
            msg = _("Field '%(field)s' has a value below the minimum. "
                    "Value: %(value)s; Minimum: %(minimum)s") % values
            LOG.error(msg)
            raise ValueError(msg)

        if (self._max_bound is not None
                and self.typed_value > self._convert_value(self._max_bound)):
            values = dict(field=self.name,
                          value=self.typed_value,
                          maximum=self._max_bound)
            msg = _("Field '%(field)s' has a value above the maximum. "
                    "Value: %(value)s; Maximum: %(maximum)s") % values

            LOG.error(msg)
            raise ValueError(msg)
Example #5
def _rm_dev_by_udid(dev, devlist):
    """Use UDID matching to remove a device from a list.

    Use this method in favor of devlist.remove(dev) when the dev originates
    from somewhere other than the devlist, and may have some non-matching
    properties which would cause normal equality comparison to fail.

    For example, use this method when using a VSCSI mapping's backing_storage
    to decide which LogicalUnit to remove from the list of SSP.logical_units.

    Note: This method relies on UDIDs being present in both dev and the
    corresponding item in devlist.

    :param dev: The EntryWrapper representing the device to remove.  May be
                VDisk, VOpt, PV, or LU.
    :param devlist: The list from which to remove the device.
    :return: The device removed, as it existed in the devlist.  None if the
             device was not found by UDID.
    """
    if not dev.udid:
        LOG.warn(_("Ignoring device because it lacks a UDID:\n%s"),
                 dev.toxmlstring())
        return None

    matches = [realdev for realdev in devlist if realdev.udid == dev.udid]
    if len(matches) == 0:
        LOG.warn(_("Device %s not found in list."), dev.name)
        return None
    if len(matches) > 1:
        raise exc.FoundDevMultipleTimes(devname=dev.name, count=len(matches))

    LOG.debug("Removing %s from devlist.", dev.name)
    match = matches[0]
    devlist.remove(match)
    return match
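
A standalone illustration of why UDID matching is used instead of devlist.remove(dev): the incoming object below refers to the same device as an entry in the list but would not compare equal to it.  The Dev class is a stand-in, not a pypowervm wrapper.

class Dev(object):
    def __init__(self, udid, name):
        self.udid, self.name = udid, name

devlist = [Dev('udid-1', 'hdisk1'), Dev('udid-2', 'hdisk2')]
incoming = Dev('udid-2', 'mapped-backing-dev')  # same UDID, different object

match = [realdev for realdev in devlist if realdev.udid == incoming.udid][0]
devlist.remove(match)
print([d.name for d in devlist])  # -> ['hdisk1']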
Example #6
def _parse_pg83_xml(xml_resp):
    """Parse LUARecovery XML response, looking for pg83 descriptor.

    :param xml_resp: Tuple containing OutputXML and StdOut results of the
                     LUARecovery Job
    :return: pg83 descriptor text, or None if not found.
    """
    # QUERY_INVENTORY response may contain more than one element.  Each will be
    # delimited by its own <?xml?> tag.  etree will only parse one at a time.
    for chunk in xml_resp.split('<?xml version="1.0"?>'):
        if not chunk:
            continue
        try:
            parsed = etree.fromstring(chunk)
        except etree.XMLSyntaxError as e:
            LOG.warning(_('QUERY_INVENTORY produced invalid chunk of XML '
                          '(%(chunk)s).  Error: %(err)s'),
                        {'chunk': chunk, 'err': e.args[0]})
            continue
        for elem in parsed.getiterator():
            if (etree.QName(elem.tag).localname == 'PhysicalVolume_base' and
                    elem.attrib.get('desType') == "NAA"):
                return elem.attrib.get('descriptor')
    LOG.warning(_('Failed to find pg83 descriptor in XML output:\n%s'),
                xml_resp)
    return None
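
A minimal sketch of the split-and-parse technique above, run against a fabricated two-document response (only the parsing pattern matters; the payload and descriptor value are made up).

from lxml import etree

resp = ('<?xml version="1.0"?><a><PhysicalVolume_base desType="OTHER"/></a>'
        '<?xml version="1.0"?><b><PhysicalVolume_base desType="NAA" '
        'descriptor="600507680282000000DEMO"/></b>')

for chunk in resp.split('<?xml version="1.0"?>'):
    if not chunk:
        continue
    for elem in etree.fromstring(chunk).iter():
        if (etree.QName(elem.tag).localname == 'PhysicalVolume_base' and
                elem.attrib.get('desType') == 'NAA'):
            print(elem.attrib.get('descriptor'))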
Example #7
def sanitize_file_name_for_api(name, prefix='', suffix='',
                               max_len=const.MaxLen.FILENAME_DEFAULT):
    """Generate a sanitized file name based on PowerVM's FileName.Pattern.

    :param name: The base name to sanitize.
    :param prefix: (Optional) A prefix to prepend to the 'name'.  No delimiter
                   is added.
    :param suffix: (Optional) A suffix to append to the 'name'.  No delimiter
                   is added.
    :param max_len: (Optional) The maximum allowable length of the final
                    sanitized string.  Defaults to the API's defined length for
                    FileName.Pattern.
    :return: A string scrubbed of all forbidden characters and trimmed for
             length as necessary.
    """
    def _scrub(in_name):
        """Returns in_name with illegal characters replaced with '_'."""
        return re.sub(r'[^.0-9A-Z_a-z]', '_', in_name)

    name, prefix, suffix = (_scrub(val) for val in (name, prefix, suffix))
    base_len = max_len - len(prefix) - len(suffix)
    if base_len <= 0:
        raise ValueError(_("Prefix and suffix together may not be more than "
                           "%d characters."), max_len - 1)
    name = name[:base_len]
    ret = prefix + name + suffix
    if not len(ret):
        raise ValueError(_("Total length must be at least 1 character."))
    return ret
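
A hedged usage sketch; the import path is an assumption (the helper itself is shown above), and the result follows directly from the regex and length math in the code.

from pypowervm.util import sanitize_file_name_for_api  # assumed module path

sanitize_file_name_for_api('my image.iso', prefix='part_', max_len=24)
# -> 'part_my_image.iso'  (the space is scrubbed to '_'; total length 17 <= 24)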
Example #8
def sanitize_partition_name_for_api(name, trunc_ok=True):
    """Sanitize a string to be suitable for use as a partition name.

    PowerVM's partition name restrictions are:
    - Between 1 and 31 characters, inclusive;
    - Containing ASCII characters between 0x20 (space) and 0x7E (~), inclusive,
      except ()\<>*$&?|[]'"`

    :param name: The name to scrub.  Invalid characters will be replaced with
                 '_'.
    :param trunc_ok: If True, and name exceeds 31 characters, it is truncated.
                     If False, and name exceeds 31 characters, ValueError is
                     raised.
    :return: The scrubbed string.
    :raise ValueError: If name is None or zero length; or if it exceeds length
                       31 and trunc_ok=False.
    """
    max_len = 31
    if not name:
        raise ValueError(
            _("The name parameter must be at least one character "
              "long."))
    if not trunc_ok and len(name) > max_len:
        raise ValueError(
            _("The name parameter must not exceed %d characters "
              "when trunc_ok is False.") % max_len)
    return re.sub(r'[^- !#%+,./0-9:;=@A-Z^_a-z{}]', '_', name)[:max_len]
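
A hedged usage sketch; the import path is an assumption, and the outputs follow from the regex and the 31-character limit above.

from pypowervm.util import sanitize_partition_name_for_api  # assumed path

sanitize_partition_name_for_api('instance-0001 (copy)')
# -> 'instance-0001 _copy_'   ('(' and ')' are not in the allowed set)
sanitize_partition_name_for_api('x' * 40)                   # truncated to 31 chars
sanitize_partition_name_for_api('x' * 40, trunc_ok=False)   # raises ValueError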
Example #9
def _upload_conflict(tier, luname, mkr_luname):
    """Detect an upload conflict with another host (our thread should bail).

    :param tier: Tier EntryWrapper representing the Tier to search.
    :param luname: The name of the LU we intend to upload.
    :param mkr_luname: The name of the marker LU we use to signify our upload
                       is in progress.
    :return: True if we find a winning conflict and should abandon our upload;
             False otherwise.
    """
    # Refetch the feed.  We must do this in case one or more other threads
    # created their marker LU since our last feed GET.
    lus = _find_lus(tier, luname)

    # First, if someone else already started the upload, we clean up
    # and wait for that one.
    if any([lu for lu in lus if lu.name == luname]):
        LOG.info(_('Abdicating in favor of in-progress upload.'))
        return True

    # The lus list should be all markers at this point.  If there's
    # more than one (ours), then the first (by alpha sort) wins.
    if len(lus) > 1:
        lus.sort(key=lambda l: l.name)
        winner = lus[0].name
        if winner != mkr_luname:
            # We lose.  Delete our LU and let the winner proceed
            LOG.info(_('Abdicating upload in favor of marker %s.'), winner)
            # Remove just our LU - other losers take care of theirs
            return True

    return False
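
A standalone sketch of the tie-break used above: when several hosts race to upload, the alphabetically-first marker LU name wins.  The names are fabricated.

markers = ['part5a1b_image_lu', 'part2c9d_image_lu', 'part9f00_image_lu']
my_marker = 'part9f00_image_lu'

winner = sorted(markers)[0]
if winner != my_marker:
    print('Abdicating upload in favor of marker %s.' % winner)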
Example #10
    def _validate_active_resize(self):
        """Enforce validation rules specific to active resize."""
        # Extract current values from existing LPAR.
        curr_has_dedicated = self.cur_lpar_w.proc_config.has_dedicated
        if curr_has_dedicated:
            lpar_proc_config = self.cur_lpar_w.proc_config.dedicated_proc_cfg
            curr_max_vcpus = lpar_proc_config.max
            curr_min_vcpus = lpar_proc_config.min
        else:
            lpar_proc_config = self.cur_lpar_w.proc_config.shared_proc_cfg
            curr_max_vcpus = lpar_proc_config.max_virtual
            curr_min_vcpus = lpar_proc_config.min_virtual
            curr_max_proc_units = lpar_proc_config.max_units
            curr_min_proc_units = lpar_proc_config.min_units

        # min/max cannot be changed when lpar is not powered off.
        if (self.max_vcpus != curr_max_vcpus or
                self.min_vcpus != curr_min_vcpus):
            msg = (_("The virtual machine must be powered off before changing "
                     "the minimum or maximum processors. Power off virtual "
                     "machine %s and try again.") % self.cur_lpar_w.name)
            raise ValidatorException(msg)

        if not self.has_dedicated and not curr_has_dedicated:
            curr_min_proc_units = round(float(curr_min_proc_units), 2)
            curr_max_proc_units = round(float(curr_max_proc_units), 2)
            if (round(self.max_proc_units, 2) != curr_max_proc_units or
                    round(self.min_proc_units, 2) != curr_min_proc_units):
                msg = (_("The virtual machine must be powered off before "
                         "changing the minimum or maximum processor units. "
                         "Power off virtual machine %s and try again.") %
                       self.cur_lpar_w.name)
                raise ValidatorException(msg)

        # Processor compatibility mode cannot be changed when lpar is not
        # powered off.
        curr_proc_compat = self.cur_lpar_w.proc_compat_mode
        curr_pend_proc_compat = self.cur_lpar_w.pending_proc_compat_mode
        if self.proc_compat_mode is not None:
            proc_compat = self.proc_compat_mode.lower()
            if (proc_compat != curr_proc_compat.lower() and
                    (proc_compat != curr_pend_proc_compat.lower())):
                # If requested was not the same as current, this is
                # not supported when instance is not powered off.
                msg = (_("The virtual machine must be powered off before "
                         "changing the processor compatibility mode. "
                         "Power off virtual machine %s and try again.") %
                       self.cur_lpar_w.name)
                raise ValidatorException(msg)

        # Processing mode cannot be changed when lpar is not powered off.
        if self.has_dedicated != curr_has_dedicated:
            msg = (_("The virtual machine must be powered off before changing "
                     "the processing mode. Power off virtual machine %s and "
                     "try again.") % self.cur_lpar_w.name)
            raise ValidatorException(msg)

        # Validations common for both active & inactive resizes.
        self._validate_resize_common()
Example #11
def _pwroff_soft_standard_flow(part, restart, timeout):
    """Normal (non-hard) power-off retry flow for non-IBMi partitions.

    START
    |     +---VMPowerOffTimeout-------------------------------------+
    V     |                                                         V
    ========  VMPowerOffFailure  ==========  VMPowerOffFailure  ============
    OS immed ----   or      ---> VSP normal ----   or      ---> return False
    ========   OSShutdownNoRMC   ==========  VMPowerOffTimeout  ============
          | _________________________/
          |/
       SUCCESS
          V
     ===========
     return True
     ===========

    :param part restart timeout: See power_off.
    :return: True if the power-off succeeded; False otherwise.
    :raise VMPowerOffTimeout: If the last power-off attempt timed out.
    :raise VMPowerOffFailure: If the last power-off attempt failed.
    """
    # For backward compatibility, OS shutdown is always immediate.  We don't
    # let PowerOn decide whether to use OS or VSP; instead we trap
    # OSShutdownNoRMC (which is very quick) so we can keep this progression
    # linear.

    opts = popts.PowerOffOpts().restart(value=restart)
    # ==> OS immediate
    try:
        PowerOp.stop(part, opts=opts.os_immediate(), timeout=timeout)
        return True
    except pexc.VMPowerOffTimeout:
        LOG.warning(
            _("Non-IBMi OS immediate shutdown timed out.  Trying VSP "
              "hard shutdown.  Partition: %s"), part.name)
        return False
    except pexc.VMPowerOffFailure:
        LOG.warning(
            _("Non-IBMi OS immediate shutdown failed.  Trying VSP "
              "normal shutdown.  Partition: %s"), part.name)
        # Fall through to VSP normal, but with default timeout
        timeout = CONF.pypowervm_job_request_timeout
    except pexc.OSShutdownNoRMC as error:
        LOG.warning(error.args[0])
        # Fall through to VSP normal

    # ==> VSP normal
    try:
        PowerOp.stop(part, opts.vsp_normal(), timeout=timeout)
        return True
    except pexc.VMPowerOffFailure:
        LOG.warning("Non-IBMi VSP normal shutdown failed.  Partition: %s",
                    part.name)

    return False
Example #12
    def _validate_choices(cls, value, choices):
        if value is None:
            raise ValueError(_('None value is not valid.'))
        for choice in choices:
            if value.lower() == choice.lower():
                return choice
        # If we didn't find it, that's a problem...
        values = dict(value=value, field=cls._name, choices=choices)
        msg = _("Value '%(value)s' is not valid for field '%(field)s' with "
                "acceptable choices: %(choices)s") % values
        raise ValueError(msg)
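
A standalone sketch of the case-insensitive choice canonicalization above; the field name and choices are illustrative only.

def validate_choices(value, choices, field='ProcCompatMode'):
    if value is None:
        raise ValueError('None value is not valid.')
    for choice in choices:
        if value.lower() == choice.lower():
            return choice
    raise ValueError("Value '%s' is not valid for field '%s' with acceptable "
                     "choices: %s" % (value, field, choices))

print(validate_choices('power8', ['default', 'POWER7', 'POWER8']))  # POWER8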
Example #13
    def execute(self):
        """Run this FeedTask's WrapperTasks in parallel TaskFlow engine.

        :return: Dictionary of results provided by subtasks and post-execs.
                 The shape of this dict is as normally expected from TaskFlow,
                 noting that the WrapperTasks are executed in a subflow and
                 their results processed into wrapper_task_rets.  For example:
            {'wrapper_task_rets': { uuid: {...}, uuid: {...}, ...}
             'post_exec_x_provides': ...,
             'post_exec_y_provides': ...,
             ...}
        """
        # Ensure a true no-op (in particular, we don't want to GET the feed) if
        # there are no Subtasks
        if not any(
            [self._tx_by_uuid, self._common_tx.subtasks, self._post_exec]):
            LOG.info(_("FeedTask %s has no Subtasks; no-op execution."),
                     self.name)
            return
        rets = {'wrapper_task_rets': {}}
        try:
            # Calling .wrapper_tasks will cause the feed to be fetched and
            # WrapperTasks to be replicated, if not already done.  Only do this
            # if there exists at least one WrapperTask with Subtasks.
            # (NB: It is legal to have a FeedTask that *only* has post-execs.)
            if self._tx_by_uuid or self._common_tx.subtasks:
                pflow = tf_uf.Flow("%s_parallel_flow" % self.name)
                pflow.add(*self.wrapper_tasks.values())
                # Execute the parallel flow now so the results can be provided
                # to any post-execs.
                rets['wrapper_task_rets'] = self._process_subtask_rets(
                    tf_eng.run(pflow,
                               engine='parallel',
                               executor=ContextThreadPoolExecutor(
                                   self.max_workers)))
            if self._post_exec:
                flow = tf_lf.Flow('%s_post_execs' % self.name)
                flow.add(*self._post_exec)
                eng = tf_eng.load(flow, store=rets)
                eng.run()
                rets = eng.storage.fetch_all()
        except tf_ex.WrappedFailure as wfail:
            LOG.error(
                _("FeedTask %s experienced multiple exceptions. They "
                  "are logged individually below."), self.name)
            for fail in wfail:
                LOG.exception(fail.pformat(fail.traceback_str))
            raise ex.MultipleExceptionsInFeedTask(self.name, wfail)

        # Let a non-wrapped exception (which happens if there's only one
        # element in the feed) bubble up as-is.

        return rets
Example #14
def _log_lua_status(status, dev_name, message):
    """Logs any issues with the LUA."""

    if status == LUAStatus.DEVICE_AVAILABLE:
        LOG.info(_("LUA Recovery Successful. Device Found: %s"), dev_name)
    elif status == LUAStatus.FOUND_ITL_ERR:
        # Message is already set.
        LOG.warning(_("ITL Error encountered: %s"), message)
    elif status == LUAStatus.DEVICE_IN_USE:
        LOG.warning(_("%s Device is currently in use."), dev_name)
    elif status == LUAStatus.FOUND_DEVICE_UNKNOWN_UDID:
        LOG.warning(_("%s Device discovered with unknown UDID."), dev_name)
    elif status == LUAStatus.INCORRECT_ITL:
        LOG.warning(_("Failed to Discover the Device : %s"), dev_name)
Example #15
def _remove_lpar_maps(vwrap, lpar_ids, type_str):
    """Remove VFC or VSCSI mappings for the specified LPAR IDs.

    :param vwrap: VIOS EntryWrapper containing the mappings to scrub.
    :param lpar_ids: Iterable of short IDs (not UUIDs) of the LPARs whose
                     mappings are to be removed.
    :param type_str: The type of mapping being removed.  Must be either 'VFC'
                     or 'VSCSI'.
    :return: The list of mappings removed.
    """
    # This will raise KeyError if a bogus type_str is passed in
    rm_maps = dict(VSCSI=sm.remove_maps, VFC=fm.remove_maps)[type_str]
    msgargs = dict(stg_type=type_str, vios_name=vwrap.name)
    removals = []
    for lpar_id in lpar_ids:
        msgargs['lpar_id'] = lpar_id
        _removals = rm_maps(vwrap, lpar_id)
        if _removals:
            LOG.warn(_("Removing %(num_maps)d %(stg_type)s mappings "
                       "associated with LPAR ID %(lpar_id)d from VIOS "
                       "%(vios_name)s."),
                     dict(msgargs, num_maps=len(_removals)))
            removals.extend(_removals)
        else:
            LOG.debug("No %(stg_type)s mappings found for LPAR ID "
                      "%(lpar_id)d on VIOS %(vios_name)s.", msgargs)
    return removals
Example #16
def _upload_in_progress(lus, luname, first):
    """Detect whether another host has an upload is in progress.

    :param lus: List of LUs to be considered (i.e. whose names contain the name
                of the LU we intend to upload).
    :param luname: The name of the LU we intend to upload.
    :param first: Boolean indicating whether this is the first time we have
                  detected an upload in progress.  Should be True on the first
                  call and until the first time this method returns True;
                  thereafter, it should be False.
    :return: True if another host has an upload in progress; False otherwise.
    """
    mkr_lus = [
        lu for lu in lus if lu.name != luname and lu.name.endswith(luname)
    ]
    if mkr_lus:
        # Info the first time; debug thereafter to avoid flooding the log.
        if first:
            LOG.info(
                _('Waiting for in-progress upload(s) to complete.  '
                  'Marker LU(s): %s'), str([lu.name for lu in mkr_lus]))
        else:
            LOG.debug(
                'Waiting for in-progress upload(s) to complete. '
                'Marker LU(s): %s', str([lu.name for lu in mkr_lus]))
        return True

    return False
Example #17
    def rr_state(self):
        """Deprecated (n/a for NovaLink) - use srr_enabled instead."""
        import warnings
        warnings.warn(_("This is not the property you are looking for.  Use "
                        "srr_enabled in a NovaLink environment."),
                      DeprecationWarning)
        return None
Example #18
def vm_metrics(phyp, vioses):
    """Reduces the metrics to a per VM basis.

    The metrics returned by PCM are on a global level.  The anchor points are
    PHYP and the Virtual I/O Servers.

    Typical consumption models for metrics are on a 'per-VM' basis.  The
    dictionary returned is keyed by the client LPAR UUID and contains a
    LparMetric object for each VM.  That object breaks the PHYP and VIOS
    statistics down to the LPAR level.

    :param phyp: The PhypInfo for the metrics.
    :param vioses: A list of the ViosInfos for the Virtual I/O Server
                   components.
    :return vm_data: A dictionary where the UUID is the client LPAR UUID, but
                     the data is a LparMetric for that VM.

                     Note: Data can not be guaranteed.  It may exist in one
                     sample, but then not in another (ex. VM was powered off
                     between gathers).  Always validate that data is 'not
                     None' before use.
    """

    # If the metrics just started, there may not be data yet.  Log this, but
    # return no data
    if phyp is None:
        LOG.warn(
            _("Metric data is not available.  This may be due to "
              "the metrics being recently initialized."))
        return {}

    vm_data = {}
    for lpar_sample in phyp.sample.lpars:
        lpar_metric = lpar_mon.LparMetric(lpar_sample.uuid)

        # Fill in the Processor data.
        lpar_metric.processor = lpar_mon.LparProc(lpar_sample.processor)

        # Fill in the Memory data.
        lpar_metric.memory = lpar_mon.LparMemory(lpar_sample.memory)

        # All partitions require processor and memory.  They may not have
        # storage (ex. network boot) or they may not have network.  Therefore
        # these metrics can not be guaranteed like the others.

        # Fill in the Network data.
        if lpar_sample.network is None:
            lpar_metric.network = None
        else:
            lpar_metric.network = lpar_mon.LparNetwork(lpar_sample.network)

        # Fill in the Storage metrics
        if lpar_sample.storage is None:
            lpar_metric.storage = None
        else:
            lpar_metric.storage = lpar_mon.LparStorage(lpar_sample.storage,
                                                       vioses)

        vm_data[lpar_metric.uuid] = lpar_metric
    return vm_data
Example #19
    def format_request(req):
        body = None
        # Parse the arguments if we're passed a tuple; otherwise it's a string
        if isinstance(req, tuple):
            req_args = req[0]
            req_kwds = req[1]
            dump = dict(method=req_args[0], path=req_args[1])
            for key in req_kwds:
                if key == 'body':
                    # special format for body
                    body = req_kwds.get('body')
                elif key == 'headers':
                    # deep copy the header and change what we can't dump
                    headers = copy.deepcopy(req_kwds.get(key))
                    if 'X-API-Session' in headers:
                        headers['X-API-Session'] = '<SENSITIVE>'
                    dump[key] = str(headers)
                else:
                    dump[key] = str(req_kwds.get(key))
        else:
            dump = req

        # Dump all fields besides the body
        LOG.info(_('REQUEST: %s') % dump)
        # Now dump the full body
        if body is not None:
            LOG.info(body)
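
A standalone sketch of the header-masking step above: deep copy the headers and redact the session token before anything reaches the log.  The header values are fabricated.

import copy
import logging

LOG = logging.getLogger(__name__)

headers = {'X-API-Session': 'abc123secret', 'Accept': 'application/atom+xml'}
safe_headers = copy.deepcopy(headers)
if 'X-API-Session' in safe_headers:
    safe_headers['X-API-Session'] = '<SENSITIVE>'
LOG.info('REQUEST headers: %s', safe_headers)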
Example #20
    def poll_while_status(self, statuses, timeout, sensitive):
        """Poll the Job as long as its status is in the specified list.

        :param statuses: Iterable of JobStatus enum values.  This method
                         continues to poll the Job as long as its status is
                         in the specified list, or until the timeout is
                         reached (whichever comes first).
        :param timeout: Maximum number of seconds to keep checking job status.
                        If zero, poll indefinitely.
        :param sensitive: If True, mask the Job payload in the logs.
        :return: timed_out: True if the timeout was reached before the Job
                            left the specified set of states.
        """
        start_time = time.time()
        iteration_count = 1
        while self.job_status in statuses:
            elapsed_time = time.time() - start_time
            if timeout:
                # wait up to timeout seconds
                if elapsed_time > timeout:
                    return True
            # Log a warning every 5 minutes
            if not iteration_count % 300:
                msg = _("Job %(job_id)s monitoring for %(time)i seconds.")
                LOG.warn(msg, {'job_id': self.job_id, 'time': elapsed_time})
            time.sleep(1)
            self.entry = self.adapter.read_job(
                self.job_id, sensitive=sensitive).entry
            iteration_count += 1
        return False
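
A standalone sketch of the poll-with-timeout pattern above; 'still_running' stands in for whatever status check the caller needs, and a timeout of zero again means poll indefinitely.

import time

def wait_while(still_running, timeout=60, interval=1):
    """Return True if the timeout expired before the condition cleared."""
    start_time = time.time()
    while still_running():
        if timeout and time.time() - start_time > timeout:
            return True
        time.sleep(interval)
    return False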
Example #21
def _rm_vopts(vg_wrap, vopts):
    """Delete some number of virtual optical media from a volume group wrapper.

    The wrapper is not updated back to the REST server.

    :param vg_wrap: VG wrapper representing the Volume Group to update.
    :param vopts: Iterable of VOptMedia wrappers representing the devices to
                  delete.
    :return: The list of VOptMedia removed from vg_wrap.  The consumer may
             use this (e.g. its truthiness) to decide whether to run
             vg_wrap.update() or not.
    """
    vg_om = vg_wrap.vmedia_repos[0].optical_media
    changes = []
    for vopt in vopts:
        try:
            vg_om.remove(vopt)
            LOG.info(_('Deleting virtual optical device %(vopt)s from volume '
                       'group %(vg)s'), {'vopt': vopt.name,
                                         'vg': vg_wrap.name})
            changes.append(vopt)
        except ValueError:
            # It's okay if the vopt was already absent.
            pass

    return changes
Example #22
def _remove_orphan_maps(vwrap, type_str, lpar_id=None):
    """Remove orphan storage mappings (no client adapter) from a list.

    This works for both VSCSI and VFC mappings.

    :param vwrap: VIOS wrapper containing the mappings to inspect.  If type_str
                  is 'VFC', the VIOS wrapper must have been retrieved with the
                  FC_MAPPING extended attribute group; if type_str is 'VSCSI',
                  the SCSI_MAPPING extended attribute group must have been
                  used.
    :param type_str: The type of mapping being removed.  Must be either 'VFC'
                     or 'VSCSI'.
    :param lpar_id: (Optional) Only orphan mappings associated with the
                    specified LPAR ID will be removed.  If None (the default),
                    all LPARs' mappings will be considered.
    :return: The list of mappings removed.  May be empty.
    """
    # This will raise KeyError if type_str isn't one of 'VFC' or 'VSCSI'
    maps = dict(VSCSI=vwrap.scsi_mappings, VFC=vwrap.vfc_mappings)[type_str]
    msgargs = dict(vios_name=vwrap.name, stg_type=type_str)
    # Make a list of orphans first (since we can't remove while iterating).
    # If requested, limit candidates to those matching the specified LPAR ID.
    removals = [mp for mp in maps if mp.client_adapter is None and (
        lpar_id is None or mp.server_adapter.lpar_id == lpar_id)]
    for rm_map in removals:
        maps.remove(rm_map)
    if removals:
        LOG.warn(_("Removing %(num_maps)d orphan %(stg_type)s mappings from "
                   "VIOS %(vios_name)s."),
                 dict(msgargs, num_maps=len(removals)))
    else:
        LOG.debug("No orphan %(stg_type)s mappings found on VIOS "
                  "%(vios_name)s.", msgargs)
    return removals
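
A standalone sketch of the collect-then-remove pattern above: build the removal list first, since removing items from a list while iterating over it directly skips elements.

maps = [{'id': 1, 'client': None}, {'id': 2, 'client': 'vscsi0'},
        {'id': 3, 'client': None}]

removals = [mp for mp in maps if mp['client'] is None]
for rm_map in removals:
    maps.remove(rm_map)

print(maps)  # -> [{'id': 2, 'client': 'vscsi0'}]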
Example #23
def _process_iscsi_result(result, iqn, host_ip):
    """Process iSCSIDiscovery Job results

    Checks the job result return status code and return.
    :param result: ISCSI command job result.
    :param iqn: The IQN or list of IQNs for the created volume on the target.
    :host_ip: The portal or list of portals for the iscsi target.
    :return: status, device_name and udid
    """
    status = result.get('RETURN_CODE')
    # Ignore if command performed on unsupported AIX VIOS
    if not status:
        LOG.warning("ISCSI discovery job failed, no command status returned")
        return None, None, None

    if status == ISCSIStatus.ISCSI_COMMAND_NOT_FOUND:
        LOG.warning(_("ISCSI command performed on unsupported VIOS "))
        return None, None, None

    # DEV_OUTPUT: ["IQN1 dev1 udid", "IQN2 dev2 udid"]
    output = ast.literal_eval(result.get('DEV_OUTPUT', '[]'))

    # Find dev corresponding to given IQN
    dev_name, udid = _find_dev_by_iqn(output, iqn, host_ip)

    return status, dev_name, udid
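
A standalone sketch of parsing the DEV_OUTPUT payload above with ast.literal_eval; the job result dict and its values are fabricated for illustration.

import ast

result = {'RETURN_CODE': '0',
          'DEV_OUTPUT': '["iqn.2016-06.com.example:t1 hdisk3 27abcdef01"]'}

for entry in ast.literal_eval(result.get('DEV_OUTPUT', '[]')):
    target_iqn, dev_name, udid = entry.split()
    print(dev_name, udid)  # -> hdisk3 27abcdef01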
Example #24
class OrphanVLANFoundOnProvision(AbstractMsgFmtError):
    msg_fmt = _("Unable to provision VLAN %(vlan_id)d.  It appears to be "
                "contained on device '%(dev_name)s' on Virtual I/O Server "
                "%(vios)s.  That device is not connected to any Network "
                "Bridge (Shared Ethernet Adapter).  Please manually remove "
                "the device or add it to the Network Bridge before "
                "continuing.")
Example #25
    def adapter(self):
        """DEPRECATED - use 'io_adapter' method instead."""
        import warnings
        warnings.warn(
            _("IOSlot.adapter is deprecated!  Use IOSlot.io_adapter instead."),
            DeprecationWarning)
        return self.io_adapter
Example #26
def _remove_hdisk_classic(adapter, host_name, dev_name, vios_uuid):
    """Command to remove the device from the VIOS.

    Runs a remote command to perform the action.

    :param adapter: The pypowervm adapter.
    :param host_name: The name of the host.
    :param dev_name: The name of the device to remove.
    :param vios_uuid: The Virtual I/O Server UUID.
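    :return: The Job status string from the CLIRunner Job, or None if the Job
             request failed.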
    """
    try:
        # Execute a read on the vios to get the vios name
        resp = adapter.read(pvm_vios.VIOS.schema_type, root_id=vios_uuid)
        vios_w = pvm_vios.VIOS.wrap(resp)
        # build command
        rm_cmd = ('viosvrcmd -m ' + host_name + ' -p ' + vios_w.name +
                  ' -c \"rmdev -dev ' + dev_name + '\"')
        LOG.debug('RMDEV Command Input: %s' % rm_cmd)

        # Get the response for the CLIRunner command
        resp = adapter.read(_MGT_CONSOLE, None,
                            suffix_type=c.SUFFIX_TYPE_DO,
                            suffix_parm='CLIRunner')

        # Create the job parameters
        job_wrapper = pvm_job.Job.wrap(resp)
        ack_parm = 'acknowledgeThisAPIMayGoAwayInTheFuture'
        job_parms = [job_wrapper.create_job_parameter('cmd', rm_cmd),
                     job_wrapper.create_job_parameter(ack_parm,
                                                      'true')]

        job_wrapper.run_job(None, job_parms=job_parms)
        return job_wrapper.job_status()
    except pexc.JobRequestFailed as error:
        LOG.warning(_('CLIRunner Error: %s') % error)
Example #27
def _vnics_using_pport(pport, lpar2vnics):
    """Determine (and warn about) usage of SRIOV physical port by VNICs.

    Ascertain whether an SRIOV physical port is being used as a backing device
    for any VNICs.  The method returns a list of warning messages for each such
    usage found.

    :param pport: pypowervm.wrappers.iocard.SRIOV*PPort wrapper to check.
    :param lpar2vnics: Dict of {LPAR: [VNIC, ...]} gleaned from get_lpar_vnics
    :return: A list of warning messages for found usages of the physical port.
             If no usages were found, the empty list is returned.
    """
    warnings = []
    for lpar, vnics in six.iteritems(lpar2vnics):
        for vnic in vnics:
            if any([
                    backdev for backdev in vnic.back_devs
                    if backdev.sriov_adap_id == pport.sriov_adap_id
                    and backdev.pport_id == pport.port_id
            ]):
                warnings.append(
                    _("SR-IOV Physical Port at location %(loc_code)s is "
                      "backing a vNIC belonging to LPAR %(lpar_name)s (LPAR "
                      "UUID: %(lpar_uuid)s; vNIC UUID: %(vnic_uuid)s).") % {
                          'loc_code': pport.loc_code,
                          'lpar_name': lpar.name,
                          'lpar_uuid': lpar.uuid,
                          'vnic_uuid': vnic.uuid
                      })
    return warnings
Example #28
def _find_or_create_vswitch(adapter, vs_name, crt_vswitch):
    """Finds (or creates) the appropriate virtual switch.

    :param adapter: The pypowervm adapter to perform the update through.
    :param vs_name: The name of the virtual switch that this CNA will be
                    attached to.
    :param crt_vswitch: A boolean to indicate that if the vSwitch can not be
                        found, the system should attempt to create one (with
                        the default parameters - ex: Veb mode).
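    :return: The VSwitch EntryWrapper that was found or created.
    :raise Error: If the vSwitch is not found and crt_vswitch is False.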
    """
    vswitch_w = pvm_net.VSwitch.search(adapter,
                                       parent_type=pvm_ms.System,
                                       parent_uuid=adapter.sys_uuid,
                                       one_result=True,
                                       name=vs_name)

    if vswitch_w is None:
        if crt_vswitch:
            vswitch_w = pvm_net.VSwitch.bld(adapter, vs_name)
            vswitch_w = vswitch_w.create(parent_type=pvm_ms.System,
                                         parent_uuid=adapter.sys_uuid)
        else:
            raise exc.Error(
                _('Unable to find the Virtual Switch %s on the '
                  'system.') % vs_name)
    return vswitch_w
Example #29
    def _run_mkvterm_cmd(lpar_uuid, force):
        cmd = ['mkvterm', '--id', str(lpar_id), '--vnc', '--local']
        ret_code, std_out, std_err = _run_proc(cmd)

        # If the vterm was already started, the mkvterm command will always
        # return an error message with a return code of 3.  However, there
        # are 2 scenarios here, one where it was started with the VNC option
        # previously, which we will get a valid port number back (which is
        # the good path scenario), and one where it was started out-of-band
        # where we will get no port.  If it is the out-of-band scenario and
        # they asked us to force the connection, then we will attempt to
        # terminate the old vterm session so we can start up one with VNC.
        if force and ret_code == 3 and not _parse_vnc_port(std_out):
            LOG.warning(_("Invalid output on vterm open.  Trying to reset the "
                          "vterm.  Error was %s"), std_err)
            close_vterm(adapter, lpar_uuid)
            ret_code, std_out, std_err = _run_proc(cmd)

        # The only error message that is fine is a return code of 3 that a
        # session is already started, where we got back the port back meaning
        # that it was started as VNC.  Else, raise up the error message.
        if ret_code != 0 and not (ret_code == 3 and _parse_vnc_port(std_out)):
            raise pvm_exc.VNCBasedTerminalFailedToOpen(err=std_err)

        # Parse the VNC Port out of the stdout returned from mkvterm
        return _parse_vnc_port(std_out)
Example #30
def _find_free_vlan(adapter, vswitch_w):
    """Finds a free VLAN on the vswitch specified."""

    # A Virtual Network (VNet) will exist for every PowerVM vSwitch / VLAN
    # combination in the system.  Getting the feed is a quick way to determine
    # which VLANs are in use.
    vnets = pvm_net.VNet.get(adapter,
                             parent_type=pvm_ms.System.schema_type,
                             parent_uuid=adapter.sys_uuid)
    # Use that feed to get the VLANs in use, but only get the ones in use for
    # the vSwitch passed in.
    used_vids = [
        x.vlan for x in vnets
        if x.associated_switch_uri == vswitch_w.related_href
    ]

    # Walk through the VLAN range, and as soon as one is found that is not in
    # use, return it to the user.
    for x in range(1, 4094):
        if x not in used_vids:
            return x

    raise exc.Error(
        _('Unable to find a valid VLAN for Virtual Switch %s.') %
        vswitch_w.name)
Example #31
    def _enable_x509_authentication(self, client_socket, server_socket):
        """Enables and Handshakes VeNCrypt using X509 Authentication.

        :param client_socket:  The client-side socket to receive data from.
        :param server_socket:  The server-side socket to forward data to.
        :return ssl_socket:  A client-side socket wrapped for SSL, or None
                             if there is an error.
        """
        try:
            # First perform the RFB Version negotiation between client/server
            self._version_negotiation(client_socket, server_socket)
            # Next perform the Security Authentication Type Negotiation
            if not self._auth_type_negotiation(client_socket):
                return None
            # Next perform the Security Authentication SubType Negotiation
            if not self._auth_subtype_negotiation(client_socket):
                return None
            # Now that the VeNCrypt handshake is done, do the SSL wrapper
            ca_certs = self.x509_certs.get('ca_certs')
            server_key = self.x509_certs.get('server_key')
            server_cert = self.x509_certs.get('server_cert')
            return ssl.wrap_socket(
                client_socket, server_side=True, ca_certs=ca_certs,
                certfile=server_cert, keyfile=server_key,
                ssl_version=ssl.PROTOCOL_TLSv1_2, cert_reqs=ssl.CERT_REQUIRED)
        # If we got an error, log and handle to not take down the thread
        except Exception as exc:
            LOG.warning(_("Error negotiating SSL for VNC Repeater: %s") % exc)
            LOG.exception(exc)
            return None
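
ssl.wrap_socket() is deprecated (and removed in Python 3.12); a minimal sketch of an equivalent server-side wrap with ssl.SSLContext, assuming the same certificate and key file inputs as above (placeholders here).

import ssl

def wrap_client_socket(client_socket, ca_certs, server_cert, server_key):
    """Server-side TLS wrap that requires a client certificate."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_cert_chain(certfile=server_cert, keyfile=server_key)
    context.load_verify_locations(cafile=ca_certs)
    context.verify_mode = ssl.CERT_REQUIRED
    return context.wrap_socket(client_socket, server_side=True)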
Example #32
    def _validate_general(self, attrs=None, partial=False):
        if attrs is None:
            attrs = self.attr
        name_len = len(attrs[NAME])
        if name_len < 1 or name_len > MAX_LPAR_NAME_LEN:
            msg = _("Logical partition name has invalid length. "
                    "Name: %s") % attrs[NAME]
            raise LPARBuilderException(msg)
        LPARType(attrs.get(ENV), allow_none=partial).validate()
        IOSlots(attrs.get(MAX_IO_SLOTS), allow_none=partial).validate()
        AvailPriority(attrs.get(AVAIL_PRIORITY), allow_none=partial).validate()
        EnableLparMetric(attrs.get(ENABLE_LPAR_METRIC),
                         allow_none=partial).validate()
        IDBoundField(attrs.get(ID), allow_none=True).validate()
        # SRR is always optional since the host may not be capable of it.
        SimplifiedRemoteRestart(attrs.get(SRR_CAPABLE),
                                allow_none=True).validate()
        ProcCompatMode(attrs.get(PROC_COMPAT),
                       host_modes=self.mngd_sys.proc_compat_modes,
                       allow_none=partial).validate()
        secure_boot_cap = self._can_secure_boot_for_lpar(attrs.get(ENV, ''))
        SecureBoot(attrs.get(SECURE_BOOT, DEF_SECURE_BOOT),
                   secure_boot_cap).validate()

        # Validate fields specific to IBMi
        if attrs.get(ENV, '') == bp.LPARType.OS400:
            RestrictedIO(attrs.get(RESTRICTED_IO), allow_none=True).validate()

        # Validate affinity check attribute based on host capability
        host_affinity_cap = self.mngd_sys.get_capability(
            'affinity_check_capable')
        EnforceAffinityCheck(attrs.get(ENFORCE_AFFINITY_CHECK),
                             host_affinity_cap).validate()
Example #33
def validate_vios_ready(adapter, max_wait_time=None):
    """Check whether VIOS rmc is up and running on this host.

    Queries the VIOSes for a period of time, attempting to ensure that all
    running VIOSes get an active RMC connection.  If no VIOS is ready by the
    timeout, ViosNotAvailable is raised.  If only some of the VIOSes have an
    active RMC by the end of the wait period, the method completes normally.

    :param adapter: The pypowervm adapter for the query.
    :param max_wait_time: Integer maximum number of seconds to wait for running
                          VIOSes to get an active RMC connection.  Defaults to
                          None, in which case the system will determine an
                          appropriate amount of time to wait.  This can be
                          influenced by whether or not the VIOS just booted.
    :raises: A ViosNotAvailable exception if a VIOS is not available by a
             given timeout.
    """
    # Used to keep track of VIOSes and reduce queries to API
    vwraps, rmc_down_vioses, waited = _wait_for_vioses(adapter, max_wait_time)

    if rmc_down_vioses:
        LOG.warning(
            _('Timed out waiting for the RMC state of all the powered on '
              'Virtual I/O Servers to be active. Wait time was: %(time)d '
              'seconds. VIOSes that did not go active were: %(vioses)s.'), {
                  'time': waited,
                  'vioses': ', '.join([vio.name for vio in rmc_down_vioses])
              })

    # If we didn't get a single active VIOS then raise an exception
    if not get_active_vioses(adapter, vios_wraps=vwraps):
        raise ex.ViosNotAvailable(wait_time=waited)
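
A hedged usage sketch; the module path below assumes this helper lives in
pypowervm.tasks.partition, and the Session defaults assume a local
NovaLink-style connection:

from pypowervm import adapter as pvm_adpt
from pypowervm.tasks import partition as tpar

sess = pvm_adpt.Session()      # local management connection with defaults
adpt = pvm_adpt.Adapter(sess)
# Wait up to five minutes for RMC to come up on the running VIOSes.
tpar.validate_vios_ready(adpt, max_wait_time=300)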
Example #42
    def _populate_new_values(self):
        """Set newly desired LPAR attributes as instance attributes."""
        mem_cfg = self.lpar_w.mem_config
        self.des_mem = mem_cfg.desired
        self.max_mem = mem_cfg.max
        self.min_mem = mem_cfg.min
        self.avail_mem = self.host_w.memory_free
        self.res_name = _('memory')
Example #43
def _rm_lus(ssp_wrap, lus, del_unused_images=True):
    ssp_lus = ssp_wrap.logical_units
    changes = []
    backing_images = set()

    for lu in lus:
        # Is it a linked clone?  (We only care if del_unused_images.)
        if del_unused_images and lu.lu_type == stor.LUType.DISK:
            # Note: This can add None to the set
            backing_images.add(_image_lu_for_clone(ssp_wrap, lu))
        msg_args = dict(lu_name=lu.name, ssp_name=ssp_wrap.name)
        removed = _rm_dev_by_udid(lu, ssp_lus)
        if removed:
            LOG.info(_("Removing LU %(lu_name)s from SSP %(ssp_name)s"),
                     msg_args)
            changes.append(lu)
        else:
            # It's okay if the LU was already absent.
            LOG.info(_("LU %(lu_name)s was not found in SSP %(ssp_name)s"),
                     msg_args)

    # Now remove any unused backing images.  This set will be empty if
    # del_unused_images=False
    for backing_image in backing_images:
        # Ignore None, which could have appeared in the unusual event that a
        # clone existed with no backing image.
        if backing_image is not None:
            msg_args = dict(lu_name=backing_image.name, ssp_name=ssp_wrap.name)
            # Only remove backing images that are not in use.
            if _image_lu_in_use(ssp_wrap, backing_image):
                LOG.debug("Not removing Image LU %(lu_name)s from SSP "
                          "%(ssp_name)s because it is still in use." %
                          msg_args)
            else:
                removed = _rm_dev_by_udid(backing_image, ssp_lus)
                if removed:
                    LOG.info(_("Removing Image LU %(lu_name)s from SSP "
                               "%(ssp_name)s because it is no longer in use."),
                             msg_args)
                    changes.append(backing_image)
                else:
                    # This would be wildly unexpected
                    LOG.warn(_("Backing LU %(lu_name)s was not found in SSP "
                               "%(ssp_name)s"), msg_args)
    return changes
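
The shape of the logic above is "delete the requested LUs, remember their
backing images, then garbage-collect any image no surviving clone still
references."  A generic sketch of that two-pass flow, with purely illustrative
names (the real code keys on LU UDIDs):

def remove_disks_with_image_gc(pool, disks_to_remove, del_unused_images=True):
    # Pass 1: collect backing images of the doomed disks, then drop the disks.
    images = {d.backing_image for d in disks_to_remove
              if del_unused_images and d.backing_image is not None}
    for disk in disks_to_remove:
        pool.discard(disk)
    # Pass 2: drop any collected image that no remaining disk still uses.
    for image in images:
        if not any(getattr(d, 'backing_image', None) is image for d in pool):
            pool.discard(image)
    return pool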
Example #44
def _log_response_retry(try_, max_tries, uri, resp_code):
    LOG.warn(_("Attempt %(retry)d of total %(total)d for URI "
               "%(uri)s.  Error was a known retry response code: "
               "%(resp_code)s"),
             {'retry': try_, 'total': max_tries, 'uri': uri,
              'resp_code': resp_code})
Example #45
    def can_modify_proc(self):
        """Determines if a LPAR is capable of adding/removing processors.

        :return capable: True if procs can be added/removed.  False otherwise.
        :return reason: A translated message that will indicate why it was not
                        capable of modification.  If capable is True, the
                        reason will be None.
        """
        return self._can_modify(self.capabilities.proc_dlpar, _('Processors'))
Example #46
def _argmod(this_try, max_tries, *args, **kwargs):
    """Retry argmod to change 'vios' arg from VIOS wrapper to a string UUID.

    This is so that etag mismatches trigger a fresh GET.
    """
    LOG.warn(_('Retrying modification of SCSI Mapping.'))
    argl = list(args)
    # Second argument is vios.
    if isinstance(argl[1], pvm_vios.VIOS):
        argl[1] = argl[1].uuid
    return argl, kwargs
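
For context, an argmod hook like this runs between retry attempts so the next
call can be made with adjusted arguments.  A generic, self-contained sketch of
that retry shape (pypowervm ships its own retry decorator; this only shows
where the hook fires):

def retry_with_argmod(func, argmod, max_tries, *args, **kwargs):
    for this_try in range(1, max_tries + 1):
        try:
            return func(*args, **kwargs)
        except Exception:
            if this_try == max_tries:
                raise
            # Let the hook rewrite the args (e.g. swap a wrapper for its
            # UUID so the next attempt does a fresh GET).
            args, kwargs = argmod(this_try, max_tries, *args, **kwargs)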
Example #47
    def _validate_host_has_available_res(self, des, avail, res_name):
        if round(des, 2) > round(avail, 2):
            ex_args = {'requested': '%.2f' % des,
                       'avail': '%.2f' % avail,
                       'instance_name': self.lpar_w.name,
                       'res_name': res_name}
            msg = _("Insufficient available %(res_name)s on host for "
                    "virtual machine '%(instance_name)s' (%(requested)s "
                    "requested, %(avail)s available)") % ex_args
            LOG.error(msg)
            raise ValidatorException(msg)
Example #48
    def _validate_host_max_allowed_procs_per_lpar(self):
        if self.des_vcpus > self.max_procs_per_aix_linux_lpar:
            ex_args = {'vcpus': self.des_vcpus,
                       'max_allowed': self.max_procs_per_aix_linux_lpar,
                       'instance_name': self.lpar_w.name}
            msg = _("The desired processors (%(vcpus)d) cannot be above "
                    "the maximum allowed processors per partition "
                    "(%(max_allowed)d) for virtual machine "
                    "'%(instance_name)s'.") % ex_args
            LOG.error(msg)
            raise ValidatorException(msg)
Example #49
    def _validate_host_max_sys_procs_limit(self):
        if self.max_vcpus > self.max_sys_procs_limit:
            ex_args = {'vcpus': self.max_vcpus,
                       'max_allowed': self.max_sys_procs_limit,
                       'instance_name': self.lpar_w.name}
            msg = _("The maximum processors (%(vcpus)d) cannot be above "
                    "the maximum system capacity processor limit "
                    "(%(max_allowed)d) for virtual machine "
                    "'%(instance_name)s'.") % ex_args
            LOG.error(msg)
            raise ValidatorException(msg)
Example #50
    def _validate_active_resize(self):
        """Enforce validation rules specific to active resize."""
        # Simplified Remote Restart capability cannot be changed when lpar is
        # not powered off.
        curr_srr_enabled = self.cur_lpar_w.srr_enabled
        if curr_srr_enabled != self.srr_enabled:
            msg = (_("The virtual machine must be powered off before "
                     "changing the simplified remote restart capability. "
                     "Power off virtual machine %s and try again.") %
                   self.cur_lpar_w.name)
            raise ValidatorException(msg)
Example #51
def add_map(vios_w, host_uuid, lpar_uuid, port_map, error_if_invalid=True):
    """Adds a vFC mapping to a given VIOS wrapper.

    These changes are not flushed back to the REST server.  The wrapper itself
    is simply modified.

    :param vios_w: VIOS EntryWrapper representing the Virtual I/O Server whose
                   VFC mappings are to be updated.
    :param host_uuid: The pypowervm UUID of the host.
    :param lpar_uuid: The pypowervm UUID of the client LPAR to attach to.
    :param port_map: The port mapping (as defined by the derive_npiv_map
                     method).
    :param error_if_invalid: (Optional, Default: True) If the port mapping
                             physical port can not be found, raise an error.
    :return: The VFCMapping that was added.  If the mapping already existed
             then None is returned.
    """
    # This is meant to find the physical port.  It can run against a single
    # element; we assume the invoker has passed the correct VIOS.
    new_vios_w, p_port = find_vios_for_wwpn([vios_w], port_map[0])
    if new_vios_w is None:
        if error_if_invalid:
            # Log the payload in the response.
            LOG.warn(_("Unable to find appropriate VIOS.  The payload "
                       "provided was likely insufficient.  The payload data "
                       "is:\n %s"), vios_w.toxmlstring())
            raise e.UnableToDerivePhysicalPortForNPIV(wwpn=port_map[0],
                                                      vio_uri=vios_w.href)
        else:
            return None

    v_wwpns = None
    if port_map[1] != _FUSED_ANY_WWPN:
        v_wwpns = [u.sanitize_wwpn_for_api(x) for x in port_map[1].split()]

    if v_wwpns is not None:
        for vfc_map in vios_w.vfc_mappings:
            if (vfc_map.client_adapter is None or
                    vfc_map.client_adapter.wwpns is None):
                continue
            if set(vfc_map.client_adapter.wwpns) != set(v_wwpns):
                continue

            # If we reach this point, we know that we have a matching map.  So
            # the attach of this volume, for this vFC mapping is complete.
            # Nothing else needs to be done, exit the method.
            return None

    # If we reach this point, we need to create a new mapping and add it to
    # the VIOS's vFC mappings.
    vfc_map = pvm_vios.VFCMapping.bld(vios_w.adapter, host_uuid, lpar_uuid,
                                      p_port.name, client_wwpns=v_wwpns)
    vios_w.vfc_mappings.append(vfc_map)
    return vfc_map
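
A hedged usage sketch.  It assumes the pypowervm vfc_mapper module layout and
the derive_npiv_map helper referenced in the docstring; adapter, host_uuid,
lpar_uuid and the WWPN strings are placeholders:

from pypowervm.tasks import vfc_mapper as pvm_vfcm
from pypowervm.wrappers import virtual_io_server as pvm_vios

vios_wraps = pvm_vios.VIOS.get(adapter)
port_maps = pvm_vfcm.derive_npiv_map(
    vios_wraps, ['phys_port_wwpn'], ['virt_wwpn_a', 'virt_wwpn_b'])
for vios_w in vios_wraps:
    for pmap in port_maps:
        # Skip mappings whose physical port lives on a different VIOS.
        pvm_vfcm.add_map(vios_w, host_uuid, lpar_uuid, pmap,
                         error_if_invalid=False)
    vios_w.update()    # flush the new mappings back to the REST server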
Example #52
    def add_subtask(self, task):
        """Add a Subtask to this WrapperTask.

        Subtasks will be invoked serially and synchronously in the order in
        which they are added.

        :param task: Instance of a Subtask subclass containing the logic to
                     invoke.
        :return: self, for chaining convenience.
        """
        if not isinstance(task, Subtask):
            raise ValueError(_("Must supply a valid Subtask."))
        # Seed the 'provided' dict and ensure no duplicate names
        if task.provides is not None:
            if task.provides in self.provided_keys:
                raise ValueError(_("Duplicate 'provides' name %s.") %
                                 task.provides)
            self.provided_keys.add(task.provides)
        self._tasks.append(task)
        return self
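
A hedged sketch of the Subtask side of this contract, assuming the pypowervm
transaction module's Subtask base class: execute() receives the (possibly
freshly fetched) wrapper plus the args saved at construction, and returns True
only if it changed something.  lpar_w below is an already-fetched LPAR
EntryWrapper, and the attribute access is illustrative:

from pypowervm.utils import transaction as tx

class GrowMem(tx.Subtask):
    def execute(self, lpar_w, new_mem, *args, **kwargs):
        if lpar_w.mem_config.desired == new_mem:
            return False             # nothing changed; no POST needed
        lpar_w.mem_config.desired = new_mem
        return True                  # tells the WrapperTask to update

wtask = tx.WrapperTask('grow_mem', lpar_w)
wtask.add_subtask(GrowMem(4096, provides='grew_mem'))
wtask.execute()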
Example #53
    def _validate_active_resize(self):
        """Enforce validation rules specific to active resize."""
        curr_mem_cfg = self.cur_lpar_w.mem_config
        curr_min_mem = curr_mem_cfg.min
        curr_max_mem = curr_mem_cfg.max
        # min/max values cannot be changed when lpar is not powered off.
        if self.max_mem != curr_max_mem or self.min_mem != curr_min_mem:
            msg = (_("The virtual machine must be powered off before "
                     "changing the minimum or maximum memory. Power off "
                     "virtual machine %s and try again.") %
                   self.cur_lpar_w.name)
            raise ValidatorException(msg)
        # Common validations for both active & inactive resizes.
        self._validate_resize_common()
Example #54
    def __init__(self, name, feed_or_getter, max_workers=10,
                 update_timeout=-1):
        """Create a FeedTask with a FeedGetter (preferred) or existing feed.

        :param name: A descriptive string name.  This will be used along with
                     each wrapper's UUID to generate the name for that
                     wrapper's WrapperTask.
        :param feed_or_getter: pypowervm.wrappers.entry_wrapper.FeedGetter or
                               an already-fetched feed (list of EntryWrappers)
                               over which to operate.
        :param max_workers: (Optional) Integer indicating the maximum number of
                            worker threads to run in parallel within the .flow
                            or by the .execute method. See
                            concurrent.futures.ThreadPoolExecutor(max_workers).
        :param update_timeout: (Optional) Integer number of seconds after which
                               to time each WrapperTask's POST request.  -1,
                               the default, causes the request to use the
                               timeout value configured on the Session
                               belonging to the Adapter.
        """
        super(FeedTask, self).__init__(name)
        if isinstance(feed_or_getter, ewrap.FeedGetter):
            self._feed = None
            self._getter = feed_or_getter
        elif isinstance(feed_or_getter, list):
            # Make sure the feed has something in it.
            if len(feed_or_getter) == 0:
                raise ex.FeedTaskEmptyFeed()
            # Make sure it's a list of EntryWrapper
            if [i for i in feed_or_getter
                    if not isinstance(i, ewrap.EntryWrapper)]:
                raise ValueError("List must contain EntryWrappers "
                                 "exclusively.")
            self._feed = feed_or_getter
            self._getter = None
        else:
            raise ValueError(_("Must supply either a list of EntryWrappers or "
                               "a FeedGetter."))
        # Max WrapperTasks to run in parallel
        self.max_workers = max_workers
        self.update_timeout = update_timeout
        # Map of {uuid: WrapperTask}.  We keep this empty until we need the
        # individual WrapperTasks.  This is triggered by .wrapper_tasks and
        # .get_wrapper(uuid) (and obviously executing).
        self._tx_by_uuid = {}
        # Until we *need* individual WrapperTasks, save subtasks in one place.
        # EntryWrapperGetter is a cheat to allow us to build the WrapperTask.
        self._common_tx = WrapperTask(
            'internal', ewrap.EntryWrapperGetter(None, ewrap.Wrapper,
                                                 None))
        self._post_exec = []
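
A hedged usage sketch of the FeedGetter form, assuming FeedGetter takes the
adapter and wrapper class and that FeedTask exposes add_functor_subtask as in
pypowervm's transaction module; adapter and the functor are placeholders:

from pypowervm.utils import transaction as tx
from pypowervm.wrappers import entry_wrapper as ewrap
from pypowervm.wrappers import virtual_io_server as pvm_vios

def audit_vios(vios_w):
    # Inspect the wrapper; return True only if it was modified.
    return False

ftask = tx.FeedTask('audit_vioses',
                    ewrap.FeedGetter(adapter, pvm_vios.VIOS),
                    max_workers=4)
ftask.add_functor_subtask(audit_vios)
ftask.execute()    # one WrapperTask per VIOS, run in parallel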
Example #55
    def delete_job(self):
        """Cleans this Job off of the REST server, if it is completed.

        :raise JobRequestFailed: if the Job is detected to be running.
        """
        if self.job_status == JobStatus.RUNNING:
            error = (_("Job %s not deleted. Job is in running state.")
                     % self.job_id)
            LOG.error(error)
            raise pvmex.Error(error)
        try:
            self.adapter.delete(_JOBS, self.job_id)
        except pvmex.Error as exc:
            LOG.exception(exc)
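
The surrounding Job lifecycle, sketched from the pieces shown elsewhere in this
document (the 'do' read, Job.wrap, run_job and delete_job); adapter, lpar_uuid
and the suffix parameter are placeholders:

from pypowervm.wrappers import job

resp = adapter.read('LogicalPartition', lpar_uuid,
                    suffix_type='do', suffix_parm='SomeOperation')
job_w = job.Job.wrap(resp.entry)
job_w.run_job(lpar_uuid)      # blocks until the Job completes (or times out)
job_w.delete_job()            # clean the completed Job off the REST server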
Example #56
    def _populate_dedicated_proc_values(self):
        """Set dedicated proc values as instance attributes."""
        ded_proc_cfg = self.lpar_w.proc_config.dedicated_proc_cfg
        self.des_procs = ded_proc_cfg.desired
        self.res_name = _('CPUs')
        # Proc host limits for dedicated proc
        self.max_procs_per_aix_linux_lpar = (
            self.host_w.max_procs_per_aix_linux_lpar)
        self.max_sys_procs_limit = self.host_w.max_sys_procs_limit

        # VCPUs don't mean anything in dedicated proc cfg.  For all intents
        # and purposes, vcpus == procs here; the aliases below just keep the
        # naming convention consistent.
        self.des_vcpus = self.des_procs
        self.max_vcpus = ded_proc_cfg.max
        self.min_vcpus = ded_proc_cfg.min
Example #57
    def _populate_shared_proc_values(self):
        """Set shared proc values as instance attributes."""
        shr_proc_cfg = self.lpar_w.proc_config.shared_proc_cfg
        self.des_procs = shr_proc_cfg.desired_units
        self.res_name = _('processing units')
        # VCPU host limits for shared proc
        self.max_procs_per_aix_linux_lpar = (
            self.host_w.max_vcpus_per_aix_linux_lpar)
        self.max_sys_procs_limit = self.host_w.max_sys_vcpus_limit

        self.des_vcpus = shr_proc_cfg.desired_virtual
        self.max_vcpus = shr_proc_cfg.max_virtual
        self.min_vcpus = shr_proc_cfg.min_virtual
        self.max_proc_units = shr_proc_cfg.max_units
        self.min_proc_units = shr_proc_cfg.min_units
        self.pool_id = shr_proc_cfg.pool_id
Example #58
    def __init__(self, name, wrapper_or_getter, subtasks=None,
                 allow_empty=False, update_timeout=-1):
        """Initialize this WrapperTask.

        :param name: A descriptive string name for the WrapperTask.
        :param wrapper_or_getter: An EntryWrapper or EntryWrapperGetter
                                  representing the PowerVM object on which this
                                  WrapperTask is to be performed.
        :param subtasks: (Optional) Iterable of Subtask subclass instances with
                         which to seed this WrapperTask.
        :param allow_empty: (Optional) By default, executing a WrapperTask
                            containing no Subtasks will result in exception
                            WrapperTaskNoSubtasks.  If this flag is set to
                            True, this condition will instead log an info
                            message and return None (NOT the wrapper - note,
                            this is different from "subtasks ran, but didn't
                            change anything," which returns the wrapper).
        :param update_timeout: (Optional) Integer number of seconds after which
                               to time out the POST request.  -1, the default,
                               causes the request to use the timeout value
                               configured on the Session belonging to the
                               Adapter.
        :raise WrapperTaskNoSubtasks: If allow_empty is False and this
                                      WrapperTask is executed without any
                                      Subtasks having been added.
        """
        if isinstance(wrapper_or_getter, ewrap.EntryWrapperGetter):
            self._wrapper = None
            self._getter = wrapper_or_getter
        elif isinstance(wrapper_or_getter, ewrap.EntryWrapper):
            self._wrapper = wrapper_or_getter
            self._getter = None
        else:
            raise ValueError(_("Must supply either EntryWrapper or "
                               "EntryWrapperGetter."))
        super(WrapperTask, self).__init__(
            name, provides=('wrapper_%s' % wrapper_or_getter.uuid,
                            'subtask_rets_%s' % wrapper_or_getter.uuid))
        self._tasks = [] if subtasks is None else list(subtasks)
        self.allow_empty = allow_empty
        self.update_timeout = update_timeout
        # Dict of return values provided by Subtasks using the 'provides' arg.
        self.provided = {}
        # Set of 'provided' names to prevent duplicates.  (Some day we may want
        # to make this a list and use it to denote the order in which subtasks
        # were run.)
        self.provided_keys = set()
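
A hedged sketch of the deferred-GET form: passing an EntryWrapperGetter means
the LPAR is fetched only when the task executes (and re-fetched on an etag
mismatch retry).  It assumes WrapperTask exposes add_functor_subtask as in
pypowervm's transaction module; adapter, lpar_uuid and the new name are
placeholders:

from pypowervm.utils import transaction as tx
from pypowervm.wrappers import entry_wrapper as ewrap
from pypowervm.wrappers import logical_partition as pvm_lpar

def rename(lpar_w, new_name):
    lpar_w.name = new_name
    return True                   # signal that a POST is required

getter = ewrap.EntryWrapperGetter(adapter, pvm_lpar.LPAR, lpar_uuid)
wtask = tx.WrapperTask('rename_lpar', getter)
wtask.add_functor_subtask(rename, 'new-name')
wtask.execute()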
Example #59
def _close_vterm_non_local(adapter, lpar_uuid):
    """Job to force the close of the terminal when the API is remote.

    :param adapter: The adapter to talk over the API.
    :param lpar_uuid: partition uuid
    """
    # Close vterm on the lpar
    resp = adapter.read(pvm_lpar.LPAR.schema_type, lpar_uuid,
                        suffix_type=c.SUFFIX_TYPE_DO,
                        suffix_parm=_SUFFIX_PARM_CLOSE_VTERM)
    job_wrapper = job.Job.wrap(resp.entry)

    try:
        job_wrapper.run_job(lpar_uuid)
    except Exception:
        LOG.exception(_('Unable to close vterm.'))
        raise
Example #60
    def format_response(resp):
        body = None
        # If we're passed a dict, format each field; otherwise it's a string.
        if isinstance(resp, dict):
            dump = {}
            for key in resp:
                if key == 'body':
                    # special format for body
                    body = resp.get('body')
                else:
                    dump[key] = str(resp.get(key))
        else:
            dump = resp

        # Dump all fields besides the body first
        LOG.info(_('RESPONSE: %s') % dump)
        # Now dump the full body, on the next line, if available
        if body is not None:
            LOG.info(body)