Example #1
    def unbind_default_limited_pkeys_with_regular_guids(self,
                                                        node_ib_client_ids,
                                                        node_ib_guids):
        """unbind default limited keys when using regular GUID solution

        :param node_ib_client_ids:
        :param node_ib_guids:
        :return:
        """
        if self.conf.default_limited_pkeys:
            sriov_enabled = self.conf.enable_sriov
            if sriov_enabled:
                LOG.info(_('SR-IOV is enabled, remove virtual guids '
                           'from default limited pkeys now.'))
                vf_count = len(self.conf.default_limited_pkeys)
                virtual_guids = [
                    list(utils.generate_random_virtual_guids(count=vf_count))
                    for _ in node_ib_client_ids]
                grouped_guids = dict(zip(self.conf.default_limited_pkeys,
                                         zip(*virtual_guids)))
                LOG.info(_('Virtual guids for default limited pkeys are '
                           '%(grouped_guids)s.'),
                         {'grouped_guids': grouped_guids})
                for pkey, vf_guids in grouped_guids.items():
                    self.ufm_client.pkey.remove_guids(pkey, vf_guids)
                    LOG.info(_('Successfully removed IB virtual guids '
                               '%(guids)s from limited pkey %(pkey)s.'),
                             {'guids': vf_guids, 'pkey': pkey})
            else:
                for pkey in self.conf.default_limited_pkeys:
                    self.ufm_client.pkey.remove_guids(pkey, node_ib_guids)
                    LOG.info(_('Successfully removed IB physical guids '
                               '%(guids)s from limited pkey %(pkey)s.'),
                             {'guids': node_ib_guids, 'pkey': pkey})
Example #2
    def delete_network_postcommit(self, context):
        """Delete a network.

        :param context: NetworkContext instance describing the current
            state of the network, prior to the call to delete it.

        Called after the transaction commits. Call can block, though
        will block the entire process so care should be taken to not
        drastically affect performance. Runtime errors are not
        expected, and will not prevent the resource from being
        deleted.
        """
        # TODO(qianbiao.ng): if a UFM partition has no guid, it will be auto
        #  deleted. So, if the port unbind logic is stable (guids are removed
        #  when unbound), we may be able to skip this
        #  delete_network_postcommit callback?
        for segment in context.network_segments:
            if self._is_segment_supported(segment):
                segmentation_id = segment.get(api.SEGMENTATION_ID)
                pkey = hex(segmentation_id)
                try:
                    self.ufm_client.pkey.delete(pkey)
                except ufm_exc.ResourceNotFoundError:
                    # NOTE(turnbig): ignore the 404 exception, because the
                    #  UFM partition key may not have been set up at this
                    #  point.
                    LOG.info(_("UFM partition key %(pkey)s does not exist, "
                               "nothing to delete."),
                             {'pkey': pkey})
                except ufm_exc.UfmClientError as e:
                    LOG.error(_("Failed to delete UFM partition key %(pkey)s, "
                                "reason is %(reason)s."),
                              {'pkey': pkey, 'reason': e})
                    raise
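
Note the partition key handed to the UFM client is simply the hex string form of the Neutron segmentation ID, for example:

segmentation_id = 15998      # a sample VXLAN segmentation ID
pkey = hex(segmentation_id)  # '0x3e7e', used as the UFM partition key name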
Example #3
    def _is_network_supported(self, network):
        """Return whether a network is supported by this driver.

        :param network: the network
            (:class:`openstack.network.v2.network.Network`) instance to check
        :returns: True if the network is supported, else False
        """
        LOG.debug("Checking whether network is supported: %(network)s.",
                  {'network': network})

        network_id = network.get('id')
        network_type = network.get('provider_network_type')
        segmentation_id = network.get('provider_segmentation_id')
        physical_network = network.get('provider_physical_network')

        if network_type not in self.allowed_network_types:
            LOG.debug(_(
                'Network %(network_id)s with segmentation-id '
                '%(segmentation_id)s has network type %(network_type)s, '
                'but the mlnx_ib_bm mechanism driver only '
                'supports %(allowed_network_types)s.'),
                {'network_id': network_id,
                 'segmentation_id': segmentation_id,
                 'network_type': network_type,
                 'allowed_network_types': self.allowed_network_types})
            return False

        if not segmentation_id:
            LOG.debug(_(
                'Network %(network_id)s with segment %(id)s does not have a '
                'segmentation ID; mlnx_ib_bm requires a segmentation ID to '
                'create UFM partition.'),
                {'network_id': network_id, 'id': segmentation_id})
            return False

        if not self._is_physical_network_matches(physical_network):
            LOG.debug(_(
                'Network %(network_id)s with segment %(id)s is connected '
                'to physical network %(physnet)s, but the mlnx_ib_bm '
                'mechanism driver is pre-configured to watch physical '
                'networks %(allowed_physical_networks)s.'),
                {'network_id': network_id,
                 'id': segmentation_id,
                 'physnet': physical_network,
                 'allowed_physical_networks': self.allowed_physical_networks})
            return False

        return True
Example #4
def get_client():
    """Create an UFM REST API client instance.

    :return: an UFM REST API client instance.
    """
    global UFM_CLIENT
    if not UFM_CLIENT:
        conf = CONF[constants.MLNX_BAREMETAL_DRIVER_GROUP_NAME]
        verify_ca = conf.get('verify_ca', 'True')
        if isinstance(verify_ca, str):
            if not os.path.exists(verify_ca):
                try:
                    verify_ca = strutils.bool_from_string(verify_ca,
                                                          strict=True)
                except ValueError:
                    option = ('[%s]/verify_ca' %
                              constants.MLNX_BAREMETAL_DRIVER_GROUP_NAME)
                    details = _("The value should be a Boolean or a path "
                                "to a ca file/directory.")
                    raise exceptions.InvalidConfigValueException(
                        details=details, option=option, value=verify_ca)
        UFM_CLIENT = client.UfmClient(conf.endpoint, conf.username,
                                      conf.password, verify_ca)

    return UFM_CLIENT
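
For reference, a minimal sketch of how the verify_ca values are interpreted by the logic above (the CA path below is hypothetical):

from oslo_utils import strutils

strutils.bool_from_string('True', strict=True)   # -> True, verify certs
strutils.bool_from_string('false', strict=True)  # -> False, skip verifying
# An existing path such as '/etc/ssl/certs/ufm-ca.pem' never reaches
# bool_from_string: os.path.exists() short-circuits first, and the string
# is kept as the CA bundle path handed to UfmClient.
strutils.bool_from_string('bogus', strict=True)  # raises ValueError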
Example #5
    def unbind_default_limited_pkeys(self, port, node_ib_guids):
        """unbind virtual guids from default limited pkeys

        NOTE(qianbiao.ng): port can not be updated when node is locked, so,
        binding profile can not be set when binding port (node is locked).
        This solution is deprecated for now.

        :param port:
        :param node_ib_guids:
        :return:
        """
        mac_address = port.get('mac_address')
        eth_port = self._get_ironic_port_by_mac(mac_address)

        binding_profile = eth_port.extra or {}
        default_limited_pkeys = binding_profile.get(BD_DFT_LIMITED_PKEYS)
        if default_limited_pkeys:
            sriov_enabled = binding_profile.get(BD_ENABLE_SRIOV)
            if sriov_enabled:
                LOG.info(_('SR-IOV is enabled, remove virtual guids '
                           'from default limited pkeys now.'))
                virtual_guids = binding_profile.get(BD_VIRTUAL_GUIDS)
                grouped_guids = dict(zip(default_limited_pkeys,
                                         virtual_guids))
                LOG.info(_('Virtual guids for default limited pkeys are '
                           '%(grouped_guids)s.'),
                         {'grouped_guids': grouped_guids})
                for pkey, vf_guids in grouped_guids.items():
                    self.ufm_client.pkey.remove_guids(pkey, vf_guids)
                    LOG.info(_('Successfully removed IB virtual guids '
                               '%(guids)s from limited pkey %(pkey)s.'),
                             {'guids': vf_guids, 'pkey': pkey})
            else:
                for pkey in default_limited_pkeys:
                    self.ufm_client.pkey.remove_guids(pkey, node_ib_guids)
                    LOG.info(_('Successfully removed IB physical guids '
                               '%(guids)s from limited pkey %(pkey)s.'),
                             {'guids': node_ib_guids, 'pkey': pkey})

            # restore Ironic PXE port extra (mac_address was already
            #  fetched at the top of this method)
            self.remove_ironic_port_extra(mac_address)
Example #6
    def bind_default_limited_pkeys(self, node_ib_client_ids):
        """Binding guids to default limited pkeys, if SR-IOV is enable,
        will auto generate virtual GUID from source physical GUID.

        :param node_ib_client_ids:  source physical infiniband client-id list
        :return: binding profiles of those default limited pkeys
        """
        if not self.conf.default_limited_pkeys:
            LOG.info(_('No default limited pkeys are configured.'))
            return

        LOG.info(_('Default limited pkeys %(pkeys)s are configured.'),
                 {'pkeys': self.conf.default_limited_pkeys})
        binding_profile = {
            BD_ENABLE_SRIOV: self.conf.enable_sriov,
            BD_DFT_LIMITED_PKEYS: self.conf.default_limited_pkeys
        }

        if self.conf.enable_sriov:
            LOG.info(_('SR-IOV is enabled, will generate virtual guids for '
                       'default limited pkeys.'))
            vf_count = len(self.conf.default_limited_pkeys)
            # one batch of generated virtual guids per physical IB port
            virtual_guids = [
                list(utils.generate_random_virtual_guids(count=vf_count))
                for _ in node_ib_client_ids]
            grouped_guids = dict(zip(self.conf.default_limited_pkeys,
                                     zip(*virtual_guids)))
            LOG.info(_('Virtual guids for default limited pkeys are '
                       '%(grouped_guids)s.'),
                     {'grouped_guids': grouped_guids})
            for pkey, vf_guids in grouped_guids.items():
                self.ufm_client.pkey.add_guids(pkey, guids=vf_guids,
                                               index0=True,
                                               full_membership=False)
                LOG.info(_('Successfully bound IB virtual guids %(guids)s to '
                           'limited pkey %(pkey)s.'),
                         {'guids': vf_guids, 'pkey': pkey})

            # materialize the transpose as lists: py3 zip() is a one-shot
            #  iterator and the profile must stay JSON-serializable
            binding_profile[BD_VIRTUAL_GUIDS] = [
                list(guids) for guids in zip(*virtual_guids)]
        else:
            guids = [ufm_utils.mlnx_ib_client_id_to_guid(client_id)
                     for client_id in node_ib_client_ids]
            for pkey in self.conf.default_limited_pkeys:
                self.ufm_client.pkey.add_guids(pkey, guids=guids,
                                               index0=False,
                                               full_membership=False)
                LOG.info(_('Successfully bound IB physical guids %(guids)s to '
                           'limited pkey %(pkey)s with option index0 False.'),
                         {'guids': guids, 'pkey': pkey})

        return binding_profile
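
The zip(*virtual_guids) transpose used above groups the i-th generated virtual GUID of every physical port under the i-th default limited pkey. A standalone illustration with made-up values:

# one row of generated virtual GUIDs per physical IB port (hypothetical)
virtual_guids = [['vg-a1', 'vg-a2'],   # generated for physical port A
                 ['vg-b1', 'vg-b2']]   # generated for physical port B
default_limited_pkeys = ['0x7001', '0x7002']
grouped = dict(zip(default_limited_pkeys, zip(*virtual_guids)))
# {'0x7001': ('vg-a1', 'vg-b1'), '0x7002': ('vg-a2', 'vg-b2')}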
Example #7
    def _is_segment_supported(self, segment):
        """Return whether a network segment is supported by this driver. A
        segment dictionary looks like:

        {
            "network_id": "9425b757-339d-4954-a17b-dbb3f7061006",
            "segmentation_id": 15998,
            "physical_network": null,
            "id": "3a0946cc-1f61-4211-8a33-b8e2b0b7a2a0",
            "network_type": "vxlan"
        },

        Segments supported by this driver must:
         - have network type 'vxlan' or 'vlan'
         - have a physical network within the pre-configured physical networks
         - have a segmentation_id

        :param segment: the segment to check
        :returns: True if the segment is supported, else False
        """
        LOG.debug("Checking whether segment is supported: %(segment)s ",
                  {'segment': segment})

        segment_id = segment[api.ID]
        network_id = segment[api.NETWORK_ID]
        network_type = segment[api.NETWORK_TYPE]
        segmentation_id = segment[api.SEGMENTATION_ID]
        physical_network = segment[api.PHYSICAL_NETWORK]

        if network_type not in self.allowed_network_types:
            LOG.debug(_(
                'Network %(network_id)s with segment %(id)s has '
                'network type %(network_type)s, but the mlnx_ib_bm mechanism '
                'driver only supports %(allowed_network_types)s.'),
                {'network_id': network_id,
                 'id': segment_id,
                 'network_type': network_type,
                 'allowed_network_types': self.allowed_network_types})
            return False

        if not segmentation_id:
            LOG.debug(_(
                'Network %(network_id)s with segment %(id)s does not have a '
                'segmentation ID; mlnx_ib_bm requires a segmentation ID to '
                'create UFM partition.'),
                {'network_id': network_id, 'id': segment_id})
            return False

        if not self._is_physical_network_matches(physical_network):
            LOG.debug(_(
                'Network %(network_id)s with segment %(id)s is connected '
                'to physical network %(physnet)s, but the mlnx_ib_bm '
                'mechanism driver is pre-configured to watch physical '
                'networks %(allowed_physical_networks)s.'),
                {'network_id': network_id,
                 'id': segment_id,
                 'physnet': physical_network,
                 'allowed_physical_networks': self.allowed_physical_networks})
            return False

        return True
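
A minimal sketch of this check against the docstring's sample segment; in neutron_lib the api.* constants used above resolve to the plain string keys shown here, and the outcome depends on the driver's configured network types and physical networks:

sample_segment = {
    'id': '3a0946cc-1f61-4211-8a33-b8e2b0b7a2a0',
    'network_id': '9425b757-339d-4954-a17b-dbb3f7061006',
    'network_type': 'vxlan',
    'segmentation_id': 15998,
    'physical_network': None,
}
# driver._is_segment_supported(sample_segment) -> True only when 'vxlan'
# is in allowed_network_types and the physical network matches the
# pre-configured list.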
Example #8
    def bind_port(self, context):
        """Attempt to bind a port.

        :param context: PortContext instance describing the port

        This method is called outside any transaction to attempt to
        establish a port binding using this mechanism driver. Bindings
        may be created at each of multiple levels of a hierarchical
        network, and are established from the top level downward. At
        each level, the mechanism driver determines whether it can
        bind to any of the network segments in the
        context.segments_to_bind property, based on the value of the
        context.host property, any relevant port or network
        attributes, and its own knowledge of the network topology. At
        the top level, context.segments_to_bind contains the static
        segments of the port's network. At each lower level of
        binding, it contains static or dynamic segments supplied by
        the driver that bound at the level above. If the driver is
        able to complete the binding of the port to any segment in
        context.segments_to_bind, it must call context.set_binding
        with the binding details. If it can partially bind the port,
        it must call context.continue_binding with the network
        segments to be used to bind at the next lower level.

        If the binding results are committed after bind_port returns,
        they will be seen by all mechanism drivers as
        update_port_precommit and update_port_postcommit calls. But if
        some other thread or process concurrently binds or updates the
        port, these binding results will not be committed, and
        update_port_precommit and update_port_postcommit will not be
        called on the mechanism drivers with these results. Because
        binding results can be discarded rather than committed,
        drivers should avoid making persistent state changes in
        bind_port, or else must ensure that such state changes are
        eventually cleaned up.

        Implementing this method explicitly declares the mechanism
        driver as having the intention to bind ports. This is inspected
        by the QoS service to identify the available QoS rules you
        can use with ports.
        """

        port = context.current
        is_baremetal_port = self._is_baremetal_port(port)
        if not is_baremetal_port:
            LOG.info(_('Port is not a baremetal port, skip binding.'))
            return

        # NOTE(turnbig): it seems ml2 driver will auto check whether a
        #  driver has been bound by a driver through binding_levels
        # has_port_bound = self._is_port_bound(port)
        # if has_port_bound:
        #     LOG.info(_('Port has been bound by this driver, skip binding.'))
        #     return

        # try to bind segment now
        LOG.info(_('Port is supported, will try binding IB partition now.'))
        for segment in context.segments_to_bind:
            if self._is_segment_supported(segment):
                node_uuid = port.get(portbindings.HOST_ID)
                node_ib_ports = self._get_ironic_ib_ports(node_uuid)
                if not node_ib_ports:
                    LOG.warning(_(
                        'For current port (%(port)s), could not find any IB '
                        'port present on the same ironic '
                        'node (%(node_uuid)s); aborting bind port now.'),
                        {'port': port, 'node_uuid': node_uuid})
                    return

                node_ib_client_ids = [ib_port.extra.get('client-id')
                                      for ib_port in node_ib_ports
                                      if ib_port.extra.get('client-id')]
                node_ib_guids = [ufm_utils.mlnx_ib_client_id_to_guid(client_id)
                                 for client_id in node_ib_client_ids]
                LOG.info(_('Ironic node infiniband port guids: %s.'),
                         node_ib_guids)

                LOG.debug(_('Try to bind IB ports using segment: %s'), segment)
                # update partition key for relevant guids
                segment_id = segment[api.ID]
                segmentation_id = segment[api.SEGMENTATION_ID]

                try:
                    provisioning_blocks.add_provisioning_component(
                        context._plugin_context, port['id'], resources.PORT,
                        MLNX_IB_BAREMETAL_ENTITY)

                    # step1: bind PF guids to dynamic pkey
                    self.ufm_client.pkey.add_guids(hex(segmentation_id),
                                                   guids=node_ib_guids,
                                                   index0=True)
                    LOG.info(_('Successfully bound IB physical guids '
                               '%(guids)s to dynamic partition %(pkey)s.'),
                             {'guids': node_ib_guids,
                              'pkey': hex(segmentation_id)})

                    # step2: bind default limited pkeys, if any are
                    #  configured (returns None when none are configured)
                    binding_profile = self.bind_default_limited_pkeys(
                        node_ib_client_ids) or {}
                    binding_profile[BD_DYNAMIC_PKEY] = hex(segmentation_id)
                    binding_profile[BD_PHYSICAL_GUIDS] = node_ib_guids
                    LOG.info(_("Mellanox infiniband port binding profile: "
                               "%(profile)s."),
                             {'profile': binding_profile})

                    # NOTE(turnbig): node is locked when deploying, and port
                    # can not be updated when node is locked
                    # mac_address = port.get('mac_address')
                    # self.append_ironic_port_extra(mac_address,
                    #                               binding_profile)
                    context._binding.vif_details = jsonutils.dumps(
                        binding_profile)

                    # NOTE(turnbig): chain current segment again to next driver
                    new_segment = copy.deepcopy(segment)
                    context.continue_binding(segment_id, [new_segment])
                    return
                except ufm_exc.UfmClientError as e:
                    LOG.error(_("Failed to add guids %(guids)s to UFM "
                                "partition key %(pkey)s, "
                                "reason is %(reason)s."),
                              {'guids': node_ib_client_ids,
                               'pkey': hex(segmentation_id),
                               'reason': str(e)})

                    # TODO(qianbiao.ng): if IB partition binding fails,
                    #   we should abort the bind_port process and exit.
                    vif_details = {'guids': node_ib_client_ids,
                                   'pkey': hex(segmentation_id),
                                   'driver': const.DRIVE_NAME,
                                   'reason': str(e)}
                    context.set_binding(segment[api.ID],
                                        portbindings.VIF_TYPE_BINDING_FAILED,
                                        vif_details,
                                        status=n_const.PORT_STATUS_ERROR)
Example #9
    def update_port_postcommit(self, context):
        # type: (api.PortContext) -> None
        """Update a port.

        :param context: PortContext instance describing the new
            state of the port, as well as the original state prior
            to the update_port call.

        Called after the transaction completes. Call can block, though
        will block the entire process so care should be taken to not
        drastically affect performance.  Raising an exception will
        result in the deletion of the resource.

        update_port_postcommit is called for all changes to the port
        state. It is up to the mechanism driver to ignore state or
        state changes that it does not know or care about.
        """
        port = context.current
        original_port = context.original

        if not self._is_baremetal_port(port):
            LOG.info(_('Port is not a baremetal port, '
                       'skip update_port_postcommit callback.'))
            return

        if not self._is_port_bound(context):
            LOG.info(_('Port is not bound by current driver, '
                       'skip update_port_postcommit callback.'))
            return

        binding_level = self._get_binding_level(context)
        LOG.info(_('Port is bound by current driver with binding '
                   'level %(binding_level)s.'),
                 {'binding_level': binding_level})

        current_vif_type = context.vif_type
        original_vif_type = context.original_vif_type

        # when port is unbound, unbind relevant guids from IB partition.
        if (current_vif_type == portbindings.VIF_TYPE_UNBOUND
                and original_vif_type not in const.UNBOUND_VIF_TYPES):
            LOG.info(_("Port's VIF type changed from bound to unbound"))
            LOG.info(_("Remove infiniband guids from partition key now."))

            LOG.info(_("Original VIF details: %(original_vif_details)s, "
                       "current VIF details: %(current_vif_details)s"),
                     {'original_vif_details': context.original_vif_details,
                      'current_vif_details': context.vif_details})

            binding_profile = context.original_vif_details

            # step1: unbind physical guids from the dynamic pkey
            dynamic_pkey = binding_profile.get(BD_DYNAMIC_PKEY)
            physical_guids = binding_profile.get(BD_PHYSICAL_GUIDS)
            LOG.info(_('Unbinding physical guids %(guids)s from dynamic '
                       'pkey %(pkey)s.'),
                     {'guids': physical_guids,
                      'pkey': dynamic_pkey})
            self.ufm_client.pkey.remove_guids(dynamic_pkey, physical_guids)
            LOG.info(_('Infiniband port physical guids %(guids)s have been '
                       'removed from dynamically allocated pkey %(pkey)s.'),
                     {'guids': physical_guids,
                      'pkey': dynamic_pkey})

            # step2: unbind default limited pkeys
            default_limited_pkeys = binding_profile.get(BD_DFT_LIMITED_PKEYS)
            if default_limited_pkeys:
                sriov_enabled = binding_profile.get(BD_ENABLE_SRIOV)
                if sriov_enabled:
                    LOG.info(_('SR-IOV is enabled, remove virtual guids '
                               'from default limited pkeys now.'))
                    # the virtual guids here have already been transposed
                    #  (grouped per pkey) by bind_default_limited_pkeys
                    virtual_guids = binding_profile.get(BD_VIRTUAL_GUIDS)
                    grouped_guids = dict(zip(default_limited_pkeys,
                                             virtual_guids))
                    LOG.info(_('Virtual guids for default limited pkeys are '
                               '%(grouped_guids)s.'),
                             {'grouped_guids': grouped_guids})
                    for pkey, vf_guids in grouped_guids.items():
                        self.ufm_client.pkey.remove_guids(pkey, vf_guids)
                        LOG.info(_('Successfully removed IB virtual guids '
                                   '%(guids)s from limited pkey %(pkey)s.'),
                                 {'guids': vf_guids, 'pkey': pkey})
                else:
                    for pkey in default_limited_pkeys:
                        self.ufm_client.pkey.remove_guids(pkey, physical_guids)
                        LOG.info(_('Successfully removed IB physical guids '
                                   '%(guids)s from limited pkey %(pkey)s.'),
                                 {'guids': physical_guids, 'pkey': pkey})

                # restore Ironic PXE port extra
                mac_address = port.get('mac_address')
                self.remove_ironic_port_extra(mac_address)

        # when port is bound, mark port as provision completed.
        if (current_vif_type not in const.UNBOUND_VIF_TYPES
                and original_vif_type in const.UNBOUND_VIF_TYPES):
            LOG.info(_("Port's VIF type changed from unbound to bound."))
            # NOTE(qianbiao.ng): this provisioning_complete action maps to
            #  provisioning_blocks.add_provisioning_component called in
            #  bind_port process.
            provisioning_blocks.provisioning_complete(
                context._plugin_context, port['id'], resources.PORT,
                MLNX_IB_BAREMETAL_ENTITY)

        # when port binding fails, raise exception
        if (port.get('status') == n_const.PORT_STATUS_ERROR
                and current_vif_type == portbindings.VIF_TYPE_BINDING_FAILED):
            LOG.info(_("Port binding failed, Port's VIF details: "
                       "%(vif_details)s."),
                     {'vif_details': context.vif_details})
            if context.vif_details.get('driver') == const.DRIVE_NAME:
                LOG.info(_("Port binding failure is caused by current driver. "
                           "Raise an exception to abort port update "
                           "process."))
                raise exceptions.PortBindingException(**context.vif_details)
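
For context, the two provisioning_blocks calls seen in bind_port and in this callback form a pair; condensed here as a sketch, not the full driver code:

# bind_port: declare this driver still has provisioning work pending
provisioning_blocks.add_provisioning_component(
    context._plugin_context, port['id'], resources.PORT,
    MLNX_IB_BAREMETAL_ENTITY)

# update_port_postcommit, once bound: clear the pending component so
# Neutron can transition the port to ACTIVE
provisioning_blocks.provisioning_complete(
    context._plugin_context, port['id'], resources.PORT,
    MLNX_IB_BAREMETAL_ENTITY)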
Example #10

from oslo_config import cfg

from networking_mlnx_baremetal import constants
from networking_mlnx_baremetal._i18n import _

DRIVER_OPTS = [
    cfg.StrOpt('endpoint',
               default='http://127.0.0.1',
               help=_('UFM REST API endpoint.')),
    cfg.StrOpt('username',
               help=_('Username for UFM REST API authentication.')),
    cfg.StrOpt('password',
               help=_('Password for UFM REST API authentication.')),
    cfg.StrOpt('verify_ca',
               default='True',
               help=_('Either a Boolean value, a path to a CA_BUNDLE '
                      'file, or a directory with certificates of trusted '
                      'CAs. If set to True, the driver will verify the '
                      'UFM host certificates; if set to False, the driver '
                      'will skip verifying the SSL certificate. If it is '
                      'a path, the driver will use the specified '
                      'certificate or one of the certificates in the '
                      'directory. Defaults to True. Optional.')),
    cfg.IntOpt('timeout',
Example #11

from ironicclient import client
from keystoneauth1 import loading
from oslo_config import cfg

from networking_mlnx_baremetal import constants
from networking_mlnx_baremetal._i18n import _

CONF = cfg.CONF
KEYSTONE_SESSION = None

IRONIC_OPTS = [
    cfg.StrOpt('os_region',
               help=_('Keystone region used to get Ironic endpoints.')),
    cfg.StrOpt('auth_strategy',
               default='keystone',
               choices=('keystone', 'noauth'),
               help=_('Authentication method: noauth or keystone.')),
    cfg.StrOpt('endpoint',
               default='http://localhost:6385/',
               help=_('Ironic API endpoint, used to connect to Ironic when '
                      'auth_strategy option is noauth to work with standalone '
                      'Ironic without keystone.')),
    cfg.IntOpt('retry_interval',
               default=2,
               help=_('Interval between retries in case of conflict error '
                      '(HTTP 409).')),
    cfg.IntOpt('max_retries',
               default=30,

class PortBindingException(MlnxIbBmDriverException):
    message = _("Failed to add guids %(guids)s to UFM partition "
                "key %(pkey)s, reason is %(reason)s.")


class InvalidConfigValueException(MlnxIbBmDriverException):
    message = _('Invalid value "%(value)s" was set to configuration '
                'option: %(option)s. %(details)s')