def test_adding_component_idempotent(self):
     # Registering the same (entity, resource) pair repeatedly must act
     # like a single registration: one completion still unblocks the port.
     for _ in range(5):
         pb.add_provisioning_component(
             self.ctx, self.port.id, resources.PORT, 'entity1')
     pb.provisioning_complete(
         self.ctx, self.port.id, resources.PORT, 'entity1')
     self.assertTrue(self.provisioned.called)
 def _prepare_initial_port_status(self, port_context):
     """Choose the initial port status, blocking provisioning if needed.

     When ODL supports operational port status, start the port DOWN and
     register an L2-agent provisioning block; otherwise start it ACTIVE.
     """
     if not odl_features.has(odl_features.OPERATIONAL_PORT_STATUS):
         return nl_const.PORT_STATUS_ACTIVE
     provisioning_blocks.add_provisioning_component(
         port_context._plugin_context, port_context.current['id'],
         resources.PORT, provisioning_blocks.L2_AGENT_ENTITY)
     return nl_const.PORT_STATUS_DOWN
 def test_provisioning_of_correct_item(self):
     # Completing provisioning for a second port must fire the callback
     # for that port only, even though self.port also has a component.
     other_port = self._make_port()
     pb.add_provisioning_component(
         self.ctx, self.port.id, resources.PORT, 'entity1')
     pb.provisioning_complete(
         self.ctx, other_port.id, resources.PORT, 'entity1')
     self.provisioned.assert_called_once_with(
         resources.PORT, pb.PROVISIONING_COMPLETE, mock.ANY,
         context=self.ctx, object_id=other_port.id)
 def test_is_object_blocked(self):
     # A port is reported blocked only while it has a pending component.
     pb.add_provisioning_component(
         self.ctx, self.port.id, resources.PORT, 'e1')
     self.assertTrue(
         pb.is_object_blocked(self.ctx, self.port.id, resources.PORT))
     # An unrelated object id is never blocked.
     self.assertFalse(
         pb.is_object_blocked(self.ctx, 'xyz', resources.PORT))
     pb.provisioning_complete(self.ctx, self.port.id, resources.PORT, 'e1')
     self.assertFalse(
         pb.is_object_blocked(self.ctx, self.port.id, resources.PORT))
示例#5
0
 def test_adding_component_for_new_resource_type(self):
     provisioned = mock.Mock()
     registry.subscribe(provisioned, 'NETWORK', pb.PROVISIONING_COMPLETE)
     net = self._make_net()
     # Adding a component must fail while no model is registered for
     # the 'NETWORK' resource type.
     with testtools.ExpectedException(RuntimeError):
         pb.add_provisioning_component(self.ctx, net.id, 'NETWORK', 'ent')
     # After registering the model, the add/complete cycle notifies
     # the subscriber.
     pb.add_model_for_resource('NETWORK', models_v2.Network)
     pb.add_provisioning_component(self.ctx, net.id, 'NETWORK', 'ent')
     pb.provisioning_complete(self.ctx, net.id, 'NETWORK', 'ent')
     self.assertTrue(provisioned.called)
 def test_adding_component_for_new_resource_type(self):
     resource_type, entity = 'NETWORK', 'ent'
     subscriber = mock.Mock()
     registry.subscribe(subscriber, resource_type, pb.PROVISIONING_COMPLETE)
     net = self._make_net()
     # The add must fail while no model is registered for the type.
     with testtools.ExpectedException(RuntimeError):
         pb.add_provisioning_component(self.ctx, net.id, resource_type,
                                       entity)
     # Registering the model makes the full add/complete cycle work.
     pb.add_model_for_resource(resource_type, models_v2.Network)
     pb.add_provisioning_component(self.ctx, net.id, resource_type, entity)
     pb.provisioning_complete(self.ctx, net.id, resource_type, entity)
     self.assertTrue(subscriber.called)
 def test_not_provisioned_until_final_component_complete(self):
     # Two components gate the port; the first completion alone must
     # not trigger provisioning.
     for entity in ('entity1', 'entity2'):
         pb.add_provisioning_component(
             self.ctx, self.port.id, resources.PORT, entity)
     pb.provisioning_complete(
         self.ctx, self.port.id, resources.PORT, 'entity1')
     self.assertFalse(self.provisioned.called)
     pb.provisioning_complete(
         self.ctx, self.port.id, resources.PORT, 'entity2')
     self.assertTrue(self.provisioned.called)
示例#8
0
 def test_is_object_blocked(self):
     # Blocked while a component is pending; unblocked after completion.
     pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                   'e1')
     self.assertTrue(pb.is_object_blocked(
         self.ctx, self.port.id, resources.PORT))
     # An id that never had components is not blocked.
     self.assertFalse(pb.is_object_blocked(
         self.ctx, 'xyz', resources.PORT))
     pb.provisioning_complete(self.ctx, self.port.id, resources.PORT, 'e1')
     self.assertFalse(pb.is_object_blocked(
         self.ctx, self.port.id, resources.PORT))
示例#9
0
 def test_not_provisioned_until_final_component_complete(self):
     # Provisioning fires only after every registered component reports.
     entities = ('entity1', 'entity2')
     for name in entities:
         pb.add_provisioning_component(self.ctx, self.port.id,
                                       resources.PORT, name)
     pb.provisioning_complete(self.ctx, self.port.id, resources.PORT,
                              entities[0])
     self.assertFalse(self.provisioned.called)
     pb.provisioning_complete(self.ctx, self.port.id, resources.PORT,
                              entities[1])
     self.assertTrue(self.provisioned.called)
示例#10
0
 def _insert_port_provisioning_block(self, port):
     """Block the port from going ACTIVE until OVN reports it is up."""
     vnic_type = port.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL)
     if vnic_type not in self.supported_vnic_types:
         LOG.debug("No provisioning block due to unsupported vnic_type: %s", vnic_type)
         return
     if port["status"] == const.PORT_STATUS_ACTIVE:
         # Already ACTIVE: there is nothing to hold back.
         return
     provisioning_blocks.add_provisioning_component(
         n_context.get_admin_context(), port["id"], resources.PORT,
         provisioning_blocks.L2_AGENT_ENTITY)
示例#11
0
    def _insert_provisioning_block(self, context):
        """Insert a status barrier so the port stays non-ACTIVE until the
        agent reports that the wiring is done.
        """
        port = context.current
        if port['status'] != n_const.PORT_STATUS_ACTIVE:
            # Only non-ACTIVE ports need the barrier; an ACTIVE port has
            # nothing left to wait for.
            provisioning_blocks.add_provisioning_component(
                context._plugin_context, port['id'], resources.PORT,
                provisioning_blocks.L2_AGENT_ENTITY)
示例#12
0
 def _insert_port_provisioning_block(self, port):
     """Add a provisioning block that keeps the port from transitioning
     to ACTIVE until OVN reports that it is up.
     """
     vnic_type = port.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL)
     if vnic_type not in self.supported_vnic_types:
         LOG.debug("No provisioning block due to unsupported vnic_type: %s",
                   vnic_type)
         return
     if port['status'] == const.PORT_STATUS_ACTIVE:
         # Nothing to block once the port is already ACTIVE.
         return
     provisioning_blocks.add_provisioning_component(
         n_context.get_admin_context(), port['id'], resources.PORT,
         provisioning_blocks.L2_AGENT_ENTITY)
    def test_provisioning_of_correct_item(self):
        # Only the completed port's callback fires, carrying a payload
        # that identifies that specific port.
        other = self._make_port()
        pb.add_provisioning_component(
            self.ctx, self.port.id, resources.PORT, 'entity1')
        pb.provisioning_complete(self.ctx, other.id, resources.PORT,
                                 'entity1')
        self.provisioned.assert_called_once_with(
            resources.PORT, pb.PROVISIONING_COMPLETE, mock.ANY,
            payload=mock.ANY)

        payload = self.provisioned.mock_calls[0][2]['payload']
        self.assertEqual(self.ctx, payload.context)
        self.assertEqual(other.id, payload.resource_id)
 def test_remove_provisioning_component(self):
     # Removing a pending component drops it without firing the callback;
     # the remaining component still gates provisioning.
     for entity in ('e1', 'e2'):
         pb.add_provisioning_component(
             self.ctx, self.port.id, resources.PORT, entity)
     self.assertTrue(pb.remove_provisioning_component(
         self.ctx, self.port.id, resources.PORT, 'e1'))
     self.assertFalse(self.provisioned.called)
     # A completion report for an unregistered entity is a no-op.
     pb.provisioning_complete(self.ctx, self.port.id, resources.PORT,
                              'other')
     self.assertFalse(self.provisioned.called)
     pb.provisioning_complete(self.ctx, self.port.id, resources.PORT, 'e2')
     self.assertTrue(self.provisioned.called)
 def test_remove_provisioning_component(self):
     # Removal drops 'e1'; 'e2' still blocks until it completes.
     pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                   'e1')
     pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                   'e2')
     removed = pb.remove_provisioning_component(self.ctx, self.port.id,
                                                resources.PORT, 'e1')
     self.assertTrue(removed)
     self.assertFalse(self.provisioned.called)
     # Completion from an entity that never registered changes nothing.
     pb.provisioning_complete(self.ctx, self.port.id, resources.PORT,
                              'other')
     self.assertFalse(self.provisioned.called)
     pb.provisioning_complete(self.ctx, self.port.id, resources.PORT, 'e2')
     self.assertTrue(self.provisioned.called)
示例#16
0
 def _insert_provisioning_block(self, context):
     """Place a status barrier so the port cannot go ACTIVE until the
     L2 agent reports that wiring completed.
     """
     port = context.current
     # No barrier for hostless or already-ACTIVE ports.
     if not context.host or port['status'] == constants.PORT_STATUS_ACTIVE:
         return
     # Several agents with different VNIC types may share one host;
     # only block when this driver handles the port's VNIC type.
     vnic_type = context.current.get(portbindings.VNIC_TYPE,
                                     portbindings.VNIC_NORMAL)
     if vnic_type not in self.supported_vnic_types:
         return
     if not context.host_agents(self.agent_type):
         return
     provisioning_blocks.add_provisioning_component(
         context._plugin_context, port['id'], resources.PORT,
         provisioning_blocks.L2_AGENT_ENTITY)
示例#17
0
 def _insert_provisioning_block(self, context):
     """Add an L2-agent provisioning block so the port does not become
     ACTIVE before the agent confirms the wiring is done.
     """
     port = context.current
     if not context.host or port['status'] == const.PORT_STATUS_ACTIVE:
         # Hostless or already-ACTIVE ports need no barrier.
         return
     # Only act on VNIC types this driver supports: multiple agents
     # with different VNIC types may live on a single host.
     vnic_type = context.current.get(portbindings.VNIC_TYPE,
                                     portbindings.VNIC_NORMAL)
     if vnic_type not in self.supported_vnic_types:
         return
     if context.host_agents(self.agent_type):
         provisioning_blocks.add_provisioning_component(
             context._plugin_context, port['id'], resources.PORT,
             provisioning_blocks.L2_AGENT_ENTITY)
    def bind_port(self, context):
        """Statically bind baremetal ports.

        Binds only when the port's first link reports
        switch_info == 'static', or when no link information is present.
        """
        port = context.current
        if port['binding:vnic_type'] != 'baremetal':
            # Only baremetal ports are handled here.
            return

        links = port['binding:profile'].get('local_link_information')
        if links and links[0].get('switch_info') != 'static':
            # Link info present but not marked static: leave the port
            # for another mechanism driver.
            return

        # Hold the port out of ACTIVE until provisioning completes.
        provisioning_blocks.add_provisioning_component(context._plugin_context,
                                                       port['id'],
                                                       resources.PORT,
                                                       'STATICALYBOUND')

        first_segment = context.segments_to_bind[0]
        context.set_binding(first_segment[driver_api.ID],
                            portbindings.VIF_TYPE_OTHER, {})
 def test_not_provisioned_when_wrong_component_reports(self):
     # A completion report from an entity that never registered a
     # component must not unblock the port.
     pb.add_provisioning_component(
         self.ctx, self.port.id, resources.PORT, 'entity1')
     pb.provisioning_complete(
         self.ctx, self.port.id, resources.PORT, 'entity2')
     self.assertFalse(self.provisioned.called)
示例#20
0
    def bind_port(self, context):
        """Attempt to bind a port.

        :param context: PortContext instance describing the port

        This method is called outside any transaction to attempt to
        establish a port binding using this mechanism driver. Bindings
        may be created at each of multiple levels of a hierarchical
        network, and are established from the top level downward. At
        each level, the mechanism driver determines whether it can
        bind to any of the network segments in the
        context.segments_to_bind property, based on the value of the
        context.host property, any relevant port or network
        attributes, and its own knowledge of the network topology. At
        the top level, context.segments_to_bind contains the static
        segments of the port's network. At each lower level of
        binding, it contains static or dynamic segments supplied by
        the driver that bound at the level above. If the driver is
        able to complete the binding of the port to any segment in
        context.segments_to_bind, it must call context.set_binding
        with the binding details. If it can partially bind the port,
        it must call context.continue_binding with the network
        segments to be used to bind at the next lower level.

        If the binding results are committed after bind_port returns,
        they will be seen by all mechanism drivers as
        update_port_precommit and update_port_postcommit calls. But if
        some other thread or process concurrently binds or updates the
        port, these binding results will not be committed, and
        update_port_precommit and update_port_postcommit will not be
        called on the mechanism drivers with these results. Because
        binding results can be discarded rather than committed,
        drivers should avoid making persistent state changes in
        bind_port, or else must ensure that such state changes are
        eventually cleaned up.

        Implementing this method explicitly declares the mechanism
        driver as having the intention to bind ports. This is inspected
        by the QoS service to identify the available QoS rules you
        can use with ports.
        """

        port = context.current
        binding_profile = port['binding:profile']
        local_link_information = binding_profile.get('local_link_information')
        local_group_information = binding_profile.get(
            'local_group_information')
        # NOTE(review): these debug calls pass the object itself as the
        # log message rather than using a format string.
        LOG.debug(local_link_information)
        LOG.debug(local_group_information)
        #if self._is_port_supported(port) and local_link_information:
        if local_link_information:
            switch_info = local_link_information[0].get('switch_info')
            switch_id = local_link_information[0].get('switch_id')
            switch = device_utils.get_switch_device(self.switches,
                                                    switch_info=switch_info,
                                                    ngs_mac_address=switch_id)
            if not switch:
                # No managed switch matches this link: leave the port
                # unbound for another mechanism driver.
                return
            port_id = local_link_information[0].get('port_id')
            segments = context.segments_to_bind
            segmentation_id = segments[0].get('segmentation_id')
            # If segmentation ID is None, set vlan 1
            if not segmentation_id:
                segmentation_id = '1'
            # Block the port from going ACTIVE until this driver later
            # reports provisioning complete.
            provisioning_blocks.add_provisioning_component(
                context._plugin_context, port['id'], resources.PORT,
                GENERIC_SWITCH_ENTITY)
            LOG.debug("Putting port {port} on {switch_info} to vlan: "
                      "{segmentation_id}".format(
                          port=port_id,
                          switch_info=switch_info,
                          segmentation_id=segmentation_id))
            # If a request to create dynamic group is arrived
            # NOTE(review): this branch assumes local_group_information is
            # present whenever more than one link is given — .get() on
            # None would raise AttributeError; confirm callers guarantee it.
            if (len(local_link_information) > 1):
                #output = switch.create_port_channel(local_group_information.get('name'),segmentation_id)
                switch.vlan_configuration(segmentation_id)
                switch.create_port_channel(local_group_information.get('name'),
                                           segmentation_id)
                # Extract the trailing digits of the group name as the
                # port-channel number.
                port_number_id = re.sub('.*?([0-9]*)$', r'\1',
                                        local_group_information.get('name'))
                #output = switch.create_port_channel(segmentation_id)
                LOG.debug("YYY: {port_number_id}".format(
                    port_number_id=port_number_id))
                for item in local_link_information:
                    i = item.get('port_id')
                    LOG.debug("XXX: {output}".format(output=i))
                    switch.configure_port_channel(i, port_number_id,
                                                  segmentation_id)
                #port_list = switch.show_port_list().splitlines()
                #port_list = [w.replace('interface Port-channel', '') for w in port_list]
                #for item in port_list:
                #    LOG.debug("YYY: {output}".format(output=item))
            else:
                # Move port to network
                switch.plug_port_to_network(port_id, segmentation_id)

            LOG.info(
                "Successfully bound port %(port_id)s in segment "
                " %(segment_id)s on device %(device)s", {
                    'port_id': port['id'],
                    'device': switch_info,
                    'segment_id': segmentation_id
                })
            context.set_binding(segments[0][driver_api.ID],
                                portbindings.VIF_TYPE_OTHER, {})
示例#21
0
    def try_to_bind_segment_for_agent(self, context, segment, agent):
        """Try to bind with segment for agent.

        :param context: PortContext instance describing the port
        :param segment: segment dictionary describing segment to bind
        :param agent: agents_db entry describing agent to bind
        :returns: True iff segment has been bound for agent

        Neutron segments api-ref:
          https://docs.openstack.org/api-ref/network/v2/#segments

        Example segment dictionary: {'segmentation_id': 'segmentation_id',
                                     'network_type': 'network_type',
                                     'id': 'segment_uuid'}

        Called outside any transaction during bind_port() so that
        derived MechanismDrivers can use agent_db data along with
        built-in knowledge of the corresponding agent's capabilities
        to attempt to bind to the specified network segment for the
        agent.

        If the segment can be bound for the agent, this function must
        call context.set_binding() with appropriate values and then
        return True. Otherwise, it must return False.
        """
        if not self.check_segment_for_agent(segment, agent):
            return False

        port = context.current
        binding_profile = port[portbindings.PROFILE] or {}
        local_link_information = binding_profile.get(constants.LOCAL_LINK_INFO,
                                                     [])
        local_group_information = binding_profile.get(
            constants.LOCAL_GROUP_INFO, {})
        bond_mode = local_group_information.get('bond_mode')

        # Group link entries by the managed device they belong to so
        # each device driver is invoked exactly once below.
        by_device = {}
        for link in local_link_information:
            device = self._get_device(link)

            # If there is no device for all links the port will be bound, this
            # keeps backward compatibility.
            if not device:
                continue

            # Device was found, but no port_id in link - fail port binding
            if not link.get(constants.PORT_ID):
                LOG.warning(
                    'Cannot bind port %(port)s. no port_id in link '
                    'information: %(link)s', {
                        'port': port[api.ID],
                        'link': link
                    })
                return False

            # Check device on physnet, if not fail port binding
            if not self._is_device_on_physnet(device,
                                              segment[api.PHYSICAL_NETWORK]):
                LOG.warning(
                    'Cannot bind port %(port)s, device %(device)s is '
                    'not on physical network %(physnet)s', {
                        'port': port[api.ID],
                        'device': device,
                        'physnet': segment[api.PHYSICAL_NETWORK]
                    })
                return False

            by_device.setdefault(device, {})
            by_device[device].setdefault('links', [])

            if 'driver' not in by_device[device]:
                # Load the driver, fail port binding on load error
                try:
                    driver = common.driver_mgr(device)
                    by_device[device]['driver'] = driver
                except exceptions.DriverEntrypointLoadError as e:
                    # NOTE(review): the 'link' key in this dict is unused by
                    # the format string above.
                    LOG.warning(
                        'Cannot bind port %(port)s, failed to load '
                        'driver for device %(device)s', {
                            'link': link,
                            'port': port[api.ID],
                            'device': device
                        })
                    LOG.debug(e.message)
                    return False

            by_device[device]['links'].append(link)

        # Check driver(s) support the bond_mode - if not fail port binding
        if (bond_mode and by_device
                and not self._is_bond_mode_supported(bond_mode, by_device)):
            LOG.warning(
                'Cannot bind port %(port)s, unsupported '
                'bond_mode %(bond_mode)s', {
                    'port': port[api.ID],
                    'bond_mode': bond_mode
                })
            return False

        # Call each drivers create_port method to plug the device links
        for device, args in by_device.items():
            driver = args['driver']
            driver.create_port(context, segment, args['links'])

        # Complete the port binding
        # The provisioning block keeps the port out of ACTIVE until this
        # driver later reports provisioning complete.
        provisioning_blocks.add_provisioning_component(context._plugin_context,
                                                       port[api.ID],
                                                       resources.PORT,
                                                       BAREMETAL_DRV_ENTITY)
        context.set_binding(segment[api.ID],
                            self.get_vif_type(context, agent, segment),
                            self.get_vif_details(context, agent, segment))

        return True
 def test_provisioned_after_component_finishes(self):
     # One component registered and completed: the callback fires.
     pb.add_provisioning_component(
         self.ctx, self.port.id, resources.PORT, 'entity')
     pb.provisioning_complete(
         self.ctx, self.port.id, resources.PORT, 'entity')
     self.assertTrue(self.provisioned.called)
 def test_not_provisioned_when_wrong_component_reports(self):
     pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                   'entity1')
     # 'entity2' never registered a component, so its report is ignored.
     pb.provisioning_complete(self.ctx, self.port.id, resources.PORT,
                              'entity2')
     self.assertFalse(self.provisioned.called)
 def test_provisioned_after_component_finishes(self):
     entity = 'entity'
     pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                   entity)
     pb.provisioning_complete(self.ctx, self.port.id, resources.PORT,
                              entity)
     # The single registered component completed, so provisioning fired.
     self.assertTrue(self.provisioned.called)
示例#25
0
    def bind_port(self, context):
        """Attempt to bind a port.

        :param context: PortContext instance describing the port

        This method is called outside any transaction to attempt to
        establish a port binding using this mechanism driver. Bindings
        may be created at each of multiple levels of a hierarchical
        network, and are established from the top level downward. At
        each level, the mechanism driver determines whether it can
        bind to any of the network segments in the
        context.segments_to_bind property, based on the value of the
        context.host property, any relevant port or network
        attributes, and its own knowledge of the network topology. At
        the top level, context.segments_to_bind contains the static
        segments of the port's network. At each lower level of
        binding, it contains static or dynamic segments supplied by
        the driver that bound at the level above. If the driver is
        able to complete the binding of the port to any segment in
        context.segments_to_bind, it must call context.set_binding
        with the binding details. If it can partially bind the port,
        it must call context.continue_binding with the network
        segments to be used to bind at the next lower level.

        If the binding results are committed after bind_port returns,
        they will be seen by all mechanism drivers as
        update_port_precommit and update_port_postcommit calls. But if
        some other thread or process concurrently binds or updates the
        port, these binding results will not be committed, and
        update_port_precommit and update_port_postcommit will not be
        called on the mechanism drivers with these results. Because
        binding results can be discarded rather than committed,
        drivers should avoid making persistent state changes in
        bind_port, or else must ensure that such state changes are
        eventually cleaned up.

        Implementing this method explicitly declares the mechanism
        driver as having the intention to bind ports. This is inspected
        by the QoS service to identify the available QoS rules you
        can use with ports.
        """

        port = context.current
        is_baremetal_port = self._is_baremetal_port(port)
        if not is_baremetal_port:
            LOG.info(_('Port is not a baremetal port, skip binding.'))
            return

        # NOTE(turnbig): it seems ml2 driver will auto check whether a
        #  driver has been bound by a driver through binding_levels
        # has_port_bound = self._is_port_bound(port)
        # if has_port_bound:
        #     LOG.info(_('Port has been bound by this driver, skip binding.'))
        #     return

        # try to bind segment now
        LOG.info(_('Port is supported, will try binding IB partition now.'))
        for segment in context.segments_to_bind:
            if self._is_segment_supported(segment):
                node_uuid = port.get(portbindings.HOST_ID)
                node_ib_ports = self._get_ironic_ib_ports(node_uuid)
                if not node_ib_ports or len(node_ib_ports) == 0:
                    # NOTE(review): the first dict key here is the port
                    # object itself, but the format string expects the
                    # string key 'port' — the %(port)s lookup will fail
                    # when this warning is emitted; confirm and fix.
                    LOG.warning(_(
                        'For current port(%(port)s), could not find any IB '
                        'port presents in the same ironic '
                        'node(%(node_uuid)s), break bind port process now.'),
                        {port: port, 'node_uuid': node_uuid})
                    return

                # Collect the InfiniBand client-ids and derive GUIDs for
                # every IB port on the ironic node.
                node_ib_client_ids = [ib_port.extra.get('client-id')
                                      for ib_port in node_ib_ports
                                      if ib_port.extra.get('client-id')]
                node_ib_guids = [ufm_utils.mlnx_ib_client_id_to_guid(client_id)
                                 for client_id in node_ib_client_ids]
                LOG.info(_('Ironic node infiniband port guids: %s.')
                         % node_ib_guids)

                LOG.debug(_('Try to bind IB ports using segment: %s'), segment)
                # update partition key for relevant guids
                segment_id = segment[api.ID]
                segmentation_id = segment[api.SEGMENTATION_ID]

                try:
                    # Hold the port out of ACTIVE until this driver
                    # reports provisioning complete.
                    provisioning_blocks.add_provisioning_component(
                        context._plugin_context, port['id'], resources.PORT,
                        MLNX_IB_BAREMETAL_ENTITY)

                    # step1: bind PF guids to dynamic pkey
                    self.ufm_client.pkey.add_guids(hex(segmentation_id),
                                                   guids=node_ib_guids,
                                                   index0=True)
                    LOG.info(_('Successfully bound IB physical guids '
                               '%(guids)s to dynamic partition %(pkey)s.'),
                             {'guids': node_ib_guids,
                              'pkey': hex(segmentation_id)})

                    # step2: if there are default limited pkeys to bound,
                    binding_profile = self.bind_default_limited_pkeys(
                        node_ib_client_ids)
                    binding_profile[BD_DYNAMIC_PKEY] = hex(segmentation_id)
                    binding_profile[BD_PHYSICAL_GUIDS] = node_ib_guids
                    LOG.info(_("Mellanox infiniband port binding profile: "
                               "%(profile)s."),
                             {'profile': binding_profile})

                    # NOTE(turnbig): node is locked when deploying, and port
                    # can not be updated when node is locked
                    # mac_address = port.get('mac_address')
                    # self.append_ironic_port_extra(mac_address,
                    #                               binding_profile)
                    # NOTE(review): writes vif_details through the private
                    # _binding attribute, bypassing the PortContext API —
                    # confirm this is intentional.
                    context._binding.vif_details = jsonutils.dumps(
                        binding_profile)

                    # NOTE(turnbig): chain current segment again to next driver
                    new_segment = copy.deepcopy(segment)
                    context.continue_binding(segment_id, [new_segment])
                    return
                except ufm_exc.UfmClientError as e:
                    LOG.error(_("Failed to add guids %(guids)s to UFM "
                                "partition key %(pkey)s, "
                                "reason is %(reason)s."),
                              {'guids': node_ib_client_ids,
                               'pkey': hex(segmentation_id),
                               'reason': str(e)})

                    # TODO(qianbiao.ng): if IB partition binding fails,
                    #   we should abort the bind_port process and exit.
                    vif_details = {'guids': node_ib_client_ids,
                                   'pkey': hex(segmentation_id),
                                   'driver': const.DRIVE_NAME,
                                   'reason': str(e)}
                    context.set_binding(segment[api.ID],
                                        portbindings.VIF_TYPE_BINDING_FAILED,
                                        vif_details,
                                        status=n_const.PORT_STATUS_ERROR)
示例#26
0
    def bind_port(self, context):
        """Attempt to bind a port.

        :param context: PortContext instance describing the port

        This method is called outside any transaction to attempt to
        establish a port binding using this mechanism driver. Bindings
        may be created at each of multiple levels of a hierarchical
        network, and are established from the top level downward. At
        each level, the mechanism driver determines whether it can
        bind to any of the network segments in the
        context.segments_to_bind property, based on the value of the
        context.host property, any relevant port or network
        attributes, and its own knowledge of the network topology. At
        the top level, context.segments_to_bind contains the static
        segments of the port's network. At each lower level of
        binding, it contains static or dynamic segments supplied by
        the driver that bound at the level above. If the driver is
        able to complete the binding of the port to any segment in
        context.segments_to_bind, it must call context.set_binding
        with the binding details. If it can partially bind the port,
        it must call context.continue_binding with the network
        segments to be used to bind at the next lower level.

        If the binding results are committed after bind_port returns,
        they will be seen by all mechanism drivers as
        update_port_precommit and update_port_postcommit calls. But if
        some other thread or process concurrently binds or updates the
        port, these binding results will not be committed, and
        update_port_precommit and update_port_postcommit will not be
        called on the mechanism drivers with these results. Because
        binding results can be discarded rather than committed,
        drivers should avoid making persistent state changes in
        bind_port, or else must ensure that such state changes are
        eventually cleaned up.

        Implementing this method explicitly declares the mechanism
        driver as having the intention to bind ports. This is inspected
        by the QoS service to identify the available QoS rules you
        can use with ports.
        """

        port = context.current
        binding_profile = port['binding:profile']
        local_link_information = binding_profile.get('local_link_information')
        # Only act on supported ports that carry the physical-switch link
        # details in their binding profile; otherwise defer to other drivers.
        if self._is_port_supported(port) and local_link_information:
            switch_info = local_link_information[0].get('switch_info')
            switch_id = local_link_information[0].get('switch_id')
            switch = device_utils.get_switch_device(self.switches,
                                                    switch_info=switch_info,
                                                    ngs_mac_address=switch_id)
            if not switch:
                # Device is not managed by this driver; leave the port
                # unbound so another mechanism driver may try.
                return
            network = context.network.current
            physnet = network['provider:physical_network']
            switch_physnets = switch._get_physical_networks()
            if switch_physnets and physnet not in switch_physnets:
                # Fixed: the placeholder names must match the mapping keys
                # ('%(port)s' did not match the 'port_id' key) and every
                # placeholder needs a conversion type ('%(physnet)' lacked
                # the trailing 's'); both defects broke this log line.
                LOG.error(
                    "Cannot bind port %(port_id)s as device %(device)s is "
                    "not on physical network %(physnet)s", {
                        'port_id': port['id'],
                        'device': switch_info,
                        'physnet': physnet
                    })
                return
            port_id = local_link_information[0].get('port_id')
            segments = context.segments_to_bind
            # If segmentation ID is None, set vlan 1
            segmentation_id = segments[0].get('segmentation_id') or '1'
            # Register a provisioning block so the port is not reported
            # ACTIVE until this entity marks provisioning complete.
            provisioning_blocks.add_provisioning_component(
                context._plugin_context, port['id'], resources.PORT,
                GENERIC_SWITCH_ENTITY)
            LOG.debug("Putting port {port} on {switch_info} to vlan: "
                      "{segmentation_id}".format(
                          port=port_id,
                          switch_info=switch_info,
                          segmentation_id=segmentation_id))
            # Move port to network
            switch.plug_port_to_network(port_id, segmentation_id)
            LOG.info(
                "Successfully bound port %(port_id)s in segment "
                "%(segment_id)s on device %(device)s", {
                    'port_id': port['id'],
                    'device': switch_info,
                    'segment_id': segmentation_id
                })
            context.set_binding(segments[0][api.ID],
                                portbindings.VIF_TYPE_OTHER, {})
示例#27
0
    def bind_port(self, context):
        """Attempt to bind a port.

        :param context: PortContext instance describing the port

        This method is called outside any transaction to attempt to
        establish a port binding using this mechanism driver. Bindings
        may be created at each of multiple levels of a hierarchical
        network, and are established from the top level downward. At
        each level, the mechanism driver determines whether it can
        bind to any of the network segments in the
        context.segments_to_bind property, based on the value of the
        context.host property, any relevant port or network
        attributes, and its own knowledge of the network topology. At
        the top level, context.segments_to_bind contains the static
        segments of the port's network. At each lower level of
        binding, it contains static or dynamic segments supplied by
        the driver that bound at the level above. If the driver is
        able to complete the binding of the port to any segment in
        context.segments_to_bind, it must call context.set_binding
        with the binding details. If it can partially bind the port,
        it must call context.continue_binding with the network
        segments to be used to bind at the next lower level.

        If the binding results are committed after bind_port returns,
        they will be seen by all mechanism drivers as
        update_port_precommit and update_port_postcommit calls. But if
        some other thread or process concurrently binds or updates the
        port, these binding results will not be committed, and
        update_port_precommit and update_port_postcommit will not be
        called on the mechanism drivers with these results. Because
        binding results can be discarded rather than committed,
        drivers should avoid making persistent state changes in
        bind_port, or else must ensure that such state changes are
        eventually cleaned up.

        Implementing this method explicitly declares the mechanism
        driver as having the intention to bind ports. This is inspected
        by the QoS service to identify the available QoS rules you
        can use with ports.
        """
        port = context.current
        network = context.network.current
        switch_name, switch_port, segmentation_id = \
            self._link_info_from_port(port, network)
        if not self._is_port_supported(port):
            # Fixed: the original message mixed '{}' and '%s' placeholders
            # in one str.format() call, so '%s' was emitted literally and
            # the vnic_type argument was silently dropped.
            LOG.debug('Port {} has vnic_type set to {} which is not correct '
                      'to work with networking-ansible driver.'.format(
                          port['id'], port[portbindings.VNIC_TYPE]))
            return

        segments = context.segments_to_bind

        LOG.debug('Plugging in port {switch_port} on '
                  '{switch_name} to vlan: {segmentation_id}'.format(
                      switch_port=switch_port,
                      switch_name=switch_name,
                      segmentation_id=segmentation_id))

        # Register a provisioning block so the port is not reported ACTIVE
        # until this entity marks provisioning complete.
        provisioning_blocks.add_provisioning_component(
            context._plugin_context, port['id'], resources.PORT,
            ANSIBLE_NETWORKING_ENTITY)

        # Assign port to network
        try:
            self.ansnet.update_access_port(switch_name, switch_port,
                                           segmentation_id)
            context.set_binding(segments[0][ml2api.ID],
                                portbindings.VIF_TYPE_OTHER, {})
            LOG.info('Port {neutron_port} has been plugged into '
                     'network {net_id} on device {switch_name}'.format(
                         neutron_port=port['id'],
                         net_id=network['id'],
                         switch_name=switch_name))
        except Exception as e:
            LOG.error('Failed to plug in port {neutron_port} on '
                      'device: {switch_name} from network {net_id} '
                      'reason: {exc}'.format(neutron_port=port['id'],
                                             net_id=network['id'],
                                             switch_name=switch_name,
                                             exc=e))
            # Re-raise as the ML2 driver error so the plugin aborts binding.
            raise ml2_exc.MechanismDriverError(e)