Example #1
    def test_get_nodes_ips(self):
        os_net = self.useFixture(k_fix.MockNetworkClient()).client
        ip1 = munch.Munch({
            'fixed_ips': [{
                'ip_address': '10.0.0.1'
            }],
            'trunk_details': True
        })
        ip2 = munch.Munch({
            'fixed_ips': [{
                'ip_address': '10.0.0.2'
            }],
            'trunk_details': True
        })
        ip3 = munch.Munch({
            'fixed_ips': [{
                'ip_address': '10.0.0.3'
            }],
            'trunk_details': None
        })
        ports = (p for p in [ip1, ip2, ip3])

        os_net.ports.return_value = ports
        trunk_ips = utils.get_nodes_ips()
        os_net.ports.assert_called_once_with(status='ACTIVE')
        self.assertEqual(
            trunk_ips,
            [ip1.fixed_ips[0]['ip_address'], ip2.fixed_ips[0]['ip_address']])

    def test_get_nodes_ips_tagged(self):
        CONF.set_override('resource_tags', ['foo'], group='neutron_defaults')
        self.addCleanup(CONF.clear_override,
                        'resource_tags',
                        group='neutron_defaults')

        os_net = self.useFixture(k_fix.MockNetworkClient()).client
        ip1 = munch.Munch({
            'fixed_ips': [{
                'ip_address': '10.0.0.1',
                'subnet_id': 'foo'
            }],
            'trunk_details': True
        })
        ip2 = munch.Munch({
            'fixed_ips': [{
                'ip_address': '10.0.0.2',
                'subnet_id': 'bar'
            }],
            'trunk_details': False
        })
        ports = (p for p in [ip1, ip2])

        os_net.ports.return_value = ports
        trunk_ips = utils.get_nodes_ips(['foo'])
        os_net.ports.assert_called_once_with(status='ACTIVE', tags=['foo'])
        self.assertEqual(trunk_ips, [ip1.fixed_ips[0]['ip_address']])
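
Both tests stub out os_net.ports and assert on the returned trunk IPs. The helper under test is not shown on this page; the following is a minimal sketch of what utils.get_nodes_ips might look like, inferred purely from the assertions above (clients.get_network_client() and the neutron_defaults.resource_tags option exist in kuryr-kubernetes, but this body is a reconstruction, not the upstream source):

from kuryr_kubernetes import clients
from kuryr_kubernetes import config


def get_nodes_ips(node_subnets=None):
    """Return the first fixed IP of every ACTIVE trunk port."""
    trunk_ips = []
    os_net = clients.get_network_client()
    tags = config.CONF.neutron_defaults.resource_tags
    if tags:
        ports = os_net.ports(status='ACTIVE', tags=tags)
    else:
        ports = os_net.ports(status='ACTIVE')
    for port in ports:
        # Only trunk ports belong to nodes that can host pods.
        if not port.trunk_details:
            continue
        fixed_ip = port.fixed_ips[0]
        # When node subnets are given, skip ports outside of them.
        if node_subnets and fixed_ip.get('subnet_id') not in node_subnets:
            continue
        trunk_ips.append(fixed_ip['ip_address'])
    return trunk_ips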
Example #3
    def on_added(self, kuryrnet_crd):
        subnet_id = kuryrnet_crd['spec'].get('subnetId')
        if kuryrnet_crd['spec'].get('populated'):
            LOG.debug("Subnet %s already populated", subnet_id)
            return

        namespace = kuryrnet_crd['metadata']['annotations'].get(
            'namespaceName')
        namespace_obj = driver_utils.get_namespace(namespace)
        namespace_kuryrnet_annotations = driver_utils.get_annotations(
            namespace_obj, constants.K8S_ANNOTATION_NET_CRD)
        if namespace_kuryrnet_annotations != kuryrnet_crd['metadata']['name']:
            # NOTE(ltomasbo): Ensure pool is not populated if namespace is not
            # yet annotated with kuryrnet information
            return

        # NOTE(ltomasbo): using namespace name instead of object as it is not
        # required
        project_id = self._drv_project.get_project(namespace)
        subnets = self._drv_subnets.get_namespace_subnet(namespace, subnet_id)
        sg_id = kuryrnet_crd['spec'].get('sgId', [])

        nodes = utils.get_nodes_ips()
        # NOTE(ltomasbo): Patching the kuryrnet_crd here, instead of after
        # the populate_pool call, ensures the initial population does not
        # happen twice upon unexpected problems, such as Neutron failing to
        # transition the ports to ACTIVE or replying too slowly. In that
        # case, even though the population actions were triggered, the pools
        # will not get the ports loaded (as they are not ACTIVE), and new
        # population actions may be triggered if the controller restarts
        # before the populated=true patching is performed.
        driver_utils.patch_kuryrnet_crd(kuryrnet_crd, populated=True)
        # TODO(ltomasbo): Skip the master node where pods are not usually
        # allocated.
        for node_ip in nodes:
            LOG.debug("Populating subnet pool %s at node %s", subnet_id,
                      node_ip)
            try:
                self._drv_vif_pool.populate_pool(node_ip, project_id, subnets,
                                                 sg_id)
            except exceptions.ResourceNotReady:
                # Ensure the repopulation is retriggered if the system was not
                # yet ready to perform the repopulation actions
                driver_utils.patch_kuryrnet_crd(kuryrnet_crd, populated=False)
                raise
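
A second variant of the same handler, without the populated bookkeeping, populates the pools unconditionally:
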
    def on_added(self, kuryrnet_crd):
        namespace = kuryrnet_crd['metadata']['annotations'].get(
            'namespaceName')
        # NOTE(ltomasbo): using namespace name instead of object as it is not
        # required
        project_id = self._drv_project.get_project(namespace)
        subnet_id = kuryrnet_crd['spec'].get('subnetId')
        subnets = self._drv_subnets.get_namespace_subnet(namespace, subnet_id)
        sg_id = kuryrnet_crd['spec'].get('sgId', [])

        nodes = utils.get_nodes_ips()
        # TODO(ltomasbo): Skip the master node where pods are not usually
        # allocated.
        for node_ip in nodes:
            LOG.debug("Populating subnet pool %s at node %s", subnet_id,
                      node_ip)
            self._drv_vif_pool.populate_pool(node_ip, project_id, subnets,
                                             sg_id)
Example #5
    def on_present(self, kuryrnet_crd, *args, **kwargs):
        subnet_id = kuryrnet_crd.get('status', {}).get('subnetId')
        if not subnet_id:
            LOG.debug("No Subnet present for KuryrNetwork %s",
                      kuryrnet_crd['metadata']['name'])
            return

        if kuryrnet_crd['status'].get('populated'):
            LOG.debug("Subnet %s already populated for Namespace %s",
                      subnet_id, kuryrnet_crd['metadata']['name'])
            return

        namespace = kuryrnet_crd['spec'].get('nsName')
        project_id = kuryrnet_crd['spec'].get('projectId')
        # NOTE(ltomasbo): using namespace name instead of object as it is not
        # required
        subnets = self._drv_subnets.get_namespace_subnet(namespace, subnet_id)

        node_subnets = self._drv_nodes_subnets.get_nodes_subnets(
            raise_on_empty=True)
        nodes = utils.get_nodes_ips(node_subnets)
        # NOTE(ltomasbo): Patching the kuryrnet_crd here, instead of after
        # the populate_pool call, ensures the initial population does not
        # happen twice upon unexpected problems, such as Neutron failing to
        # transition the ports to ACTIVE or replying too slowly. In that
        # case, even though the population actions were triggered, the pools
        # will not get the ports loaded (as they are not ACTIVE), and new
        # population actions may be triggered if the controller restarts
        # before the populated=true patching is performed.
        self._patch_kuryrnetwork_crd(kuryrnet_crd, populated=True)
        # TODO(ltomasbo): Skip the master node where pods are not usually
        # allocated.
        for node_ip in nodes:
            LOG.debug("Populating subnet pool %s at node %s", subnet_id,
                      node_ip)
            try:
                self._drv_vif_pool.populate_pool(node_ip, project_id, subnets,
                                                 [])
            except exceptions.ResourceNotReady:
                # Ensure the repopulation is retriggered if the system was not
                # yet ready to perform the repopulation actions
                self._patch_kuryrnetwork_crd(kuryrnet_crd, populated=False)
                raise
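
Examples #3 and #5 share the same mark-then-rollback flow around populate_pool. Reduced to its bare control flow (the function below and its parameters are illustrative stand-ins, not kuryr-kubernetes APIs):

from kuryr_kubernetes import exceptions


def populate_marked(patch_crd, populate_pool, crd, nodes):
    # Flag the CRD as populated *before* doing the work, so a controller
    # restart mid-population cannot trigger a second, duplicate run.
    patch_crd(crd, populated=True)
    for node_ip in nodes:
        try:
            populate_pool(node_ip)
        except exceptions.ResourceNotReady:
            # Not ready yet: clear the flag so the population is
            # retriggered by a later event, then propagate the error.
            patch_crd(crd, populated=False)
            raise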