예제 #1
0
    def _create_lb_security_group_rule(self, loadbalancer, listener):
        """Ensure an ingress SG rule exists for the listener's port.

        When the loadbalancer has no security group yet, one is created,
        tagged, recorded on the loadbalancer and attached to the VIP port.
        """
        os_net = clients.get_network_client()
        sg_id = self._find_listeners_sg(loadbalancer)
        if not sg_id:
            sg = os_net.create_security_group(
                name=loadbalancer.name, project_id=loadbalancer.project_id)
            c_utils.tag_neutron_resources([sg])
            sg_id = sg.id
            loadbalancer.security_groups.append(sg_id)
            vip_port = self._get_vip_port(loadbalancer)
            os_net.update_port(vip_port.id, security_groups=[sg_id])

        rule_args = {
            'direction': 'ingress',
            'port_range_min': listener.port,
            'port_range_max': listener.port,
            'protocol': listener.protocol,
            'security_group_id': sg_id,
            'description': listener.name,
        }
        try:
            os_net.create_security_group_rule(**rule_args)
        except os_exc.ConflictException:
            # The rule already exists; nothing to do.
            pass
        except os_exc.SDKException:
            LOG.exception('Failed when creating security group rule for '
                          'listener %s.', listener.name)
예제 #2
0
    def request_vifs(self, pod, project_id, subnets, security_groups,
                     num_ports):
        """Bulk-create num_ports unbound ports and return their VIFs."""
        neutron = clients.get_neutron_client()

        port_rq = self._get_port_request(pod,
                                         project_id,
                                         subnets,
                                         security_groups,
                                         unbound=True)

        bulk_port_rq = {'ports': [port_rq] * num_ports}
        try:
            ports = neutron.create_port(bulk_port_rq).get('ports')
        except n_exc.NeutronClientException:
            LOG.exception("Error creating bulk ports: %s", bulk_port_rq)
            raise
        utils.tag_neutron_resources('ports', [p['id'] for p in ports])

        vif_plugin = self._get_vif_plugin(ports[0])
        # NOTE(ltomasbo): Due to the bug (1696051) on neutron bulk port
        # creation request returning the port objects without binding
        # information, an additional port show is performed to get the binding
        # information
        if vif_plugin == 'unbound':
            shown = neutron.show_port(ports[0]['id']).get('port')
            vif_plugin = self._get_vif_plugin(shown)

        return [ovu.neutron_to_osvif_vif(vif_plugin, p, subnets)
                for p in ports]
예제 #3
0
    def request_vif(self, pod, project_id, subnets, security_groups):
        """Create an SR-IOV port for the pod; None when no VF is left."""
        vfs_left = self._get_remaining_sriov_vfs(pod)
        if not vfs_left:
            LOG.error("SRIOV VIF request failed due to lack of "
                      "available VFs for the current pod creation")
            return None

        pod_name = pod['metadata']['name']
        neutron = clients.get_neutron_client()
        subnet_id = next(iter(subnets))
        physnet = self._get_physnet_for_subnet_id(subnet_id)
        LOG.debug("Pod {} handling {}".format(pod_name, physnet))

        port_rq = self._get_port_request(pod, project_id, subnets,
                                         security_groups)
        port = neutron.create_port(port_rq).get('port')
        c_utils.tag_neutron_resources('ports', [port['id']])

        vif = ovu.neutron_to_osvif_vif('sriov', port, subnets)
        vif.physnet = physnet

        LOG.debug("{} vifs are available for the pod {}".format(
            vfs_left, pod_name))

        # Book-keep the VF consumed by this pod.
        self._reduce_remaining_sriov_vfs(pod)
        return vif
예제 #4
0
    def allocate_ip(self,
                    pub_net_id,
                    project_id,
                    pub_subnet_id=None,
                    description=None,
                    port_id_to_be_associated=None):
        """Allocate a floating IP, reusing one already bound to the port.

        Returns a (fip_id, fip_address) tuple.
        """
        os_net = clients.get_network_client()

        if port_id_to_be_associated is not None:
            for existing in os_net.ips(port_id=port_id_to_be_associated):
                if existing and existing['floating_ip_address']:
                    LOG.debug('FIP %s already allocated to port %s',
                              existing['floating_ip_address'],
                              port_id_to_be_associated)
                    return existing['id'], existing['floating_ip_address']

        try:
            fip = os_net.create_ip(floating_network_id=pub_net_id,
                                   project_id=project_id,
                                   subnet_id=pub_subnet_id,
                                   description=description)
        except os_exc.SDKException:
            LOG.exception("Failed to create floating IP - netid=%s ",
                          pub_net_id)
            raise
        utils.tag_neutron_resources([fip])
        return fip.id, fip.floating_ip_address
예제 #5
0
    def _create_lb_security_group_rule(self, loadbalancer, listener):
        """Open the listener's port in the loadbalancer's security group.

        The security group is created and wired to the VIP port first
        when it does not exist yet.
        """
        neutron = clients.get_neutron_client()
        sg_id = self._find_listeners_sg(loadbalancer)
        if not sg_id:
            sg = neutron.create_security_group({
                'security_group': {
                    'name': loadbalancer.name,
                    'project_id': loadbalancer.project_id,
                    },
                })
            sg_id = sg['security_group']['id']
            c_utils.tag_neutron_resources('security-groups', [sg_id])
            loadbalancer.security_groups.append(sg_id)
            vip_port = self._get_vip_port(loadbalancer)
            neutron.update_port(vip_port.get('id'),
                                {'port': {'security_groups': [sg_id]}})

        rule = {
            'direction': 'ingress',
            'port_range_min': listener.port,
            'port_range_max': listener.port,
            'protocol': listener.protocol,
            'security_group_id': sg_id,
            'description': listener.name,
        }
        try:
            neutron.create_security_group_rule({'security_group_rule': rule})
        except n_exc.NeutronClientException as ex:
            # A conflict means the rule is already in place; anything
            # else is logged (but, as before, not re-raised).
            if ex.status_code != requests.codes.conflict:
                LOG.exception('Failed when creating security group rule '
                              'for listener %s.', listener.name)
    def create_namespace_sg(self, namespace, project_id, crd_spec):
        """Create the security group associated with a namespace.

        Returns a dict carrying the new security group id as 'sgId'.
        """
        neutron = clients.get_neutron_client()

        sg_name = "ns/" + namespace + "-sg"
        try:
            # default namespace is different from the rest
            # Default allows traffic from everywhere
            # The rest can be accessed from the default one
            sg_rq = {"security_group": {"name": sg_name,
                                        "project_id": project_id}}
            sg = neutron.create_security_group(sg_rq).get('security_group')
            utils.tag_neutron_resources('security-groups', [sg['id']])
            rule_rq = {"security_group_rule": {
                "direction": "ingress",
                "remote_ip_prefix": crd_spec['subnetCIDR'],
                "security_group_id": sg['id']}}
            neutron.create_security_group_rule(rule_rq)
        except n_exc.NeutronClientException:
            LOG.exception(
                "Error creating security group for the namespace "
                "%s", namespace)
            raise
        return {'sgId': sg['id']}
예제 #7
0
    def request_vifs(self, pod, project_id, subnets, security_groups,
                     num_ports, trunk_ip=None):
        """This method creates subports and returns a list with their vifs.

        It creates up to num_ports subports and attaches them to the trunk
        port.

        If not enough vlan ids are available for all the subports to create,
        it creates as much as available vlan ids.

        Note the neutron trunk_add_subports is an atomic operation that will
        either attach all or none of the subports. Therefore, if there is a
        vlan id collision, all the created ports will be deleted and the
        exception is raised.
        """
        neutron = clients.get_neutron_client()
        # Resolve the trunk parent port either by host IP (when given) or
        # from the node the pod runs on.
        if trunk_ip:
            parent_port = self._get_parent_port_by_host_ip(neutron, trunk_ip)
        else:
            parent_port = self._get_parent_port(neutron, pod)
        trunk_id = self._get_trunk_id(parent_port)

        # subports_info holds one entry per vlan id that could be reserved;
        # its length caps how many ports get created below.
        port_rq, subports_info = self._create_subports_info(
            pod, project_id, subnets, security_groups,
            trunk_id, num_ports, unbound=True)

        if not subports_info:
            LOG.error("There are no vlan ids available to create subports")
            return []

        # Bulk request: the same port request dict repeated once per subport.
        bulk_port_rq = {'ports': [port_rq] * len(subports_info)}
        try:
            ports = neutron.create_port(bulk_port_rq).get('ports')
        except n_exc.NeutronClientException:
            LOG.exception("Error creating bulk ports: %s", bulk_port_rq)
            raise
        utils.tag_neutron_resources('ports', [port['id'] for port in ports])

        # Pair each created port with its reserved vlan id entry.
        for index, port in enumerate(ports):
            subports_info[index]['port_id'] = port['id']

        try:
            try:
                neutron.trunk_add_subports(trunk_id,
                                           {'sub_ports': subports_info})
            except n_exc.Conflict:
                # Vlan id collision: the attach is atomic so none of the
                # subports were attached -- clean up every created port.
                LOG.error("vlan ids already in use on trunk")
                for port in ports:
                    neutron.delete_port(port['id'])
                raise
        except n_exc.NeutronClientException:
            LOG.exception("Error happened during subport addition to trunk")
            raise

        # Build one nested-vlan VIF per (port, vlan id) pair.
        vifs = []
        for index, port in enumerate(ports):
            vlan_id = subports_info[index]['segmentation_id']
            vif = ovu.neutron_to_osvif_vif_nested_vlan(port, subnets, vlan_id)
            vifs.append(vif)
        return vifs
예제 #8
0
    def request_vif(self, pod, project_id, subnets, security_groups):
        """Create an SR-IOV port for the pod; None when no VF is left."""
        pod_name = pod['metadata']['name']
        os_net = clients.get_network_client()
        subnet_id = next(iter(subnets))
        physnet = self._get_physnet_for_subnet_id(subnet_id)
        LOG.debug("Pod {} handling {}".format(pod_name, physnet))

        vfs_left = self._get_remaining_sriov_vfs(pod, physnet)
        if not vfs_left:
            LOG.error("SRIOV VIF request failed due to lack of "
                      "available VFs for the current pod creation")
            return None

        port_rq = self._get_port_request(pod, project_id,
                                         subnets, security_groups)
        port = os_net.create_port(**port_rq)
        self._check_port_binding([port])
        if not self._tag_on_creation:
            c_utils.tag_neutron_resources([port])

        vif = ovu.neutron_to_osvif_vif('sriov', port, subnets)
        vif.physnet = physnet
        vif.pod_name = pod_name
        vif.pod_link = pod['metadata']['selfLink']

        LOG.debug("{} vifs are available for the pod {}".format(
            vfs_left, pod_name))

        # Book-keep the VF consumed by this pod.
        self._reduce_remaining_sriov_vfs(pod, physnet)
        return vif
예제 #9
0
    def create_security_group(self, knp, project_id):
        """Create the initial security group for a network policy.

        Returns the id of the created security group.
        """
        sg_name = ("sg-" + knp['metadata']['namespace'] + "-" +
                   knp['metadata']['name'])
        desc = ("Kuryr-Kubernetes Network Policy %s SG" %
                utils.get_res_unique_name(knp))
        try:
            sg = self.os_net.create_security_group(name=sg_name,
                                                   project_id=project_id,
                                                   description=desc)
            driver_utils.tag_neutron_resources([sg])
            # NOTE(dulek): Neutron populates every new SG with two rules
            #              allowing egress on IPv4 and IPv6. This collides with
            #              how network policies are supposed to work, because
            #              initially even egress traffic should be blocked.
            #              To work around this we will delete those two SG
            #              rules just after creation.
            for default_rule in sg.security_group_rules:
                self.os_net.delete_security_group_rule(default_rule['id'])
        except (os_exc.SDKException, exceptions.ResourceNotReady):
            LOG.exception("Error creating security group for network policy "
                          " %s", knp['metadata']['name'])
            raise

        return sg.id
예제 #10
0
    def create_network(self, ns_name, project_id):
        """Return the id of the namespace network, creating it if missing."""
        os_net = clients.get_network_client()
        net_name = 'ns/' + ns_name + '-net'
        tags = oslo_cfg.CONF.neutron_defaults.resource_tags
        query = {'name': net_name}
        if tags:
            query['tags'] = tags
        existing = os_net.networks(**query)

        try:
            # NOTE(ltomasbo): only one network must exists
            return next(existing).id
        except StopIteration:
            LOG.debug('Network does not exist. Creating.')

        # create network with namespace as name
        try:
            net = os_net.create_network(name=net_name, project_id=project_id)
            c_utils.tag_neutron_resources([net])
        except os_exc.SDKException:
            LOG.exception(
                "Error creating neutron resources for the namespace "
                "%s", ns_name)
            raise
        return net.id
예제 #11
0
    def create_subnet(self, ns_name, project_id, net_id):
        """Return (id, cidr) of the namespace subnet, creating it if missing.

        Raises ResourceNotReady when neutron exhausts its retries on the
        subnetpool allocation, so the caller can retry.
        """
        os_net = clients.get_network_client()
        subnet_name = "ns/" + ns_name + "-subnet"
        tags = oslo_cfg.CONF.neutron_defaults.resource_tags
        query = {'name': subnet_name}
        if tags:
            query['tags'] = tags
        existing = os_net.subnets(**query)

        try:
            # NOTE(ltomasbo): only one subnet must exists
            found = next(existing)
            return found.id, found.cidr
        except StopIteration:
            LOG.debug('Subnet does not exist. Creating.')

        # create subnet with namespace as name
        subnet_pool_id = oslo_cfg.CONF.namespace_subnet.pod_subnet_pool
        ip_version = utils.get_subnetpool_version(subnet_pool_id)
        try:
            subnet = os_net.create_subnet(network_id=net_id,
                                          ip_version=ip_version,
                                          name=subnet_name,
                                          enable_dhcp=False,
                                          subnetpool_id=subnet_pool_id,
                                          project_id=project_id)
        except os_exc.ConflictException:
            LOG.debug("Max number of retries on neutron side achieved, "
                      "raising ResourceNotReady to retry subnet creation "
                      "for %s", subnet_name)
            raise exceptions.ResourceNotReady(subnet_name)
        c_utils.tag_neutron_resources([subnet])

        return subnet.id, subnet.cidr
예제 #12
0
    def create_security_group(self, knp, project_id):
        """Create the initial security group for a network policy.

        On failure a Kubernetes warning event is emitted on the referenced
        NetworkPolicy before re-raising. Returns the new security group id.
        """
        sg_name = driver_utils.get_resource_name(knp['metadata']['namespace'] +
                                                 '-' +
                                                 knp['metadata']['name'],
                                                 prefix='sg/')
        desc = ("Kuryr-Kubernetes Network Policy %s SG" %
                utils.get_res_unique_name(knp))
        try:
            sg = self.os_net.create_security_group(name=sg_name,
                                                   project_id=project_id,
                                                   description=desc)
            driver_utils.tag_neutron_resources([sg])
            # NOTE(dulek): Neutron populates every new SG with two rules
            #              allowing egress on IPv4 and IPv6. This collides with
            #              how network policies are supposed to work, because
            #              initially even egress traffic should be blocked.
            #              To work around this we will delete those two SG
            #              rules just after creation.
            for default_rule in sg.security_group_rules:
                self.os_net.delete_security_group_rule(default_rule['id'])
        except (os_exc.SDKException, exceptions.ResourceNotReady) as exc:
            np = utils.get_referenced_object(knp, 'NetworkPolicy')
            if np:
                self.kubernetes.add_event(np, 'FailedToAddSecurityGroup',
                                          f'Adding new security group or '
                                          f'security group rules for '
                                          f'corresponding network policy has '
                                          f'failed: {exc}', 'Warning')
            LOG.exception("Error creating security group for network policy "
                          " %s", knp['metadata']['name'])
            raise

        return sg.id
예제 #13
0
    def request_vifs(self, pod, project_id, subnets, security_groups,
                     num_ports):
        """Bulk-create num_ports unbound ports and return their VIFs."""
        os_net = clients.get_network_client()

        port_rq = self._get_port_request(pod,
                                         project_id,
                                         subnets,
                                         security_groups,
                                         unbound=True)

        bulk_port_rq = {'ports': [port_rq] * num_ports}
        try:
            ports = list(os_net.create_ports(bulk_port_rq))
        except os_exc.SDKException:
            LOG.exception("Error creating bulk ports: %s", bulk_port_rq)
            raise

        vif_plugin = ports[0].binding_vif_type
        # NOTE(ltomasbo): Due to the bug (1696051) on neutron bulk port
        # creation request returning the port objects without binding
        # information, an additional port show is performed to get the binding
        # information
        if vif_plugin == 'unbound':
            vif_plugin = os_net.get_port(ports[0].id).binding_vif_type

        self._check_port_binding(ports)
        if not self._tag_on_creation:
            utils.tag_neutron_resources(ports)
        return [ovu.neutron_to_osvif_vif(vif_plugin, port, subnets)
                for port in ports]
예제 #14
0
    def request_vif(self, pod, project_id, subnets, security_groups):
        """Create a port for the pod and wrap it in an os-vif VIF."""
        os_net = clients.get_network_client()

        port_rq = self._get_port_request(pod, project_id, subnets,
                                         security_groups)
        port = os_net.create_port(**port_rq)
        utils.tag_neutron_resources([port])

        return ovu.neutron_to_osvif_vif(port.binding_vif_type, port, subnets)
예제 #15
0
    def create_security_group_rules_from_network_policy(self, policy,
                                                        project_id):
        """Create initial security group and rules

        This method creates the initial security group for hosting security
        group rules coming out of network policies' parsing.
        """
        sg_name = ("sg-" + policy['metadata']['namespace'] + "-" +
                   policy['metadata']['name'])
        security_group_body = {
            "security_group":
                {
                    "name": sg_name,
                    "project_id": project_id,
                    "description": "Kuryr-Kubernetes NetPolicy SG"
                }
        }
        sg = None
        try:
            # Create initial security group
            sg = self.neutron.create_security_group(body=security_group_body)
            sg_id = sg['security_group']['id']
            driver_utils.tag_neutron_resources('security-groups', [sg_id])
            # Translate the policy spec into ingress/egress rule dicts, then
            # create each rule and record the resulting neutron id in place.
            i_rules, e_rules = self.parse_network_policy_rules(policy, sg_id)
            for i_rule in i_rules:
                sgr_id = driver_utils.create_security_group_rule(i_rule)
                i_rule['security_group_rule']['id'] = sgr_id

            for e_rule in e_rules:
                sgr_id = driver_utils.create_security_group_rule(e_rule)
                e_rule['security_group_rule']['id'] = sgr_id

            # Add default rules to allow traffic from host and svc subnet
            self._add_default_np_rules(sg_id)
        except (n_exc.NeutronClientException, exceptions.ResourceNotReady):
            LOG.exception("Error creating security group for network policy "
                          " %s", policy['metadata']['name'])
            # If there's any issue creating sg rules, remove them
            if sg:
                self.neutron.delete_security_group(sg['security_group']['id'])
            raise
        try:
            # Persist the parsed rules on a KuryrNetPolicy CRD; roll the SG
            # back if the CRD cannot be created.
            self._add_kuryrnetpolicy_crd(policy, project_id,
                                         sg['security_group']['id'], i_rules,
                                         e_rules)
        except exceptions.K8sClientException:
            LOG.exception("Rolling back security groups")
            # Same with CRD creation
            self.neutron.delete_security_group(sg['security_group']['id'])
            raise
        try:
            # Annotate the policy object with a link back to the created CRD.
            crd = self.get_kuryrnetpolicy_crd(policy)
            self.kubernetes.annotate(policy['metadata']['selfLink'],
                                     {"kuryrnetpolicy_selfLink":
                                      crd['metadata']['selfLink']})
        except exceptions.K8sClientException:
            LOG.exception('Error annotating network policy')
            raise
예제 #16
0
    def request_vif(self, pod, project_id, subnets, security_groups):
        """Create a port for the pod and wrap it in an os-vif VIF."""
        neutron = clients.get_neutron_client()

        port_rq = self._get_port_request(pod, project_id, subnets,
                                         security_groups)
        port = neutron.create_port(port_rq).get('port')
        utils.tag_neutron_resources('ports', [port['id']])

        return ovu.neutron_to_osvif_vif(self._get_vif_plugin(port), port,
                                        subnets)
예제 #17
0
    def request_vif(self, pod, project_id, subnets, security_groups):
        """Create a subport on the node's trunk and return its VIF."""
        os_net = clients.get_network_client()
        trunk_id = self._get_trunk_id(self._get_parent_port(pod))

        port_rq = self._get_port_request(pod, project_id, subnets,
                                         security_groups)
        port = os_net.create_port(**port_rq)
        utils.tag_neutron_resources([port])
        # Attaching reserves a vlan id on the trunk for this subport.
        vlan_id = self._add_subport(trunk_id, port.id)

        return ovu.neutron_to_osvif_vif_nested_vlan(port, subnets, vlan_id)
예제 #18
0
    def request_vif(self, pod, project_id, subnets, security_groups):
        """Create a subport on the node's trunk and return its VIF."""
        neutron = clients.get_neutron_client()
        trunk_id = self._get_trunk_id(self._get_parent_port(neutron, pod))

        port_rq = self._get_port_request(pod, project_id, subnets,
                                         security_groups)
        port = neutron.create_port(port_rq).get('port')
        utils.tag_neutron_resources('ports', [port['id']])
        # Attaching reserves a vlan id on the trunk for this subport.
        vlan_id = self._add_subport(neutron, trunk_id, port['id'])

        return ovu.neutron_to_osvif_vif_nested_vlan(port, subnets, vlan_id)
예제 #19
0
    def create_namespace_network(self, namespace, project_id):
        """Create the network/subnet pair for a namespace and plug the router.

        Returns a dict with 'netId', 'routerId', 'subnetId' and
        'subnetCIDR'. Raises ResourceNotReady when neutron reports a
        conflict on subnet creation so the caller can retry.
        """
        neutron = clients.get_neutron_client()

        router_id = oslo_cfg.CONF.namespace_subnet.pod_router
        subnet_pool_id = oslo_cfg.CONF.namespace_subnet.pod_subnet_pool

        # create network with namespace as name
        network_name = "ns/" + namespace + "-net"
        subnet_name = "ns/" + namespace + "-subnet"
        try:
            neutron_net = neutron.create_network(
                {
                    "network": {
                        "name": network_name,
                        "project_id": project_id
                    }
                }).get('network')
            c_utils.tag_neutron_resources('networks', [neutron_net['id']])

            # create a subnet within that network
            try:
                neutron_subnet = neutron.create_subnet(
                    {
                        "subnet": {
                            "network_id": neutron_net['id'],
                            "ip_version": 4,
                            "name": subnet_name,
                            "enable_dhcp": False,
                            "subnetpool_id": subnet_pool_id,
                            "project_id": project_id
                        }
                    }).get('subnet')
            except n_exc.Conflict:
                # Surface a retriable error instead of the raw conflict.
                LOG.debug("Max number of retries on neutron side achieved, "
                          "raising ResourceNotReady to retry subnet creation "
                          "for %s", subnet_name)
                raise exceptions.ResourceNotReady(subnet_name)
            c_utils.tag_neutron_resources('subnets', [neutron_subnet['id']])

            # connect the subnet to the router
            neutron.add_interface_router(router_id,
                                         {"subnet_id": neutron_subnet['id']})
        except n_exc.NeutronClientException:
            LOG.exception("Error creating neutron resources for the namespace "
                          "%s", namespace)
            raise
        return {'netId': neutron_net['id'],
                'routerId': router_id,
                'subnetId': neutron_subnet['id'],
                'subnetCIDR': neutron_subnet['cidr']}
    def request_vif(self, pod, project_id, subnets, security_groups):
        """Create a pod port and allow its addresses on the VM parent port."""
        neutron = clients.get_neutron_client()
        port_rq = self._get_port_request(pod, project_id, subnets,
                                         security_groups)
        vm_port = self._get_parent_port(neutron, pod)
        container_port = neutron.create_port(port_rq).get('port')
        utils.tag_neutron_resources('ports', [container_port['id']])

        mac = container_port['mac_address']
        ips = frozenset(fixed_ip['ip_address']
                        for fixed_ip in container_port['fixed_ips'])

        # Serialize updates to the parent port's allowed address pairs.
        with self.lock:
            self._add_to_allowed_address_pairs(neutron, vm_port, ips, mac)

        return ovu.neutron_to_osvif_vif_nested_macvlan(container_port, subnets)
예제 #21
0
    def create_namespace_network(self, namespace, project_id):
        """Create the network/subnet pair for a namespace and plug the router.

        Returns a dict with 'netId', 'routerId', 'subnetId' and
        'subnetCIDR'. Raises ResourceNotReady when neutron reports a
        conflict on subnet creation so the caller can retry.
        """
        os_net = clients.get_network_client()

        router_id = oslo_cfg.CONF.namespace_subnet.pod_router
        subnet_pool_id = oslo_cfg.CONF.namespace_subnet.pod_subnet_pool

        # create network with namespace as name
        net_name = "ns/" + namespace + "-net"
        subnet_name = "ns/" + namespace + "-subnet"
        try:
            net = os_net.create_network(name=net_name, project_id=project_id)
            c_utils.tag_neutron_resources([net])

            # create a subnet within that network
            try:
                subnet = os_net.create_subnet(network_id=net.id,
                                              ip_version=4,
                                              name=subnet_name,
                                              enable_dhcp=False,
                                              subnetpool_id=subnet_pool_id,
                                              project_id=project_id)
            except os_exc.ConflictException:
                LOG.debug("Max number of retries on neutron side achieved, "
                          "raising ResourceNotReady to retry subnet creation "
                          "for %s", subnet_name)
                raise exceptions.ResourceNotReady(subnet_name)
            c_utils.tag_neutron_resources([subnet])

            # connect the subnet to the router
            clients.handle_neutron_errors(os_net.add_interface_to_router,
                                          router_id,
                                          subnet_id=subnet.id)
        except os_exc.SDKException:
            LOG.exception("Error creating neutron resources for the namespace "
                          "%s", namespace)
            raise
        return {'netId': net.id,
                'routerId': router_id,
                'subnetId': subnet.id,
                'subnetCIDR': subnet.cidr}
예제 #22
0
    def create_network(self, ns, project_id):
        """Return the namespace network id, creating the network if needed.

        Matching networks that are missing the cluster tags get re-tagged
        before being returned.
        """
        os_net = clients.get_network_client()
        ns_name = ns['metadata']['name']
        ns_uid = ns['metadata']['uid']
        net_name = c_utils.get_resource_name(ns_name)
        old_net_name = c_utils.get_resource_name(ns_name, prefix='ns/',
                                                 suffix='-net')
        # TODO(gryf): remove old_net_name support in next release, and precise
        # the query by adding additional query parameter 'description' which
        # should contain namespace uid.
        networks = os_net.networks(name=(net_name, old_net_name))

        # NOTE(ltomasbo): only one network must exists
        net = next(networks, None)
        # Accept the match unless it carries the new name but a stale
        # description (guard needed only while old names are supported).
        if net is not None and (net.name != net_name or
                                net.description == ns_uid):
            # NOTE(gryf): It might happen, that network has been created, but
            # for some reason tagging has failed.
            if TAGS and not set(TAGS).issubset(set(net.tags)):
                c_utils.tag_neutron_resources([net], exceptions=True)
            return net.id
        LOG.debug('Network does not exist. Creating.')

        mtu_cfg = oslo_cfg.CONF.neutron_defaults.network_device_mtu
        attrs = {'name': net_name, 'project_id': project_id,
                 'description': ns_uid}
        if mtu_cfg:
            attrs['mtu'] = mtu_cfg

        try:
            net = os_net.create_network(**attrs)
        except os_exc.SDKException:
            LOG.exception("Error creating neutron resources for the namespace "
                          "%s", ns_name)
            raise
        c_utils.tag_neutron_resources([net], exceptions=True)
        return net.id
예제 #23
0
    def allocate_ip(self,
                    pub_net_id,
                    project_id,
                    pub_subnet_id=None,
                    description=None,
                    port_id_to_be_associated=None):
        """Allocate a floating IP on pub_net_id, reusing any FIP that is
        already attached to the given port.

        Returns a (fip_id, fip_address) tuple. Re-raises neutron client
        errors from the creation call.
        """
        neutron = clients.get_neutron_client()

        if port_id_to_be_associated is not None:
            floating_ips_list = neutron.list_floatingips(
                port_id=port_id_to_be_associated)
            for entry in floating_ips_list['floatingips']:
                if not entry:
                    continue
                if (entry['floating_ip_address']):
                    LOG.debug('FIP %s already allocated to port %s',
                              entry['floating_ip_address'],
                              port_id_to_be_associated)
                    return entry['id'], entry['floating_ip_address']

        request = {
            'floatingip': {
                'tenant_id': project_id,
                'project_id': project_id,
                'floating_network_id': pub_net_id
            }
        }

        if pub_subnet_id is not None:
            request['floatingip']['subnet_id'] = pub_subnet_id
        if description is not None:
            request['floatingip']['description'] = description

        try:
            fip = neutron.create_floatingip(request).get('floatingip')
        except n_exc.NeutronClientException:
            LOG.exception("Failed to create floating IP - netid=%s ",
                          pub_net_id)
            raise
        # Tag the FIP under its own tag-API resource type. The previous
        # 'networks' type pointed the tagging call at the wrong endpoint,
        # so the floating IP never received the cluster tags.
        utils.tag_neutron_resources('floatingips', [fip['id']])
        return fip['id'], fip['floating_ip_address']
예제 #24
0
    def request_vif(self, pod, project_id, subnets, security_groups):
        """Create a macvlan container port and register it on the VM port.

        Creates the container port once, then retries (up to the configured
        ``rev_update_attempts``) adding its MAC/IPs to the parent VM port's
        allowed-address-pairs, re-reading the VM port on each attempt to cope
        with concurrent revision updates.

        :param pod: pod object the VIF is requested for.
        :param project_id: project the port is created in.
        :param subnets: subnet mapping used to build the port request and the
                        resulting VIF.
        :param security_groups: security groups applied to the new port.
        :returns: os-vif VIF object for the nested macvlan port.
        """
        os_net = clients.get_network_client()
        req = self._get_port_request(pod, project_id, subnets, security_groups)
        attempts = kuryr_config.CONF.pod_vif_nested.rev_update_attempts
        container_port = None
        container_mac = None
        container_ips = None
        while attempts > 0:
            # Re-fetch the parent port each attempt: its revision may have
            # changed since the last failed allowed-address-pairs update.
            vm_port = self._get_parent_port(pod)

            if not container_port:
                container_port = os_net.create_port(**req)
                # BUG FIX: tag (and read mac/ips) once right after creation;
                # previously this ran on every retry iteration, re-tagging
                # the same port needlessly.
                utils.tag_neutron_resources([container_port])
                container_mac = container_port.mac_address
                container_ips = frozenset(
                    entry['ip_address']
                    for entry in container_port.fixed_ips)

            # Returns the remaining attempt budget (0 on success or when
            # attempts are exhausted).
            attempts = self._try_update_port(
                attempts, self._add_to_allowed_address_pairs, vm_port,
                container_ips, container_mac)

        return ovu.neutron_to_osvif_vif_nested_macvlan(container_port, subnets)
예제 #25
0
    def create_subnet(self, ns, project_id, net_id):
        """Ensure the namespace network has its pod subnet; return (id, cidr).

        If a subnet already exists on the network it is reused (re-tagging it
        when configured tags are missing); otherwise a new DHCP-less subnet is
        allocated from the configured pod subnet pool.

        :param ns: namespace object (k8s dict) the subnet belongs to.
        :param project_id: project the subnet is created in.
        :param net_id: id of the namespace's Neutron network.
        :returns: tuple of (subnet id, subnet cidr).
        :raises exceptions.ResourceNotReady: when Neutron reports a conflict
            (IP allocation retries exhausted) so the caller can retry later.
        """
        os_net = clients.get_network_client()
        ns_name = ns['metadata']['name']

        # NOTE(gryf): assumption is, that all the subnets (well, currently
        # only one) in specific k8s namespaces are under exactly one network,
        # which have proper namespace uid in its description, so there is no
        # need to put it on the subnet as well.
        subnet_name = c_utils.get_resource_name(ns_name)

        # NOTE(ltomasbo): only one subnet must exists
        existing = next(os_net.subnets(network_id=net_id), None)
        if existing is not None:
            # NOTE(gryf): same situation as in networks.
            if TAGS and not set(TAGS).issubset(set(existing.tags)):
                c_utils.tag_neutron_resources([existing], exceptions=True)
            return existing.id, existing.cidr

        LOG.debug('Subnet does not exist. Creating.')

        # create subnet with namespace as name
        pool_id = oslo_cfg.CONF.namespace_subnet.pod_subnet_pool
        ip_ver = utils.get_subnetpool_version(pool_id)
        try:
            new_subnet = os_net.create_subnet(network_id=net_id,
                                              ip_version=ip_ver,
                                              name=subnet_name,
                                              enable_dhcp=False,
                                              subnetpool_id=pool_id,
                                              project_id=project_id)
        except os_exc.ConflictException:
            LOG.debug("Max number of retries on neutron side achieved, "
                      "raising ResourceNotReady to retry subnet creation "
                      "for %s", subnet_name)
            raise exceptions.ResourceNotReady(subnet_name)
        c_utils.tag_neutron_resources([new_subnet], exceptions=True)

        return new_subnet.id, new_subnet.cidr
예제 #26
0
    def _extend_lb_security_group_rules(self, loadbalancer, listener):
        """Open the listener's protocol/port on the loadbalancer's SG.

        In 'create' sg_mode this driver owns the SG (creating it and wiring
        it onto the VIP port when missing); otherwise the SG managed by
        Octavia on the VIP port is reused. Ingress rules are then added for
        the listener's port from: each other SG on the loadbalancer (as
        remote group), the service subnet CIDR, and the worker nodes subnet
        CIDR when configured. Conflict (409) responses are ignored, as they
        mean the rule already exists.
        """
        neutron = clients.get_neutron_client()

        if CONF.octavia_defaults.sg_mode == 'create':
            sg_id = self._find_listeners_sg(loadbalancer)
            # if an SG for the loadbalancer has not being created, create one
            if not sg_id:
                sg = neutron.create_security_group({
                    'security_group': {
                        'name': loadbalancer.name,
                        'project_id': loadbalancer.project_id,
                    },
                })
                sg_id = sg['security_group']['id']
                c_utils.tag_neutron_resources('security-groups', [sg_id])
                loadbalancer.security_groups.append(sg_id)
                # Attach the new SG (plus any pre-existing ones) to the VIP
                # port so the rules below actually apply to VIP traffic.
                vip_port = self._get_vip_port(loadbalancer)
                neutron.update_port(vip_port.get('id'), {
                    'port': {
                        'security_groups': loadbalancer.security_groups
                    }
                })
        else:
            # NOTE(review): assumes the VIP port always carries at least one
            # security group in this mode — confirm against Octavia behavior.
            sg_id = self._get_vip_port(loadbalancer).get('security_groups')[0]
            # wait until octavia adds default sg rules
            self._remove_default_octavia_rules(sg_id, listener)

        # Allow the listener's traffic from every other SG referenced by the
        # loadbalancer into the target SG.
        for sg in loadbalancer.security_groups:
            if sg != sg_id:
                try:
                    neutron.create_security_group_rule({
                        'security_group_rule': {
                            'direction': 'ingress',
                            'port_range_min': listener.port,
                            'port_range_max': listener.port,
                            'protocol': listener.protocol,
                            'security_group_id': sg_id,
                            'remote_group_id': sg,
                            'description': listener.name,
                        },
                    })
                except n_exc.NeutronClientException as ex:
                    # 409 means the rule already exists; anything else is a
                    # real failure worth logging (but not fatal).
                    if ex.status_code != requests.codes.conflict:
                        LOG.exception(
                            'Failed when creating security group '
                            'rule for listener %s.', listener.name)

        # ensure routes have access to the services
        service_subnet_cidr = utils.get_subnet_cidr(loadbalancer.subnet_id)
        try:
            # add access from service subnet
            neutron.create_security_group_rule({
                'security_group_rule': {
                    'direction': 'ingress',
                    'port_range_min': listener.port,
                    'port_range_max': listener.port,
                    'protocol': listener.protocol,
                    'security_group_id': sg_id,
                    'remote_ip_prefix': service_subnet_cidr,
                    'description': listener.name,
                },
            })

            # add access from worker node VM subnet for non-native route
            # support
            worker_subnet_id = CONF.pod_vif_nested.worker_nodes_subnet
            if worker_subnet_id:
                worker_subnet_cidr = utils.get_subnet_cidr(worker_subnet_id)
                neutron.create_security_group_rule({
                    'security_group_rule': {
                        'direction': 'ingress',
                        'port_range_min': listener.port,
                        'port_range_max': listener.port,
                        'protocol': listener.protocol,
                        'security_group_id': sg_id,
                        'remote_ip_prefix': worker_subnet_cidr,
                        'description': listener.name,
                    },
                })
        except n_exc.NeutronClientException as ex:
            # Same conflict-tolerant handling as above.
            if ex.status_code != requests.codes.conflict:
                LOG.exception(
                    'Failed when creating security group rule '
                    'to enable routes for listener %s.', listener.name)
예제 #27
0
    def create_security_group_rules_from_network_policy(self, policy,
                                                        project_id):
        """Create initial security group and rules

        This method creates the initial security group for hosting security
        group rules coming out of network policies' parsing.

        On any failure while creating the SG/rules or the KuryrNetPolicy CRD
        the security group is deleted again before re-raising, so no orphan
        SG is left behind.

        :param policy: network policy object (k8s dict) to translate.
        :param project_id: project the security group is created in.
        :raises: re-raises Neutron/K8s client errors after rollback.
        """
        sg_name = ("sg-" + policy['metadata']['namespace'] + "-" +
                   policy['metadata']['name'])
        security_group_body = {
            "security_group":
                {
                    "name": sg_name,
                    "project_id": project_id,
                    "description": "Kuryr-Kubernetes NetPolicy SG"
                }
        }
        sg = None
        try:
            # Create initial security group
            sg = self.neutron.create_security_group(body=security_group_body)
            sg_id = sg['security_group']['id']
            driver_utils.tag_neutron_resources('security-groups', [sg_id])
            # NOTE(dulek): Neutron populates every new SG with two rules
            #              allowing egress on IPv4 and IPv6. This collides with
            #              how network policies are supposed to work, because
            #              initially even egress traffic should be blocked.
            #              To work around this we will delete those two SG
            #              rules just after creation.
            for sgr in sg['security_group']['security_group_rules']:
                self.neutron.delete_security_group_rule(sgr['id'])

            # Translate the policy into ingress/egress rule bodies and create
            # them, recording the resulting rule ids back into the bodies so
            # they can be stored on the CRD below.
            i_rules, e_rules = self.parse_network_policy_rules(policy, sg_id)
            for i_rule in i_rules:
                sgr_id = driver_utils.create_security_group_rule(i_rule)
                i_rule['security_group_rule']['id'] = sgr_id

            for e_rule in e_rules:
                sgr_id = driver_utils.create_security_group_rule(e_rule)
                e_rule['security_group_rule']['id'] = sgr_id

            # Add default rules to allow traffic from host and svc subnet
            self._add_default_np_rules(sg_id)
        except (n_exc.NeutronClientException, exceptions.ResourceNotReady,
                os_exc.ResourceNotFound):
            LOG.exception("Error creating security group for network policy "
                          " %s", policy['metadata']['name'])
            # If there's any issue creating sg rules, remove them
            if sg:
                self.neutron.delete_security_group(sg['security_group']['id'])
            raise
        try:
            self._add_kuryrnetpolicy_crd(policy, project_id,
                                         sg['security_group']['id'], i_rules,
                                         e_rules)
        except exceptions.K8sClientException:
            LOG.exception("Rolling back security groups")
            # Same with CRD creation
            self.neutron.delete_security_group(sg['security_group']['id'])
            raise
        try:
            # Link the policy back to its CRD via an annotation.
            crd = self.get_kuryrnetpolicy_crd(policy)
            self.kubernetes.annotate(policy['metadata']['selfLink'],
                                     {"kuryrnetpolicy_selfLink":
                                      crd['metadata']['selfLink']})
        except exceptions.K8sClientException:
            LOG.exception('Error annotating network policy')
            raise
예제 #28
0
    def request_vifs(self,
                     pod,
                     project_id,
                     subnets,
                     security_groups,
                     num_ports,
                     semaphore,
                     trunk_ip=None):
        """This method creates subports and returns a list with their vifs.

        It creates up to num_ports subports and attaches them to the trunk
        port.

        If not enough vlan ids are available for all the subports to create,
        it creates as much as available vlan ids.

        Note the os_net add_trunk_subports is an atomic operation that will
        either attach all or none of the subports. Therefore, if there is a
        vlan id collision, all the created ports will be deleted and the
        exception is raised.
        """
        os_net = clients.get_network_client()
        if trunk_ip:
            parent = self._get_parent_port_by_host_ip(trunk_ip)
        else:
            parent = self._get_parent_port(pod)
        trunk_id = self._get_trunk_id(parent)

        port_template, subports = self._create_subports_info(
            pod, project_id, subnets, security_groups, trunk_id, num_ports,
            unbound=True)

        if not subports:
            LOG.error("There are no vlan ids available to create subports")
            return []

        bulk_rq = {'ports': [port_template] * len(subports)}
        # restrict amount of create Ports in bulk that might be running
        # in parallel.
        with semaphore:
            try:
                new_ports = list(os_net.create_ports(bulk_rq))
            except os_exc.SDKException:
                for info in subports:
                    self._release_vlan_id(info['segmentation_id'])
                LOG.exception("Error creating bulk ports: %s", bulk_rq)
                raise

        self._check_port_binding(new_ports)
        if not self._tag_on_creation:
            utils.tag_neutron_resources(new_ports)

        # Record which port backs each subport entry before trunk attachment.
        for info, port in zip(subports, new_ports):
            info['port_id'] = port['id']

        # The nested try is deliberate: the outer handler also covers
        # SDKExceptions raised while cleaning up inside the inner handler.
        try:
            try:
                os_net.add_trunk_subports(trunk_id, subports)
            except os_exc.ConflictException:
                LOG.error("vlan ids already in use on trunk")
                utils.delete_ports(new_ports)
                for info in subports:
                    self._release_vlan_id(info['segmentation_id'])
                return []
        except os_exc.SDKException:
            LOG.exception("Error happened during subport addition to trunk")
            utils.delete_ports(new_ports)
            for info in subports:
                self._release_vlan_id(info['segmentation_id'])
            return []

        return [
            ovu.neutron_to_osvif_vif_nested_vlan(
                port, subnets, info['segmentation_id'])
            for port, info in zip(new_ports, subports)
        ]