def test_ensure_host_tunnel_addr_bad_ip(self, m_client, m_assign_host_tunnel_addr, m_get_tunnel_host_ip):
    """
    Check that an existing tunnel address lying outside every configured
    pool triggers reassignment from the IPIP-enabled pools.
    """
    # Current tunnel IP is not contained in any configured pool.
    m_get_tunnel_host_ip.return_value = IPAddress("11.0.0.1")
    v4_pools = [IPPool("10.0.0.0/16"), IPPool("10.1.0.0/16", ipip=True)]
    tunnel_pools = [IPPool("10.1.0.0/16", ipip=True)]

    startup._ensure_host_tunnel_addr(v4_pools, tunnel_pools)

    # A replacement address must be requested from the IPIP pools only.
    assert_equal(m_assign_host_tunnel_addr.mock_calls, [call(tunnel_pools)])
def test_ensure_host_tunnel_addr_no_ip(self, m_hostname, m_client, m_assign_host_tunnel_addr, m_get_tunnel_host_ip):
    """
    Check that a host with no tunnel address at all gets one assigned
    from the IPIP-enabled pools.
    """
    # No tunnel address currently configured for this host.
    m_get_tunnel_host_ip.return_value = None
    v4_pools = [IPPool("10.0.0.0/16"), IPPool("10.1.0.0/16", ipip=True)]
    tunnel_pools = [IPPool("10.1.0.0/16", ipip=True)]

    calico_ctl.node._ensure_host_tunnel_addr(v4_pools, tunnel_pools)

    # An address must be requested from the IPIP pools only.
    assert_equal(m_assign_host_tunnel_addr.mock_calls, [call(tunnel_pools)])
def test_assign_host_tunnel_addr_none_available(self, m_client, m_exit):
    """
    Check behaviour when no IPIP pool can supply a tunnel address: the
    command must exit with status 1.
    """
    # Every pool reports zero available addresses.
    m_client.auto_assign_ips.side_effect = iter([([], []), ([], [])])
    pools = [IPPool("10.1.0.0/16", ipip=True),
             IPPool("10.0.0.0/16", ipip=True)]

    # Make the mocked sys.exit raise so execution stops as it would
    # for a real exit.
    m_exit.side_effect = Exception
    assert_raises(Exception, startup._assign_host_tunnel_addr, pools)
    assert_equal(m_exit.mock_calls, [call(1)])
def test_assign_host_tunnel_addr(self, m_client):
    """
    Check that the tunnel address is taken from the first pool with
    space and recorded in per-host config.
    """
    startup.hostname = "host"
    # First pool full, IP allocated from second pool.
    m_client.auto_assign_ips.side_effect = iter(
        [([], []), ([IPAddress("10.0.0.1")], [])])
    pools = [IPPool("10.1.0.0/16", ipip=True),
             IPPool("10.0.0.0/16", ipip=True)]

    startup._assign_host_tunnel_addr(pools)

    assert_equal(m_client.set_per_host_config.mock_calls,
                 [call("host", "IpInIpTunnelAddr", "10.0.0.1")])
def get_ip_pool_config(self, version, cidr):
    """
    Fetch the stored configuration for a single IP pool.

    :param version: "v4" for IPv4, "v6" for IPv6
    :param cidr: IPNetwork identifying the pool.
    :return: An IPPool object.
    :raises KeyError: if no pool with this CIDR is configured.
    """
    assert version in ("v4", "v6")
    assert isinstance(cidr, IPNetwork)

    # Collapse host bits so e.g. 10.1.1.1/8 becomes 10.0.0.0/8.
    cidr = cidr.cidr
    etcd_key = IP_POOL_KEY % {
        "version": version,
        "pool": str(cidr).replace("/", "-")
    }

    try:
        raw_json = self.etcd_client.read(etcd_key).value
    except EtcdKeyNotFound:
        # Surface a friendlier error than the raw etcd exception.
        raise KeyError("%s is not a configured IP pool." % cidr)
    return IPPool.from_json(raw_json)
def ip_pool_range_add(start_ip, end_ip, version, ipip, masquerade): """ Add the range of ip addresses as CIDRs to the IP address allocation pool. :param start_ip: The first ip address the ip range. :param end_ip: The last ip address in the ip range. :param version: 4 or 6 :param ipip: Use IP in IP for this pool. :return: None """ if version == 6 and ipip: print "IP in IP not supported for IPv6 pools" sys.exit(1) ip_range = IPRange(start_ip, end_ip) pools = client.get_ip_pools(version) for pool in pools: pool_net = IPNetwork(pool.cidr) # Reject the new ip range if any of the following are true: # - The new ip range contains all ips of any existing pool # - An existing pool overlaps ips with the start of the new ip range # - An existing pool overlaps ips with the end of the new ip range if (pool_net in ip_range or start_ip in pool_net or end_ip in pool_net): print "Cannot add range - range conflicts with pool %s" % pool.cidr sys.exit(1) cidrs = netaddr.iprange_to_cidrs(start_ip, end_ip) for ip_net in cidrs: new_pool = IPPool(ip_net.cidr, ipip=ipip, masquerade=masquerade) client.add_ip_pool(version, new_pool)
def get_ip_pools(self, version, ipam=None):
    """
    Return the configured IP pools, optionally filtered by IPAM usage.

    :param version: 4 for IPv4, 6 for IPv6
    :param ipam: None to return every pool; True for only pools used by
    Calico IPAM; False for only pools not used by Calico IPAM.
    :return: List of IPPool.
    """
    assert version in (4, 6)
    path = IP_POOLS_PATH % {"version": str(version)}

    try:
        leaves = self.etcd_client.read(path, recursive=True).leaves
    except EtcdKeyNotFound:
        # No pools directory exists yet.
        return []

    # A recursive read of an empty directory yields the directory node
    # itself, whose value is empty - skip such entries.
    found = [IPPool.from_json(leaf.value) for leaf in leaves if leaf.value]

    if ipam is None:
        return found
    return [p for p in found if p.ipam == ipam]
def get_ip_and_pool(ip):
    """
    Resolve a user-supplied IP specification to a concrete address and
    its pool, assigning the address via IPAM where needed.

    :param ip: "ipv4"/"ipv6" to auto-assign an address of that version,
    a CIDR string to auto-assign from that specific pool, or a literal
    IP address to assign explicitly.
    :return: (IPAddress, IPPool) tuple.
    """
    if ip.lower() in ("ipv4", "ipv6"):
        # Auto-assign a single address of the requested version.
        if ip[-1] == '4':
            result = assign_any(1, 0)
            ip = result[0][0]
        else:
            result = assign_any(0, 1)
            ip = result[1][0]
        pool = get_pool_or_exit(ip)
    elif '/' in ip:
        # Fix: the previous "ip is not None and" guard here was dead
        # code - ip has already been dereferenced via ip.lower() above,
        # so it can never be None on this path.
        pool = IPPool(ip)
        if IPNetwork(ip).version == 4:
            result = assign_any(1, 0, pool=(pool, None))
            ip = result[0][0]
        else:
            result = assign_any(0, 1, pool=(None, pool))
            ip = result[1][0]
    else:
        # A specific address: check it is in an allocation pool.  If it
        # isn't, BIRD won't export it.
        ip = IPAddress(ip)
        pool = get_pool_or_exit(ip)

        # Assign the IP
        try:
            client.assign_ip(ip, None, {})
        except AlreadyAssignedError:
            print_paragraph("IP address is already assigned in pool "
                            "%s." % pool)
            sys.exit(1)

    return (ip, pool)
def create_network():
    """
    Docker NetworkDriver CreateNetwork handler: create a Calico profile
    for the network plus an IP pool per supplied address family, then
    persist the request JSON for later driver calls.
    """
    # force=True because the request lacks the JSON mimetype.  Malformed
    # JSON raises BadRequest, which becomes an HTTP 400 response.
    payload = request.get_json(force=True)
    app.logger.debug("CreateNetwork JSON=%s", payload)

    # The CNM "network" maps onto a Calico profile.
    network_id = payload["NetworkID"]
    app.logger.info("Creating profile %s", network_id)
    client.create_profile(network_id)

    # Mirror each CNM pool passed in as a Calico IP pool.
    for version in (4, 6):
        ip_data = payload["IPv%sData" % version]
        if ip_data:
            client.add_ip_pool(version, IPPool(ip_data[0]['Pool']))

    # Later calls need this request JSON:
    # - CreateEndpoint reads the gateway address from it.
    # - DeleteNetwork uses it to clean up the pool.
    client.write_network(network_id, payload)

    app.logger.debug("CreateNetwork response JSON=%s", "{}")
    return jsonify({})
def test_container_remove(self, m_netns, m_client, m_get_container_id, m_enforce_root):
    """
    Mainline test for the calicoctl "container remove" command.
    """
    # Fake endpoint with one IPv4 address and no IPv6 addresses.
    m_get_container_id.return_value = 666
    m_endpoint = Mock(spec=Endpoint)
    m_endpoint.ipv4_nets = {IPNetwork(IPAddress('1.1.1.1'))}
    m_endpoint.ipv6_nets = set()
    m_endpoint.endpoint_id = 12
    m_endpoint.name = "eth1234"
    m_client.get_endpoint.return_value = m_endpoint
    m_client.get_ip_pools.return_value = [IPPool('1.1.1.1/24')]

    # Call method under test
    container.container_remove('container1')

    # The workload is looked up, its address released, its veth removed
    # and the workload itself deleted.
    m_enforce_root.assert_called_once_with()
    m_get_container_id.assert_called_once_with('container1')
    m_client.get_endpoint.assert_called_once_with(
        hostname=utils.hostname,
        orchestrator_id=utils.ORCHESTRATOR_ID,
        workload_id=666)
    self.assertEqual(m_client.unassign_address.call_count, 1)
    m_netns.remove_veth.assert_called_once_with("eth1234")
    m_client.remove_workload.assert_called_once_with(
        utils.hostname, utils.ORCHESTRATOR_ID, 666)
def test_container_remove_veth_error(self, m_client, m_get_container_id, m_enforce_root, m_remove_veth):
    """
    Test container_remove when remove_veth raises: the command must
    exit and the workload must NOT be removed.
    """
    # Fake endpoint with one IPv4 address and no IPv6 addresses.
    m_endpoint = Mock(spec=Endpoint)
    m_endpoint.ipv4_nets = {IPNetwork(IPAddress('1.1.1.1'))}
    m_endpoint.ipv6_nets = set()
    m_endpoint.endpoint_id = 12
    m_endpoint.name = "eth1234"
    m_client.get_endpoint.return_value = m_endpoint
    m_client.get_ip_pools.return_value = [IPPool('1.1.1.1/24')]
    m_get_container_id.return_value = 52
    m_remove_veth.side_effect = CalledProcessError(1, "test")

    # Call function under test expecting a SystemExit
    self.assertRaises(SystemExit, container.container_remove, 'container1')

    m_enforce_root.assert_called_once_with()
    m_get_container_id.assert_called_once_with("container1")
    m_client.get_endpoint.assert_called_once_with(
        hostname=utils.hostname,
        orchestrator_id=utils.ORCHESTRATOR_ID,
        workload_id=52)
    m_client.get_ip_pools.assert_called_once_with(4)
    self.assertFalse(m_client.remove_workload.called)
def get_ip_and_pool(ip): if ip.lower() in ("ipv4", "ipv6"): if '4' in ip: result = assign_any(1, 0) ip = result[0][0] else: result = assign_any(0, 1) ip = result[1][0] pool = get_pool_or_exit(ip) elif ip is not None and '/' in ip: pool = IPPool(ip) if IPNetwork(ip).version == 4: result = assign_any(1, 0, pool=(pool, None)) ip = result[0][0] else: result = assign_any(0, 1, pool=(None, pool)) ip = result[1][0] else: # Check the IP is in the allocation pool. If it isn't, BIRD won't # export it. ip = IPAddress(ip) pool = get_pool_or_exit(ip) # Assign the IP if not client.assign_address(pool, ip): print "IP address is already assigned in pool %s " % pool sys.exit(1) return (ip, pool)
def ip_pool_add(cidrs, version, ipip, masquerade): """ Add the given CIDRS to the IP address allocation pool. :param cidrs: The pools to set in CIDR format, e.g. 192.168.0.0/16 :param version: 4 or 6 :param ipip: Use IP in IP for the pool(s). :param masquerade: Enable masquerade (outgoing NAT) for the pool(s). :return: None """ if version == 6 and ipip: print "IP in IP not supported for IPv6 pools" sys.exit(1) # TODO Reject any cidrs that overlap with existing cidrs in the pool pools = [] for cidr in cidrs: try: pools.append(IPPool(cidr, ipip=ipip, masquerade=masquerade)) except CidrTooSmallError: print "An IPv%s pool must have a prefix length of %s or lower." \ "\nGiven: %s.\nNo pools added." % \ (version, BLOCK_PREFIXLEN[version], cidr) sys.exit(1) for pool in pools: client.add_ip_pool(version, pool)
def test_ip_pool_remove(self, m_client, m_sleep):
    """
    Mainline test of ip_pool_remove when host affinities remain claimed
    by the datastore.
    """
    m_client.remove_ip_pool.side_effect = HostAffinityClaimedError
    net1 = IPNetwork("1.2.3.0/24")
    m_pool = IPPool(net1)
    m_pool.cidr = net1.ip
    m_client.get_ip_pool_config.return_value = m_pool

    self.assertRaises(SystemExit, pool.ip_pool_remove, [str(net1)], 4)

    # The pool is fetched, disabled and written back, affinities are
    # released, and the removal is attempted.
    m_client.get_ip_pool_config.assert_called_once_with(4, net1)
    m_client.set_ip_pool_config.assert_called_once_with(4, m_pool)
    self.assertEqual(m_pool.disabled, True)
    m_client.release_pool_affinities.assert_called_once_with(m_pool)
    m_client.remove_ip_pool.assert_called_once_with(4, net1.ip)
def test_request_pool_invalid_pool_defined(self, m_get_pools):
    """
    Test that request_pool returns an error when the requested pool is
    not a configured pool.
    """
    # Only 1.2.4.0/24 is configured; the request asks for 1.2.3.0/24.
    m_get_pools.return_value = [IPPool("1.2.4.0/24")]
    body = json.dumps({"Pool": "1.2.3.0/24", "SubPool": "", "V6": False})

    rv = self.app.post('/IpamDriver.RequestPool', data=body)

    self.assertTrue("Err" in json.loads(rv.data))
def test_add_overlapping_existing_pool_2(self, m_client):
    """
    Test that ip_pool_add exits when the new pool fully encompasses an
    existing pool.
    """
    # Existing /26 sits inside the requested /24.
    m_client.get_ip_pools.return_value = [IPPool("10.10.10.0/26")]

    with patch('sys.exit', autospec=True) as m_sys_exit:
        pool.ip_pool_add(cidrs=["10.10.10.0/24"], version=4,
                         ipip=False, masquerade=False)

    self.assertTrue(m_sys_exit.called)
def test_request_address_assign_ipv4_from_invalid_subnet(self, m_auto_assign, m_get_pools):
    """
    Test that request_address returns an error when auto-assignment is
    requested from a subnet that is not a configured pool.
    """
    # The assigned IP's subnet (1.2.3.0/24) is not in the pool list.
    m_auto_assign.return_value = ([], [IPAddress("1.2.3.4")])
    m_get_pools.return_value = [IPPool("1.2.5.0/24")]
    body = json.dumps({"PoolID": "1.2.3.0/24", "Address": ""})

    rv = self.app.post('/IpamDriver.RequestAddress', data=body)

    self.assertTrue("Err" in json.loads(rv.data))
def test_request_address_assign_ipv4_from_subnet(self, m_auto_assign, m_get_pools):
    """
    Test that request_address auto-assigns an IPv4 address from a valid
    subnet and returns it in CIDR notation.
    """
    ip = IPAddress("1.2.3.4")
    m_auto_assign.return_value = ([], [ip])
    m_get_pools.return_value = [IPPool("1.2.3.0/24")]
    body = json.dumps({"PoolID": "1.2.3.0/24", "Address": ""})

    rv = self.app.post('/IpamDriver.RequestAddress', data=body)

    expected = {"Address": str(IPNetwork(ip)), "Data": {}}
    self.assertDictEqual(json.loads(rv.data), expected)
def ip_pool_add(cidrs, version, ipip, masquerade): """ Add the given CIDRS to the IP address allocation pool. :param cidrs: The pools to set in CIDR format, e.g. 192.168.0.0/16 :param version: 4 or 6 :param ipip: Use IP in IP for the pool(s). :param masquerade: Enable masquerade (outgoing NAT) for the pool(s). :return: None """ if version == 6 and ipip: print "IP in IP not supported for IPv6 pools" sys.exit(1) current_pools = client.get_ip_pools(version) new_pools = [] # Ensure new pools are valid and do not overlap with each other or existing # pools. for cidr in cidrs: try: pool = IPPool(cidr, ipip=ipip, masquerade=masquerade) except InvalidBlockSizeError: print "An IPv%s pool must have a prefix length of %s or lower." \ "\nGiven: %s.\nNo pools added." % \ (version, BLOCK_PREFIXLEN[version], cidr) sys.exit(1) # Check if new pool overlaps with any existing pool overlapping_pool = _get_overlapping_pool(pool, current_pools) if overlapping_pool: print "Cannot add IP pool %s - pool overlaps with an " \ "existing pool %s" % (cidr, overlapping_pool.cidr) sys.exit(1) # Check if this new pool overlaps with any other new pool overlapping_pool = _get_overlapping_pool(pool, new_pools) if overlapping_pool: print "Cannot add IP pool %s - pool overlaps with another " \ "new pool %s" % (cidr, overlapping_pool.cidr) sys.exit(1) # Append pool to pending list of new pools to add to Calico new_pools.append(pool) # Make client call to add each pool to Calico for new_pool in new_pools: client.add_ip_pool(version, new_pool)
def test_create_network(self, m_add_ip_pool, m_write_network, m_create):
    """
    Test create_network: the profile is created, the IPv4 pool is
    added, and the request JSON is persisted.
    """
    request_json = '{"NetworkID": "%s", ' \
                   '"IPv4Data":[{"Pool": "6.5.4.3/21"}],' \
                   '"IPv6Data":[]' \
                   '}' % TEST_NETWORK_ID

    rv = self.app.post('/NetworkDriver.CreateNetwork', data=request_json)

    m_create.assert_called_once_with(TEST_NETWORK_ID)
    m_add_ip_pool.assert_called_once_with(4, IPPool("6.5.4.3/21"))
    m_write_network.assert_called_once_with(TEST_NETWORK_ID,
                                            json.loads(request_json))
    self.assertDictEqual(json.loads(rv.data), {})
def ip_pool_add(cidr_pool, version, ipip, masquerade): """ Add the the given CIDR range to the IP address allocation pool. :param cidr_pool: The pool to set in CIDR format, e.g. 192.168.0.0/16 :param version: v4 or v6 :param ipip: Use IP in IP for this pool. :return: None """ if version == "v6" and ipip: print "IP in IP not supported for IPv6 pools" sys.exit(1) cidr = check_ip_version(cidr_pool, version, IPNetwork) pool = IPPool(cidr, ipip=ipip, masquerade=masquerade) client.add_ip_pool(version, pool)
def test_request_pool_valid_ipv4_pool_defined(self, m_get_pools):
    """
    Test request_pool succeeds when a valid, configured IPv4 pool is
    requested.

    (Docstring fix: the assertions verify a successful response, not an
    error, so the previous "errors if" wording was wrong.)
    """
    request_data = {"Pool": "1.2.3.4/26", "SubPool": "", "V6": False}
    m_get_pools.return_value = [IPPool("1.2.3.4/26")]

    rv = self.app.post('/IpamDriver.RequestPool',
                       data=json.dumps(request_data))

    # The response echoes the pool ID; the 0.0.0.0/0 values are what the
    # driver returns for Pool and gateway here - presumably signalling
    # that Calico handles addressing itself (verify against the driver).
    response_data = {
        "PoolID": "1.2.3.4/26",
        "Pool": "0.0.0.0/0",
        "Data": {
            "com.docker.network.gateway": "0.0.0.0/0"
        }
    }
    self.assertDictEqual(json.loads(rv.data), response_data)
def ip_pool_add(cidrs, version, ipip, masquerade): """ Add the given CIDRS to the IP address allocation pool. :param cidrs: The pools to set in CIDR format, e.g. 192.168.0.0/16 :param version: 4 or 6 :param ipip: Use IP in IP for this pool. :return: None """ if version == 6 and ipip: print "IP in IP not supported for IPv6 pools" sys.exit(1) # TODO Reject any cidrs that overlap with existing cidrs in the pool for cidr in cidrs: pool = IPPool(cidr, ipip=ipip, masquerade=masquerade) client.add_ip_pool(version, pool)
def ip_pool_range_add(start_ip, end_ip, version, ipip, masquerade): """ Add the range of ip addresses as CIDRs to the IP address allocation pool. :param start_ip: The first ip address the ip range. :param end_ip: The last ip address in the ip range. :param version: 4 or 6 :param ipip: Use IP in IP for the pool(s). :param masquerade: Enable masquerade (outgoing NAT) for the pool(s). :return: None """ if version == 6 and ipip: print "IP in IP not supported for IPv6 pools" sys.exit(1) ip_range = IPRange(start_ip, end_ip) pools = client.get_ip_pools(version) for pool in pools: pool_net = IPNetwork(pool.cidr) # Reject the new ip range if any of the following are true: # - The new ip range contains all ips of any existing pool # - An existing pool overlaps ips with the start of the new ip range # - An existing pool overlaps ips with the end of the new ip range if (pool_net in ip_range or start_ip in pool_net or end_ip in pool_net): print "Cannot add range - range conflicts with pool %s" % pool.cidr sys.exit(1) cidrs = netaddr.iprange_to_cidrs(start_ip, end_ip) new_pools = [] for ip_net in cidrs: try: new_pools.append(IPPool(ip_net.cidr, ipip=ipip, masquerade=masquerade)) except CidrTooSmallError: pool_strings = [str(net) for net in cidrs] print "IPv%s ranges are split into pools, with the smallest pool " \ "size allowed having a prefix length of /%s. One or more " \ "of the generated pools is too small (prefix length is too " \ "high).\nRange given: %s - %s\nPools: %s\nNo pools added." % \ (version, BLOCK_PREFIXLEN[version], start_ip, end_ip, pool_strings) sys.exit(1) for new_pool in new_pools: client.add_ip_pool(version, new_pool)
def _assign_to_pool(subnet):
    """
    Ensure an IP pool exists for the given subnet and allocate the next
    available address from it.

    :param subnet (str): Subnet to create pool from
    :rtype: (IPPool, IPAddress)
    """
    new_pool = IPPool(subnet)
    # add_ip_pool is a no-op cost-wise if the pool already exists in the
    # datastore - TODO confirm against the datastore client.
    datastore_client.add_ip_pool(IPNetwork(subnet).version, new_pool)

    allocated = IPAddress(SequentialAssignment().allocate(new_pool))
    _log.info("Using Pool %s" % new_pool)
    _log.info("Using IP %s" % allocated)
    return new_pool, allocated
def add_ip_pool(self, cidr, name):
    """
    Create a Calico IP pool for *cidr* plus an allow-all profile named
    *name*, and record the pool<->name mapping in redis.

    Returns the wrapped network, or None when the CIDR is invalid
    (invalid input is deliberately a silent no-op).
    """
    try:
        pool = IPPool(cidr, masquerade=True, ipam=True)
    except (AddrFormatError, ValueError):
        # Invalid CIDR - best-effort: return None rather than raise.
        return

    # create pool
    _ipam.add_ip_pool(4, pool)

    # create profile for pool
    _ipam.create_profile(name)

    # add rules for profile: allow all inbound icmp, tcp and udp
    for proto in ('icmp', 'tcp', 'udp'):
        profile_rule_add_remove('add', name, None, 'allow', 'inbound',
                                proto)

    # Two-way lookup between pool CIDR and network name.
    rds.set(_POOL_NAME_KEY % pool.cidr, name)
    rds.set(_POOL_CIDR_KEY % name, pool.cidr)
    return WrappedNetwork.from_calico(pool, name)
def get_ip_pools(self, version):
    """
    Get the configured IP pools.

    :param version: 4 for IPv4, 6 for IPv6
    :return: List of IPPool.
    """
    assert version in (4, 6)
    path = IP_POOLS_PATH % {"version": str(version)}

    try:
        leaves = self.etcd_client.read(path, recursive=True).leaves
    except EtcdKeyNotFound:
        # No pools directory exists yet.
        return []

    # A recursive read of an empty directory yields the directory node
    # itself, whose value is empty - skip such entries.
    return [IPPool.from_json(leaf.value) for leaf in leaves if leaf.value]
def get_ip_pool_config(self, version, cidr):
    """
    Get the configuration for the given pool.

    :param version: 4 for IPv4, 6 for IPv6
    :param cidr: IPNetwork object representing the pool.
    :return: An IPPool object.
    :raises KeyError: if the CIDR is not a configured pool.
    """
    assert version in (4, 6)
    assert isinstance(cidr, IPNetwork)

    # Collapse host bits so e.g. 10.1.1.1/8 becomes 10.0.0.0/8.
    network = cidr.cidr
    etcd_key = IP_POOL_KEY % {"version": str(version),
                              "pool": str(network).replace("/", "-")}

    try:
        raw = self.etcd_client.read(etcd_key).value
    except EtcdKeyNotFound:
        # Re-raise with a better error message.
        raise KeyError("%s is not a configured IP pool." % network)
    return IPPool.from_json(raw)
def test_create_network(self, m_add_ip_pool, m_write_network, m_create):
    """
    Test create_network: the profile is created and an ipam=False pool
    is added for the supplied IPv4 data.
    """
    request_data = {
        "NetworkID": TEST_NETWORK_ID,
        "IPv4Data": [{"Gateway": "10.0.0.0/8", "Pool": "6.5.4.3/21"}],
        "IPv6Data": [],
        "Options": {"com.docker.network.generic": {}}
    }

    rv = self.app.post('/NetworkDriver.CreateNetwork',
                       data=json.dumps(request_data))

    m_create.assert_called_once_with(TEST_NETWORK_ID)
    m_add_ip_pool.assert_called_once_with(
        4, IPPool("6.5.4.3/21", ipam=False))
    m_write_network.assert_called_once_with(TEST_NETWORK_ID, request_data)
    self.assertDictEqual(json.loads(rv.data), {})
def create_network():
    """
    Docker NetworkDriver CreateNetwork handler.

    Creates a Calico profile for the network and, for each address
    family that is not using Calico IPAM, an ipam=False IP pool.  The
    request JSON is stored for use by later driver calls.
    """
    json_data = request.get_json(force=True)
    app.logger.debug("CreateNetwork JSON=%s", json_data)

    # Create the CNM "network" as a Calico profile.
    network_id = json_data["NetworkID"]
    app.logger.info("Creating profile %s", network_id)
    client.create_profile(network_id)

    for version in (4, 6):
        # Extract the gateway and pool from the network data.  A None
        # gateway means this address family has nothing assigned.
        gateway, pool = get_gateway_pool_from_network_data(json_data,
                                                           version)
        if gateway is None:
            continue

        # If we aren't using Calico IPAM then we need to ensure an IP
        # pool exists.  IPIP and Masquerade options can be included on
        # the network create as additional options.  ipam=False keeps
        # this pool out of Calico IPAM assignment.
        if not is_using_calico_ipam(gateway):
            options = json_data["Options"]["com.docker.network.generic"]
            client.add_ip_pool(
                pool.version,
                IPPool(pool,
                       ipip=options.get("ipip"),
                       masquerade=options.get("nat-outgoing"),
                       ipam=False))

    # Store off the JSON passed in on this request. It's required in
    # later calls.
    client.write_network(network_id, json_data)

    app.logger.debug("CreateNetwork response JSON=%s", "{}")
    return jsonify({})
# See the License for the specific language governing permissions and # limitations under the License. import os import sys import socket import time import netaddr from netaddr import AddrFormatError, IPAddress from pycalico.datastore_datatypes import IPPool from pycalico.datastore_errors import DataStoreError from pycalico.ipam import IPAMClient from pycalico.util import get_host_ips, validate_asn DEFAULT_IPV4_POOL = IPPool("192.168.0.0/16") DEFAULT_IPV6_POOL = IPPool("fd80:24e2:f998:72d6::/64") def _find_pool(ip_addr, ipv4_pools): """ Find the pool containing the given IP. :param ip_addr: IP address to find. :param ipv4_pools: iterable containing IPPools. :return: The pool, or None if not found """ for pool in ipv4_pools: if ip_addr in pool.cidr: return pool else: