def test_ensure_host_tunnel_addr_bad_ip(self, m_client,
                                        m_assign_host_tunnel_addr,
                                        m_get_tunnel_host_ip):
    """
    Verify that a tunnel address outside every configured pool triggers
    reassignment from the IPIP-enabled pools.
    """
    # The currently-configured tunnel address is not in any pool below.
    m_get_tunnel_host_ip.return_value = IPAddress("11.0.0.1")
    all_v4_pools = [IPPool("10.0.0.0/16"), IPPool("10.1.0.0/16", ipip=True)]
    tunnel_pools = [IPPool("10.1.0.0/16", ipip=True)]

    startup._ensure_host_tunnel_addr(all_v4_pools, tunnel_pools)

    # A replacement address must be requested from the IPIP pools only.
    assert_equal(m_assign_host_tunnel_addr.mock_calls, [call(tunnel_pools)])
def test_ensure_host_tunnel_addr_no_ip(self, m_hostname, m_client,
                                       m_assign_host_tunnel_addr,
                                       m_get_tunnel_host_ip):
    """
    Verify that a missing tunnel address triggers assignment from the
    IPIP-enabled pools.
    """
    # No tunnel address is currently configured for this host.
    m_get_tunnel_host_ip.return_value = None
    all_v4_pools = [IPPool("10.0.0.0/16"), IPPool("10.1.0.0/16", ipip=True)]
    tunnel_pools = [IPPool("10.1.0.0/16", ipip=True)]

    calico_ctl.node._ensure_host_tunnel_addr(all_v4_pools, tunnel_pools)

    # An address must be requested from the IPIP pools only.
    assert_equal(m_assign_host_tunnel_addr.mock_calls, [call(tunnel_pools)])
def test_assign_host_tunnel_addr_none_available(self, m_client, m_exit):
    """
    Verify _assign_host_tunnel_addr exits with code 1 when every IPIP
    pool is exhausted.
    """
    # Both pools report no free addresses.
    m_client.auto_assign_ips.side_effect = iter([([], []), ([], [])])
    exhausted_pools = [
        IPPool("10.1.0.0/16", ipip=True),
        IPPool("10.0.0.0/16", ipip=True)
    ]
    # Make the mocked sys.exit abort the call so the test can observe it.
    m_exit.side_effect = Exception

    assert_raises(Exception, startup._assign_host_tunnel_addr,
                  exhausted_pools)
    assert_equal(m_exit.mock_calls, [call(1)])
def test_assign_host_tunnel_addr(self, m_client):
    """
    Verify _assign_host_tunnel_addr stores the first address IPAM hands
    out as the host's IpInIpTunnelAddr config value.
    """
    startup.hostname = "host"
    # First pool full, IP allocated from second pool.
    m_client.auto_assign_ips.side_effect = iter(
        [([], []), ([IPAddress("10.0.0.1")], [])])
    candidate_pools = [
        IPPool("10.1.0.0/16", ipip=True),
        IPPool("10.0.0.0/16", ipip=True)
    ]

    startup._assign_host_tunnel_addr(candidate_pools)

    assert_equal(m_client.set_per_host_config.mock_calls,
                 [call("host", "IpInIpTunnelAddr", "10.0.0.1")])
def ip_pool_range_add(start_ip, end_ip, version, ipip, masquerade):
    """
    Add the range of ip addresses as CIDRs to the IP address allocation
    pool.

    :param start_ip: The first ip address in the ip range.
    :param end_ip: The last ip address in the ip range.
    :param version: 4 or 6
    :param ipip: Use IP in IP for this pool.
    :param masquerade: Enable masquerade (outgoing NAT) for this pool.
    :return: None
    """
    if version == 6 and ipip:
        # Parenthesized print works identically under Python 2 and 3.
        print("IP in IP not supported for IPv6 pools")
        sys.exit(1)

    ip_range = IPRange(start_ip, end_ip)
    pools = client.get_ip_pools(version)
    for pool in pools:
        pool_net = IPNetwork(pool.cidr)
        # Reject the new ip range if any of the following are true:
        # - The new ip range contains all ips of any existing pool
        # - An existing pool overlaps ips with the start of the new ip range
        # - An existing pool overlaps ips with the end of the new ip range
        if (pool_net in ip_range or
                start_ip in pool_net or
                end_ip in pool_net):
            print("Cannot add range - range conflicts with pool %s" %
                  pool.cidr)
            sys.exit(1)

    # Split the arbitrary range into the minimal set of CIDR blocks and
    # create one pool per block.
    cidrs = netaddr.iprange_to_cidrs(start_ip, end_ip)
    for ip_net in cidrs:
        new_pool = IPPool(ip_net.cidr, ipip=ipip, masquerade=masquerade)
        client.add_ip_pool(version, new_pool)
def get_ip_and_pool(ip):
    """
    Resolve *ip* — a literal "ipv4"/"ipv6", a CIDR, or a plain address —
    to a concrete (IPAddress, IPPool) pair, assigning the address in the
    datastore.  Exits the process if the address is already assigned.
    """
    # NOTE(review): ip.lower() runs before the "ip is not None" guard in
    # the elif below, so a None argument would raise here — confirm that
    # callers never pass None.
    if ip.lower() in ("ipv4", "ipv6"):
        # Auto-assign the next available address of the requested family.
        if '4' in ip:
            ip = assign_any(1, 0)[0][0]
        else:
            ip = assign_any(0, 1)[1][0]
        pool = get_pool_or_exit(ip)
    elif ip is not None and '/' in ip:
        # A CIDR was supplied: auto-assign an address from that pool.
        pool = IPPool(ip)
        if IPNetwork(ip).version == 4:
            ip = assign_any(1, 0, pool=(pool, None))[0][0]
        else:
            ip = assign_any(0, 1, pool=(None, pool))[1][0]
    else:
        # Check the IP is in the allocation pool.  If it isn't, BIRD won't
        # export it.
        ip = IPAddress(ip)
        pool = get_pool_or_exit(ip)

    # Assign the IP
    if not client.assign_address(pool, ip):
        print("IP address is already assigned in pool %s " % pool)
        sys.exit(1)

    return (ip, pool)
def ip_pool_add(cidrs, version, ipip, masquerade):
    """
    Add the given CIDRS to the IP address allocation pool.

    :param cidrs: The pools to set in CIDR format, e.g. 192.168.0.0/16
    :param version: 4 or 6
    :param ipip: Use IP in IP for the pool(s).
    :param masquerade: Enable masquerade (outgoing NAT) for the pool(s).
    :return: None
    """
    if version == 6 and ipip:
        print("IP in IP not supported for IPv6 pools")
        sys.exit(1)

    # TODO Reject any cidrs that overlap with existing cidrs in the pool
    # Validate every CIDR before creating anything, so a bad entry means
    # no pools are added at all.
    validated = []
    for cidr in cidrs:
        try:
            validated.append(IPPool(cidr, ipip=ipip, masquerade=masquerade))
        except CidrTooSmallError:
            print("An IPv%s pool must have a prefix length of %s or lower."
                  "\nGiven: %s.\nNo pools added." %
                  (version, BLOCK_PREFIXLEN[version], cidr))
            sys.exit(1)

    for new_pool in validated:
        client.add_ip_pool(version, new_pool)
def get_ip_and_pool(ip):
    """
    Resolve *ip* — a literal "ipv4"/"ipv6", a CIDR, or a plain address —
    to a concrete (IPAddress, IPPool) pair, assigning the address via the
    IPAM client.  Exits the process if the address is already assigned.
    """
    # NOTE(review): ip.lower() runs before the "ip is not None" guard in
    # the elif below, so a None argument would raise here — confirm that
    # callers never pass None.
    if ip.lower() in ("ipv4", "ipv6"):
        # Auto-assign the next available address of the requested family.
        if ip[-1] == '4':
            ip = assign_any(1, 0)[0][0]
        else:
            ip = assign_any(0, 1)[1][0]
        pool = get_pool_or_exit(ip)
    elif ip is not None and '/' in ip:
        # A CIDR was supplied: auto-assign an address from that pool.
        pool = IPPool(ip)
        if IPNetwork(ip).version == 4:
            ip = assign_any(1, 0, pool=(pool, None))[0][0]
        else:
            ip = assign_any(0, 1, pool=(None, pool))[1][0]
    else:
        # Check the IP is in the allocation pool.  If it isn't, BIRD won't
        # export it.
        ip = IPAddress(ip)
        pool = get_pool_or_exit(ip)

    # Assign the IP
    try:
        client.assign_ip(ip, None, {})
    except AlreadyAssignedError:
        print_paragraph("IP address is already assigned in pool "
                        "%s." % pool)
        sys.exit(1)

    return (ip, pool)
def test_container_remove_veth_error(self, m_client, m_get_container_id,
                                     m_enforce_root, m_remove_veth):
    """
    Test for container_remove when remove_veth throws an error

    Assert that the system exits and workload is not removed.
    """
    # Endpoint with a single IPv4 address and no IPv6 addresses.
    endpoint = Mock(spec=Endpoint)
    endpoint.ipv4_nets = {IPNetwork(IPAddress('1.1.1.1'))}
    endpoint.ipv6_nets = set()
    endpoint.endpoint_id = 12
    endpoint.name = "eth1234"
    m_client.get_endpoint.return_value = endpoint
    m_client.get_ip_pools.return_value = [IPPool('1.1.1.1/24')]
    m_get_container_id.return_value = 52
    m_remove_veth.side_effect = CalledProcessError(1, "test")

    # Call function under test expecting a SystemExit
    self.assertRaises(SystemExit, container.container_remove, 'container1')

    # Assert
    m_enforce_root.assert_called_once_with()
    m_get_container_id.assert_called_once_with("container1")
    m_client.get_endpoint.assert_called_once_with(
        hostname=utils.hostname,
        orchestrator_id=utils.ORCHESTRATOR_ID,
        workload_id=52)
    m_client.get_ip_pools.assert_called_once_with(4)
    # The workload must survive a failed veth removal.
    self.assertFalse(m_client.remove_workload.called)
def create_network():
    """
    CNM CreateNetwork handler: create a Calico profile for the network,
    an IP pool per supplied address family, and persist the request JSON
    for later driver calls.
    """
    # force is required since the request doesn't have the correct mimetype.
    # If the JSON is malformed, a BadRequest exception is raised, which
    # returns a HTTP 400 response.
    json_data = request.get_json(force=True)
    app.logger.debug("CreateNetwork JSON=%s", json_data)

    # Create the CNM "network" as a Calico profile.
    network_id = json_data["NetworkID"]
    app.logger.info("Creating profile %s", network_id)
    client.create_profile(network_id)

    # Create a calico Pool for each CNM pool that was passed in.
    for version in (4, 6):
        ip_data = json_data["IPv%sData" % version]
        if ip_data:
            client.add_ip_pool(version, IPPool(ip_data[0]['Pool']))

    # Store off the JSON passed in on this request.  It's required later:
    # - CreateEndpoint needs it for the gateway address.
    # - DeleteNetwork needs it to clean up the pool.
    client.write_network(network_id, json_data)

    app.logger.debug("CreateNetwork response JSON=%s", "{}")
    return jsonify({})
def test_container_remove(self, m_netns, m_client, m_get_container_id,
                          m_enforce_root):
    """
    Test for container_remove of calicoctl container command
    """
    m_get_container_id.return_value = 666
    # Endpoint with a single IPv4 address and no IPv6 addresses.
    endpoint = Mock(spec=Endpoint)
    endpoint.ipv4_nets = {IPNetwork(IPAddress('1.1.1.1'))}
    endpoint.ipv6_nets = set()
    endpoint.endpoint_id = 12
    endpoint.name = "eth1234"
    m_client.get_endpoint.return_value = endpoint
    m_client.get_ip_pools.return_value = [IPPool('1.1.1.1/24')]

    # Call method under test
    container.container_remove('container1')

    # Assert
    m_enforce_root.assert_called_once_with()
    m_get_container_id.assert_called_once_with('container1')
    m_client.get_endpoint.assert_called_once_with(
        hostname=utils.hostname,
        orchestrator_id=utils.ORCHESTRATOR_ID,
        workload_id=666)
    self.assertEqual(m_client.unassign_address.call_count, 1)
    m_netns.remove_veth.assert_called_once_with("eth1234")
    m_client.remove_workload.assert_called_once_with(
        utils.hostname, utils.ORCHESTRATOR_ID, 666)
def test_request_pool_invalid_pool_defined(self, m_get_pools):
    """
    Test request_pool errors if an invalid pool is requested.
    """
    payload = {"Pool": "1.2.3.0/24", "SubPool": "", "V6": False}
    # The configured pool does not match the requested one.
    m_get_pools.return_value = [IPPool("1.2.4.0/24")]

    rv = self.app.post('/IpamDriver.RequestPool', data=json.dumps(payload))

    self.assertTrue("Err" in json.loads(rv.data))
def test_add_overlapping_existing_pool_2(self, m_client):
    """
    Test ip_pool_add exits when a pool is added that fully encompasses
    an existing pool.
    """
    m_client.get_ip_pools.return_value = [IPPool("10.10.10.0/26")]

    with patch('sys.exit', autospec=True) as m_sys_exit:
        # The new /24 fully contains the existing /26.
        pool.ip_pool_add(cidrs=["10.10.10.0/24"], version=4,
                         ipip=False, masquerade=False)
        self.assertTrue(m_sys_exit.called)
def test_request_address_assign_ipv4_from_invalid_subnet(self, m_auto_assign,
                                                         m_get_pools):
    """
    Test request_address when IPv4 address is auto-assigned from an
    invalid subnet.
    """
    payload = {"PoolID": "1.2.3.0/24", "Address": ""}
    m_auto_assign.return_value = ([], [IPAddress("1.2.3.4")])
    # The requested pool is not among the configured pools.
    m_get_pools.return_value = [IPPool("1.2.5.0/24")]

    rv = self.app.post('/IpamDriver.RequestAddress', data=json.dumps(payload))

    self.assertTrue("Err" in json.loads(rv.data))
def ip_pool_add(cidrs, version, ipip, masquerade):
    """
    Add the given CIDRS to the IP address allocation pool.

    :param cidrs: The pools to set in CIDR format, e.g. 192.168.0.0/16
    :param version: 4 or 6
    :param ipip: Use IP in IP for the pool(s).
    :param masquerade: Enable masquerade (outgoing NAT) for the pool(s).
    :return: None
    """
    if version == 6 and ipip:
        print("IP in IP not supported for IPv6 pools")
        sys.exit(1)

    current_pools = client.get_ip_pools(version)
    new_pools = []

    # Ensure new pools are valid and do not overlap with each other or
    # existing pools; nothing is written unless every CIDR passes.
    for cidr in cidrs:
        try:
            candidate = IPPool(cidr, ipip=ipip, masquerade=masquerade)
        except InvalidBlockSizeError:
            print("An IPv%s pool must have a prefix length of %s or lower."
                  "\nGiven: %s.\nNo pools added." %
                  (version, BLOCK_PREFIXLEN[version], cidr))
            sys.exit(1)

        # Check if new pool overlaps with any existing pool
        clash = _get_overlapping_pool(candidate, current_pools)
        if clash:
            print("Cannot add IP pool %s - pool overlaps with an "
                  "existing pool %s" % (cidr, clash.cidr))
            sys.exit(1)

        # Check if this new pool overlaps with any other new pool
        clash = _get_overlapping_pool(candidate, new_pools)
        if clash:
            print("Cannot add IP pool %s - pool overlaps with another "
                  "new pool %s" % (cidr, clash.cidr))
            sys.exit(1)

        # Append pool to pending list of new pools to add to Calico
        new_pools.append(candidate)

    # Make client call to add each pool to Calico
    for new_pool in new_pools:
        client.add_ip_pool(version, new_pool)
def test_request_address_assign_ipv4_from_subnet(self, m_auto_assign,
                                                 m_get_pools):
    """
    Test request_address when IPv4 address is auto-assigned from a
    valid subnet.
    """
    payload = {"PoolID": "1.2.3.0/24", "Address": ""}
    ip = IPAddress("1.2.3.4")
    m_auto_assign.return_value = ([], [ip])
    # The requested pool is configured, so assignment should succeed.
    m_get_pools.return_value = [IPPool("1.2.3.0/24")]

    rv = self.app.post('/IpamDriver.RequestAddress', data=json.dumps(payload))

    self.assertDictEqual(json.loads(rv.data),
                         {"Address": str(IPNetwork(ip)), "Data": {}})
def test_create_network(self, m_add_ip_pool, m_write_network, m_create):
    """
    Test create_network
    """
    request_json = '{"NetworkID": "%s", ' \
                   '"IPv4Data":[{"Pool": "6.5.4.3/21"}],' \
                   '"IPv6Data":[]' \
                   '}' % TEST_NETWORK_ID

    rv = self.app.post('/NetworkDriver.CreateNetwork', data=request_json)

    # A profile and a v4 pool are created, and the raw JSON is stored.
    m_create.assert_called_once_with(TEST_NETWORK_ID)
    m_add_ip_pool.assert_called_once_with(4, IPPool("6.5.4.3/21"))
    m_write_network.assert_called_once_with(TEST_NETWORK_ID,
                                            json.loads(request_json))
    self.assertDictEqual(json.loads(rv.data), {})
def ip_pool_add(cidr_pool, version, ipip, masquerade):
    """
    Add the given CIDR range to the IP address allocation pool.

    :param cidr_pool: The pool to set in CIDR format, e.g. 192.168.0.0/16
    :param version: v4 or v6
    :param ipip: Use IP in IP for this pool.
    :param masquerade: Enable masquerade (outgoing NAT) for this pool.
    :return: None
    """
    if version == "v6" and ipip:
        # Parenthesized print works identically under Python 2 and 3.
        print("IP in IP not supported for IPv6 pools")
        sys.exit(1)

    # Validate the CIDR against the requested version before creating
    # the pool.
    cidr = check_ip_version(cidr_pool, version, IPNetwork)
    pool = IPPool(cidr, ipip=ipip, masquerade=masquerade)
    client.add_ip_pool(version, pool)
def test_request_pool_valid_ipv4_pool_defined(self, m_get_pools):
    """
    Test request_pool succeeds when a valid (configured) IPv4 pool is
    requested.

    Note: the original docstring claimed this request "errors", but the
    assertions check a successful response body.
    """
    payload = {"Pool": "1.2.3.4/26", "SubPool": "", "V6": False}
    # The requested pool matches a configured pool.
    m_get_pools.return_value = [IPPool("1.2.3.4/26")]

    rv = self.app.post('/IpamDriver.RequestPool', data=json.dumps(payload))

    expected = {
        "PoolID": "1.2.3.4/26",
        "Pool": "0.0.0.0/0",
        "Data": {
            "com.docker.network.gateway": "0.0.0.0/0"
        }
    }
    self.assertDictEqual(json.loads(rv.data), expected)
def test_ip_pool_remove(self, m_client, m_sleep):
    """
    Test mainline function of ip_pool_remove.
    """
    net1 = IPNetwork("1.2.3.0/24")
    # Keep raising HostAffinityClaimedError so ip_pool_remove eventually
    # gives up and exits.
    m_client.remove_ip_pool.side_effect = HostAffinityClaimedError
    pool_config = IPPool(net1)
    pool_config.cidr = net1.ip
    m_client.get_ip_pool_config.return_value = pool_config

    self.assertRaises(SystemExit, pool.ip_pool_remove, [str(net1)], 4)

    # The pool is fetched, disabled, and its affinities released before
    # removal is attempted.
    m_client.get_ip_pool_config.assert_called_once_with(4, net1)
    m_client.set_ip_pool_config.assert_called_once_with(4, pool_config)
    self.assertEqual(pool_config.disabled, True)
    m_client.release_pool_affinities.assert_called_once_with(pool_config)
    m_client.remove_ip_pool.assert_called_once_with(4, net1.ip)
def ip_pool_add(cidrs, version, ipip, masquerade):
    """
    Add the given CIDRS to the IP address allocation pool.

    :param cidrs: The pools to set in CIDR format, e.g. 192.168.0.0/16
    :param version: 4 or 6
    :param ipip: Use IP in IP for this pool.
    :return: None
    """
    if version == 6 and ipip:
        print("IP in IP not supported for IPv6 pools")
        sys.exit(1)

    # TODO Reject any cidrs that overlap with existing cidrs in the pool
    for cidr in cidrs:
        client.add_ip_pool(version,
                           IPPool(cidr, ipip=ipip, masquerade=masquerade))
def ip_pool_range_add(start_ip, end_ip, version, ipip, masquerade):
    """
    Add the range of ip addresses as CIDRs to the IP address allocation
    pool.

    :param start_ip: The first ip address in the ip range.
    :param end_ip: The last ip address in the ip range.
    :param version: 4 or 6
    :param ipip: Use IP in IP for the pool(s).
    :param masquerade: Enable masquerade (outgoing NAT) for the pool(s).
    :return: None
    """
    if version == 6 and ipip:
        print("IP in IP not supported for IPv6 pools")
        sys.exit(1)

    ip_range = IPRange(start_ip, end_ip)
    for existing in client.get_ip_pools(version):
        existing_net = IPNetwork(existing.cidr)
        # Reject the new ip range if any of the following are true:
        # - The new ip range contains all ips of any existing pool
        # - An existing pool overlaps ips with the start of the new ip range
        # - An existing pool overlaps ips with the end of the new ip range
        if (existing_net in ip_range or
                start_ip in existing_net or
                end_ip in existing_net):
            print("Cannot add range - range conflicts with pool %s" %
                  existing.cidr)
            sys.exit(1)

    # Split the range into CIDR blocks; reject the whole request if any
    # block is too small to be a pool.
    cidrs = netaddr.iprange_to_cidrs(start_ip, end_ip)
    new_pools = []
    for ip_net in cidrs:
        try:
            new_pools.append(IPPool(ip_net.cidr, ipip=ipip,
                                    masquerade=masquerade))
        except CidrTooSmallError:
            pool_strings = [str(net) for net in cidrs]
            print("IPv%s ranges are split into pools, with the smallest pool "
                  "size allowed having a prefix length of /%s. One or more "
                  "of the generated pools is too small (prefix length is too "
                  "high).\nRange given: %s - %s\nPools: %s\nNo pools added." %
                  (version, BLOCK_PREFIXLEN[version], start_ip, end_ip,
                   pool_strings))
            sys.exit(1)

    for new_pool in new_pools:
        client.add_ip_pool(version, new_pool)
def _assign_to_pool(subnet):
    """
    Take subnet (str), create IP pool in datastore if none exists.
    Allocate next available IP in pool

    :param subnet (str): Subnet to create pool from
    :rtype: (IPPool, IPAddress)
    """
    pool = IPPool(subnet)
    version = IPNetwork(subnet).version
    datastore_client.add_ip_pool(version, pool)
    candidate = SequentialAssignment().allocate(pool)
    candidate = IPAddress(candidate)

    # Pass lazy %-style args to the logger instead of pre-formatting, so
    # the string is only built when INFO logging is enabled.
    _log.info("Using Pool %s", pool)
    _log.info("Using IP %s", candidate)

    return pool, candidate
def add_ip_pool(self, cidr, name):
    """
    Create an IPAM-managed, masquerading pool for *cidr* plus a matching
    profile *name* with allow-all inbound icmp/tcp/udp rules, recording
    the pool<->name mapping in redis.  Returns None if *cidr* is
    malformed.
    """
    try:
        pool = IPPool(cidr, masquerade=True, ipam=True)
    except (AddrFormatError, ValueError):
        return

    # create pool
    _ipam.add_ip_pool(4, pool)
    # create profile for pool
    _ipam.create_profile(name)

    # add allow-all inbound rules for the profile
    for proto in ('icmp', 'tcp', 'udp'):
        profile_rule_add_remove('add', name, None, 'allow', 'inbound', proto)

    rds.set(_POOL_NAME_KEY % pool.cidr, name)
    rds.set(_POOL_CIDR_KEY % name, pool.cidr)
    return WrappedNetwork.from_calico(pool, name)
def test_create_network(self, m_add_ip_pool, m_write_network, m_create):
    """
    Test create_network
    """
    request_data = {
        "NetworkID": TEST_NETWORK_ID,
        "IPv4Data": [{"Gateway": "10.0.0.0/8", "Pool": "6.5.4.3/21"}],
        "IPv6Data": [],
        "Options": {"com.docker.network.generic": {}}
    }

    rv = self.app.post('/NetworkDriver.CreateNetwork',
                       data=json.dumps(request_data))

    m_create.assert_called_once_with(TEST_NETWORK_ID)
    # The backing v4 pool must be created with ipam=False.
    m_add_ip_pool.assert_called_once_with(4,
                                          IPPool("6.5.4.3/21", ipam=False))
    m_write_network.assert_called_once_with(TEST_NETWORK_ID, request_data)
    self.assertDictEqual(json.loads(rv.data), {})
def create_network():
    """
    CNM CreateNetwork handler.  Creates a profile for the network and,
    when Calico IPAM is not in use for an address family, a backing
    (ipam=False) IP pool; the request JSON is stored for later calls.
    """
    json_data = request.get_json(force=True)
    app.logger.debug("CreateNetwork JSON=%s", json_data)

    # Create the CNM "network" as a Calico profile.
    network_id = json_data["NetworkID"]
    app.logger.info("Creating profile %s", network_id)
    client.create_profile(network_id)

    for version in (4, 6):
        # Extract the gateway and pool from the network data.  If this
        # indicates that Calico IPAM is not being used, then create a
        # Calico IP pool.
        gateway, pool = get_gateway_pool_from_network_data(json_data,
                                                           version)
        if gateway is None:
            # No gateway assigned for this address family: nothing to do.
            continue

        if not is_using_calico_ipam(gateway):
            # Non-Calico IPAM: ensure a matching IP pool exists.  IPIP
            # and masquerade can be passed as generic options on network
            # create.  The pool is created with ipam=False so Calico
            # IPAM never assigns addresses from it.
            options = json_data["Options"]["com.docker.network.generic"]
            client.add_ip_pool(
                pool.version,
                IPPool(pool,
                       ipip=options.get("ipip"),
                       masquerade=options.get("nat-outgoing"),
                       ipam=False))

    # Store off the JSON passed in on this request.  It's required in
    # later calls.
    client.write_network(network_id, json_data)

    app.logger.debug("CreateNetwork response JSON=%s", "{}")
    return jsonify({})
# See the License for the specific language governing permissions and # limitations under the License. import os import sys import socket import time import netaddr from netaddr import AddrFormatError, IPAddress from pycalico.datastore_datatypes import IPPool from pycalico.datastore_errors import DataStoreError from pycalico.ipam import IPAMClient from pycalico.util import get_host_ips, validate_asn DEFAULT_IPV4_POOL = IPPool("192.168.0.0/16") DEFAULT_IPV6_POOL = IPPool("fd80:24e2:f998:72d6::/64") def _find_pool(ip_addr, ipv4_pools): """ Find the pool containing the given IP. :param ip_addr: IP address to find. :param ipv4_pools: iterable containing IPPools. :return: The pool, or None if not found """ for pool in ipv4_pools: if ip_addr in pool.cidr: return pool else:
def test_node_start(self, m_root, m_attach_and_stream,
                    m_find_or_pull_node_image, m_docker, m_docker_client,
                    m_client, m_conntrack, m_setup_ip, m_check_system,
                    m_container, m_call, m_os_makedirs, m_os_path_exists,
                    m_ipv6_enabled):
    """
    Mainline test of node_start with no_pull=True: the existing
    calico-node container is replaced, created with the expected
    environment/binds/volumes, and started — and the node image is NOT
    pulled (m_find_or_pull_node_image must not be called).
    """
    # Set up mock objects: not running inside a container, not root, and
    # the log directory does not yet exist (so it must be created).
    m_container.return_value = False
    m_root.return_value = False
    m_os_path_exists.return_value = False
    ip_2 = '2.2.2.2'
    m_docker_client.create_host_config.return_value = 'host_config'
    container = {'Id': 666}
    m_docker_client.create_container.return_value = container
    m_check_system.return_value = [True, True, True]
    ipv4_pools = [
        IPPool(IPNetwork("10.0.0.0/16")),
        IPPool(IPNetwork("10.1.0.0/16"), ipip=True)
    ]
    ipip_pools = [IPPool(IPNetwork("10.1.0.0/16"), ipip=True)]
    m_client.get_ip_pools.return_value = ipv4_pools

    # Set up arguments
    node_image = 'node_image'
    runtime = 'docker'
    log_dir = './log_dir'
    ip = '2.2.2.2'
    ip6 = 'aa:bb::zz'
    as_num = ''
    detach = False
    libnetwork = False
    # Don't pull the node image
    no_pull = True

    # Call method under test
    node.node_start(node_image, runtime, log_dir, ip, ip6, as_num, detach,
                    libnetwork, no_pull)

    # Set up variables used in assertion statements
    environment = [
        "HOSTNAME=%s" % node.hostname,
        "IP=%s" % ip_2,
        "IP6=%s" % ip6,
        "CALICO_NETWORKING=%s" % node.CALICO_NETWORKING_DEFAULT,
        "AS=",
        "NO_DEFAULT_POOLS=",
        "ETCD_AUTHORITY=%s" % ETCD_AUTHORITY_DEFAULT,  # etcd host:port
        "ETCD_SCHEME=%s" % ETCD_SCHEME_DEFAULT,
    ]
    binds = {
        log_dir: {"bind": "/var/log/calico", "ro": False},
        "/var/run/calico": {"bind": "/var/run/calico", "ro": False},
        "/lib/modules": {"bind": "/lib/modules", "ro": False}
    }

    # Assert
    m_os_path_exists.assert_called_once_with(log_dir)
    m_os_makedirs.assert_called_once_with(log_dir)
    m_check_system.assert_called_once_with(quit_if_error=False,
                                           libnetwork=libnetwork,
                                           check_docker=True,
                                           check_modules=True)
    m_setup_ip.assert_called_once_with()
    # Any pre-existing calico-node container is force-removed first.
    m_docker_client.remove_container.assert_called_once_with('calico-node',
                                                             force=True)
    m_docker_client.create_host_config.assert_called_once_with(
        privileged=True,
        restart_policy={"Name": "always"},
        network_mode="host",
        binds=binds)
    # no_pull=True means the image must not be fetched.
    self.assertFalse(m_find_or_pull_node_image.called)
    m_docker_client.create_container.assert_called_once_with(
        node_image,
        name='calico-node',
        detach=True,
        environment=environment,
        host_config='host_config',
        volumes=['/var/log/calico', "/var/run/calico", "/lib/modules"])
    m_docker_client.start.assert_called_once_with(container)
    m_attach_and_stream.assert_called_once_with(container, False)
def test_assign_to_pool(self, m_seq, m_client):
    """
    Verify _assign_to_pool creates the pool covering the test subnet.
    """
    m_seq.return_value = '10.22.0.1'

    calico_rkt._assign_to_pool(subnet=ARGS['subnet'])

    m_client.add_ip_pool.assert_called_once_with(4, IPPool("10.22.0.0/16"))
class RktPluginTest(unittest.TestCase):
    """
    Unit tests for the calico_rkt CNI plugin: command dispatch, endpoint
    create/remove, profile handling and pool assignment.  All datastore
    access goes through the mocked calico_rkt.datastore_client.
    """

    @patch('calico_rkt.create', autospec=True)
    def test_main_ADD(self, m_create):
        """The ADD command dispatches to create()."""
        ARGS['command'] = 'ADD'
        calico_rkt.calico_rkt(ARGS)
        m_create.assert_called_once_with(ARGS)

    @patch('calico_rkt.delete', autospec=True)
    def test_main_DEL(self, m_delete):
        """The DEL command dispatches to delete()."""
        ARGS['command'] = 'DEL'
        calico_rkt.calico_rkt(ARGS)
        m_delete.assert_called_once_with(ARGS)

    @patch('calico_rkt.datastore_client', autospec=True)
    @patch('calico_rkt._create_calico_endpoint', autospec=True)
    @patch('calico_rkt._set_profile_on_endpoint', autospec=True)
    def test_create(self, m_set_profile, m_create_ep, m_client):
        """create() builds an endpoint and sets a profile on it."""
        ip_ = '1.2.3.4/32'
        # Expected netns path assembled from the plugin arguments.
        path_ = '%s/%s/%s' % (NETNS_ROOT, ARGS['container_id'],
                              ARGS['netns'])
        mock_ep = Mock()
        mock_ep.ipv4_nets = set()
        mock_ep.ipv4_nets.add(ip_)
        m_create_ep.return_value = mock_ep

        calico_rkt.create(ARGS)

        m_create_ep.assert_called_once_with(
            container_id=ARGS['container_id'],
            netns_path=path_,
            interface=ARGS['interface'],
            subnet=ARGS['subnet'])
        m_set_profile.assert_called_once_with(endpoint=mock_ep,
                                              profile_name="test")

    @patch('calico_rkt.HOSTNAME', autospec=True)
    @patch('calico_rkt.datastore_client', autospec=True)
    @patch('calico_rkt._container_add', return_value=('ep', 'ip'),
           autospec=True)
    def test_create_calico_endpoint(self, m_con_add, m_client, m_host):
        """When no endpoint exists, _container_add is invoked."""
        m_client.get_endpoint.return_value = None
        # KeyError from get_endpoint signals "no existing endpoint".
        m_client.get_endpoint.side_effect = KeyError()
        id_, path_ = 'testcontainer', 'path/to/ns'

        calico_rkt._create_calico_endpoint(container_id=id_,
                                           netns_path=path_,
                                           interface=ARGS['interface'],
                                           subnet=ARGS['subnet'])

        m_client.get_endpoint.assert_called_once_with(
            hostname=m_host,
            orchestrator_id=ORCHESTRATOR_ID,
            workload_id=id_)
        m_con_add.assert_called_once_with(hostname=m_host,
                                          orchestrator_id=ORCHESTRATOR_ID,
                                          container_id=id_,
                                          netns_path=path_,
                                          interface=ARGS['interface'],
                                          subnet=ARGS['subnet'])

    @patch("sys.exit", autospec=True)
    @patch('calico_rkt.HOSTNAME', autospec=True)
    @patch('calico_rkt.datastore_client', autospec=True)
    @patch('calico_rkt._container_add', return_value=('ep', 'ip'),
           autospec=True)
    def test_create_calico_endpoint_fail(self, m_con_add, m_client, m_host,
                                         m_sys_exit):
        """An already-existing endpoint causes sys.exit(1)."""
        m_client.get_endpoint.return_value = "Endpoint Exists"
        id_, path_ = 'testcontainer', 'path/to/ns'

        calico_rkt._create_calico_endpoint(container_id=id_,
                                           netns_path=path_,
                                           interface=ARGS['interface'],
                                           subnet=ARGS['subnet'])

        m_client.get_endpoint.assert_called_once_with(
            hostname=m_host,
            orchestrator_id=ORCHESTRATOR_ID,
            workload_id=id_)
        m_sys_exit.assert_called_once_with(1)

    @patch('calico_rkt.HOSTNAME', autospec=True)
    @patch('calico_rkt.datastore_client', autospec=True)
    @patch('calico_rkt._assign_to_pool',
           return_value=(IPPool('1.2.0.0/16'), IPAddress('1.2.3.4')),
           autospec=True)
    def test_container_add(self, m_assign_pool, m_client, m_host):
        """_container_add assigns an IP, provisions a veth and stores
        the endpoint."""
        m_ep = Mock()
        m_client.create_endpoint.return_value = m_ep
        m_ep.provision_veth.return_value = 'macaddress'
        id_, path_ = 'testcontainer', 'path/to/ns'

        calico_rkt._container_add(hostname=m_host,
                                  orchestrator_id=ORCHESTRATOR_ID,
                                  container_id=id_,
                                  netns_path=path_,
                                  interface=ARGS['interface'],
                                  subnet=ARGS['subnet'])

        m_assign_pool.assert_called_once_with(ARGS['subnet'])
        m_client.create_endpoint.assert_called_once_with(
            m_host, ORCHESTRATOR_ID, id_, [IPAddress('1.2.3.4')])
        m_ep.provision_veth.assert_called_once()
        m_client.set_endpoint.assert_called_once_with(m_ep)

    @patch('calico_rkt.HOSTNAME', autospec=True)
    @patch('calico_rkt.datastore_client', autospec=True)
    @patch('pycalico.netns', autospec=True)
    def test_container_remove(self, m_netns, m_client, m_host):
        """_container_remove unassigns addresses and removes the
        workload."""
        m_ep = Mock()
        m_ep.ipv4_nets = set()
        m_ep.ipv4_nets.add(IPNetwork('1.2.3.4/32'))
        m_ep.ipv6_nets = set()
        m_ep.name = 'endpoint_test'
        m_client.get_endpoint.return_value = m_ep
        id_ = '123'

        calico_rkt._container_remove(hostname=m_host,
                                     orchestrator_id=ORCHESTRATOR_ID,
                                     container_id=id_)

        m_client.get_endpoint.assert_called_once_with(
            hostname=m_host,
            orchestrator_id=ORCHESTRATOR_ID,
            workload_id=id_)
        m_client.remove_workload.assert_called_once_with(
            hostname=m_host,
            orchestrator_id=ORCHESTRATOR_ID,
            workload_id=id_)
        m_client.unassign_address.assert_called_once_with(
            None, IPAddress('1.2.3.4'))

    @patch('calico_rkt.datastore_client', autospec=True)
    def test_set_profile_on_endpoint(self, m_client):
        """A missing profile is created before being applied."""
        m_client.profile_exists.return_value = False
        m_ep = Mock()
        m_ep.endpoint_id = '1234'
        p_name, ip_ = 'profile', '1.2.3.4'

        calico_rkt._set_profile_on_endpoint(endpoint=m_ep,
                                            profile_name=p_name)

        m_client.profile_exists.assert_called_once_with(p_name)
        m_client.create_profile.assert_called_once_with(p_name)
        m_client.set_profiles_on_endpoint.assert_called_once_with(
            profile_names=[p_name], endpoint_id='1234')

    @patch('calico_rkt.datastore_client', autospec=True)
    def test_create_assign_rules(self, m_client):
        """_assign_default_rules updates the fetched profile's rules."""
        m_profile = Mock()
        m_client.get_profile.return_value = m_profile
        p_name = 'profile'

        calico_rkt._assign_default_rules(profile_name=p_name)

        m_client.get_profile.assert_called_once_with(p_name)
        m_client.profile_update_rules.assert_called_once_with(m_profile)

    @patch('calico_rkt.datastore_client', autospec=True)
    @patch('pycalico.ipam.SequentialAssignment.allocate', autospec=True)
    def test_assign_to_pool(self, m_seq, m_client):
        """_assign_to_pool creates the pool covering the test subnet."""
        m_seq.return_value = '10.22.0.1'
        calico_rkt._assign_to_pool(subnet=ARGS['subnet'])
        m_client.add_ip_pool.assert_called_once_with(4,
                                                     IPPool("10.22.0.0/16"))