def create_profile(self, name):
    """
    Create a policy profile.

    By default, endpoints in a profile accept traffic only from other
    endpoints in that profile, but can send traffic anywhere.

    Note this will clobber any existing profile with this name.

    :param name: Unique string name for the profile.
    :return: nothing.
    """
    profile_path = PROFILE_PATH % {"profile_id": name}

    # Tag the profile with its own name so members can match each other.
    self.etcd_client.write(profile_path + "tags", '["%s"]' % name)

    # Inbound: accept only from endpoints carrying our own tag.
    # Outbound: allow everything.  No explicit default-deny is needed on
    # outbound traffic because Felix drops any packet no profile accepts,
    # and a dropped packet can never be accepted later by another profile
    # on the endpoint.
    inbound = [Rule(action="allow", src_tag=name)]
    outbound = [Rule(action="allow")]
    profile_rules = Rules(id=name,
                          inbound_rules=inbound,
                          outbound_rules=outbound)
    self.etcd_client.write(profile_path + "rules", profile_rules.to_json())
def run(self):
    """
    Controller.run() is called at program init to spawn watch threads.

    Loops to read responses from the Queue as they come in.

    :return: does not return under normal operation (blocks in
        read_updates()).
    """
    _log.info("Leader election enabled? %s", self._leader_elect)
    if self._leader_elect:
        # Wait until we've been elected leader to start, then keep the
        # leadership claim alive from a background thread.
        self._wait_for_leadership()
        self._start_leader_thread()

    # Ensure the tier exists before any policies are written into it.
    metadata = {"order": NET_POL_TIER_ORDER}
    self._client.set_policy_tier_metadata(NET_POL_TIER_NAME, metadata)

    # Ensure the backstop policy exists.  This policy forwards
    # any traffic to Kubernetes pods which doesn't match another policy
    # to the next-tier (i.e. the per-namespace Profiles).
    selector = "has(%s)" % K8S_NAMESPACE_LABEL
    rules = Rules(inbound_rules=[Rule(action="next-tier")],
                  outbound_rules=[Rule(action="next-tier")])
    self._client.create_policy(NET_POL_TIER_NAME,
                               "k8s-policy-no-match",
                               selector,
                               order=NET_POL_BACKSTOP_ORDER,
                               rules=rules)

    # Read initial state from Kubernetes API.
    self.start_workers()

    # Loop and read updates from the queue.
    self.read_updates()
def add_update_namespace(namespace):
    """
    Configures a Profile for the given Kubernetes namespace.

    :param namespace: Decoded namespace API object.
    """
    namespace_name = namespace["metadata"]["name"]
    _log.debug("Adding/updating namespace: %s", namespace_name)

    # Name of the Calico profile which represents this namespace.
    profile_name = NS_PROFILE_FMT % namespace_name

    # Namespace profiles allow everything in both directions; isolation
    # is layered on top by other policy.
    rules = Rules(inbound_rules=[Rule(action="allow")],
                  outbound_rules=[Rule(action="allow")])

    # Inherit the namespace's labels onto the profile, rewriting each key
    # with a special prefix marking it as namespace-derived.
    ns_labels = namespace["metadata"].get("labels", {})
    labels = {NS_LABEL_KEY_FMT % k: v for k, v in ns_labels.iteritems()}
    _log.debug("Generated namespace labels: %s", labels)

    # Create the Calico profile for this namespace, or update it if it
    # already exists.
    client.create_profile(profile_name, rules, labels)
    _log.debug("Created/updated profile for namespace %s", namespace_name)
def test_correct_rules_for_host_profile(self, m_datastore, m_get_host_ip_net):
    """Host profile must allow traffic to/from the host IP network only."""
    profile = Mock(spec=Profile)
    m_datastore.get_profile.return_value = profile

    calico_mesos._create_profile_for_host_communication("default")

    rules = profile.rules
    expected_inbound = Rule(action="allow", src_net=self.HOST_IP_NET)
    expected_outbound = Rule(action="allow", dst_net=self.HOST_IP_NET)
    self.assertIn(expected_inbound, rules.inbound_rules)
    self.assertIn(expected_outbound, rules.outbound_rules)
    # Exactly one rule per direction - nothing extra was added.
    total_rules = len(rules.inbound_rules) + len(rules.outbound_rules)
    self.assertEqual(total_rules, 2)
def test_generate_rules(self):
    """The driver's default rules should allow all traffic both ways."""
    generated = self.driver.generate_rules()

    expected = Rules(id=self.network_name,
                     inbound_rules=[Rule(action="allow")],
                     outbound_rules=[Rule(action="allow")])
    assert_equal(generated, expected)
def test_correct_rules_for_netgroup_profile(self, m_datastore):
    """Netgroup profile: inbound only from its own tag, outbound open."""
    profile = Mock(spec=Profile)
    m_datastore.get_profile.return_value = profile

    calico_mesos._create_profile_for_netgroup("prof_a")

    rules = profile.rules
    self.assertIn(Rule(action="allow", src_tag="prof_a"),
                  rules.inbound_rules)
    self.assertIn(Rule(action="allow"), rules.outbound_rules)
    # Exactly one rule per direction - nothing extra was added.
    self.assertEqual(
        len(rules.inbound_rules) + len(rules.outbound_rules), 2)
def test_correct_rules_for_public_profile(self, m_datastore):
    """Public profile must allow all traffic in both directions."""
    profile = Mock(spec=Profile)
    m_datastore.get_profile.return_value = profile

    calico_mesos._create_profile_for_public_communication("public")

    rules = profile.rules
    self.assertIn(Rule(action="allow"), rules.inbound_rules)
    self.assertIn(Rule(action="allow"), rules.outbound_rules)
    # Exactly one rule per direction - nothing extra was added.
    self.assertEqual(
        len(rules.inbound_rules) + len(rules.outbound_rules), 2)
def test_generate_rules_mainline(self, m_get_pod):
    """Generated rules should isolate the pod on namespace boundaries."""
    generated = self.driver.generate_rules()

    # Inbound only from the namespace tag, outbound unrestricted.
    expected = Rules(id=self.profile_name,
                     inbound_rules=[Rule(action="allow",
                                         src_tag="namespace_namespace")],
                     outbound_rules=[Rule(action="allow")])
    assert_equal(generated, expected)
def test_generate_rules_kube_system(self, m_get_pod):
    """Pods in the kube-system namespace should get allow-all rules."""
    self.driver.namespace = "kube-system"

    generated = self.driver.generate_rules()

    expected = Rules(id=self.profile_name,
                     inbound_rules=[Rule(action="allow")],
                     outbound_rules=[Rule(action="allow")])
    assert_equal(generated, expected)
def test_generate_rules_annotations(self, m_get_pod):
    """Policy annotations should drive the generated inbound rules."""
    # Stub _get_metadata to return a policy annotation.
    self.driver._get_metadata = MagicMock(spec=self.driver._get_metadata)
    self.driver._get_metadata.return_value = {
        "projectcalico.org/policy": "allow tcp"
    }

    generated = self.driver.generate_rules()

    # The annotation should yield a single allow-tcp inbound rule.
    expected = Rules(id=self.profile_name,
                     inbound_rules=[Rule(action="allow", protocol="tcp")],
                     outbound_rules=[Rule(action="allow")])
    assert_equal(generated, expected)
def test_profile_rule_add_remove_add_rule_front_of_list(
        self, direction_arg, position_arg):
    """
    Adding a new Rule at a position <= 0 (including out-of-range
    positions) must insert it at the front of the inbound or outbound
    rules list.  Exercised for both directions.
    """
    with patch('calico_ctl.profile.client', autospec=True) as m_client:
        # Arguments for the method under test.
        operation = 'add'
        name = 'profile1'
        position = position_arg
        action = 'allow'
        direction = direction_arg

        # A profile with one pre-existing rule in each direction.
        existing = Rule()
        m_rules = Mock(spec=Rules, id=name,
                       inbound_rules=[existing],
                       outbound_rules=[existing])
        m_profile = Mock(spec=Profile, name=name, rules=m_rules)
        m_client.get_profile.return_value = m_profile

        # The rule the function is expected to create and insert.
        expected_rule = Rule(action='allow', icmp_type=5, icmp_code=5)

        # Call method under test.
        profile_rule_add_remove(operation, name, position, action,
                                direction, icmp_type='5', icmp_code='5')

        # The new rule must sit at the front of the targeted list and
        # the profile must have been written back.
        m_client.get_profile.assert_called_once_with(name)
        m_client.profile_update_rules.assert_called_once_with(m_profile)
        if direction_arg == 'inbound':
            self.assertEqual(m_rules.inbound_rules,
                             [expected_rule, existing])
        else:
            self.assertEqual(m_rules.outbound_rules,
                             [expected_rule, existing])
def add_update_network_policy(policy):
    """
    Takes a new network policy from the Kubernetes API and creates the
    corresponding Calico policy configuration.

    :param policy: Decoded NetworkPolicy API object.
    :return: nothing.  Exits the process if the policy cannot be parsed.
    """
    # Determine the name for this policy, qualified by namespace so that
    # same-named policies in different namespaces don't collide.
    name = "%s.%s" % (policy["metadata"]["namespace"],
                      policy["metadata"]["name"])
    _log.debug("Adding new network policy: %s", name)

    try:
        parser = PolicyParser(policy)
        selector = parser.calculate_pod_selector()
        inbound_rules = parser.calculate_inbound_rules()
    except Exception:
        # If the Policy is malformed, log the error and kill the controller.
        # Kubernetes will restart us.
        _log.exception("Error parsing policy: %s",
                       json.dumps(policy, indent=2))
        # BUG FIX: this used to call os.exit(1), which does not exist in
        # the os module and would raise AttributeError instead of
        # terminating.  sys.exit() performs the intended clean exit.
        import sys
        sys.exit(1)
    else:
        rules = Rules(inbound_rules=inbound_rules,
                      outbound_rules=[Rule(action="allow")])

        # Create the network policy using the calculated selector and rules.
        client.create_policy("default", name, selector,
                             order=NET_POL_ORDER, rules=rules)
        _log.debug("Updated policy '%s' for NetworkPolicy", name)
def test_profile_rule_add_remove_add_rule_exists(self, m_client):
    """
    Adding a Rule that is already present must be a no-op: no call to
    profile_update_rules and no change to the Rules object.
    """
    # Arguments for the method under test.
    operation = 'add'
    name = 'profile1'
    position = None
    action = 'allow'
    direction = 'inbound'

    # A profile whose rules already contain the rule we will try to add.
    existing = Rule(action='allow')
    m_rules = Mock(spec=Rules, id=name,
                   inbound_rules=[existing],
                   outbound_rules=[existing])
    m_profile = Mock(spec=Profile, name=name, rules=m_rules)
    m_client.get_profile.return_value = m_profile

    # Call method under test.
    profile_rule_add_remove(operation, name, position, action, direction)

    # Nothing was written and the rule lists are unchanged.
    m_client.get_profile.assert_called_once_with(name)
    self.assertFalse(m_client.profile_update_rules.called)
    self.assertEqual(m_rules.inbound_rules, [existing])
    self.assertEqual(m_rules.outbound_rules, [existing])
def test_profile_rule_add_remove_add_rule_not_exists(self, m_client):
    """
    Removing a Rule that does not exist (with position None) must make
    the program exit.
    """
    # Arguments for the method under test.
    operation = 'remove'
    name = 'profile1'
    position = None
    action = 'allow'
    direction = 'inbound'

    # A profile whose rule lists do not contain the rule being removed.
    existing = Rule()
    m_rules = Mock(spec=Rules, id=name,
                   inbound_rules=[existing],
                   outbound_rules=[existing])
    m_profile = Mock(spec=Profile, name=name, rules=m_rules)
    m_client.get_profile.return_value = m_profile

    # The method under test must raise SystemExit.
    self.assertRaises(SystemExit, profile_rule_add_remove,
                      operation, name, position, action, direction)
def calculate_inbound_rules(self):
    """
    Generate Calico Rule objects for this Policy's ingress rules.

    Returns a list of Calico datamodel Rules.
    """
    _log.debug("Calculating inbound rules")
    rules = []
    ingress_rules = self.policy["spec"].get("ingress")
    if ingress_rules:
        _log.debug("Got %d ingress rules: translating to Calico format",
                   len(ingress_rules))
        for ingress_rule in ingress_rules:
            _log.debug("Processing ingress rule %s", ingress_rule)
            if not ingress_rule:
                # An empty rule means allow-all; no later rule can widen
                # the policy further, so stop processing here.
                _log.debug("Empty rule => allow all; skipping rest")
                rules.append(Rule(action="allow"))
                break
            # Convert this ingress rule into Calico Rules.
            _log.debug("Adding rule %s", ingress_rule)
            rules.extend(self._allow_incoming_to_rules(ingress_rule))
    _log.debug("Calculated total set of rules: %s", rules)
    return rules
def _add_update_namespace(self, key, namespace):
    """
    Configures the necessary policy in Calico for this namespace.

    Uses the `net.alpha.kubernetes.io/network-isolation` annotation to
    decide whether inbound traffic is denied (isolated) or allowed.

    :param key: Key identifying the namespace (used for logging).
    :param namespace: Decoded namespace API object.
    """
    _log.info("Adding/updating namespace: %s", key)

    # Determine the type of network-isolation specified by this namespace.
    # This defaults to no isolation.
    annotations = namespace["metadata"].get("annotations", {})
    _log.debug("Namespace %s has annotations: %s", key, annotations)
    net_isolation = annotations.get(NS_POLICY_ANNOTATION, "no") == "yes"
    _log.info("Namespace %s has network-isolation? %s", key, net_isolation)

    # Determine the profile name to create.
    namespace_name = namespace["metadata"]["name"]
    profile_name = NS_PROFILE_FMT % namespace_name

    # Determine the rules to use.  Outbound traffic is always allowed;
    # inbound traffic is denied when the namespace is isolated.
    outbound_rules = [Rule(action="allow")]
    if net_isolation:
        inbound_rules = [Rule(action="deny")]
    else:
        inbound_rules = [Rule(action="allow")]
    rules = Rules(id=profile_name,
                  inbound_rules=inbound_rules,
                  outbound_rules=outbound_rules)

    # Create the Calico policy to represent this namespace, or
    # update it if it already exists.  Namespace policies select each
    # pod within that namespace.
    self._client.create_profile(profile_name, rules)

    # Assign labels to the profile.  We modify the keys to use
    # a special prefix to indicate that these labels are inherited
    # from the namespace.
    # BUG FIX: the original mutated `labels` (adding and deleting keys)
    # while iterating labels.iteritems(), which is undefined behaviour in
    # Python 2 - it can skip or revisit entries, or raise RuntimeError.
    # Build a new dict instead (same pattern as add_update_namespace).
    ns_labels = namespace["metadata"].get("labels", {})
    labels = {NS_LABEL_KEY_FMT % k: v for k, v in ns_labels.iteritems()}
    _log.debug("Generated namespace labels: %s", labels)

    # TODO: Actually assign labels to the profile.

    _log.info("Created/updated profile for namespace %s", namespace_name)
def _generate_rules(self, pod):
    """
    Generate Rules takes human readable policy strings in annotations
    and returns a libcalico Rules object.

    :param pod: Decoded pod API object.
    :return: Rules object holding the calculated inbound/outbound rules.
    """
    # Rules shared by the branches below.
    allow = Rule(action="allow")
    allow_ns = Rule(action="allow", src_tag=self._get_namespace_tag(pod))

    annotations = self._get_metadata(pod, "annotations")
    logger.debug("Found annotations: %s", annotations)

    # Outbound traffic is allowed in every case; only inbound varies.
    outbound_rules = [allow]

    if self.namespace == "kube-system":
        # kube-system pods must be reachable by every other pod so that
        # cluster services (e.g. DNS) keep working.
        logger.info("Pod is in kube-system namespace - allow all")
        inbound_rules = [allow]
    elif annotations and POLICY_ANNOTATION_KEY in annotations:
        # Policy annotations present - parse each ';'-separated rule.
        logger.info("Generating advanced security policy from annotations")
        policy_lines = annotations[POLICY_ANNOTATION_KEY].split(";")
        inbound_rules = [self.policy_parser.parse_line(line)
                         for line in policy_lines]
    elif self.default_policy == 'ns_isolation':
        # No annotations - fall back to the configured default policy:
        # isolate on namespace boundaries.
        logger.debug("Default policy is namespace isolation")
        inbound_rules = [allow_ns]
    else:
        # No annotations - default policy allows all traffic.
        logger.debug("Default policy is allow all")
        inbound_rules = [allow]

    return Rules(id=self.profile_name,
                 inbound_rules=inbound_rules,
                 outbound_rules=outbound_rules)
def test_default_profile(self, m_datastore):
    """
    The default Mesos profile must allow inbound traffic from the host
    IP network and from its own tag, and allow all outbound traffic.
    """
    m_datastore.profile_update_rules = Mock()
    m_profile = Mock()
    m_datastore.get_profile.return_value = m_profile

    calico_mesos._create_profile_with_default_mesos_rules("TESTPROF")

    rules = m_profile.rules
    # TODO: Better test for getting host ip
    host_net = calico_mesos._get_host_ip_net()
    self.assertIn(Rule(action="allow", src_net=host_net),
                  rules.inbound_rules)
    self.assertIn(Rule(action="allow", src_tag="TESTPROF"),
                  rules.inbound_rules)
    self.assertIn(Rule(action="allow"), rules.outbound_rules)
def _configure_profile(self, endpoint):
    """
    Configure the calico profile on the given endpoint.

    If DEFAULT_POLICY != none, we create a new profile for this pod and
    populate it with the correct rules.  Otherwise, the pod gets
    assigned to the default profile.

    :param endpoint: Calico endpoint object for this pod's workload.
    :return: nothing.  Exits the process if a per-pod profile with the
        same name already exists.
    """
    if self.default_policy != POLICY_NONE:
        # Determine the name for this profile.  The first 12 chars of
        # the Docker ID keep the name unique per pod instance.
        profile_name = "%s_%s_%s" % (self.namespace,
                                     self.pod_name,
                                     str(self.docker_id)[:12])

        # Create a new profile for this pod.
        logger.info("Creating profile '%s'", profile_name)

        # Retrieve pod labels, etc.
        pod = self._get_pod_config()

        if self._datastore_client.profile_exists(profile_name):
            # In profile-per-pod, we don't ever expect duplicate profiles.
            logger.error("Profile '%s' already exists.", profile_name)
            sys.exit(1)
        else:
            # The profile doesn't exist - generate the rule set for this
            # profile, and create it.
            rules = self._generate_rules(pod, profile_name)
            self._datastore_client.create_profile(profile_name, rules)

        # Add tags to the profile based on labels.
        self._apply_tags(pod, profile_name)

        # Set the profile for the workload.
        logger.info("Setting profile '%s' on endpoint %s",
                    profile_name, endpoint.endpoint_id)
        self._datastore_client.set_profiles_on_endpoint(
            [profile_name], endpoint_id=endpoint.endpoint_id)
        logger.debug('Finished configuring profile.')
    else:
        # Policy is disabled - add this pod to the default profile.
        if not self._datastore_client.profile_exists(DEFAULT_PROFILE_NAME):
            # If the default profile doesn't exist, create it with
            # allow-all rules in both directions.
            logger.info("Creating profile '%s'", DEFAULT_PROFILE_NAME)
            allow = Rule(action="allow")
            rules = Rules(id=DEFAULT_PROFILE_NAME,
                          inbound_rules=[allow],
                          outbound_rules=[allow])
            self._datastore_client.create_profile(DEFAULT_PROFILE_NAME,
                                                  rules)

        # Set the default profile on this pod's Calico endpoint.
        logger.info("Setting profile '%s' on endpoint %s",
                    DEFAULT_PROFILE_NAME, endpoint.endpoint_id)
        self._datastore_client.set_profiles_on_endpoint(
            [DEFAULT_PROFILE_NAME],
            endpoint_id=endpoint.endpoint_id)
def generate_rules(self):
    """Generates rules based on Kubernetes annotations.

    Outbound rules always allow; inbound rules come from the pod's
    policy annotation, defaulting to namespace isolation.

    :return: Rules object for this pod's profile.
    :raises ApplyProfileError: if an annotation rule fails to parse.
    """
    # Fetch the pod from the API, except for kube-system pods which are
    # always allow-all and need no annotation lookup.
    if self.namespace != "kube-system":
        self.pod = self._get_api_pod()

    annotations = self._get_metadata("annotations")
    _log.debug("Found annotations: %s", annotations)

    # Outbound rules are always "allow".
    outbound_rules = [Rule(action="allow")]

    if self.namespace == "kube-system":
        # Pods in the kube-system namespace must be accessible by all
        # other pods for services like DNS to work.
        _log.info("Pod is in kube-system namespace - allow all")
        inbound_rules = [Rule(action="allow")]
    elif annotations and self._annotation_key in annotations:
        # Annotation-driven policy: parse each ';'-separated rule.
        _log.info("Generating advanced policy from annotations")
        inbound_rules = []
        for line in annotations[self._annotation_key].split(";"):
            try:
                parsed = self.policy_parser.parse_line(line)
            except ValueError:
                # Invalid rule specified - surface it to the caller.
                _log.error("Invalid policy defined: %s", line)
                raise ApplyProfileError("Invalid policy defined",
                                        details=line)
            inbound_rules.append(parsed)
    else:
        # Isolate on namespace boundaries by default.
        _log.info("No policy annotations - apply namespace isolation")
        inbound_rules = [Rule(action="allow", src_tag=self.ns_tag)]

    return Rules(id=self.profile_name,
                 inbound_rules=inbound_rules,
                 outbound_rules=outbound_rules)
def generate_rules(self):
    """Generates default rules for a Kubernetes container manager.

    The default rules for Kubernetes is to allow all ingress and
    egress traffic.

    :rtype: A Calico Rules object
    :return: rules - allow all ingress and egress traffic
    """
    allow_all = Rule(action="allow")
    return Rules(id=self.profile_name,
                 inbound_rules=[allow_all],
                 outbound_rules=[allow_all])
def _generate_rule(self, arguments):
    """
    Generates a libcalico Rule object.

    Only whitelist ("allow") rules are supported.

    :param arguments: A dictionary of arguments as generated by docopt.
    :return: A libcalico Rule object
    """
    rule_args = {"action": "allow"}

    # Protocol flags are mutually exclusive; absence means any protocol.
    for proto in ("tcp", "udp", "icmp"):
        if arguments.get(proto):
            rule_args["protocol"] = proto
            break

    # Comma-separated port lists.
    src_ports = arguments.get("<SRCPORTS>")
    if src_ports:
        rule_args["src_ports"] = [p.strip() for p in src_ports.split(",")]
    dst_ports = arguments.get("<DSTPORTS>")
    if dst_ports:
        rule_args["dst_ports"] = [p.strip() for p in dst_ports.split(",")]

    # Optional scalar match criteria, passed straight through.
    icmp_type = arguments.get("<ICMPTYPE>")
    if icmp_type:
        rule_args["icmp_type"] = icmp_type
    icmp_code = arguments.get("<ICMPCODE>")
    if icmp_code:
        rule_args["icmp_code"] = icmp_code
    src_net = arguments.get("<SRCCIDR>")
    if src_net:
        rule_args["src_net"] = src_net
    dst_net = arguments.get("<DSTCIDR>")
    if dst_net:
        rule_args["dst_net"] = dst_net

    # Labels are validated before being used as tags.
    src_label = arguments.get("<SRCLABEL>")
    if src_label:
        rule_args["src_tag"] = self._validate_label(src_label)
    dst_label = arguments.get("<DSTLABEL>")
    if dst_label:
        rule_args["dst_tag"] = self._validate_label(dst_label)

    return Rule(**rule_args)
def add_update_network_policy(key, policy):
    """
    Takes a new network policy from the Kubernetes API and creates the
    corresponding Calico policy configuration.

    :param key: Key identifying the NetworkPolicy (used for logging).
    :param policy: Decoded NetworkPolicy API object.
    """
    _log.info("Adding new network policy: %s", key)

    # Build the Calico label selector from the policy's podSelector plus
    # the policy's namespace, so it only matches pods in that namespace.
    k8s_selector = policy["spec"]["podSelector"] or {}
    namespace = policy["metadata"]["namespace"]
    selectors = ["%s == '%s'" % (k, v)
                 for k, v in k8s_selector.iteritems()]
    selectors += ["%s == '%s'" % (K8S_NAMESPACE_LABEL, namespace)]
    selector = " && ".join(selectors)

    # Qualify the policy name with its namespace.
    name = "%s.%s" % (policy["metadata"]["namespace"],
                      policy["metadata"]["name"])

    # Build the Calico rules.
    try:
        inbound_rules = PolicyParser(policy).calculate_inbound_rules()
    except Exception:
        # It is possible bad rules will be passed - we don't want to
        # crash the agent, but we do want to indicate a problem in the
        # logs, so that the policy can be fixed.
        _log.exception("Error parsing policy: %s",
                       json.dumps(policy, indent=2))
    else:
        rules = Rules(id=name,
                      inbound_rules=inbound_rules,
                      outbound_rules=[Rule(action="allow")])

        # Create the network policy using the calculated selector and rules.
        client.create_policy(NET_POL_TIER_NAME, name, selector,
                             order=10, rules=rules)
        _log.info("Updated policy '%s' for NetworkPolicy %s", name, key)
def test_profile_rule_add_remove_remove_inbound(self, direction_arg,
                                                position_arg):
    """
    Removing a Rule must empty the targeted direction's list, leave the
    other direction untouched, and write the profile back.  Exercised
    for both directions and positions None and 1 (out of bounds tested
    elsewhere).
    """
    with patch('calico_ctl.profile.client', autospec=True) as m_client:
        # Arguments for the method under test.
        operation = 'remove'
        name = 'profile1'
        position = position_arg
        action = 'allow'
        direction = direction_arg

        # A profile with one matching rule in each direction.
        existing = Rule(action='allow')
        m_rules = Mock(spec=Rules, id=name,
                       inbound_rules=[existing],
                       outbound_rules=[existing])
        m_profile = Mock(spec=Profile, name=name, rules=m_rules)
        m_client.get_profile.return_value = m_profile

        # Call method under test.
        profile_rule_add_remove(operation, name, position, action,
                                direction)

        # The rule was removed from the requested direction only, and
        # the profile was written back.
        m_client.get_profile.assert_called_once_with(name)
        m_client.profile_update_rules.assert_called_once_with(m_profile)
        if direction_arg == 'inbound':
            self.assertEqual(m_rules.inbound_rules, [])
            self.assertEqual(m_rules.outbound_rules, [existing])
        else:
            self.assertEqual(m_rules.outbound_rules, [])
            self.assertEqual(m_rules.inbound_rules, [existing])
def _allow_incoming_to_rules(self, allow_incoming_clause):
    """
    Takes a single "allowIncoming" rule from a NetworkPolicy object
    and returns a list of Calico Rule objects which implement it.
    """
    _log.debug("Processing ingress rule: %s", allow_incoming_clause)

    # "to" arguments: one entry per protocol/port combination, or a
    # single empty dict meaning "all protocols and ports".
    ports = allow_incoming_clause.get("ports")
    if ports:
        _log.debug("Parsing 'ports': %s", ports)
        to_args = self._generate_to_args(ports)
    else:
        _log.debug("No ports specified, allow all protocols / ports")
        to_args = [{}]

    # "from" arguments: one entry per source, or a single empty dict
    # meaning "all sources".
    froms = allow_incoming_clause.get("from")
    if froms:
        _log.debug("Parsing 'from': %s", froms)
        from_args = self._generate_from_args(froms)
    else:
        _log.debug("No from specified, allow from all sources")
        from_args = [{}]

    # Cartesian product: one allow Rule per (from, to) pair.
    _log.debug("Creating rules")
    rules = []
    for to_arg in to_args:
        for from_arg in from_args:
            _log.debug("\tAllow from %s to %s", from_arg, to_arg)
            rule_kwargs = {"action": "allow"}
            rule_kwargs.update(from_arg)
            rule_kwargs.update(to_arg)
            rules.append(Rule(**rule_kwargs))
    return rules
def calculate_inbound_rules(self):
    """
    Takes a NetworkPolicy object from the API and returns a list of
    Calico Rules objects which should be applied on ingress.
    """
    _log.debug("Calculating inbound rules")

    # Iterate through and create the appropriate Calico Rules.
    ingress_clauses = self.policy["spec"].get("ingress") or []
    _log.info("Found %s ingress rules", len(ingress_clauses))

    rules = []
    for clause in ingress_clauses:
        if clause:
            # Non-empty clause - translate it into Calico Rule objects.
            rules.extend(self._allow_incoming_to_rules(clause))
        else:
            # An empty rule means allow all traffic.
            rules.append(Rule(action="allow"))
    _log.debug("Calculated total set of rules: %s", rules)
    return rules
def _calculate_inbound_rules(self, policy):
    """
    Takes a NetworkPolicy object from the API and returns a list of
    Calico Rules objects which should be applied on ingress.

    :param policy: Decoded NetworkPolicy API object.
    :return: List of Calico Rule objects.
    :raises PolicyError: if a "from" clause specifies both 'pods' and
        'namespaces' selectors.
    """
    _log.debug("Calculating inbound rules")

    # Store the rules to return.
    rules = []

    # Get this policy's namespace - pod selectors may only match pods
    # within it.
    policy_ns = policy["metadata"]["namespace"]

    # Iterate through each inbound rule and create the appropriate rules.
    allow_incomings = policy["spec"].get("ingress") or []
    _log.info("Found %s ingress rules", len(allow_incomings))
    for r in allow_incomings:
        # Determine the destination ports to allow.  If no ports are
        # specified, allow all port / protocol combinations.
        _log.debug("Processing ingress rule: %s", r)
        ports_by_protocol = {}
        for to_port in r.get("ports", []):
            # Keep a dict of ports exposed, keyed by protocol.
            protocol = to_port.get("protocol")
            port = to_port.get("port")
            ports = ports_by_protocol.setdefault(protocol, [])
            if port:
                _log.debug("Allow to port: %s/%s", protocol, port)
                ports.append(port)

        # Convert into arguments to be passed to a Rule object.
        to_args = []
        for protocol, ports in ports_by_protocol.iteritems():
            arg = {"protocol": protocol.lower()}
            if ports:
                arg["dst_ports"] = ports
            to_args.append(arg)

        if not to_args:
            # There are no destination protocols / ports specified.
            # Allow to all protocols and ports.
            to_args = [{}]

        # Determine the from criteria.  If no "from" block is specified,
        # then we should allow from all sources.
        from_args = []
        for from_clause in r.get("from", []):
            # We need to check if the key exists, not just if there is
            # a non-null value.  The presence of the key with a null
            # value means "select all".
            pods_present = "pods" in from_clause
            namespaces_present = "namespaces" in from_clause
            _log.debug("Is 'pods:' present? %s", pods_present)
            _log.debug("Is 'namespaces:' present? %s", namespaces_present)

            if pods_present and namespaces_present:
                # This is an error case according to the API.
                msg = "Policy API does not support both 'pods' and " \
                      "'namespaces' selectors."
                raise PolicyError(msg, policy)
            elif pods_present:
                # There is a pod selector in this "from" clause.
                pod_selector = from_clause["pods"] or {}
                _log.debug("Allow from pods: %s", pod_selector)
                selectors = [
                    "%s == '%s'" % (k, v)
                    for k, v in pod_selector.iteritems()
                ]

                # We can only select on pods in this namespace.
                # BUG FIX: the namespace value was previously interpolated
                # unquoted ("%s == %s"), inconsistent with every other
                # selector built here - quote it like the rest.
                selectors.append("%s == '%s'" % (K8S_NAMESPACE_LABEL,
                                                 policy_ns))
                selector = " && ".join(selectors)

                # Append the selector to the from args.
                _log.debug("Allowing pods which match: %s", selector)
                from_args.append({"src_selector": selector})
            elif namespaces_present:
                # There is a namespace selector.  Namespace labels are
                # applied to each pod in the namespace using
                # the per-namespace profile.  We can select on namespace
                # labels using the NS_LABEL_KEY_FMT modifier.
                namespaces = from_clause["namespaces"] or {}
                _log.debug("Allow from namespaces: %s", namespaces)
                selectors = ["%s == '%s'" % (NS_LABEL_KEY_FMT % k, v)
                             for k, v in namespaces.iteritems()]
                selector = " && ".join(selectors)
                if selector:
                    # Allow from the selected namespaces.
                    _log.debug("Allowing from namespaces which match: %s",
                               selector)
                    from_args.append({"src_selector": selector})
                else:
                    # Allow from all pods in all namespaces.
                    _log.debug("Allowing from all pods in all namespaces")
                    selector = "has(%s)" % K8S_NAMESPACE_LABEL
                    from_args.append({"src_selector": selector})

        if not from_args:
            # There are no match criteria specified.  We should allow
            # from all sources to the given ports.
            from_args = [{}]

        # A rule per-protocol, per-from-clause.
        for to_arg in to_args:
            for from_arg in from_args:
                # BUG FIX: the original mutated from_arg in place with
                # from_arg.update(to_arg), so keys from one to_arg (e.g.
                # dst_ports) leaked into rules built for later to_args.
                # Build a fresh dict per (from, to) combination instead.
                args = {"action": "allow"}
                args.update(from_arg)
                args.update(to_arg)
                rules.append(Rule(**args))
    _log.debug("Calculated rules: %s", rules)
    return rules
def test_add_mainline_kubernetes_annotations(self, m_requests): """ Tests add functionality using Kubernetes annotation policy driver. """ # Configure. self.cni_args = "K8S_POD_NAME=podname;K8S_POD_NAMESPACE=defaultns" workload_id = "defaultns.podname" self.command = CNI_CMD_ADD ip4 = "10.0.0.1/32" ip6 = "0:0:0:0:0:ffff:a00:1" ipam_stdout = json.dumps({"ip4": {"ip": ip4}, "ip6": {"ip": ip6}}) self.set_ipam_result(0, ipam_stdout, "") self.policy = {"type": "k8s-annotations"} # Set up docker client response. inspect_result = {"HostConfig": {"NetworkMode": ""}} self.m_docker_client().inspect_container.return_value = inspect_result # Create plugin. p = self.create_plugin() assert_true(isinstance(p.container_engine, DockerEngine)) # Mock DatastoreClient such that no endpoints exist. self.client.get_endpoint.side_effect = KeyError # Mock profile such that it doesn't exist. self.client.profile_exists.return_value = False # Mock the API response. response = MagicMock() response.status_code = 200 api_pod = { "kind": "pod", "metadata": { "annotations": { "projectcalico.org/policy": "allow from label X=Y" }, "labels": { "a": "b", "c": "d" } } } response.text = json.dumps(api_pod) m_requests.Session().__enter__().get.return_value = response # Execute. p.execute() # Assert the correct policy driver was chosen. assert_true(isinstance(p.policy_driver, KubernetesAnnotationDriver)) # Assert an endpoint was created. self.client.create_endpoint.assert_called_once_with( ANY, "k8s", workload_id, [IPNetwork(ip4), IPNetwork(ip6)]) # Assert profile was created. self.client.create_profile.assert_called_once_with( "defaultns_podname", Rules( id="defaultns_podname", inbound_rules=[Rule(action="allow", src_tag="defaultns_X_Y")], outbound_rules=[Rule(action="allow")])) # Assert tags were added. self.client.profile_update_tags.assert_called_once_with( self.client.get_profile()) # Assert a profile was applied. 
self.client.append_profiles_to_endpoint.assert_called_once_with( profile_names=["defaultns_podname"], endpoint_id=self.client.create_endpoint().endpoint_id)
try: ingress_isolation = policy_annotation.get("ingress", {}).get("isolation", "") except AttributeError: _log.exception("Invalid namespace annotation: %s", policy_annotation) return isolate_ns = ingress_isolation == "DefaultDeny" _log.debug("Namespace %s has %s. Isolate=%s", namespace_name, ingress_isolation, isolate_ns) # Determine the profile name to create. profile_name = NS_PROFILE_FMT % namespace_name # Determine the rules to use. outbound_rules = [Rule(action="allow")] if isolate_ns: inbound_rules = [Rule(action="deny")] else: inbound_rules = [Rule(action="allow")] rules = Rules(id=profile_name, inbound_rules=inbound_rules, outbound_rules=outbound_rules) # Assign labels to the profile. We modify the keys to use # a special prefix to indicate that these labels are inherited # from the namespace. ns_labels = namespace["metadata"].get("labels", {}) labels = {NS_LABEL_KEY_FMT % k: v for k, v in ns_labels.iteritems()} _log.debug("Generated namespace labels: %s", labels)
"metadata": {"namespace": "ns", "name": "test-policy"}, "spec": {}} network_policy_empty_result = [] # NetworkPolicy with only ports defined. ports = [{"port": 80, "protocol": "TCP"}, {"port": 443, "protocol": "UDP"}] spec = {"ingress": [{"ports": ports}]} network_policy_ports = {"kind": "networkpolicy", "apiversion": "net.alpha.kubernetes.io", "metadata": {"namespace": "ns", "name": "test-policy"}, "spec": spec} network_policy_ports_result = [ Rule(action="allow", dst_ports=[80], protocol="tcp"), Rule(action="allow", dst_ports=[443], protocol="udp") ] # NetworkPolicy with only pods defined. froms = [{"pods": {"role": "diags"}}, {"pods": {"tier": "db"}}] spec = {"ingress": [{"from": froms}]} network_policy_froms = {"kind": "networkpolicy", "apiversion": "net.alpha.kubernetes.io", "metadata": {"namespace": "ns", "name": "test-policy"}, "spec": spec} network_policy_froms_result = [ Rule(action="allow", src_selector="role == 'diags' && calico/k8s_ns == 'ns'"), Rule(action="allow", src_selector="tier == 'db' && calico/k8s_ns == 'ns'")