def create_profile(self, name):
    """
    Create a policy profile.  By default, endpoints in a profile
    accept traffic only from other endpoints in that profile, but can
    send traffic anywhere.

    Note this will clobber any existing profile with this name.

    :param name: Unique string name for the profile.
    :return: nothing.
    """
    profile_path = PROFILE_PATH % {"profile_id": name}

    # The profile carries a single tag: its own name.
    self.etcd_client.write(profile_path + "tags", '["%s"]' % name)

    # Inbound: accept only traffic from endpoints carrying our own tag.
    # Outbound: allow everything.
    # No explicit default-deny is written for outbound traffic because
    # Felix drops any packet that no profile accepts; writing a drop rule
    # here would kill packets before another profile on the same endpoint
    # had a chance to accept them.
    inbound = [Rule(action="allow", src_tag=name)]
    outbound = [Rule(action="allow")]
    profile_rules = Rules(id=name,
                          inbound_rules=inbound,
                          outbound_rules=outbound)
    self.etcd_client.write(profile_path + "rules", profile_rules.to_json())
def get_profile(self, name):
    """
    Get a Profile object representing the named profile from the data
    store.

    :param name: The name of the profile.
    :return: A Profile object.
    :raises KeyError: if the profile is not configured in the datastore.
    """
    # The profile's directory must exist, otherwise the profile is not
    # configured at all.
    profile_path = PROFILE_PATH % {"profile_id": name}
    try:
        self.etcd_client.read(profile_path)
    except EtcdKeyNotFound:
        raise KeyError("%s is not a configured profile." % name)
    profile = Profile(name)

    # Tags and rules are each optional; a missing key simply leaves the
    # corresponding attribute at its default.
    try:
        tags_result = self.etcd_client.read(
            TAGS_PATH % {"profile_id": name})
    except EtcdKeyNotFound:
        pass
    else:
        profile.tags = set(json.loads(tags_result.value))

    try:
        rules_result = self.etcd_client.read(
            RULES_PATH % {"profile_id": name})
    except EtcdKeyNotFound:
        pass
    else:
        profile.rules = Rules.from_json(rules_result.value)

    return profile
def _assign_default_rules(profile_name):
    """
    Generate a new allow-all rule list for a profile and push it to the
    datastore_client.

    :param profile_name: The profile to update
    :type profile_name: string
    :return: None.  Exits the process if the profile does not exist.
    """
    try:
        profile = datastore_client.get_profile(profile_name)
    except KeyError:
        # get_profile raises KeyError for a missing profile.  The old
        # bare `except:` also swallowed SystemExit/KeyboardInterrupt and
        # any unrelated datastore error - keep those propagating.
        _log.error("Could not apply rules. Profile not found: %s, exiting",
                   profile_name)
        sys.exit(1)

    # Allow all inbound and outbound traffic by default.
    rules_dict = {
        "id": profile_name,
        "inbound_rules": [
            {
                "action": "allow",
            },
        ],
        "outbound_rules": [
            {
                "action": "allow",
            },
        ],
    }
    rules_json = json.dumps(rules_dict, indent=2)

    # Bug fix: the parsed rules were previously discarded, so the update
    # call pushed the profile back with its rules unchanged.  Attach the
    # new rules to the profile before writing it to the datastore.
    profile.rules = Rules.from_json(rules_json)
    datastore_client.profile_update_rules(profile)
    _log.info("Finished applying default rules.")
def add_update_network_policy(policy):
    """
    Takes a new network policy from the Kubernetes API and
    creates the corresponding Calico policy configuration.

    :param policy: The NetworkPolicy object from the Kubernetes API.
    :return: None.  Exits the process if the policy cannot be parsed.
    """
    # Determine the name for this policy, namespaced to avoid collisions
    # between same-named policies in different namespaces.
    name = "%s.%s" % (policy["metadata"]["namespace"],
                      policy["metadata"]["name"])
    _log.debug("Adding new network policy: %s", name)

    try:
        parser = PolicyParser(policy)
        selector = parser.calculate_pod_selector()
        inbound_rules = parser.calculate_inbound_rules()
    except Exception:
        # If the Policy is malformed, log the error and kill the controller.
        # Kubernetes will restart us.
        _log.exception("Error parsing policy: %s",
                       json.dumps(policy, indent=2))
        # Bug fix: the os module has no exit() function - the old
        # os.exit(1) raised AttributeError instead of terminating.
        # Local import in case the file does not import sys at top level.
        import sys
        sys.exit(1)
    else:
        rules = Rules(inbound_rules=inbound_rules,
                      outbound_rules=[Rule(action="allow")])

        # Create the network policy using the calculated selector and rules.
        client.create_policy("default",
                             name,
                             selector,
                             order=NET_POL_ORDER,
                             rules=rules)
        _log.debug("Updated policy '%s' for NetworkPolicy", name)
def run(self):
    """
    Main entry point, called once at program init.

    Spawns the watch threads and then loops reading responses from the
    queue as they come in.  When leader election is enabled, waits for
    leadership (via _wait_for_leadership) before doing anything else.
    """
    _log.info("Leader election enabled? %s", self._leader_elect)
    if self._leader_elect:
        # Wait until we've been elected leader to start, then keep the
        # leader thread running in the background.
        self._wait_for_leadership()
        self._start_leader_thread()

    # Ensure the tier exists before any policies are written into it.
    metadata = {"order": NET_POL_TIER_ORDER}
    self._client.set_policy_tier_metadata(NET_POL_TIER_NAME, metadata)

    # Ensure the backstop policy exists.  This policy forwards
    # any traffic to Kubernetes pods which doesn't match another policy
    # to the next-tier (i.e the per-namespace Profiles).
    selector = "has(%s)" % K8S_NAMESPACE_LABEL
    rules = Rules(inbound_rules=[Rule(action="next-tier")],
                  outbound_rules=[Rule(action="next-tier")])
    self._client.create_policy(NET_POL_TIER_NAME,
                               "k8s-policy-no-match",
                               selector,
                               order=NET_POL_BACKSTOP_ORDER,
                               rules=rules)

    # Read initial state from Kubernetes API.
    self.start_workers()

    # Loop and read updates from the queue.
    self.read_updates()
def add_update_namespace(namespace):
    """
    Configures a Profile for the given Kubernetes namespace.

    :param namespace: The Namespace object from the Kubernetes API.
    :return: None.
    """
    namespace_name = namespace["metadata"]["name"]
    _log.debug("Adding/updating namespace: %s", namespace_name)

    # Profiles are named after the namespace they represent.
    profile_name = NS_PROFILE_FMT % namespace_name

    # Namespace profiles allow all traffic in both directions.
    rules = Rules(inbound_rules=[Rule(action="allow")],
                  outbound_rules=[Rule(action="allow")])

    # Copy the namespace's labels onto the profile, re-keying them with
    # a special prefix marking them as inherited from the namespace.
    ns_labels = namespace["metadata"].get("labels", {})
    labels = dict((NS_LABEL_KEY_FMT % key, value)
                  for key, value in ns_labels.iteritems())
    _log.debug("Generated namespace labels: %s", labels)

    # Create the Calico profile to represent this namespace, or update
    # it if it already exists.
    client.create_profile(profile_name, rules, labels)
    _log.debug("Created/updated profile for namespace %s", namespace_name)
def test_generate_rules(self):
    # Invoke the rule generator.
    generated = self.driver.generate_rules()

    # The driver should allow all traffic in both directions for the
    # network's profile.
    expected = Rules(id=self.network_name,
                     inbound_rules=[Rule(action="allow")],
                     outbound_rules=[Rule(action="allow")])
    assert_equal(generated, expected)
def _configure_profile(self, endpoint):
    """
    Configure the calico profile on the given endpoint.

    If DEFAULT_POLICY != none, we create a new profile for this pod and
    populate it with the correct rules.

    Otherwise, the pod gets assigned to the default profile.

    :param endpoint: The Calico endpoint object to attach profiles to.
    :return: None.  Exits the process on a duplicate per-pod profile.
    """
    if self.default_policy != POLICY_NONE:
        # Profile-per-pod mode.  The profile name embeds namespace, pod
        # name and a 12-char docker-id prefix to stay unique per pod.
        profile_name = "%s_%s_%s" % (self.namespace,
                                     self.pod_name,
                                     str(self.docker_id)[:12])

        # Create a new profile for this pod.
        logger.info("Creating profile '%s'", profile_name)

        # Retrieve pod labels, etc.
        pod = self._get_pod_config()

        if self._datastore_client.profile_exists(profile_name):
            # In profile-per-pod, we don't ever expect duplicate profiles.
            logger.error("Profile '%s' already exists.", profile_name)
            sys.exit(1)
        else:
            # The profile doesn't exist - generate the rule set for this
            # profile, and create it.
            rules = self._generate_rules(pod, profile_name)
            self._datastore_client.create_profile(profile_name, rules)

        # Add tags to the profile based on labels.
        self._apply_tags(pod, profile_name)

        # Set the profile for the workload.
        logger.info("Setting profile '%s' on endpoint %s",
                    profile_name, endpoint.endpoint_id)
        self._datastore_client.set_profiles_on_endpoint(
            [profile_name], endpoint_id=endpoint.endpoint_id)
        logger.debug('Finished configuring profile.')
    else:
        # Policy is disabled - add this pod to the default profile.
        if not self._datastore_client.profile_exists(DEFAULT_PROFILE_NAME):
            # If the default profile doesn't exist, create it with
            # allow-all rules in both directions.
            logger.info("Creating profile '%s'", DEFAULT_PROFILE_NAME)
            allow = Rule(action="allow")
            rules = Rules(id=DEFAULT_PROFILE_NAME,
                          inbound_rules=[allow],
                          outbound_rules=[allow])
            self._datastore_client.create_profile(DEFAULT_PROFILE_NAME,
                                                  rules)

        # Set the default profile on this pod's Calico endpoint.
        logger.info("Setting profile '%s' on endpoint %s",
                    DEFAULT_PROFILE_NAME, endpoint.endpoint_id)
        self._datastore_client.set_profiles_on_endpoint(
            [DEFAULT_PROFILE_NAME], endpoint_id=endpoint.endpoint_id)
def test_generate_rules_kube_system(self, m_get_pod):
    # kube-system pods must be reachable by everything.
    self.driver.namespace = "kube-system"

    # Invoke the rule generator.
    generated = self.driver.generate_rules()

    # Expect allow-all rules in both directions.
    allow_all = Rule(action="allow")
    assert_equal(generated,
                 Rules(id=self.profile_name,
                       inbound_rules=[allow_all],
                       outbound_rules=[allow_all]))
def test_generate_rules_mainline(self, m_get_pod):
    # Invoke the rule generator.
    generated = self.driver.generate_rules()

    # Default behaviour isolates the pod on namespace boundaries:
    # inbound only from the namespace tag, outbound allow-all.
    ns_rule = Rule(action="allow", src_tag="namespace_namespace")
    expected = Rules(id=self.profile_name,
                     inbound_rules=[ns_rule],
                     outbound_rules=[Rule(action="allow")])
    assert_equal(generated, expected)
def test_generate_rules_annotations(self, m_get_pod):
    # Mock get_metadata to return a policy annotation.
    self.driver._get_metadata = MagicMock(spec=self.driver._get_metadata)
    self.driver._get_metadata.return_value = {
        "projectcalico.org/policy": "allow tcp"
    }

    # Invoke the rule generator.
    generated = self.driver.generate_rules()

    # The annotation should parse into a single inbound tcp rule, with
    # outbound left as allow-all.
    expected = Rules(id=self.profile_name,
                     inbound_rules=[Rule(action="allow", protocol="tcp")],
                     outbound_rules=[Rule(action="allow")])
    assert_equal(generated, expected)
def generate_rules(self):
    """Generates default rules for a Kubernetes container manager.

    The default rules for Kubernetes is to allow all ingress and egress
    traffic.

    :rtype: A Calico Rules object
    :return: rules - allow all ingress and egress traffic
    """
    # A single allow-all rule is reused for both directions.
    allow_all = Rule(action="allow")
    return Rules(id=self.profile_name,
                 inbound_rules=[allow_all],
                 outbound_rules=[allow_all])
def _create_rules(id_):
    # Build an allow-all rule set as JSON and parse it back so that
    # Rules.from_json constructs the proper Rule objects.
    rules_dict = {
        'id': id_,
        'inbound_rules': [
            {
                'action': 'allow',
            },
        ],
        'outbound_rules': [
            {
                'action': 'allow',
            },
        ],
    }
    return Rules.from_json(json.dumps(rules_dict, indent=2))
def _create_rules(id_):
    # Allow-all rules in both directions, round-tripped through JSON so
    # Rules.from_json builds the proper Rule objects.
    rules_json = json.dumps({
        "id": id_,
        "inbound_rules": [{"action": "allow"}],
        "outbound_rules": [{"action": "allow"}],
    }, indent=2)
    return Rules.from_json(rules_json)
def _add_update_namespace(self, key, namespace):
    """
    Configures the necessary policy in Calico for this
    namespace.  Uses the `net.alpha.kubernetes.io/network-isolation`
    annotation.

    :param key: Identifier for this namespace update (used for logging).
    :param namespace: The Namespace object from the Kubernetes API.
    :return: None.
    """
    _log.info("Adding/updating namespace: %s", key)

    # Determine the type of network-isolation specified by this namespace.
    # This defaults to no isolation.
    annotations = namespace["metadata"].get("annotations", {})
    _log.debug("Namespace %s has annotations: %s", key, annotations)
    net_isolation = annotations.get(NS_POLICY_ANNOTATION, "no") == "yes"
    _log.info("Namespace %s has network-isolation? %s", key, net_isolation)

    # Determine the profile name to create.
    namespace_name = namespace["metadata"]["name"]
    profile_name = NS_PROFILE_FMT % namespace_name

    # Determine the rules to use.
    outbound_rules = [Rule(action="allow")]
    if net_isolation:
        inbound_rules = [Rule(action="deny")]
    else:
        inbound_rules = [Rule(action="allow")]
    rules = Rules(id=profile_name,
                  inbound_rules=inbound_rules,
                  outbound_rules=outbound_rules)

    # Create the Calico policy to represent this namespace, or
    # update it if it already exists.  Namespace policies select each
    # pod within that namespace.
    self._client.create_profile(profile_name, rules)

    # Assign labels to the profile.  We modify the keys to use
    # a special prefix to indicate that these labels are inherited
    # from the namespace.
    # Bug fix: the old code added and deleted keys in the same dict it
    # was iterating with iteritems(), which mutates a dict during
    # iteration (RuntimeError / skipped entries in Python 2).  Build a
    # fresh dict instead, matching the sibling namespace handlers.
    ns_labels = namespace["metadata"].get("labels", {})
    labels = {NS_LABEL_KEY_FMT % k: v for k, v in ns_labels.iteritems()}
    _log.debug("Generated namespace labels: %s", labels)
    # TODO: Actually assign labels to the profile.

    _log.info("Created/updated profile for namespace %s", namespace_name)
def add_update_network_policy(key, policy):
    """
    Takes a new network policy from the Kubernetes API and
    creates the corresponding Calico policy configuration.
    """
    _log.info("Adding new network policy: %s", key)

    # Parse this network policy so we can convert it to the appropriate
    # Calico policy.  The podSelector may be None; treat that as empty.
    k8s_selector = policy["spec"]["podSelector"] or {}

    # Build the Calico label selector from the NetworkPolicy's pod
    # selector combined with the NetworkPolicy's namespace.
    namespace = policy["metadata"]["namespace"]
    clauses = ["%s == '%s'" % (label, value)
               for label, value in k8s_selector.iteritems()]
    clauses.append("%s == '%s'" % (K8S_NAMESPACE_LABEL, namespace))
    selector = " && ".join(clauses)

    # Policies are named <namespace>.<name> to avoid collisions.
    name = "%s.%s" % (policy["metadata"]["namespace"],
                      policy["metadata"]["name"])

    # Build the Calico rules.
    try:
        inbound_rules = PolicyParser(policy).calculate_inbound_rules()
    except Exception:
        # It is possible bad rules will be passed - we don't want to
        # crash the agent, but we do want to indicate a problem in the
        # logs, so that the policy can be fixed.
        _log.exception("Error parsing policy: %s",
                       json.dumps(policy, indent=2))
    else:
        rules = Rules(id=name,
                      inbound_rules=inbound_rules,
                      outbound_rules=[Rule(action="allow")])

        # Create the network policy using the calculated selector and
        # rules.
        client.create_policy(NET_POL_TIER_NAME,
                             name,
                             selector,
                             order=10,
                             rules=rules)
        _log.info("Updated policy '%s' for NetworkPolicy %s", name, key)
def _generate_rules(self, pod): """ Generate Rules takes human readable policy strings in annotations and returns a libcalico Rules object. :return tuple of inbound_rules, outbound_rules """ # Create allow and per-namespace rules for later use. allow = Rule(action="allow") allow_ns = Rule(action="allow", src_tag=self._get_namespace_tag(pod)) annotations = self._get_metadata(pod, "annotations") logger.debug("Found annotations: %s", annotations) if self.namespace == "kube-system" : # Pods in the kube-system namespace must be accessible by all # other pods for services like DNS to work. logger.info("Pod is in kube-system namespace - allow all") inbound_rules = [allow] outbound_rules = [allow] elif annotations and POLICY_ANNOTATION_KEY in annotations: # If policy annotations are defined, use them to generate rules. logger.info("Generating advanced security policy from annotations") rules = annotations[POLICY_ANNOTATION_KEY] inbound_rules = [] outbound_rules = [allow] for rule in rules.split(";"): parsed_rule = self.policy_parser.parse_line(rule) inbound_rules.append(parsed_rule) else: # If not annotations are defined, just use the configured # default policy. if self.default_policy == 'ns_isolation': # Isolate on namespace boundaries by default. logger.debug("Default policy is namespace isolation") inbound_rules = [allow_ns] outbound_rules = [allow] else: # Allow all traffic by default. logger.debug("Default policy is allow all") inbound_rules = [allow] outbound_rules = [allow] return Rules(id=self.profile_name, inbound_rules=inbound_rules, outbound_rules=outbound_rules)
def generate_rules(self):
    """Generates rules based on Kubernetes annotations.
    """
    # Get the pod from the API (not needed for kube-system, which is
    # always allow-all).
    if self.namespace != "kube-system":
        self.pod = self._get_api_pod()

    # Get any annotations.
    annotations = self._get_metadata("annotations")
    _log.debug("Found annotations: %s", annotations)

    # Outbound traffic is always allowed.
    outbound_rules = [Rule(action="allow")]

    if self.namespace == "kube-system":
        # Pods in the kube-system namespace must be accessible by all
        # other pods for services like DNS to work.
        _log.info("Pod is in kube-system namespace - allow all")
        inbound_rules = [Rule(action="allow")]
    elif annotations and self._annotation_key in annotations:
        # Policy annotations present - parse each ';'-separated rule
        # into an inbound rule.
        _log.info("Generating advanced policy from annotations")
        inbound_rules = []
        for line in annotations[self._annotation_key].split(";"):
            try:
                inbound_rules.append(self.policy_parser.parse_line(line))
            except ValueError:
                # Invalid rule specified.
                _log.error("Invalid policy defined: %s", line)
                raise ApplyProfileError("Invalid policy defined",
                                        details=line)
    else:
        # Isolate on namespace boundaries by default.
        _log.info("No policy annotations - apply namespace isolation")
        inbound_rules = [Rule(action="allow", src_tag=self.ns_tag)]

    return Rules(id=self.profile_name,
                 inbound_rules=inbound_rules,
                 outbound_rules=outbound_rules)
def test_add_mainline_kubernetes_annotations(self, m_requests): """ Tests add functionality using Kubernetes annotation policy driver. """ # Configure. self.cni_args = "K8S_POD_NAME=podname;K8S_POD_NAMESPACE=defaultns" workload_id = "defaultns.podname" self.command = CNI_CMD_ADD ip4 = "10.0.0.1/32" ip6 = "0:0:0:0:0:ffff:a00:1" ipam_stdout = json.dumps({"ip4": {"ip": ip4}, "ip6": {"ip": ip6}}) self.set_ipam_result(0, ipam_stdout, "") self.policy = {"type": "k8s-annotations"} # Set up docker client response. inspect_result = {"HostConfig": {"NetworkMode": ""}} self.m_docker_client().inspect_container.return_value = inspect_result # Create plugin. p = self.create_plugin() assert_true(isinstance(p.container_engine, DockerEngine)) # Mock DatastoreClient such that no endpoints exist. self.client.get_endpoint.side_effect = KeyError # Mock profile such that it doesn't exist. self.client.profile_exists.return_value = False # Mock the API response. response = MagicMock() response.status_code = 200 api_pod = { "kind": "pod", "metadata": { "annotations": { "projectcalico.org/policy": "allow from label X=Y" }, "labels": { "a": "b", "c": "d" } } } response.text = json.dumps(api_pod) m_requests.Session().__enter__().get.return_value = response # Execute. p.execute() # Assert the correct policy driver was chosen. assert_true(isinstance(p.policy_driver, KubernetesAnnotationDriver)) # Assert an endpoint was created. self.client.create_endpoint.assert_called_once_with( ANY, "k8s", workload_id, [IPNetwork(ip4), IPNetwork(ip6)]) # Assert profile was created. self.client.create_profile.assert_called_once_with( "defaultns_podname", Rules( id="defaultns_podname", inbound_rules=[Rule(action="allow", src_tag="defaultns_X_Y")], outbound_rules=[Rule(action="allow")])) # Assert tags were added. self.client.profile_update_tags.assert_called_once_with( self.client.get_profile()) # Assert a profile was applied. 
self.client.append_profiles_to_endpoint.assert_called_once_with( profile_names=["defaultns_podname"], endpoint_id=self.client.create_endpoint().endpoint_id)
isolate_ns = ingress_isolation == "DefaultDeny" _log.debug("Namespace %s has %s. Isolate=%s", namespace_name, ingress_isolation, isolate_ns) # Determine the profile name to create. profile_name = NS_PROFILE_FMT % namespace_name # Determine the rules to use. outbound_rules = [Rule(action="allow")] if isolate_ns: inbound_rules = [Rule(action="deny")] else: inbound_rules = [Rule(action="allow")] rules = Rules(id=profile_name, inbound_rules=inbound_rules, outbound_rules=outbound_rules) # Assign labels to the profile. We modify the keys to use # a special prefix to indicate that these labels are inherited # from the namespace. ns_labels = namespace["metadata"].get("labels", {}) labels = {NS_LABEL_KEY_FMT % k: v for k, v in ns_labels.iteritems()} _log.debug("Generated namespace labels: %s", labels) # Create the Calico profile to represent this namespace, or # update it if it already exists. client.create_profile(profile_name, rules, labels) # Create / update the tiered policy object for this namespace. selector = "%s == '%s'" % (K8S_NAMESPACE_LABEL, namespace_name)
return isolate_ns = ingress_isolation == "DefaultDeny" _log.debug("Namespace %s has %s. Isolate=%s", namespace_name, ingress_isolation, isolate_ns) # Determine the profile name to create. profile_name = NS_PROFILE_FMT % namespace_name # Determine the rules to use. outbound_rules = [Rule(action="allow")] if isolate_ns: inbound_rules = [Rule(action="deny")] else: inbound_rules = [Rule(action="allow")] rules = Rules(inbound_rules=inbound_rules, outbound_rules=outbound_rules) # Assign labels to the profile. We modify the keys to use # a special prefix to indicate that these labels are inherited # from the namespace. ns_labels = namespace["metadata"].get("labels", {}) labels = {NS_LABEL_KEY_FMT % k: v for k, v in ns_labels.iteritems()} _log.debug("Generated namespace labels: %s", labels) # Create the Calico profile to represent this namespace, or # update it if it already exists. client.create_profile(profile_name, rules, labels) _log.debug("Created/updated profile for namespace %s", namespace_name)