class NetworkPlugin(object):
    """Kubernetes network plugin driving Calico networking for pods.

    Implements the create/delete/status lifecycle hooks: veth plumbing,
    IP address assignment (Docker- or Calico-IPAM, selected by the
    CALICO_IPAM setting) and Calico profile (policy) management for a
    single pod's infra container.
    """

    def __init__(self):
        # Pod identity; populated by create()/delete()/status().
        self.pod_name = None
        self.profile_name = None
        self.namespace = None
        self.docker_id = None

        # Bearer token for the Kubernetes API server, if configured.
        self.auth_token = os.environ.get("KUBE_AUTH_TOKEN", None)
        self.policy_parser = None

        self._datastore_client = IPAMClient()
        self._docker_client = Client(
            version=DOCKER_VERSION,
            base_url=os.getenv("DOCKER_HOST", "unix://var/run/docker.sock"))

    def create(self, namespace, pod_name, docker_id):
        """Create a pod."""
        self.pod_name = pod_name
        self.docker_id = docker_id
        self.namespace = namespace
        self.policy_parser = PolicyParser(self.namespace)

        # Profile names are scoped by namespace and pod name, plus the
        # first 12 characters of the container ID for uniqueness.
        self.profile_name = "%s_%s_%s" % (self.namespace,
                                          self.pod_name,
                                          str(self.docker_id)[:12])

        logger.info("Configuring docker container %s", self.docker_id)

        try:
            endpoint = self._configure_interface()
            self._configure_profile(endpoint)
        except CalledProcessError as e:
            logger.error("Error code %d creating pod networking: %s\n%s",
                         e.returncode, e.output, e)
            sys.exit(1)

    def delete(self, namespace, pod_name, docker_id):
        """Cleanup after a pod."""
        self.pod_name = pod_name
        self.docker_id = docker_id
        self.namespace = namespace
        self.profile_name = "%s_%s_%s" % (self.namespace,
                                          self.pod_name,
                                          str(self.docker_id)[:12])

        logger.info("Deleting container %s with profile %s",
                    self.docker_id, self.profile_name)

        # Remove the profile for the workload.
        self._container_remove()

        # Delete profile
        try:
            self._datastore_client.remove_profile(self.profile_name)
        except KeyError:
            # Narrowed from a bare except: the warning below only makes
            # sense for a missing profile, and a bare except would also
            # hide SystemExit/KeyboardInterrupt.
            logger.warning("Cannot remove profile %s; Profile cannot "
                           "be found.", self.profile_name)

    def status(self, namespace, pod_name, docker_id):
        """Print the pod's networking status (its IP) to stdout as JSON."""
        self.namespace = namespace
        self.pod_name = pod_name
        self.docker_id = docker_id

        # Find the endpoint
        try:
            endpoint = self._datastore_client.get_endpoint(
                hostname=HOSTNAME,
                orchestrator_id=ORCHESTRATOR_ID,
                workload_id=self.docker_id
            )
        except KeyError:
            logger.error("Container %s doesn't contain any endpoints",
                         self.docker_id)
            sys.exit(1)

        # Retrieve IPAddress from the attached IPNetworks on the endpoint.
        # Since Kubernetes only supports ipv4, we'll only check for ipv4 nets.
        if not endpoint.ipv4_nets:
            logger.error("Exiting. No IPs attached to endpoint %s",
                         self.docker_id)
            sys.exit(1)
        else:
            ip_net = list(endpoint.ipv4_nets)
            # "!= 1" replaces the original "is not 1" identity check,
            # which only worked because of CPython small-int caching.
            if len(ip_net) != 1:
                logger.warning("There is more than one IPNetwork attached "
                               "to endpoint %s", self.docker_id)
            ip = ip_net[0].ip

        logger.info("Retrieved pod IP Address: %s", ip)

        json_dict = {
            "apiVersion": "v1beta1",
            "kind": "PodNetworkStatus",
            "ip": str(ip)
        }

        logger.debug("Writing status to stdout: \n%s", json.dumps(json_dict))
        print(json.dumps(json_dict))

    def _configure_profile(self, endpoint):
        """
        Configure the calico profile on the given endpoint.
        """
        pod = self._get_pod_config()

        logger.info("Configuring Pod Profile: %s", self.profile_name)

        if self._datastore_client.profile_exists(self.profile_name):
            # Profiles are unique per pod instance; a pre-existing one
            # indicates a conflicting or stale configuration.
            logger.error("Profile with name %s already exists, exiting.",
                         self.profile_name)
            sys.exit(1)
        else:
            rules = self._generate_rules(pod)
            self._datastore_client.create_profile(self.profile_name, rules)

        # Add tags to the profile.
        self._apply_tags(pod)

        # Set the profile for the workload.
        logger.info("Setting profile %s on endpoint %s",
                    self.profile_name, endpoint.endpoint_id)
        self._datastore_client.set_profiles_on_endpoint(
            [self.profile_name], endpoint_id=endpoint.endpoint_id)
        logger.info("Finished configuring profile.")

    def _configure_interface(self):
        """Configure the Calico interface for a pod.

        This involves the following steps:
        1) Determine the IP that docker assigned to the interface inside the
           container
        2) Delete the docker-assigned veth pair that's attached to the docker
           bridge
        3) Create a new calico veth pair, using the docker-assigned IP for the
           end in the container's namespace
        4) Assign the node's IP to the host end of the veth pair (required for
           compatibility with kube-proxy REDIRECT iptables rules).
        """
        # Set up parameters
        container_pid = self._get_container_pid(self.docker_id)
        interface = "eth0"

        self._delete_docker_interface()
        logger.info("Configuring Calico network interface")
        ep = self._container_add(container_pid, interface)

        # Log our container's interfaces after adding the new interface.
        _log_interfaces(container_pid)

        interface_name = generate_cali_interface_name(IF_PREFIX,
                                                      ep.endpoint_id)
        node_ip = self._get_node_ip()
        logger.info("Adding IP %s to interface %s", node_ip, interface_name)

        # This is slightly tricky. Since the kube-proxy sometimes
        # programs REDIRECT iptables rules, we MUST have an IP on the host end
        # of the caliXXX veth pairs. This is because the REDIRECT rule
        # rewrites the destination ip/port of traffic from a pod to a service
        # VIP. The destination port is rewritten to an arbitrary high-numbered
        # port, and the destination IP is rewritten to one of the IPs allocated
        # to the interface. This fails if the interface doesn't have an IP,
        # so we allocate an IP which is already allocated to the node. We set
        # the subnet to /32 so that the routing table is not affected;
        # no traffic for the node_ip's subnet will use the /32 route.
        check_call(["ip", "addr", "add", node_ip + "/32",
                    "dev", interface_name])
        logger.info("Finished configuring network interface")
        return ep

    def _container_add(self, pid, interface):
        """
        Add a container (on this host) to Calico networking with the given IP.
        """
        # Check if the container already exists. If it does, exit.
        try:
            _ = self._datastore_client.get_endpoint(
                hostname=HOSTNAME,
                orchestrator_id=ORCHESTRATOR_ID,
                workload_id=self.docker_id
            )
        except KeyError:
            # Calico doesn't know about this container.  Continue.
            pass
        else:
            logger.error("This container has already been configured "
                         "with Calico Networking.")
            sys.exit(1)

        # Obtain information from Docker Client and validate container state
        self._validate_container_state(self.docker_id)
        ip_list = [self._assign_container_ip()]

        # Create Endpoint object
        try:
            logger.info("Creating endpoint with IPs %s", ip_list)
            ep = self._datastore_client.create_endpoint(HOSTNAME,
                                                        ORCHESTRATOR_ID,
                                                        self.docker_id,
                                                        ip_list)
        except (AddrFormatError, KeyError):
            # Release the just-assigned IP before exiting so it isn't leaked.
            logger.exception("Failed to create endpoint with IPs %s. "
                             "Unassigning IP address, then exiting.", ip_list)
            self._datastore_client.release_ips(set(ip_list))
            sys.exit(1)

        # Create the veth, move into the container namespace, add the IP and
        # set up the default routes.
        logger.info("Creating the veth with namespace pid %s on interface "
                    "name %s", pid, interface)
        ep.mac = ep.provision_veth(netns.PidNamespace(pid), interface)

        logger.info("Setting mac address %s to endpoint %s", ep.mac, ep.name)
        self._datastore_client.set_endpoint(ep)

        # Let the caller know what endpoint was created.
        return ep

    def _assign_container_ip(self):
        """
        Assign IPAddress either with the assigned docker IPAddress or utilize
        calico IPAM.

        The option to utilize IPAM is indicated by the environment variable
        "CALICO_IPAM".  True indicates to utilize Calico's auto_assign IPAM
        policy.  False indicates to utilize the docker assigned IPAddress.

        :return IPAddress which has been assigned
        """
        def _assign(ip):
            """
            Local helper function for assigning an IP and checking for errors.
            Only used when operating with CALICO_IPAM=false
            """
            try:
                logger.info("Attempting to assign IP %s", ip)
                self._datastore_client.assign_ip(ip, str(self.docker_id),
                                                 None)
            except (ValueError, RuntimeError):
                logger.exception("Failed to assign IPAddress %s", ip)
                sys.exit(1)

        if CALICO_IPAM == "true":
            logger.info("Using Calico IPAM")
            try:
                ipv4s, ipv6s = self._datastore_client.auto_assign_ips(
                    1, 0, self.docker_id, None)
                ip = ipv4s[0]
                logger.debug("IPAM assigned ipv4=%s; ipv6= %s", ipv4s, ipv6s)
            except RuntimeError as err:
                logger.error("Cannot auto assign IPAddress: %s", err.message)
                sys.exit(1)
        else:
            logger.info("Using docker assigned IP address")
            ip = self._read_docker_ip()

            try:
                # Try to assign the address using the _assign helper function.
                _assign(ip)
            except AlreadyAssignedError:
                # If the Docker IP is already assigned, it is most likely that
                # an endpoint has been removed under our feet.  When using
                # Docker IPAM, treat Docker as the source of
                # truth for IP addresses.
                logger.warning("Docker IP is already assigned, finding "
                               "stale endpoint")
                self._datastore_client.release_ips(set([ip]))

                # Clean up whatever existing endpoint has this IP address.
                # We can improve this later by making use of IPAM attributes
                # in libcalico to store the endpoint ID.  For now,
                # just loop through endpoints on this host.
                endpoints = self._datastore_client.get_endpoints(
                    hostname=HOSTNAME,
                    orchestrator_id=ORCHESTRATOR_ID)
                for ep in endpoints:
                    if IPNetwork(ip) in ep.ipv4_nets:
                        logger.warning("Deleting stale endpoint %s",
                                       ep.endpoint_id)
                        for profile_id in ep.profile_ids:
                            self._datastore_client.remove_profile(profile_id)
                        self._datastore_client.remove_endpoint(ep)
                        break

                # Assign the IP address to the new endpoint.  It shouldn't
                # be assigned, since we just unassigned it.
                logger.warning("Retry Docker assigned IP")
                _assign(ip)
        return ip

    def _container_remove(self):
        """
        Remove the indicated container on this host from Calico networking
        """
        # Find the endpoint ID. We need this to find any ACL rules
        try:
            endpoint = self._datastore_client.get_endpoint(
                hostname=HOSTNAME,
                orchestrator_id=ORCHESTRATOR_ID,
                workload_id=self.docker_id
            )
        except KeyError:
            logger.exception("Container %s doesn't contain any endpoints",
                             self.docker_id)
            sys.exit(1)

        # Remove any IP address assignments that this endpoint has
        ip_set = set()
        for net in endpoint.ipv4_nets | endpoint.ipv6_nets:
            ip_set.add(net.ip)
        logger.info("Removing IP addresses %s from endpoint %s",
                    ip_set, endpoint.name)
        self._datastore_client.release_ips(ip_set)

        # Remove the veth interface from endpoint
        logger.info("Removing veth interface from endpoint %s", endpoint.name)
        try:
            netns.remove_veth(endpoint.name)
        except CalledProcessError:
            # Best-effort: log and continue so datastore cleanup still runs.
            logger.exception("Could not remove veth interface from "
                             "endpoint %s", endpoint.name)

        # Remove the container/endpoint from the datastore.
        try:
            self._datastore_client.remove_workload(HOSTNAME,
                                                   ORCHESTRATOR_ID,
                                                   self.docker_id)
            logger.info("Successfully removed workload from datastore")
        except KeyError:
            logger.exception("Failed to remove workload.")

        logger.info("Removed Calico interface from %s", self.docker_id)

    def _validate_container_state(self, container_name):
        """Exit unless the container is running and not on host networking."""
        info = self._get_container_info(container_name)

        # Check the container is actually running.
        if not info["State"]["Running"]:
            logger.error("The container is not currently running.")
            sys.exit(1)

        # We can't set up Calico if the container shares the host namespace.
        if info["HostConfig"]["NetworkMode"] == "host":
            logger.error("Can't add the container to Calico because "
                         "it is running NetworkMode = host.")
            sys.exit(1)

    def _get_container_info(self, container_name):
        """Return Docker's inspect data for the named container, or exit."""
        try:
            info = self._docker_client.inspect_container(container_name)
        except APIError as e:
            if e.response.status_code == 404:
                logger.error("Container %s was not found. Exiting.",
                             container_name)
            else:
                logger.error(e.message)
            sys.exit(1)
        return info

    def _get_container_pid(self, container_name):
        """Return the PID of the container's init process."""
        return self._get_container_info(container_name)["State"]["Pid"]

    def _read_docker_ip(self):
        """Get the IP for the pod's infra container."""
        container_info = self._get_container_info(self.docker_id)
        ip = container_info["NetworkSettings"]["IPAddress"]
        logger.info("Docker-assigned IP is %s", ip)
        return IPAddress(ip)

    def _get_node_ip(self):
        """
        Determine the IP for the host node.
        """
        # Compile list of addresses on network, return the first entry.
        # Try IPv4 and IPv6.
        addrs = get_host_ips(version=4) or get_host_ips(version=6)

        try:
            addr = addrs[0]
            logger.info("Using IP Address %s", addr)
            return addr
        except IndexError:
            # If both get_host_ips return empty lists, print message and exit.
            logger.exception("No Valid IP Address Found for Host - cannot "
                             "configure networking for pod %s. "
                             "Exiting", self.pod_name)
            sys.exit(1)

    def _delete_docker_interface(self):
        """Delete the existing veth connecting to the docker bridge."""
        logger.info("Deleting docker interface eth0")

        # Get the PID of the container.
        pid = str(self._get_container_pid(self.docker_id))
        logger.info("Container %s running with PID %s", self.docker_id, pid)

        # Set up a link to the container's netns.
        logger.info("Linking to container's netns")
        logger.debug(check_output(["mkdir", "-p", "/var/run/netns"]))
        netns_file = "/var/run/netns/" + pid
        if not os.path.isfile(netns_file):
            logger.debug(check_output(["ln", "-s",
                                       "/proc/" + pid + "/ns/net",
                                       netns_file]))

        # Log our container's interfaces before making any changes.
        _log_interfaces(pid)

        # Reach into the netns and delete the docker-allocated interface.
        logger.debug(check_output(["ip", "netns", "exec", pid,
                                   "ip", "link", "del", "eth0"]))

        # Log our container's interfaces after making our changes.
        _log_interfaces(pid)

        # Clean up after ourselves (don't want to leak netns files)
        logger.debug(check_output(["rm", netns_file]))

    def _get_pod_ports(self, pod):
        """
        Get the list of ports on containers in the Pod.

        :return list ports: the Kubernetes ContainerPort objects for the pod.
        """
        ports = []
        for container in pod["spec"]["containers"]:
            try:
                more_ports = container["ports"]
                logger.info("Adding ports %s", more_ports)
                ports.extend(more_ports)
            except KeyError:
                # Containers with no declared ports are simply skipped.
                pass
        return ports

    def _get_pod_config(self):
        """Return this pod's resource from the Kube API server.

        :raises KeyError: if no pod matches self.namespace / self.pod_name.
        """
        pods = self._get_api_path("pods")
        logger.debug("Got pods %s", pods)

        for pod in pods:
            logger.debug("Processing pod %s", pod)
            ns = pod["metadata"]["namespace"].replace("/", "_")
            name = pod["metadata"]["name"].replace("/", "_")
            if ns == self.namespace and name == self.pod_name:
                this_pod = pod
                break
        else:
            raise KeyError("Pod not found: " + self.pod_name)
        logger.debug("Got pod data %s", this_pod)
        return this_pod

    def _get_api_path(self, path):
        """Get a resource from the API specified API path.

        e.g.
        _get_api_path('pods')

        :param path: The relative path to an API endpoint.
        :return: A list of JSON API objects
        :rtype list
        """
        logger.info("Getting API Resource: %s from KUBE_API_ROOT: %s",
                    path, KUBE_API_ROOT)
        session = requests.Session()
        if self.auth_token:
            session.headers.update(
                {"Authorization": "Bearer " + self.auth_token})
        # NOTE(review): TLS certificate verification is disabled here
        # (verify=False) - confirm this is acceptable for the deployment.
        response = session.get(KUBE_API_ROOT + path, verify=False)
        response_body = response.text

        # The response body contains some metadata, and the pods themselves
        # under the 'items' key.
        return json.loads(response_body)["items"]

    def _api_root_secure(self):
        """
        Checks whether the KUBE_API_ROOT is secure or insecure.
        If not an http or https address, exit.

        :return: Boolean: True if secure. False if insecure
        """
        if KUBE_API_ROOT[:5] == "https":
            return True
        elif KUBE_API_ROOT[:5] == "http:":
            return False
        else:
            logger.error("KUBE_API_ROOT is not set correctly (%s). "
                         "Please specify as http or https address. Exiting",
                         KUBE_API_ROOT)
            sys.exit(1)

    def _generate_rules(self, pod):
        """
        Generate Rules takes human readable policy strings in annotations
        and returns a libcalico Rules object.

        :return tuple of inbound_rules, outbound_rules
        """
        # Create allow and per-namespace rules for later use.
        allow = Rule(action="allow")
        allow_ns = Rule(action="allow",
                        src_tag=self._get_namespace_tag(pod))

        annotations = self._get_metadata(pod, "annotations")
        logger.debug("Found annotations: %s", annotations)

        if self.namespace == "kube-system":
            # Pods in the kube-system namespace must be accessible by all
            # other pods for services like DNS to work.
            logger.info("Pod is in kube-system namespace - allow all")
            inbound_rules = [allow]
            outbound_rules = [allow]
        elif annotations and POLICY_ANNOTATION_KEY in annotations:
            # If policy annotations are defined, use them to generate rules.
            logger.info("Generating advanced security policy "
                        "from annotations")
            rules = annotations[POLICY_ANNOTATION_KEY]
            inbound_rules = []
            outbound_rules = [allow]
            for rule in rules.split(";"):
                parsed_rule = self.policy_parser.parse_line(rule)
                inbound_rules.append(parsed_rule)
        else:
            # If no annotations are defined, just use the configured
            # default policy.
            if DEFAULT_POLICY == "ns_isolation":
                # Isolate on namespace boundaries by default.
                logger.debug("Default policy is namespace isolation")
                inbound_rules = [allow_ns]
                outbound_rules = [allow]
            else:
                # Allow all traffic by default.
                logger.debug("Default policy is allow all")
                inbound_rules = [allow]
                outbound_rules = [allow]

        return Rules(id=self.profile_name,
                     inbound_rules=inbound_rules,
                     outbound_rules=outbound_rules)

    def _apply_tags(self, pod):
        """
        In addition to Calico's default pod_name tag, add tags generated
        from Kubernetes labels and the pod's namespace.

        Ex. labels: {key: value} -> tags += namespace_key_value
        Ex. namespace: default   -> tags += namespace_default

        :param pod: The config dictionary for the pod being created.
        :type pod: dict
        :return:
        """
        logger.info("Applying tags")

        try:
            profile = self._datastore_client.get_profile(self.profile_name)
        except KeyError:
            logger.error("Could not apply tags. Profile %s could not be "
                         "found. Exiting", self.profile_name)
            sys.exit(1)

        # Grab namespace and create a tag if it exists.
        ns_tag = self._get_namespace_tag(pod)
        logger.info("Adding tag %s", ns_tag)
        profile.tags.add(ns_tag)

        # Create tags from labels.  items() behaves the same on Python 2
        # and 3 (was the Python-2-only iteritems()).
        labels = self._get_metadata(pod, "labels")
        if labels:
            for k, v in labels.items():
                tag = self._label_to_tag(k, v)
                logger.info("Adding tag %s", tag)
                profile.tags.add(tag)
        else:
            logger.warning("No labels found in pod %s", pod)

        self._datastore_client.profile_update_tags(profile)
        logger.info("Finished applying tags.")

    def _get_metadata(self, pod, key):
        """
        Return Metadata[key] Object given Pod
        Returns None if no key-value exists
        """
        try:
            val = pod["metadata"][key]
        except (KeyError, TypeError):
            logger.warning("No %s found in pod %s", key, pod)
            return None
        logger.debug("Pod %s: %s", key, val)
        return val

    def _escape_chars(self, unescaped_string):
        """
        Calico can only handle 3 special chars, '_.-'
        This function uses regex sub to replace SCs with '_'
        """
        # Character to replace symbols
        swap_char = "_"

        # If swap_char is in string, double it.
        unescaped_string = re.sub(swap_char,
                                  "%s%s" % (swap_char, swap_char),
                                  unescaped_string)

        # Substitute all invalid chars.  Raw string avoids the invalid
        # escape sequences the original non-raw literal produced.
        return re.sub(r"[^a-zA-Z0-9\.\_\-]", swap_char, unescaped_string)

    def _get_namespace_tag(self, pod):
        """
        Pull metadata for namespace and return a generated namespace tag.
        """
        assert self.namespace
        ns_tag = self._escape_chars("%s=%s" % ("namespace", self.namespace))
        return ns_tag

    def _label_to_tag(self, label_key, label_value):
        """
        Labels are key-value pairs, tags are single strings. This function
        handles that translation.

        1) Concatenate key and value with '='
        2) Prepend a pod's namespace followed by '/' if available
        3) Escape the generated string so it is Calico compatible

        :param label_key: key to label
        :param label_value: value to given key for a label
        :return single string tag
        :rtype string
        """
        tag = "%s=%s" % (label_key, label_value)
        tag = "%s/%s" % (self.namespace, tag)
        tag = self._escape_chars(tag)
        return tag
class IpamPlugin(object):
    """CNI IPAM plugin: assigns and releases pod IPs via Calico IPAM."""

    def __init__(self, environment, ipam_config):
        # Command indicating which action to take - one of "ADD" or "DEL".
        # Set by _parse_environment() below.
        self.command = None

        # Identifier for the container for which we are performing IPAM.
        # Set by _parse_environment() below.
        self.container_id = None

        # Access to the datastore client.  Relies on ETCD_AUTHORITY
        # environment variable being set by the calling plugin.
        self.datastore_client = IPAMClient()

        # Whether we should auto assign an IPv4 address - defaults to True.
        self.assign_ipv4 = ipam_config.get(ASSIGN_IPV4_KEY, "true") == "true"

        # Whether we should auto assign an IPv6 address - defaults to False.
        self.assign_ipv6 = ipam_config.get(ASSIGN_IPV6_KEY, "false") == "true"

        cni_args = parse_cni_args(environment.get(CNI_ARGS_ENV, ""))

        # Only populated when running under Kubernetes.
        self.k8s_pod_name = cni_args.get(K8S_POD_NAME)
        self.k8s_namespace = cni_args.get(K8S_POD_NAMESPACE)

        # Only populated if the user requests a specific IP address.
        self.ip = cni_args.get(CNI_ARGS_IP)

        # Validate the given environment and set fields.
        self._parse_environment(environment)

        # Identifier for the workload.  In Kubernetes, this is the pod's
        # namespace and name.  Otherwise, this is the container ID.
        if self.k8s_namespace and self.k8s_pod_name:
            self.workload_id = "%s.%s" % (self.k8s_namespace,
                                          self.k8s_pod_name)
        else:
            self.workload_id = self.container_id

    def execute(self):
        """
        Assigns or releases IP addresses for the specified workload.

        May raise CniError.

        :return: JSON-encoded CNI ipam response string for ADD,
            None for DEL.
        """
        if self.command == "ADD":
            if self.ip:
                # The user has specifically requested an IP (v4) address.
                _log.info("User assigned address: %s for workload: %s",
                          self.ip, self.workload_id)
                ipv4 = self._assign_existing_address()
                ipv6 = None
            else:
                # Auto-assign an IP address for this workload.
                _log.info("Assigning address to workload: %s",
                          self.workload_id)
                ipv4, ipv6 = self._assign_address(handle_id=self.workload_id)

            # Build response dictionary.
            response = {}
            if ipv4:
                response["ip4"] = {"ip": str(ipv4.cidr)}
            if ipv6:
                response["ip6"] = {"ip": str(ipv6.cidr)}

            # Output the response and exit successfully.
            _log.debug("Returning response: %s", response)
            return json.dumps(response)
        else:
            # Release IPs using the workload_id as the handle.
            _log.info("Releasing addresses on workload: %s",
                      self.workload_id)
            try:
                self.datastore_client.release_ip_by_handle(
                    handle_id=self.workload_id
                )
            except KeyError:
                _log.warning("No IPs assigned to workload: %s",
                             self.workload_id)
                try:
                    # Try to release using the container ID.  Earlier
                    # versions of IPAM used the container ID alone
                    # as the handle.  This allows us to be back-compatible.
                    _log.debug("Try release using container ID")
                    self.datastore_client.release_ip_by_handle(
                        handle_id=self.container_id
                    )
                except KeyError:
                    _log.debug("No IPs assigned to container: %s",
                               self.container_id)

    def _assign_address(self, handle_id):
        """
        Automatically assigns an IPv4 and an IPv6 address.

        :return: A tuple of (IPv4, IPv6) address assigned.
        """
        ipv4 = None
        ipv6 = None

        # Determine which addresses to assign.
        num_v4 = 1 if self.assign_ipv4 else 0
        num_v6 = 1 if self.assign_ipv6 else 0
        _log.info("Assigning %s IPv4 and %s IPv6 addresses",
                  num_v4, num_v6)
        try:
            ipv4_addrs, ipv6_addrs = self.datastore_client.auto_assign_ips(
                num_v4=num_v4,
                num_v6=num_v6,
                handle_id=handle_id,
                attributes=None,
            )
            _log.debug("Allocated ip4s: %s, ip6s: %s",
                       ipv4_addrs, ipv6_addrs)
        except RuntimeError as e:
            _log.error("Cannot auto assign IPAddress: %s", e.message)
            raise CniError(ERR_CODE_GENERIC,
                           msg="Failed to assign IP address",
                           details=e.message)
        else:
            # An empty result list means the requested pool is exhausted.
            if num_v4:
                try:
                    ipv4 = IPNetwork(ipv4_addrs[0])
                except IndexError:
                    _log.error("No IPv4 address returned, exiting")
                    raise CniError(
                        ERR_CODE_GENERIC,
                        msg="No IPv4 addresses available in pool")
            if num_v6:
                try:
                    ipv6 = IPNetwork(ipv6_addrs[0])
                except IndexError:
                    _log.error("No IPv6 address returned, exiting")
                    raise CniError(
                        ERR_CODE_GENERIC,
                        msg="No IPv6 addresses available in pool")

        _log.info("Assigned IPv4: %s, IPv6: %s", ipv4, ipv6)
        return ipv4, ipv6

    def _assign_existing_address(self):
        """
        Assign an address chosen by the user.  IPv4 only.

        :return: The IPNetwork if successfully assigned.
        """
        try:
            address = IPAddress(self.ip, version=4)
        except AddrFormatError as e:
            _log.error("User requested IP: %s is invalid", self.ip)
            raise CniError(ERR_CODE_GENERIC,
                           msg="Failed to assign IP address",
                           details=e.message)

        try:
            self.datastore_client.assign_ip(address, self.workload_id, None)
        except AlreadyAssignedError as e:
            _log.error("User requested IP: %s is already assigned", self.ip)
            raise CniError(ERR_CODE_GENERIC,
                           msg="Failed to assign IP address",
                           details=e.message)
        except RuntimeError as e:
            _log.error("Cannot assign IPAddress: %s", e.message)
            raise CniError(ERR_CODE_GENERIC,
                           msg="Failed to assign IP address",
                           details=e.message)
        return IPNetwork(address)

    def _parse_environment(self, env):
        """
        Validates the plugin's environment and extracts the required values.
        """
        _log.debug('Environment: %s', json.dumps(env, indent=2))

        # Check the given environment contains the required fields.
        try:
            self.command = env[CNI_COMMAND_ENV]
        except KeyError:
            raise CniError(ERR_CODE_GENERIC,
                           msg="Invalid arguments",
                           details="CNI_COMMAND not found in environment")
        else:
            # If the command is present, make sure it is valid.
            if self.command not in [CNI_CMD_ADD, CNI_CMD_DELETE]:
                raise CniError(
                    ERR_CODE_GENERIC,
                    msg="Invalid arguments",
                    details="Invalid command '%s'" % self.command)
        try:
            self.container_id = env[CNI_CONTAINERID_ENV]
        except KeyError:
            raise CniError(
                ERR_CODE_GENERIC,
                msg="Invalid arguments",
                details="CNI_CONTAINERID not found in environment")
class NetworkPlugin(object): def __init__(self, config): self.pod_name = None self.profile_name = None self.namespace = None self.docker_id = None self.policy_parser = None # Get configuration from the given dictionary. logger.debug("Plugin running with config: %s", config) self.auth_token = config[KUBE_AUTH_TOKEN_VAR] self.api_root = config[KUBE_API_ROOT_VAR] self.calico_ipam = config[CALICO_IPAM_VAR].lower() self.default_policy = config[DEFAULT_POLICY_VAR].lower() self._datastore_client = IPAMClient() self._docker_client = Client( version=DOCKER_VERSION, base_url=os.getenv("DOCKER_HOST", "unix://var/run/docker.sock")) def create(self, namespace, pod_name, docker_id): """"Create a pod.""" self.pod_name = pod_name self.docker_id = docker_id self.namespace = namespace self.policy_parser = PolicyParser(self.namespace) self.profile_name = "%s_%s_%s" % (self.namespace, self.pod_name, str(self.docker_id)[:12]) logger.info('Configuring pod %s/%s (container_id %s)', self.namespace, self.pod_name, self.docker_id) try: endpoint = self._configure_interface() logger.info("Created Calico endpoint: %s", endpoint.endpoint_id) self._configure_profile(endpoint) except CalledProcessError as e: logger.error('Error code %d creating pod networking: %s\n%s', e.returncode, e.output, e) sys.exit(1) logger.info("Successfully configured networking for pod %s/%s", self.namespace, self.pod_name) def delete(self, namespace, pod_name, docker_id): """Cleanup after a pod.""" self.pod_name = pod_name self.docker_id = docker_id self.namespace = namespace self.profile_name = "%s_%s_%s" % (self.namespace, self.pod_name, str(self.docker_id)[:12]) logger.info('Removing networking from pod %s/%s (container id %s)', self.namespace, self.pod_name, self.docker_id) # Remove the profile for the workload. 
self._container_remove() # Delete profile try: logger.info("Deleting Calico profile: %s", self.profile_name) self._datastore_client.remove_profile(self.profile_name) except: logger.warning("Cannot remove profile %s; Profile cannot " "be found.", self.profile_name) logger.info("Successfully removed networking for pod %s/%s", self.namespace, self.pod_name) def status(self, namespace, pod_name, docker_id): self.namespace = namespace self.pod_name = pod_name self.docker_id = docker_id if self._uses_host_networking(self.docker_id): # We don't perform networking / assign IP addresses for pods running # in the host namespace, and so we can't return a status update # for them. logger.debug("Ignoring status for pod %s/%s in host namespace", self.namespace, self.pod_name) sys.exit(0) # Find the endpoint try: endpoint = self._datastore_client.get_endpoint( hostname=HOSTNAME, orchestrator_id=ORCHESTRATOR_ID, workload_id=self.docker_id ) except KeyError: logger.error("Error in status: No endpoint for pod: %s/%s", self.namespace, self.pod_name) sys.exit(1) # Retrieve IPAddress from the attached IPNetworks on the endpoint # Since Kubernetes only supports ipv4, we'll only check for ipv4 nets if not endpoint.ipv4_nets: logger.error("Error in status: No IPs attached to pod %s/%s", self.namespace, self.pod_name) sys.exit(1) else: ip_net = list(endpoint.ipv4_nets) if len(ip_net) is not 1: logger.warning("There is more than one IPNetwork attached " "to pod %s/%s", self.namespace, self.pod_name) ip = ip_net[0].ip logger.debug("Retrieved pod IP Address: %s", ip) json_dict = { "apiVersion": "v1beta1", "kind": "PodNetworkStatus", "ip": str(ip) } logger.debug("Writing status to stdout: \n%s", json.dumps(json_dict)) print(json.dumps(json_dict)) def _configure_profile(self, endpoint): """ Configure the calico profile on the given endpoint. 
""" pod = self._get_pod_config() logger.info('Configuring Pod Profile: %s', self.profile_name) if self._datastore_client.profile_exists(self.profile_name): logger.error("Profile with name %s already exists, exiting.", self.profile_name) sys.exit(1) else: rules = self._generate_rules(pod) self._datastore_client.create_profile(self.profile_name, rules) # Add tags to the profile. self._apply_tags(pod) # Set the profile for the workload. logger.info('Setting profile %s on endpoint %s', self.profile_name, endpoint.endpoint_id) self._datastore_client.set_profiles_on_endpoint( [self.profile_name], endpoint_id=endpoint.endpoint_id ) logger.debug('Finished configuring profile.') def _configure_interface(self): """Configure the Calico interface for a pod. This involves the following steps: 1) Determine the IP that docker assigned to the interface inside the container 2) Delete the docker-assigned veth pair that's attached to the docker bridge 3) Create a new calico veth pair, using the docker-assigned IP for the end in the container's namespace 4) Assign the node's IP to the host end of the veth pair (required for compatibility with kube-proxy REDIRECT iptables rules). """ # Set up parameters container_pid = self._get_container_pid(self.docker_id) interface = 'eth0' self._delete_docker_interface() logger.info('Configuring Calico network interface') ep = self._container_add(container_pid, interface) # Log our container's interfaces after adding the new interface. _log_interfaces(container_pid) interface_name = generate_cali_interface_name(IF_PREFIX, ep.endpoint_id) node_ip = self._get_node_ip() logger.debug('Adding node IP %s to host-side veth %s', node_ip, interface_name) # This is slightly tricky. Since the kube-proxy sometimes # programs REDIRECT iptables rules, we MUST have an IP on the host end # of the caliXXX veth pairs. This is because the REDIRECT rule # rewrites the destination ip/port of traffic from a pod to a service # VIP. 
The destination port is rewriten to an arbitrary high-numbered # port, and the destination IP is rewritten to one of the IPs allocated # to the interface. This fails if the interface doesn't have an IP, # so we allocate an IP which is already allocated to the node. We set # the subnet to /32 so that the routing table is not affected; # no traffic for the node_ip's subnet will use the /32 route. check_call(['ip', 'addr', 'add', node_ip + '/32', 'dev', interface_name]) logger.info('Finished configuring network interface') return ep def _container_add(self, pid, interface): """ Add a container (on this host) to Calico networking with the given IP. """ # Check if the container already exists. If it does, exit. try: _ = self._datastore_client.get_endpoint( hostname=HOSTNAME, orchestrator_id=ORCHESTRATOR_ID, workload_id=self.docker_id ) except KeyError: # Calico doesn't know about this container. Continue. pass else: logger.error("This container has already been configured " "with Calico Networking.") sys.exit(1) # Obtain information from Docker Client and validate container state self._validate_container_state(self.docker_id) ip_list = [self._assign_container_ip()] # Create Endpoint object try: logger.info("Creating endpoint with IPs %s", ip_list) ep = self._datastore_client.create_endpoint(HOSTNAME, ORCHESTRATOR_ID, self.docker_id, ip_list) except (AddrFormatError, KeyError): logger.exception("Failed to create endpoint with IPs %s. " "Unassigning IP address, then exiting.", ip_list) self._datastore_client.release_ips(set(ip_list)) sys.exit(1) # Create the veth, move into the container namespace, add the IP and # set up the default routes. logger.debug("Creating the veth with namespace pid %s on interface " "name %s", pid, interface) ep.mac = ep.provision_veth(netns.PidNamespace(pid), interface) logger.debug("Setting mac address %s to endpoint %s", ep.mac, ep.name) self._datastore_client.set_endpoint(ep) # Let the caller know what endpoint was created. 
return ep def _assign_container_ip(self): """ Assign IPAddress either with the assigned docker IPAddress or utilize calico IPAM. True indicates to utilize Calico's auto_assign IPAM policy. False indicate to utilize the docker assigned IPAddress :return IPAddress which has been assigned """ def _assign(ip): """ Local helper function for assigning an IP and checking for errors. Only used when operating with CALICO_IPAM=false """ try: logger.info("Attempting to assign IP %s", ip) self._datastore_client.assign_ip(ip, str(self.docker_id), None) except (ValueError, RuntimeError): logger.exception("Failed to assign IPAddress %s", ip) sys.exit(1) if self.calico_ipam == 'true': logger.info("Using Calico IPAM") try: ipv4s, ipv6s = self._datastore_client.auto_assign_ips(1, 0, self.docker_id, None) ip = ipv4s[0] logger.debug("IPAM assigned ipv4=%s; ipv6= %s", ipv4s, ipv6s) except RuntimeError as err: logger.error("Cannot auto assign IPAddress: %s", err.message) sys.exit(1) else: logger.info("Using docker assigned IP address") ip = self._read_docker_ip() try: # Try to assign the address using the _assign helper function. _assign(ip) except AlreadyAssignedError: # If the Docker IP is already assigned, it is most likely that # an endpoint has been removed under our feet. When using # Docker IPAM, treat Docker as the source of # truth for IP addresses. logger.warning("Docker IP is already assigned, finding " "stale endpoint") self._datastore_client.release_ips(set([ip])) # Clean up whatever existing endpoint has this IP address. # We can improve this later by making use of IPAM attributes # in libcalico to store the endpoint ID. For now, # just loop through endpoints on this host. 
endpoints = self._datastore_client.get_endpoints( hostname=HOSTNAME, orchestrator_id=ORCHESTRATOR_ID) for ep in endpoints: if IPNetwork(ip) in ep.ipv4_nets: logger.warning("Deleting stale endpoint %s", ep.endpoint_id) for profile_id in ep.profile_ids: self._datastore_client.remove_profile(profile_id) self._datastore_client.remove_endpoint(ep) break # Assign the IP address to the new endpoint. It shouldn't # be assigned, since we just unassigned it. logger.warning("Retry Docker assigned IP") _assign(ip) return ip def _container_remove(self): """ Remove the indicated container on this host from Calico networking """ # Find the endpoint ID. We need this to find any ACL rules try: endpoint = self._datastore_client.get_endpoint( hostname=HOSTNAME, orchestrator_id=ORCHESTRATOR_ID, workload_id=self.docker_id ) except KeyError: logger.exception("Container %s doesn't contain any endpoints", self.docker_id) sys.exit(1) # Remove any IP address assignments that this endpoint has ip_set = set() for net in endpoint.ipv4_nets | endpoint.ipv6_nets: ip_set.add(net.ip) logger.info("Removing IP addresses %s from endpoint %s", ip_set, endpoint.name) self._datastore_client.release_ips(ip_set) # Remove the veth interface from endpoint logger.info("Removing veth interfaces") try: netns.remove_veth(endpoint.name) except CalledProcessError: logger.exception("Could not remove veth interface from " "endpoint %s", endpoint.name) # Remove the container/endpoint from the datastore. try: self._datastore_client.remove_workload( HOSTNAME, ORCHESTRATOR_ID, self.docker_id) except KeyError: logger.exception("Failed to remove workload.") logger.info("Removed Calico endpoint %s", endpoint.endpoint_id) def _validate_container_state(self, container_name): info = self._get_container_info(container_name) # Check the container is actually running. 
if not info["State"]["Running"]: logger.error("The container is not currently running.") sys.exit(1) # We can't set up Calico if the container shares the host namespace. if info["HostConfig"]["NetworkMode"] == "host": logger.warning("Calico cannot network container because " "it is running NetworkMode = host.") sys.exit(0) def _uses_host_networking(self, container_name): """ Returns true if the given container is running in the host network namespace. """ info = self._get_container_info(container_name) return info["HostConfig"]["NetworkMode"] == "host" def _get_container_info(self, container_name): try: info = self._docker_client.inspect_container(container_name) except APIError as e: if e.response.status_code == 404: logger.error("Container %s was not found. Exiting.", container_name) else: logger.error(e.message) sys.exit(1) return info def _get_container_pid(self, container_name): return self._get_container_info(container_name)["State"]["Pid"] def _read_docker_ip(self): """Get the IP for the pod's infra container.""" container_info = self._get_container_info(self.docker_id) ip = container_info["NetworkSettings"]["IPAddress"] logger.info('Docker-assigned IP is %s', ip) return IPAddress(ip) def _get_node_ip(self): """ Determine the IP for the host node. """ # Compile list of addresses on network, return the first entry. # Try IPv4 and IPv6. addrs = get_host_ips(version=4) or get_host_ips(version=6) try: addr = addrs[0] logger.debug("Node's IP address: %s", addr) return addr except IndexError: # If both get_host_ips return empty lists, print message and exit. logger.exception('No Valid IP Address Found for Host - cannot ' 'configure networking for pod %s. ' 'Exiting', self.pod_name) sys.exit(1) def _delete_docker_interface(self): """Delete the existing veth connecting to the docker bridge.""" logger.debug('Deleting docker interface eth0') # Get the PID of the container. 
pid = str(self._get_container_pid(self.docker_id)) logger.debug('Container %s running with PID %s', self.docker_id, pid) # Set up a link to the container's netns. logger.debug("Linking to container's netns") logger.debug(check_output(['mkdir', '-p', '/var/run/netns'])) netns_file = '/var/run/netns/' + pid if not os.path.isfile(netns_file): logger.debug(check_output(['ln', '-s', '/proc/' + pid + '/ns/net', netns_file])) # Log our container's interfaces before making any changes. _log_interfaces(pid) # Reach into the netns and delete the docker-allocated interface. logger.debug(check_output(['ip', 'netns', 'exec', pid, 'ip', 'link', 'del', 'eth0'])) # Log our container's interfaces after making our changes. _log_interfaces(pid) # Clean up after ourselves (don't want to leak netns files) logger.debug(check_output(['rm', netns_file])) def _get_pod_ports(self, pod): """ Get the list of ports on containers in the Pod. :return list ports: the Kubernetes ContainerPort objects for the pod. """ ports = [] for container in pod['spec']['containers']: try: more_ports = container['ports'] logger.info('Adding ports %s', more_ports) ports.extend(more_ports) except KeyError: pass return ports def _get_pod_config(self): """Get the pod resource from the API. 
API Path depends on the api_root, namespace, and pod_name :return: JSON object containing the pod spec """ with requests.Session() as session: if self._api_root_secure() and self.auth_token: logger.debug('Updating header with Token %s', self.auth_token) session.headers.update({'Authorization': 'Bearer ' + self.auth_token}) path = os.path.join(self.api_root, 'namespaces/%s/pods/%s' % (self.namespace, self.pod_name)) try: logger.debug('Querying API for Pod: %s', path) response = session.get(path, verify=False) except BaseException: logger.exception("Exception hitting Kubernetes API") sys.exit(1) else: if response.status_code != 200: logger.error("Response from API returned %s Error:\n%s", response.status_code, response.text) sys.exit(response.status_code) logger.debug("API Response: %s", response.text) pod = json.loads(response.text) return pod def _api_root_secure(self): """ Checks whether the Kubernetes api root is secure or insecure. If not an http or https address, exit. :return: Boolean: True if secure. False if insecure """ if (self.api_root[:5] == 'https'): logger.debug('Using Secure API access.') return True elif (self.api_root[:5] == 'http:'): logger.debug('Using Insecure API access.') return False else: logger.error('%s is not set correctly (%s). ' 'Please specify as http or https address. Exiting', KUBE_API_ROOT_VAR, self.api_root) sys.exit(1) def _generate_rules(self, pod): """ Generate Rules takes human readable policy strings in annotations and returns a libcalico Rules object. :return tuple of inbound_rules, outbound_rules """ # Create allow and per-namespace rules for later use. allow = Rule(action="allow") allow_ns = Rule(action="allow", src_tag=self._get_namespace_tag(pod)) annotations = self._get_metadata(pod, "annotations") logger.debug("Found annotations: %s", annotations) if self.namespace == "kube-system" : # Pods in the kube-system namespace must be accessible by all # other pods for services like DNS to work. 
logger.info("Pod is in kube-system namespace - allow all") inbound_rules = [allow] outbound_rules = [allow] elif annotations and POLICY_ANNOTATION_KEY in annotations: # If policy annotations are defined, use them to generate rules. logger.info("Generating advanced security policy from annotations") rules = annotations[POLICY_ANNOTATION_KEY] inbound_rules = [] outbound_rules = [allow] for rule in rules.split(";"): parsed_rule = self.policy_parser.parse_line(rule) inbound_rules.append(parsed_rule) else: # If not annotations are defined, just use the configured # default policy. if self.default_policy == 'ns_isolation': # Isolate on namespace boundaries by default. logger.debug("Default policy is namespace isolation") inbound_rules = [allow_ns] outbound_rules = [allow] else: # Allow all traffic by default. logger.debug("Default policy is allow all") inbound_rules = [allow] outbound_rules = [allow] return Rules(id=self.profile_name, inbound_rules=inbound_rules, outbound_rules=outbound_rules) def _apply_tags(self, pod): """ In addition to Calico's default pod_name tag, Add tags generated from Kubernetes Labels and Namespace Ex. labels: {key:value} -> tags+= namespace_key_value Add tag for namespace Ex. namespace: default -> tags+= namespace_default :param self.profile_name: The name of the Calico profile. :type self.profile_name: string :param pod: The config dictionary for the pod being created. :type pod: dict :return: """ logger.debug('Applying tags') try: profile = self._datastore_client.get_profile(self.profile_name) except KeyError: logger.error('Could not apply tags. Profile %s could not be ' 'found. Exiting', self.profile_name) sys.exit(1) # Grab namespace and create a tag if it exists. 
ns_tag = self._get_namespace_tag(pod) logger.debug('Adding tag %s', ns_tag) profile.tags.add(ns_tag) # Create tags from labels labels = self._get_metadata(pod, 'labels') if labels: for k, v in labels.iteritems(): tag = self._label_to_tag(k, v) logger.debug('Adding tag %s', tag) profile.tags.add(tag) self._datastore_client.profile_update_tags(profile) logger.debug('Finished applying tags.') def _get_metadata(self, pod, key): """ Return Metadata[key] Object given Pod Returns None if no key-value exists """ try: val = pod['metadata'][key] except (KeyError, TypeError): logger.debug('No %s found in pod %s', key, pod) return None logger.debug("Pod %s: %s", key, val) return val def _escape_chars(self, unescaped_string): """ Calico can only handle 3 special chars, '_.-' This function uses regex sub to replace SCs with '_' """ # Character to replace symbols swap_char = '_' # If swap_char is in string, double it. unescaped_string = re.sub(swap_char, "%s%s" % (swap_char, swap_char), unescaped_string) # Substitute all invalid chars. return re.sub('[^a-zA-Z0-9\.\_\-]', swap_char, unescaped_string) def _get_namespace_tag(self, pod): """ Pull metadata for namespace and return it and a generated NS tag """ assert self.namespace ns_tag = self._escape_chars('%s=%s' % ('namespace', self.namespace)) return ns_tag def _label_to_tag(self, label_key, label_value): """ Labels are key-value pairs, tags are single strings. This function handles that translation. 1) Concatenate key and value with '=' 2) Prepend a pod's namespace followed by '/' if available 3) Escape the generated string so it is Calico compatible :param label_key: key to label :param label_value: value to given key for a label :param namespace: Namespace string, input None if not available :param types: (self, string, string, string) :return single string tag :rtype string """ tag = '%s=%s' % (label_key, label_value) tag = '%s/%s' % (self.namespace, tag) tag = self._escape_chars(tag) return tag
class IpamPlugin(object):
    """
    CNI IPAM plugin: assigns addresses on "ADD" and releases them on "DEL",
    keyed by a workload identifier (pod namespace/name under Kubernetes,
    otherwise the container ID).
    """
    def __init__(self, environment, ipam_config):
        self.command = None
        """
        Command indicating which action to take - one of "ADD" or "DEL".
        """

        self.container_id = None
        """
        Identifier for the container for which we are performing IPAM.
        """

        self.datastore_client = IPAMClient()
        """
        Access to the datastore client.  Relies on ETCD_AUTHORITY environment
        variable being set by the calling plugin.
        """

        self.assign_ipv4 = ipam_config.get(ASSIGN_IPV4_KEY, "true") == "true"
        """
        Whether we should auto assign an IPv4 address - defaults to True.
        """

        self.assign_ipv6 = ipam_config.get(ASSIGN_IPV6_KEY, "false") == "true"
        """
        Whether we should auto assign an IPv6 address - defaults to False.
        """

        # Pod name / namespace / requested IP all arrive packed into the
        # CNI_ARGS environment variable.
        cni_args = parse_cni_args(environment.get(CNI_ARGS_ENV, ""))
        self.k8s_pod_name = cni_args.get(K8S_POD_NAME)
        self.k8s_namespace = cni_args.get(K8S_POD_NAMESPACE)
        """
        Only populated when running under Kubernetes.
        """

        """
        Only populated if the user requests a specific IP address.
        """
        self.ip = cni_args.get(CNI_ARGS_IP)

        # Validate the given environment and set fields.
        # Note: must run before workload_id is chosen, since it populates
        # self.container_id.
        self._parse_environment(environment)

        if self.k8s_namespace and self.k8s_pod_name:
            self.workload_id = "%s.%s" % (self.k8s_namespace,
                                          self.k8s_pod_name)
        else:
            self.workload_id = self.container_id
        """
        Identifier for the workload.  In Kubernetes, this is the
        pod's namespace and name.  Otherwise, this is the container ID.
        """

    def execute(self):
        """
        Assigns or releases IP addresses for the specified workload.

        May raise CniError.

        :return: CNI ipam dictionary for ADD, None for DEL.
        """
        if self.command == "ADD":
            if self.ip:
                # The user has specifically requested an IP (v4) address.
                _log.info("User assigned address: %s for workload: %s",
                          self.ip, self.workload_id)
                ipv4 = self._assign_existing_address()
                ipv6 = None
            else:
                # Auto-assign an IP address for this workload.
                _log.info("Assigning address to workload: %s",
                          self.workload_id)
                ipv4, ipv6 = self._assign_address(handle_id=self.workload_id)

            # Build response dictionary.
            response = {}
            if ipv4:
                response["ip4"] = {"ip": str(ipv4.cidr)}
            if ipv6:
                response["ip6"] = {"ip": str(ipv6.cidr)}

            # Output the response and exit successfully.
            _log.debug("Returning response: %s", response)
            return json.dumps(response)
        else:
            # Release IPs using the workload_id as the handle.
            _log.info("Releasing addresses on workload: %s",
                      self.workload_id)
            try:
                self.datastore_client.release_ip_by_handle(
                    handle_id=self.workload_id)
            except KeyError:
                _log.warning("No IPs assigned to workload: %s",
                             self.workload_id)
                try:
                    # Try to release using the container ID.  Earlier
                    # versions of IPAM used the container ID alone
                    # as the handle.  This allows us to be back-compatible.
                    _log.debug("Try release using container ID")
                    self.datastore_client.release_ip_by_handle(
                        handle_id=self.container_id)
                except KeyError:
                    # Nothing to release under either handle - not an error.
                    _log.debug("No IPs assigned to container: %s",
                               self.container_id)

    def _assign_address(self, handle_id):
        """
        Automatically assigns an IPv4 and an IPv6 address.

        :param handle_id: Handle under which the addresses are recorded
            (used later to release them).
        :raises CniError: if assignment fails or a requested pool is empty.
        :return: A tuple of (IPv4, IPv6) address assigned.
        """
        ipv4 = None
        ipv6 = None

        # Determine which addresses to assign.
        num_v4 = 1 if self.assign_ipv4 else 0
        num_v6 = 1 if self.assign_ipv6 else 0
        _log.info("Assigning %s IPv4 and %s IPv6 addresses",
                  num_v4, num_v6)
        try:
            ipv4_addrs, ipv6_addrs = self.datastore_client.auto_assign_ips(
                num_v4=num_v4, num_v6=num_v6,
                handle_id=handle_id, attributes=None,
            )
            _log.debug("Allocated ip4s: %s, ip6s: %s", ipv4_addrs, ipv6_addrs)
        except RuntimeError as e:
            _log.error("Cannot auto assign IPAddress: %s", e.message)
            raise CniError(ERR_CODE_GENERIC,
                           msg="Failed to assign IP address",
                           details=e.message)
        else:
            # An empty result list means the requested pool is exhausted.
            if num_v4:
                try:
                    ipv4 = IPNetwork(ipv4_addrs[0])
                except IndexError:
                    _log.error("No IPv4 address returned, exiting")
                    raise CniError(ERR_CODE_GENERIC,
                                   msg="No IPv4 addresses available in pool")
            if num_v6:
                try:
                    ipv6 = IPNetwork(ipv6_addrs[0])
                except IndexError:
                    _log.error("No IPv6 address returned, exiting")
                    raise CniError(ERR_CODE_GENERIC,
                                   msg="No IPv6 addresses available in pool")
            _log.info("Assigned IPv4: %s, IPv6: %s", ipv4, ipv6)
            return ipv4, ipv6

    def _assign_existing_address(self):
        """
        Assign an address chosen by the user.  IPv4 only.

        :raises CniError: if the address is malformed, already assigned,
            or cannot be assigned.
        :return: The IPNetwork if successfully assigned.
        """
        try:
            address = IPAddress(self.ip, version=4)
        except AddrFormatError as e:
            _log.error("User requested IP: %s is invalid", self.ip)
            raise CniError(ERR_CODE_GENERIC,
                           msg="Failed to assign IP address",
                           details=e.message)

        try:
            self.datastore_client.assign_ip(address, self.workload_id, None)
        except AlreadyAssignedError as e:
            _log.error("User requested IP: %s is already assigned", self.ip)
            raise CniError(ERR_CODE_GENERIC,
                           msg="Failed to assign IP address",
                           details=e.message)
        except RuntimeError as e:
            _log.error("Cannot assign IPAddress: %s", e.message)
            raise CniError(ERR_CODE_GENERIC,
                           msg="Failed to assign IP address",
                           details=e.message)
        return IPNetwork(address)

    def _parse_environment(self, env):
        """
        Validates the plugins environment and extracts the required values.

        Sets self.command and self.container_id.

        :raises CniError: if a required variable is missing or the command
            is not recognised.
        """
        _log.debug('Environment: %s', json.dumps(env, indent=2))

        # Check the given environment contains the required fields.
        try:
            self.command = env[CNI_COMMAND_ENV]
        except KeyError:
            raise CniError(ERR_CODE_GENERIC,
                           msg="Invalid arguments",
                           details="CNI_COMMAND not found in environment")
        else:
            # If the command is present, make sure it is valid.
            if self.command not in [CNI_CMD_ADD, CNI_CMD_DELETE]:
                raise CniError(ERR_CODE_GENERIC,
                               msg="Invalid arguments",
                               details="Invalid command '%s'" % self.command)
        try:
            self.container_id = env[CNI_CONTAINERID_ENV]
        except KeyError:
            raise CniError(ERR_CODE_GENERIC,
                           msg="Invalid arguments",
                           details="CNI_CONTAINERID not found in environment")
class IpamPlugin(object):
    """
    CNI IPAM plugin: hands out addresses on "ADD" and releases them on
    "DEL", using the container ID as the IPAM handle.
    """

    def __init__(self, environment, ipam_config):
        # Command indicating which action to take - one of "ADD" or "DEL".
        # Populated by _parse_environment().
        self.command = None

        # Identifier for the container for which we are performing IPAM.
        # Populated by _parse_environment().
        self.container_id = None

        # Access to the datastore client.  Relies on ETCD_AUTHORITY
        # environment variable being set by the calling plugin.
        self.datastore_client = IPAMClient()

        # Whether we should assign an IPv4 address - defaults to True.
        self.assign_ipv4 = ipam_config.get(ASSIGN_IPV4_KEY, "true") == "true"

        # Whether we should assign an IPv6 address - defaults to False.
        self.assign_ipv6 = ipam_config.get(ASSIGN_IPV6_KEY, "false") == "true"

        # Validate the given environment and set fields.
        self._parse_environment(environment)

    def execute(self):
        """
        Assigns or releases IP addresses for the specified container.

        May raise CniError.

        :return: CNI ipam dictionary for ADD, None for DEL.
        """
        if self.command != "ADD":
            # Release IPs using the container_id as the handle.
            _log.info("Releasing addresses on container %s",
                      self.container_id)
            try:
                self.datastore_client.release_ip_by_handle(
                    handle_id=self.container_id)
            except KeyError:
                _log.warning("No IPs assigned to container_id %s",
                             self.container_id)
            return None

        # Assign an IP address for this container.
        _log.info("Assigning address to container %s", self.container_id)
        ipv4, ipv6 = self._assign_address(handle_id=self.container_id)

        # Build response dictionary.
        response = {}
        if ipv4:
            response["ip4"] = {"ip": str(ipv4.cidr)}
        if ipv6:
            response["ip6"] = {"ip": str(ipv6.cidr)}

        # Output the response and exit successfully.
        _log.debug("Returning response: %s", response)
        return json.dumps(response)

    def _assign_address(self, handle_id):
        """
        Assigns an IPv4 and an IPv6 address.

        :return: A tuple of (IPv4, IPv6) address assigned.
        """
        # Decide how many addresses of each family to request.
        num_v4 = 1 if self.assign_ipv4 else 0
        num_v6 = 1 if self.assign_ipv6 else 0
        _log.info("Assigning %s IPv4 and %s IPv6 addresses",
                  num_v4, num_v6)

        try:
            v4_list, v6_list = self.datastore_client.auto_assign_ips(
                num_v4=num_v4, num_v6=num_v6,
                handle_id=handle_id, attributes=None,
            )
            _log.debug("Allocated ip4s: %s, ip6s: %s", v4_list, v6_list)
        except RuntimeError as e:
            _log.error("Cannot auto assign IPAddress: %s", e.message)
            raise CniError(ERR_CODE_GENERIC,
                           msg="Failed to assign IP address",
                           details=e.message)

        # An empty result list means the requested pool had no free
        # addresses.
        ipv4 = None
        ipv6 = None
        if num_v4:
            if not v4_list:
                _log.error("No IPv4 address returned, exiting")
                raise CniError(ERR_CODE_GENERIC,
                               msg="No IPv4 addresses available in pool")
            ipv4 = IPNetwork(v4_list[0])
        if num_v6:
            if not v6_list:
                _log.error("No IPv6 address returned, exiting")
                raise CniError(ERR_CODE_GENERIC,
                               msg="No IPv6 addresses available in pool")
            ipv6 = IPNetwork(v6_list[0])

        _log.info("Assigned IPv4: %s, IPv6: %s", ipv4, ipv6)
        return ipv4, ipv6

    def _parse_environment(self, env):
        """
        Validates the plugins environment and extracts the required values.
        """
        _log.debug('Environment: %s', json.dumps(env, indent=2))

        # Check the given environment contains the required fields.
        if CNI_COMMAND_ENV not in env:
            raise CniError(ERR_CODE_GENERIC,
                           msg="Invalid arguments",
                           details="CNI_COMMAND not found in environment")
        self.command = env[CNI_COMMAND_ENV]

        # The command is present - make sure it is one we understand.
        if self.command not in (CNI_CMD_ADD, CNI_CMD_DELETE):
            raise CniError(ERR_CODE_GENERIC,
                           msg="Invalid arguments",
                           details="Invalid command '%s'" % self.command)

        if CNI_CONTAINERID_ENV not in env:
            raise CniError(ERR_CODE_GENERIC,
                           msg="Invalid arguments",
                           details="CNI_CONTAINERID not found in environment")
        self.container_id = env[CNI_CONTAINERID_ENV]
class NetworkPlugin(object): def __init__(self, config): self.pod_name = None self.namespace = None self.docker_id = None self.policy_parser = None # Get configuration from the given dictionary. logger.debug("Plugin running with config: %s", config) self.auth_token = config[KUBE_AUTH_TOKEN_VAR] self.api_root = config[KUBE_API_ROOT_VAR] self.calico_ipam = config[CALICO_IPAM_VAR].lower() self.default_policy = config[DEFAULT_POLICY_VAR].lower() self._datastore_client = IPAMClient() self._docker_client = Client( version=DOCKER_VERSION, base_url=os.getenv("DOCKER_HOST", "unix://var/run/docker.sock")) def create(self, namespace, pod_name, docker_id): """"Create a pod.""" self.pod_name = pod_name self.docker_id = docker_id self.namespace = namespace self.policy_parser = PolicyParser(self.namespace) logger.info('Configuring pod %s/%s (container_id %s)', self.namespace, self.pod_name, self.docker_id) # Obtain information from Docker Client and validate container state. # If validation fails, the plugin will exit. self._validate_container_state(self.docker_id) try: endpoint = self._configure_interface() logger.info("Created Calico endpoint: %s", endpoint.endpoint_id) self._configure_profile(endpoint) except BaseException: # Check to see if an endpoint has been created. If so, # we need to tear down any state we may have created. logger.exception("Error networking pod - cleaning up") try: self.delete(namespace, pod_name, docker_id) except BaseException: # Catch all errors tearing down the pod - this # is best-effort. logger.exception("Error cleaning up pod") # We've torn down, exit. 
logger.info("Done cleaning up") sys.exit(1) else: logger.info("Successfully configured networking for pod %s/%s", self.namespace, self.pod_name) def delete(self, namespace, pod_name, docker_id): """Cleanup after a pod.""" self.pod_name = pod_name self.docker_id = docker_id self.namespace = namespace logger.info('Removing networking from pod %s/%s (container id %s)', self.namespace, self.pod_name, self.docker_id) # Get the Calico endpoint. endpoint = self._get_endpoint() if not endpoint: # If there is no endpoint, we don't have any work to do - return. logger.debug("No Calico endpoint for pod, no work to do.") sys.exit(0) logger.debug("Pod has Calico endpoint %s", endpoint.endpoint_id) # Remove the endpoint and its configuration. self._remove_endpoint(endpoint) # Remove any profiles. self._remove_profiles(endpoint) logger.info("Successfully removed networking for pod %s/%s", self.namespace, self.pod_name) def _remove_profiles(self, endpoint): """ If the pod has any profiles, delete them unless they are the default profile or have other members. We can do this because we create a profile per pod. Profile management for namespaces and service based policy will need to be done differently. """ logger.debug("Endpoint has profiles: %s", endpoint.profile_ids) for profile_id in endpoint.profile_ids: if profile_id == DEFAULT_PROFILE_NAME: logger.debug("Do not delete default profile") continue if self._datastore_client.get_profile_members(profile_id): logger.info("Profile %s still has members, do not delete", profile_id) continue try: logger.info("Deleting Calico profile: %s", profile_id) self._datastore_client.remove_profile(profile_id) except KeyError: logger.warning("Cannot remove profile %s; Profile cannot " "be found.", profile_id) def _get_endpoint(self): """ Attempts to get and return the Calico endpoint for this pod. If no endpoint exists, returns None. 
""" logger.debug("Looking up endpoint for workload %s", self.docker_id) try: endpoint = self._datastore_client.get_endpoint( hostname=HOSTNAME, orchestrator_id=ORCHESTRATOR_ID, workload_id=self.docker_id ) except KeyError: logger.debug("No Calico endpoint exists for pod %s/%s", self.namespace, self.pod_name) endpoint = None return endpoint def status(self, namespace, pod_name, docker_id): self.namespace = namespace self.pod_name = pod_name self.docker_id = docker_id if self._uses_host_networking(self.docker_id): # We don't perform networking / assign IP addresses for pods running # in the host namespace, and so we can't return a status update # for them. logger.debug("Ignoring status for pod %s/%s in host namespace", self.namespace, self.pod_name) sys.exit(0) # Get the endpoint. endpoint = self._get_endpoint() if not endpoint: # If the endpoint doesn't exist, we cannot provide a status. logger.debug("No endpoint for pod - cannot provide status") sys.exit(1) # Retrieve IPAddress from the attached IPNetworks on the endpoint # Since Kubernetes only supports ipv4, we'll only check for ipv4 nets if not endpoint.ipv4_nets: logger.error("Error in status: No IPs attached to pod %s/%s", self.namespace, self.pod_name) sys.exit(1) else: ip_net = list(endpoint.ipv4_nets) if len(ip_net) is not 1: logger.warning("There is more than one IPNetwork attached " "to pod %s/%s", self.namespace, self.pod_name) ip = ip_net[0].ip logger.debug("Retrieved pod IP Address: %s", ip) json_dict = { "apiVersion": "v1beta1", "kind": "PodNetworkStatus", "ip": str(ip) } logger.debug("Writing status to stdout: \n%s", json.dumps(json_dict)) print(json.dumps(json_dict)) def _configure_profile(self, endpoint): """ Configure the calico profile on the given endpoint. If DEFAULT_POLICY != none, we create a new profile for this pod and populate it with the correct rules. Otherwise, the pod gets assigned to the default profile. """ if self.default_policy != POLICY_NONE: # Determine the name for this profile. 
profile_name = "%s_%s_%s" % (self.namespace, self.pod_name, str(self.docker_id)[:12]) # Create a new profile for this pod. logger.info("Creating profile '%s'", profile_name) # Retrieve pod labels, etc. pod = self._get_pod_config() if self._datastore_client.profile_exists(profile_name): # In profile-per-pod, we don't ever expect duplicate profiles. logger.error("Profile '%s' already exists.", profile_name) sys.exit(1) else: # The profile doesn't exist - generate the rule set for this # profile, and create it. rules = self._generate_rules(pod, profile_name) self._datastore_client.create_profile(profile_name, rules) # Add tags to the profile based on labels. self._apply_tags(pod, profile_name) # Set the profile for the workload. logger.info("Setting profile '%s' on endpoint %s", profile_name, endpoint.endpoint_id) self._datastore_client.set_profiles_on_endpoint( [profile_name], endpoint_id=endpoint.endpoint_id ) logger.debug('Finished configuring profile.') else: # Policy is disabled - add this pod to the default profile. if not self._datastore_client.profile_exists(DEFAULT_PROFILE_NAME): # If the default profile doesn't exist, create it. logger.info("Creating profile '%s'", DEFAULT_PROFILE_NAME) allow = Rule(action="allow") rules = Rules(id=DEFAULT_PROFILE_NAME, inbound_rules=[allow], outbound_rules=[allow]) self._datastore_client.create_profile(DEFAULT_PROFILE_NAME, rules) # Set the default profile on this pod's Calico endpoint. logger.info("Setting profile '%s' on endpoint %s", DEFAULT_PROFILE_NAME, endpoint.endpoint_id) self._datastore_client.set_profiles_on_endpoint( [DEFAULT_PROFILE_NAME], endpoint_id=endpoint.endpoint_id ) def _configure_interface(self): """Configure the Calico interface for a pod. 
This involves the following steps:
          1) Determine the IP that docker assigned to the interface inside
             the container
          2) Delete the docker-assigned veth pair that's attached to the
             docker bridge
          3) Create a new calico veth pair, using the docker-assigned IP
             for the end in the container's namespace
          4) Assign the node's IP to the host end of the veth pair (required
             for compatibility with kube-proxy REDIRECT iptables rules).
        """
        # Get container's PID.
        container_pid = self._get_container_pid(self.docker_id)

        self._delete_docker_interface()
        logger.info('Configuring Calico network interface')
        ep = self._create_endpoint(container_pid)

        # Log our container's interfaces after adding the new interface.
        _log_interfaces(container_pid)

        interface_name = generate_cali_interface_name(IF_PREFIX,
                                                      ep.endpoint_id)
        node_ip = self._get_node_ip()
        logger.debug('Adding node IP %s to host-side veth %s', node_ip,
                     interface_name)

        # This is slightly tricky. Since the kube-proxy sometimes
        # programs REDIRECT iptables rules, we MUST have an IP on the host end
        # of the caliXXX veth pairs. This is because the REDIRECT rule
        # rewrites the destination ip/port of traffic from a pod to a service
        # VIP. The destination port is rewritten to an arbitrary high-numbered
        # port, and the destination IP is rewritten to one of the IPs allocated
        # to the interface. This fails if the interface doesn't have an IP,
        # so we allocate an IP which is already allocated to the node. We set
        # the subnet to /32 so that the routing table is not affected;
        # no traffic for the node_ip's subnet will use the /32 route.
        check_call(['ip', 'addr', 'add', node_ip + '/32',
                    'dev', interface_name])
        logger.info('Finished configuring network interface')
        return ep

    def _create_endpoint(self, pid):
        """
        Creates a Calico endpoint for this pod.

        - Assigns an IP address for this pod.
        - Creates the Calico endpoint object in the datastore.
        - Provisions the Calico veth pair for this pod.

        :param pid: PID of the pod's infra container, whose network
            namespace receives the veth.
        :return: The created libcalico Endpoint object.
        """
        # Check if the container already exists. If it does, exit.
        if self._get_endpoint():
            logger.error("This container has already been configured "
                         "with Calico Networking.")
            sys.exit(1)

        ip_list = [self._assign_container_ip()]

        # Create Endpoint object
        try:
            logger.info("Creating Calico endpoint with IPs %s", ip_list)
            ep = self._datastore_client.create_endpoint(HOSTNAME,
                                                        ORCHESTRATOR_ID,
                                                        self.docker_id,
                                                        ip_list)
        except (AddrFormatError, KeyError):
            # We failed to create the endpoint - we must release the IPs
            # that we assigned for this endpoint or else they will leak.
            logger.exception("Failed to create endpoint with IPs %s. "
                             "Unassigning IP address, then exiting.", ip_list)
            self._datastore_client.release_ips(set(ip_list))
            sys.exit(1)

        # Create the veth, move into the container namespace, add the IP and
        # set up the default routes.
        logger.debug("Creating eth0 in network namespace with pid=%s", pid)
        ep.mac = ep.provision_veth(netns.PidNamespace(pid), "eth0")

        # Persist the endpoint (now carrying the veth's MAC) back to the
        # datastore.
        logger.debug("Setting mac address %s on endpoint %s", ep.mac, ep.name)
        self._datastore_client.set_endpoint(ep)

        # Let the caller know what endpoint was created.
        return ep

    def _assign_container_ip(self):
        """
        Assign an IPAddress for this container, either taking the
        docker-assigned address or using Calico IPAM.

        self.calico_ipam == 'true' selects Calico's auto_assign IPAM;
        any other value reuses the docker-assigned IPAddress.

        :return: The IPAddress which has been assigned.
        """
        def _assign(ip):
            """
            Local helper function for assigning an IP and checking
            for errors.  Only used when operating with CALICO_IPAM=false.
            """
            try:
                logger.info("Attempting to assign IP %s", ip)
                self._datastore_client.assign_ip(ip, str(self.docker_id), None)
            except (ValueError, RuntimeError):
                logger.exception("Failed to assign IPAddress %s", ip)
                sys.exit(1)

        if self.calico_ipam == 'true':
            logger.info("Using Calico IPAM")
            try:
                # Request one IPv4 address and no IPv6 addresses, keyed by
                # this container's docker ID.
                ipv4s, ipv6s = self._datastore_client.auto_assign_ips(
                    1, 0, self.docker_id, None)
                logger.debug("IPAM assigned ipv4=%s; ipv6= %s", ipv4s, ipv6s)
            except RuntimeError as err:
                logger.error("Cannot auto assign IP address: %s", err.message)
                sys.exit(1)

            # Check to make sure an address was assigned.
            if not ipv4s:
                logger.error("Unable to assign an IP address - exiting")
                sys.exit(1)

            # Get the address.
            ip = ipv4s[0]
        else:
            logger.info("Using docker assigned IP address")
            ip = self._read_docker_ip()

            try:
                # Try to assign the address using the _assign helper function.
                _assign(ip)
            except AlreadyAssignedError:
                # If the Docker IP is already assigned, it is most likely that
                # an endpoint has been removed under our feet.  When using
                # Docker IPAM, treat Docker as the source of
                # truth for IP addresses.
                logger.warning("Docker IP is already assigned, finding "
                               "stale endpoint")
                self._datastore_client.release_ips(set([ip]))

                # Clean up whatever existing endpoint has this IP address.
                # We can improve this later by making use of IPAM attributes
                # in libcalico to store the endpoint ID.  For now,
                # just loop through endpoints on this host.
                endpoints = self._datastore_client.get_endpoints(
                    hostname=HOSTNAME,
                    orchestrator_id=ORCHESTRATOR_ID)
                for ep in endpoints:
                    if IPNetwork(ip) in ep.ipv4_nets:
                        logger.warning("Deleting stale endpoint %s",
                                       ep.endpoint_id)
                        for profile_id in ep.profile_ids:
                            self._datastore_client.remove_profile(profile_id)
                        self._datastore_client.remove_endpoint(ep)
                        break

                # Assign the IP address to the new endpoint.  It shouldn't
                # be assigned, since we just unassigned it.
                logger.warning("Retry Docker assigned IP")
                _assign(ip)
        return ip

    def _remove_endpoint(self, endpoint):
        """
        Remove the provided endpoint on this host from Calico networking.

        - Removes any IP address assignments.
        - Removes the veth interface for this endpoint.
        - Removes the endpoint object from etcd.
        """
        # Remove any IP address assignments that this endpoint has
        ip_set = set()
        for net in endpoint.ipv4_nets | endpoint.ipv6_nets:
            ip_set.add(net.ip)
        logger.info("Removing IP addresses %s from endpoint %s",
                    ip_set, endpoint.name)
        self._datastore_client.release_ips(ip_set)

        # Remove the veth interface from endpoint
        try:
            logger.info("Removing veth interfaces")
            netns.remove_veth(endpoint.name)
        except CalledProcessError:
            # Best-effort: log and continue so the endpoint object is still
            # removed from the datastore below.
            logger.exception("Could not remove veth interface from "
                             "endpoint %s", endpoint.name)

        # Remove endpoint from the datastore.
        try:
            self._datastore_client.remove_workload(
                HOSTNAME, ORCHESTRATOR_ID, self.docker_id)
        except KeyError:
            logger.exception("Error removing workload.")

        logger.info("Removed Calico endpoint %s", endpoint.endpoint_id)

    def _validate_container_state(self, container_name):
        """
        Exit (non-zero) if the container is not running, or exit cleanly
        (zero) if it uses host networking, which Calico cannot manage.
        """
        info = self._get_container_info(container_name)

        # Check the container is actually running.
        if not info["State"]["Running"]:
            logger.error("The infra container is not currently running.")
            sys.exit(1)

        # We can't set up Calico if the container shares the host namespace.
        if info["HostConfig"]["NetworkMode"] == "host":
            logger.info("Skipping pod %s/%s because "
                        "it is running NetworkMode = host.",
                        self.namespace, self.pod_name)
            sys.exit(0)

    def _uses_host_networking(self, container_name):
        """
        Returns true if the given container is running in the host
        network namespace.
        """
        info = self._get_container_info(container_name)
        return info["HostConfig"]["NetworkMode"] == "host"

    def _get_container_info(self, container_name):
        """
        Inspect the named container via the docker API and return the
        resulting info dict.  Exits on API errors.
        """
        try:
            info = self._docker_client.inspect_container(container_name)
        except APIError as e:
            if e.response.status_code == 404:
                logger.error("Container %s was not found. Exiting.",
                             container_name)
            else:
                logger.error(e.message)
            sys.exit(1)
        return info

    def _get_container_pid(self, container_name):
        # PID of the container's init process, used to reach its netns.
        return self._get_container_info(container_name)["State"]["Pid"]

    def _read_docker_ip(self):
        """Get the IP for the pod's infra container."""
        container_info = self._get_container_info(self.docker_id)
        ip = container_info["NetworkSettings"]["IPAddress"]
        logger.info('Docker-assigned IP is %s', ip)
        return IPAddress(ip)

    def _get_node_ip(self):
        """
        Determine the IP for the host node.
        """
        # Compile list of addresses on network, return the first entry.
        # Try IPv4 and IPv6.
        addrs = get_host_ips(version=4) or get_host_ips(version=6)

        try:
            addr = addrs[0]
            logger.debug("Node's IP address: %s", addr)
            return addr
        except IndexError:
            # If both get_host_ips return empty lists, print message and exit.
            logger.exception('No Valid IP Address Found for Host - cannot '
                             'configure networking for pod %s. '
                             'Exiting', self.pod_name)
            sys.exit(1)

    def _delete_docker_interface(self):
        """Delete the existing veth connecting to the docker bridge."""
        logger.debug('Deleting docker interface eth0')

        # Get the PID of the container.
        pid = str(self._get_container_pid(self.docker_id))
        logger.debug('Container %s running with PID %s', self.docker_id, pid)

        # Set up a link to the container's netns so the `ip netns` tooling
        # can address it by PID.
        logger.debug("Linking to container's netns")
        logger.debug(check_output(['mkdir', '-p', '/var/run/netns']))
        netns_file = '/var/run/netns/' + pid
        if not os.path.isfile(netns_file):
            logger.debug(check_output(['ln', '-s',
                                       '/proc/' + pid + '/ns/net',
                                       netns_file]))

        # Log our container's interfaces before making any changes.
        _log_interfaces(pid)

        # Reach into the netns and delete the docker-allocated interface.
        logger.debug(check_output(['ip', 'netns', 'exec', pid,
                                   'ip', 'link', 'del', 'eth0']))

        # Log our container's interfaces after making our changes.
        _log_interfaces(pid)

        # Clean up after ourselves (don't want to leak netns files)
        logger.debug(check_output(['rm', netns_file]))

    def _get_pod_ports(self, pod):
        """
        Get the list of ports on containers in the Pod.

        :return list ports: the Kubernetes ContainerPort objects for the pod.
        """
        ports = []
        for container in pod['spec']['containers']:
            try:
                more_ports = container['ports']
                logger.info('Adding ports %s', more_ports)
                ports.extend(more_ports)
            except KeyError:
                # Containers without a 'ports' key simply contribute nothing.
                pass
        return ports

    def _get_pod_config(self):
        """Get the pod resource from the API.

        API Path depends on the api_root, namespace, and pod_name.

        :return: JSON object containing the pod spec
        """
        with requests.Session() as session:
            if self._api_root_secure() and self.auth_token:
                logger.debug('Updating header with Token %s', self.auth_token)
                session.headers.update({'Authorization':
                                        'Bearer ' + self.auth_token})

            path = os.path.join(self.api_root,
                                'namespaces/%s/pods/%s' % (self.namespace,
                                                           self.pod_name))
            try:
                logger.debug('Querying API for Pod: %s', path)
                # NOTE(review): verify=False disables TLS certificate
                # verification for the API request - confirm this is
                # intentional for in-cluster access.
                response = session.get(path, verify=False)
            except BaseException:
                logger.exception("Exception hitting Kubernetes API")
                sys.exit(1)
            else:
                if response.status_code != 200:
                    logger.error("Response from API returned %s Error:\n%s",
                                 response.status_code,
                                 response.text)
                    sys.exit(response.status_code)

        logger.debug("API Response: %s", response.text)
        pod = json.loads(response.text)
        return pod

    def _api_root_secure(self):
        """
        Checks whether the Kubernetes api root is secure or insecure.
        If not an http or https address, exit.

        :return: Boolean: True if secure. False if insecure
        """
        if (self.api_root[:5] == 'https'):
            logger.debug('Using Secure API access.')
            return True
        elif (self.api_root[:5] == 'http:'):
            logger.debug('Using Insecure API access.')
            return False
        else:
            logger.error('%s is not set correctly (%s). '
                         'Please specify as http or https address. Exiting',
                         KUBE_API_ROOT_VAR, self.api_root)
            sys.exit(1)

    def _generate_rules(self, pod, profile_name):
        """
        Generate Rules takes human readable policy strings in annotations
        and returns a libcalico Rules object.

        :return Pycalico Rules object.
        """
        # NOTE(review): _configure_profile (above) calls this as
        # self._generate_rules(pod) without profile_name - confirm the
        # call site matches this signature.

        # Create allow and per-namespace rules for later use.
        allow = Rule(action="allow")
        allow_ns = Rule(action="allow", src_tag=self._get_namespace_tag(pod))

        annotations = self._get_metadata(pod, "annotations")
        logger.debug("Found annotations: %s", annotations)

        if self.namespace == "kube-system":
            # Pods in the kube-system namespace must be accessible by all
            # other pods for services like DNS to work.
            logger.info("Pod is in kube-system namespace - allow all")
            inbound_rules = [allow]
            outbound_rules = [allow]
        elif annotations and POLICY_ANNOTATION_KEY in annotations:
            # If policy annotations are defined, use them to generate rules.
            logger.info("Generating advanced security policy from annotations")
            rules = annotations[POLICY_ANNOTATION_KEY]
            inbound_rules = []
            outbound_rules = [allow]
            # Rules are semicolon-separated policy lines.
            for rule in rules.split(";"):
                parsed_rule = self.policy_parser.parse_line(rule)
                inbound_rules.append(parsed_rule)
        else:
            # If no annotations are defined, just use the configured
            # default policy.
            if self.default_policy == POLICY_NS_ISOLATION:
                # Isolate on namespace boundaries by default.
                logger.debug("Default policy is namespace isolation")
                inbound_rules = [allow_ns]
                outbound_rules = [allow]
            elif self.default_policy == POLICY_ALLOW:
                # Allow all traffic by default.
                logger.debug("Default policy is allow all")
                inbound_rules = [allow]
                outbound_rules = [allow]
            # NOTE(review): if default_policy matches neither constant,
            # inbound_rules/outbound_rules are unbound and the return below
            # raises NameError - confirm config validation guarantees one
            # of these values.

        return Rules(id=profile_name,
                     inbound_rules=inbound_rules,
                     outbound_rules=outbound_rules)

    def _apply_tags(self, pod, profile_name):
        """
        In addition to Calico's default pod_name tag, add tags generated
        from Kubernetes labels and the namespace.

        Ex. labels: {key: value} -> tags += namespace_key_value
        Ex. namespace: default -> tags += namespace_default

        :param pod: The config dictionary for the pod being created.
        :type pod: dict
        :param profile_name: The name of the Calico profile.
        :type profile_name: string
        :return:
        """
        # NOTE(review): _configure_profile (above) calls this as
        # self._apply_tags(pod) without profile_name - confirm the call
        # site matches this signature.
        logger.debug("Applying tags to profile '%s'", profile_name)

        try:
            profile = self._datastore_client.get_profile(profile_name)
        except KeyError:
            logger.error('Could not apply tags. Profile %s could not be '
                         'found. Exiting', profile_name)
            sys.exit(1)

        # Grab namespace and create a tag if it exists.
        ns_tag = self._get_namespace_tag(pod)
        logger.debug('Generated tag: %s', ns_tag)
        profile.tags.add(ns_tag)

        # Create tags from labels
        labels = self._get_metadata(pod, 'labels')
        if labels:
            for k, v in labels.iteritems():
                tag = self._label_to_tag(k, v)
                logger.debug('Generated tag: %s', tag)
                profile.tags.add(tag)

        # Apply tags to profile.
        self._datastore_client.profile_update_tags(profile)
        logger.debug('Finished applying tags.')

    def _get_metadata(self, pod, key):
        """
        Return Metadata[key] Object given Pod.
        Returns None if no key-value exists.
        """
        try:
            val = pod['metadata'][key]
        except (KeyError, TypeError):
            # Missing key, or pod/metadata is not a dict.
            logger.debug('No %s found in pod %s', key, pod)
            return None

        logger.debug("Pod %s: %s", key, val)
        return val

    def _escape_chars(self, unescaped_string):
        """
        Calico can only handle 3 special chars, '_.-'.
        This function uses regex sub to replace other special chars
        with '_'.
        """
        # Character to replace symbols
        swap_char = '_'

        # If swap_char is in string, double it so escaped output remains
        # unambiguous.
        unescaped_string = re.sub(swap_char, "%s%s" % (swap_char, swap_char),
                                  unescaped_string)

        # Substitute all invalid chars.
        return re.sub('[^a-zA-Z0-9\.\_\-]', swap_char, unescaped_string)

    def _get_namespace_tag(self, pod):
        """
        Return an escaped tag of the form namespace=<self.namespace>.
        """
        assert self.namespace
        ns_tag = self._escape_chars('%s=%s' % ('namespace', self.namespace))
        return ns_tag

    def _label_to_tag(self, label_key, label_value):
        """
        Labels are key-value pairs, tags are single strings. This function
        handles that translation:
          1) Concatenate key and value with '='
          2) Prepend the pod's namespace followed by '/'
          3) Escape the generated string so it is Calico compatible

        :param label_key: key to label
        :param label_value: value to given key for a label
        :return: single string tag
        :rtype: string
        """
        tag = '%s=%s' % (label_key, label_value)
        tag = '%s/%s' % (self.namespace, tag)
        tag = self._escape_chars(tag)
        return tag
class IpamPlugin(object):
    """
    Performs CNI IP address management (IPAM) against the Calico datastore.

    Parses the CNI environment/config on construction and, when execute()
    is called, either assigns addresses (ADD) or releases them (DEL) for
    the container identified by CNI_CONTAINERID.
    """
    def __init__(self, config, environment):
        # Dictionary representation of the config passed via stdin.
        self.config = config

        # Current environment (e.g. os.environ).
        self.env = environment

        # Command indicating which action to take - one of "ADD" or "DEL".
        self.command = None

        # Identifier for the container for which we are performing IPAM.
        self.container_id = None

        # Access to the datastore client.  Relies on ETCD_AUTHORITY
        # environment variable being set by the calling plugin.
        self.datastore_client = IPAMClient()

        # Validate the given config and environment and set fields
        # using the given config and environment.
        self._parse_config()

    def execute(self):
        """
        Assigns or releases IP addresses for the specified container.

        On ADD, prints the assigned addresses to stdout as JSON per the
        CNI IPAM contract.
        :return:
        """
        # Compare against the CNI_CMD_ADD constant (not the literal "ADD")
        # for consistency with the validation in _parse_config.
        if self.command == CNI_CMD_ADD:
            # Assign an IP address for this container.
            _log.info("Assigning address to container %s", self.container_id)
            ipv4, ipv6 = self._assign_address(handle_id=self.container_id)

            # Output the response and exit successfully.
            print(json.dumps({"ip4": {"ip": str(ipv4.cidr)},
                              "ip6": {"ip": str(ipv6.cidr)}}))
        else:
            # Release any IP addresses for this container.
            # _parse_config guarantees the command is ADD or DEL.
            assert self.command == CNI_CMD_DELETE, \
                "Invalid command: %s" % self.command

            # Release IPs using the container_id as the handle.
            _log.info("Releasing addresses on container %s",
                      self.container_id)
            try:
                self.datastore_client.release_ip_by_handle(
                    handle_id=self.container_id)
            except KeyError:
                # No addresses were ever assigned - nothing to release.
                _log.warning("No IPs assigned to container_id %s",
                             self.container_id)

    def _assign_address(self, handle_id, ipv4_pool=None, ipv6_pool=None):
        """
        Assigns an IPv4 and IPv6 address within the given pools.

        If no pools are given, they will be automatically chosen.

        :param handle_id: IPAM handle under which to record the assignment.
        :param ipv4_pool: Optional IPv4 pool to assign from.
        :param ipv6_pool: Optional IPv6 pool to assign from.
        :return: A tuple of (IPv4, IPv6) address assigned.
        """
        ipv4 = IPNetwork("0.0.0.0")
        ipv6 = IPNetwork("::")
        pool = (ipv4_pool, ipv6_pool)
        try:
            ipv4_addrs, ipv6_addrs = self.datastore_client.auto_assign_ips(
                num_v4=1, num_v6=1, handle_id=handle_id, attributes=None,
                pool=pool
            )
            _log.debug("Allocated ip4s: %s, ip6s: %s",
                       ipv4_addrs, ipv6_addrs)
        except RuntimeError as err:
            _log.error("Cannot auto assign IPAddress: %s", err.message)
            _exit_on_error(code=ERR_CODE_FAILED_ASSIGNMENT,
                           message="Failed to assign IP address",
                           details=err.message)
        else:
            try:
                ipv4 = ipv4_addrs[0]
            except IndexError:
                _log.error("No IPv4 address returned, exiting")
                _exit_on_error(code=ERR_CODE_FAILED_ASSIGNMENT,
                               message="No IPv4 addresses available in pool",
                               details="")

            try:
                ipv6 = ipv6_addrs[0]
            except IndexError:
                _log.error("No IPv6 address returned, exiting")
                _exit_on_error(code=ERR_CODE_FAILED_ASSIGNMENT,
                               message="No IPv6 addresses available in pool",
                               details="")

        _log.info("Assigned IPv4: %s, IPv6: %s", ipv4, ipv6)
        return IPNetwork(ipv4), IPNetwork(ipv6)

    def _parse_config(self):
        """
        Validates that the plugin's environment and given config contain
        the required values.
        """
        _log.debug('Environment: %s', json.dumps(self.env, indent=2))
        _log.debug('Network config: %s', json.dumps(self.config, indent=2))

        # Check the given environment contains the required fields.
        try:
            self.command = self.env[CNI_COMMAND_ENV]
        except KeyError:
            _exit_on_error(code=ERR_CODE_INVALID_ARGUMENT,
                           message="Arguments Invalid",
                           details="CNI_COMMAND not found in environment")
        else:
            # If the command is present, make sure it is valid.
            if self.command not in [CNI_CMD_ADD, CNI_CMD_DELETE]:
                _exit_on_error(code=ERR_CODE_INVALID_ARGUMENT,
                               message="Arguments Invalid",
                               details="Invalid command '%s'" % self.command)

        try:
            self.container_id = self.env[CNI_CONTAINERID_ENV]
        except KeyError:
            _exit_on_error(code=ERR_CODE_INVALID_ARGUMENT,
                           message="Arguments Invalid",
                           details="CNI_CONTAINERID not found in environment")