def _translate_profile_for_datastore(self):
    """
    Translate the profile for updating in the datastore.  This also checks
    the updated tags reference real Docker networks when the profile is for
    a Docker network.

    :return: The translated profile.
    """
    # If this is not a Docker network profile then just return the profile
    # unchanged.
    if not self.is_docker_network_profile():
        return self.profile

    # This is a Docker network profile, so translate from names to IDs.
    try:
        profile = self._translate_profile(self.profile,
                                           self._get_id_from_name)
    except NoDockerNetwork as e:
        # A tag in the profile does not reference a valid Docker network.
        print_paragraph("You are referencing a Docker network (%s) that "
                        "does not exist. Create the network first and "
                        "then update this profile rule to reference "
                        "it." % e.name)
        sys.exit(1)
    else:
        return profile
def validate_arguments(arguments): """ Validate argument values: <IP> Arguments not validated: <CONTAINER> <INTERFACE> :param arguments: Docopt processed arguments """ # Validate IP requested_ip = arguments.get("<IP>") if not (requested_ip is None or validate_ip(requested_ip, 4) or validate_ip(requested_ip, 6) or validate_cidr(requested_ip) or requested_ip.lower() in ('ipv4', 'ipv6')): print_paragraph("Invalid IP address specified. Argument must be a " "valid IP or CIDR.") sys.exit(1) # Validate POOL if requested_ip is not None and '/' in requested_ip: requested_pool = IPNetwork(requested_ip) try: client.get_ip_pool_config(requested_pool.version, requested_pool) except KeyError: print_paragraph("Invalid CIDR specified for desired pool. " "No pool found for {0}.".format(requested_pool)) sys.exit(1) # Validate PROFILE endpoint.validate_arguments(arguments)
def warn_if_hostname_conflict(ip): """ Prints a warning message if it seems like an existing host is already running calico using this hostname. :param ip: User-provided IP address to start this node with. :return: Nothing """ # If there's already a calico-node container on this host, they're probably # just re-running node to update one of the ip addresses, so skip.. try: current_ipv4, _ = client.get_host_bgp_ips(hostname) except KeyError: # No other machine has registered configuration under this hostname. # This must be a new host with a unique hostname, which is the # expected behavior. pass else: if current_ipv4 != "" and current_ipv4 != ip: hostname_warning = "WARNING: Hostname '%s' is already in use " \ "with IP address %s. Calico requires each " \ "compute host to have a unique hostname. " \ "If this is your first time running " \ "'calicoctl node' on this host, ensure " \ "that another host is not already using the " \ "same hostname." % (hostname, ip) try: if not docker_client.containers(filters={'name': 'calico-node'}): # Calico-node isn't running on this host. # There may be another host using this hostname. print_paragraph(hostname_warning) except IOError: # Couldn't connect to docker to confirm calico-node is running. print_paragraph(hostname_warning)
def validate_arguments(arguments):
    """
    Validate argument values:
        <PROFILES>

    Arguments not validated:
        <HOSTNAME>
        <ORCHESTRATOR_ID>
        <WORKLOAD_ID>
        <ENDPOINT_ID>

    :param arguments: Docopt processed arguments
    """
    # Validate Profiles
    profile_ok = True
    profiles = arguments.get("<PROFILES>")
    if profiles is not None:
        for profile in profiles:
            profile_ok = validate_characters(profile)

    if not profile_ok:
        print_paragraph("Profile names must be < 40 characters long and can "
                        "only contain numbers, letters, dots, dashes and "
                        "underscores.")
        sys.exit(1)
def warn_if_hostname_conflict(ip): """ Prints a warning message if it seems like an existing host is already running calico using this hostname. :param ip: User-provided IP address to start this node with. :return: Nothing """ # If there's already a calico-node container on this host, they're probably # just re-running node to update one of the ip addresses, so skip.. if len(docker_client.containers(filters={'name': 'calico-node'})) == 0: # Otherwise, check if another host with the same hostname # is already configured try: current_ipv4, _ = client.get_host_bgp_ips(hostname) except KeyError: # No other machine has registered configuration under this hostname. # This must be a new host with a unique hostname, which is the # expected behavior. pass else: if current_ipv4 != "" and current_ipv4 != ip: print_paragraph("WARNING: Hostname '%s' is already in use " "with IP address %s. Calico requires each compute host to " "have a unique hostname. If this is your first time " "running 'calicoctl node' on this host, ensure that " \ "another host is not already using the " \ "same hostname." % (hostname, ip))
def get_ip_and_pool(ip): if ip.lower() in ("ipv4", "ipv6"): if '4' in ip: result = assign_any(1, 0) ip = result[0][0] else: result = assign_any(0, 1) ip = result[1][0] pool = get_pool_or_exit(ip) elif ip is not None and '/' in ip: pool = IPPool(ip) if IPNetwork(ip).version == 4: result = assign_any(1, 0, pool=(pool, None)) ip = result[0][0] else: result = assign_any(0, 1, pool=(None, pool)) ip = result[1][0] else: # Check the IP is in the allocation pool. If it isn't, BIRD won't # export it. ip = IPAddress(ip) pool = get_pool_or_exit(ip) # Assign the IP try: client.assign_ip(ip, None, {}) except AlreadyAssignedError: print_paragraph("IP address is already assigned in pool " "%s." % pool) sys.exit(1) return (ip, pool)
def _assign_host_tunnel_addr(ipip_pools): """ Claims an IPIP-enabled IP address from the first pool with some space. Stores the result in the host's config as its tunnel address. Exits on failure. :param ipip_pools: List of IPPools to search for an address. """ for ipip_pool in ipip_pools: v4_addrs, _ = client.auto_assign_ips(num_v4=1, num_v6=0, handle_id=None, attributes={}, pool=(ipip_pool, None), host=hostname) if v4_addrs: # Successfully allocated an address. Unpack the list. [ip_addr] = v4_addrs break else: # Failed to allocate an address, the pools must be full. print_paragraph( "Failed to allocate an IP address from an IPIP-enabled pool " "for the host's IPIP tunnel device. Pools are likely " "exhausted.") sys.exit(1) # If we get here, we've allocated a new IPIP-enabled address, # Store it in etcd so that Felix will pick it up. client.set_per_host_config(hostname, "IpInIpTunnelAddr", str(ip_addr))
def endpoint_profile_show(hostname, orchestrator_id, workload_id, endpoint_id): """ List the profiles assigned to a particular endpoint. :param hostname: The hostname. :param orchestrator_id: The orchestrator ID. :param workload_id: The workload ID. :param endpoint_id: The endpoint ID. :return: None """ try: endpoint = client.get_endpoint(hostname=hostname, orchestrator_id=orchestrator_id, workload_id=workload_id, endpoint_id=endpoint_id) except MultipleEndpointsMatch: print "Failed to list profiles in endpoint.\n" print_paragraph("More than 1 endpoint matches the provided " "criteria. Please provide additional parameters to " "refine the search.") sys.exit(1) except KeyError: print "Failed to list profiles in endpoint.\n" print_paragraph("Endpoint %s is unknown to Calico.\n" % endpoint_id) sys.exit(1) if endpoint.profile_ids: x = PrettyTable(["Name"], sortby="Name") for name in endpoint.profile_ids: x.add_row([name]) print str(x) + "\n" else: print "Endpoint has no profiles associated with it."
def print_container_not_in_calico_msg(container_name):
    """
    Display message indicating that the supplied container is not known to
    Calico.

    :param container_name: The container name.
    :return: None.
    """
    print_paragraph("Container %s is unknown to Calico." % container_name)
    print_paragraph("Use `calicoctl container add` to add the container "
                    "to the Calico network.")
def upload_temp_diags(diags_path):
    # TODO: Rewrite into httplib
    print_paragraph("Uploading file. Available for 14 days from the URL "
                    "printed when the upload completes")
    curl_cmd = ["curl", "--upload-file", diags_path,
                os.path.join("https://transfer.sh",
                             os.path.basename(diags_path))]
    curl_process = subprocess.Popen(curl_cmd)
    curl_process.communicate()
    curl_process.wait()
    print("Done")
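# Illustration (not part of the original module): one possible shape for the
# "Rewrite into httplib" TODO above -- an untested sketch that assumes
# transfer.sh accepts a plain HTTPS PUT of the file body, which is what
# `curl --upload-file` sends.
def _upload_temp_diags_httplib_sketch(diags_path):
    import httplib
    import os

    conn = httplib.HTTPSConnection("transfer.sh")
    with open(diags_path, "rb") as f:
        # httplib accepts a file object as the request body and derives
        # Content-Length from the file's size.
        conn.request("PUT", "/" + os.path.basename(diags_path), f)
    response = conn.getresponse()
    print response.read()  # transfer.sh replies with the download URL
    conn.close()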
def get_ip_and_pool(ip_or_pool):
    """
    Return the IP address and associated pool to use when creating a
    container.

    :param ip_or_pool: (string) The requested IP address, pool CIDR, or
    special values "ipv4" and "ipv6".  When an IP address is specified, that
    IP address is used.  When a pool CIDR is specified, an IP address is
    allocated from that pool.  If either "ipv4" or "ipv6" is specified, then
    an IP address is allocated from an arbitrary IPv4 or IPv6 pool
    respectively.
    :return: A tuple of (IPAddress, IPPool)
    """
    if ip_or_pool.lower() in ("ipv4", "ipv6"):
        # Requested to auto-assign an IP address
        if ip_or_pool[-1] == '4':
            result = assign_any(1, 0)
            ip = result[0][0]
        else:
            result = assign_any(0, 1)
            ip = result[1][0]

        # We can't get the pool until we have the IP address.  If we fail to
        # get the pool (very small timing window if another client deletes
        # the pool) we must release the IP.
        try:
            pool = get_pool_or_exit(ip)
        except SystemExit:
            client.release_ips({ip})
            raise
    elif ip_or_pool is not None and '/' in ip_or_pool:
        # Requested to assign an IP address from a specified pool.
        cidr = IPNetwork(ip_or_pool)
        pool = get_pool_by_cidr_or_exit(cidr)
        if cidr.version == 4:
            result = assign_any(1, 0, pool=(pool, None))
            ip = result[0][0]
        else:
            result = assign_any(0, 1, pool=(None, pool))
            ip = result[1][0]
    else:
        # Requested a specific IP address to use.
        ip = IPAddress(ip_or_pool)
        pool = get_pool_or_exit(ip)

        # Assign the IP
        try:
            client.assign_ip(ip, None, {})
        except AlreadyAssignedError:
            print_paragraph("IP address is already assigned in pool "
                            "%s." % pool)
            sys.exit(1)

    return (ip, pool)
def container(arguments): """ Main dispatcher for container commands. Calls the corresponding helper function. :param arguments: A dictionary of arguments already processed through this file's docstring with docopt :return: None """ validate_arguments(arguments) ip_version = get_container_ipv_from_arguments(arguments) try: if arguments.get("endpoint-id"): container_endpoint_id_show(arguments.get("<CONTAINER>")) elif arguments.get("ip"): if arguments.get("add"): container_ip_add(arguments.get("<CONTAINER>"), arguments.get("<IP>"), ip_version, arguments.get("--interface")) elif arguments.get("remove"): container_ip_remove(arguments.get("<CONTAINER>"), arguments.get("<IP>"), ip_version, arguments.get("--interface")) else: if arguments.get("add"): container_add(arguments.get("<CONTAINER>"), arguments.get("<IP>"), arguments.get("--interface")) if arguments.get("remove"): container_remove(arguments.get("<CONTAINER>")) else: if arguments.get("add"): container_add(arguments.get("<CONTAINER>"), arguments.get("<IP>"), arguments.get("--interface")) if arguments.get("remove"): container_remove(arguments.get("<CONTAINER>")) except ConnectionError as e: # We hit a "Permission denied error (13) if the docker daemon # does not have sudo permissions if permission_denied_error(e): print_paragraph("Unable to run command. Re-run the " "command as root, or configure the docker " "group to run with sudo privileges (see docker " "installation guide for details).") else: print_paragraph("Unable to run docker commands. Is the docker " "daemon running?") sys.exit(1)
def endpoint_profile_append(hostname, orchestrator_id, workload_id, endpoint_id, profile_names): """ Append a list of profiles to the container endpoint profile list. The hostname, orchestrator_id, workload_id, and endpoint_id are all optional parameters used to determine which endpoint is being targeted. The more parameters used, the faster the endpoint query will be. The query must be specific enough to match a single endpoint or it will fail. The profile list may not contain duplicate entries, invalid profile names, or profiles that are already in the containers list. If no profile is specified, nothing happens. :param hostname: The host that the targeted endpoint resides on. :param orchestrator_id: The orchestrator that created the targeted endpoint. :param workload_id: The ID of workload which created the targeted endpoint. :param endpoint_id: The endpoint ID of the targeted endpoint. :param profile_names: The list of profile names to add to the targeted endpoint. :return: None """ # Validate the profile list. validate_profile_list(profile_names) if not profile_names: print_paragraph("No profile specified.") else: try: client.append_profiles_to_endpoint(profile_names, hostname=hostname, orchestrator_id=orchestrator_id, workload_id=workload_id, endpoint_id=endpoint_id) print_paragraph("Profile(s) %s appended." % (", ".join(profile_names))) except KeyError: print "Failed to append profiles to endpoint.\n" print_paragraph("Endpoint %s is unknown to Calico.\n" % endpoint_id) sys.exit(1) except ProfileAlreadyInEndpoint, e: print_paragraph("Profile %s is already in endpoint " "profile list" % e.profile_name) except MultipleEndpointsMatch: print_paragraph( "More than 1 endpoint matches the provided criteria. " "Please provide additional parameters to refine the " "search.") sys.exit(1)
def parse_ports(ports_str):
    """
    Parse a string representing a port list into a list of ports and
    port ranges.

    Returns None if the input is None.

    :param StringTypes|NoneType ports_str: string representing a port list.
        Examples: "1" "1,2,3" "1:3" "1,2,3:4"
    :return list[StringTypes|int]|NoneType: list of ports or None.
    """
    if ports_str is None:
        return None
    # We allow ranges with : or - but convert to :, which is what the data
    # model uses.
    if not re.match(r'^(\d+([:-]\d+)?)(,\d+([:-]\d+)?)*$', ports_str):
        print_paragraph("Ports: %r are invalid; expecting a comma-separated "
                        "list of ports and port ranges." % ports_str)
        sys.exit(1)
    splits = ports_str.split(",")
    parsed_ports = []
    for split in splits:
        m = re.match(r'^(\d+)[:-](\d+)$', split)
        if m:
            # Got a range, canonicalise it.
            min = int(m.group(1))
            max = int(m.group(2))
            if min > max:
                print "Port range minimum (%s) > maximum (%s)." % (min, max)
                sys.exit(1)
            if not (0 <= min <= 65535):
                print "Port minimum (%s) out-of-range." % min
                sys.exit(1)
            if not (0 <= max <= 65535):
                print "Port maximum (%s) out-of-range." % max
                sys.exit(1)
            parsed_ports.append("%s:%s" % (min, max))
        else:
            # Should be a lone port, convert to int.
            port = int(split)
            if not (0 <= port <= 65535):
                print "Port (%s) out-of-range." % port
                sys.exit(1)
            parsed_ports.append(port)
    return parsed_ports
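# Illustration (not part of the original module): a quick sketch of how
# parse_ports behaves, based solely on the function above.  Lone ports come
# back as ints; ranges are canonicalised to "min:max" strings whether written
# with ':' or '-'.  The import path below is an assumption -- adjust it to
# wherever parse_ports actually lives in this codebase.
if __name__ == "__main__":
    from calico_ctl.profile import parse_ports  # hypothetical import path

    print parse_ports("80")            # [80]
    print parse_ports("1,2,3")         # [1, 2, 3]
    print parse_ports("8000-8080")     # ["8000:8080"]
    print parse_ports("22,1000:2000")  # [22, "1000:2000"]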
def error_if_bgp_ip_conflict(ip, ip6): """ Prints an error message and exits if either of the IPv4 or IPv6 addresses is already in use by another calico BGP host. :param ip: User-provided IPv4 address to start this node with. :param ip6: User-provided IPv6 address to start this node with. :return: Nothing """ ip_list = [] if ip: ip_list.append(ip) if ip6: ip_list.append(ip6) try: # Get hostname of host that already uses the given IP, if it exists ip_conflicts = client.get_hostnames_from_ips(ip_list) except KeyError: # No hosts have been configured in etcd, so there cannot be a conflict return if ip_conflicts.keys(): ip_error = ( "ERROR: IP address %s is already in use by host %s. " "Calico requires each compute host to have a unique IP. " "If this is your first time running 'calicoctl node' on " "this host, ensure that another host is not already using " "the same IP address." ) try: if ip_conflicts[ip] != hostname: ip_error = ip_error % (ip, str(ip_conflicts[ip])) print_paragraph(ip_error) sys.exit(1) except KeyError: # IP address was not found in ip-host dictionary pass try: if ip6 and ip_conflicts[ip6] != hostname: ip_error = ip_error % (ip6, str(ip_conflicts[ip6])) print_paragraph(ip_error) sys.exit(1) except KeyError: # IP address was not found in ip-host dictionary pass
def ip_pool_remove(cidrs, version): """ Remove the given CIDRs from the IP address allocation pool. :param cidrs: The pools to remove in CIDR format, e.g. 192.168.0.0/16 :param version: 4 or 6 :return: None """ for cidr in cidrs: # Get the existing IP Pool so that we can disable it, try: pool = client.get_ip_pool_config(version, IPNetwork(cidr)) except KeyError: print "%s is not a configured pool." % cidr sys.exit(1) try: # Disable the pool to prevent any further allocation blocks from # being assigned from the pool. Existing allocation blocks will # still exist and may be allocated from until affinity is removed # from the blocks. print "Disabling IP Pool" pool.disabled = True client.set_ip_pool_config(version, pool) # Remove affinity from the allocation blocks for the pool. This # will prevent these blocks from being used for auto-allocations. # We pause before removing the affinities and the pool to allow # any in-progress IP allocations to complete - there is a known # timing window here, which can be closed but at the expense of # additional etcd reads for each affine block allocation - since # deletion of a pool is not common, it is simplest to pause in # between disabling and deleting the pool. print "Removing IPAM configuration for pool" time.sleep(3) client.release_pool_affinities(pool) client.remove_ip_pool(version, pool.cidr) print "Deleted IP Pool" except (KeyError, HostAffinityClaimedError): print_paragraph("Conflicting modifications have been made to the " "IPAM configuration for this pool. Please retry " "the command.") sys.exit(1)
def _find_or_pull_node_image(image_name):
    """
    Check if Docker has a cached copy of an image, and if not, attempt to
    pull it.

    :param image_name: The full name of the image.
    :return: None.
    """
    try:
        _ = docker_client.inspect_image(image_name)
    except docker.errors.APIError as err:
        if err.response.status_code == 404:
            # TODO: Display proper status bar
            print_paragraph("Pulling Docker image %s" % image_name)

            try:
                # Pull the image and then verify that it was successfully
                # pulled (the pull doesn't raise an exception on failure).
                docker_client.pull(image_name)
                docker_client.inspect_image(image_name)
            except docker.errors.APIError:
                # Unable to download the Docker image.
                print_paragraph("ERROR: Unable to download Docker image.")
                print_paragraph("Please verify that you have network "
                                "connectivity to DockerHub and that, if you "
                                "explicitly specified which calico/node "
                                "image to use, the image name is correct.")
                sys.exit(1)
def node_stop(force):
    """
    Stop the Calico node.  This stops the containers (calico/node and
    calico/node-libnetwork) that are started by calicoctl node.
    """
    endpoints = len(client.get_endpoints(hostname=hostname))
    if endpoints:
        if not force:
            print_paragraph("Current host has active endpoints so can't be "
                            "stopped.  Force with --force")
            print_paragraph("Note that stopping the node while there are "
                            "active endpoints may make it difficult to clean "
                            "up the endpoints: for example, Docker containers "
                            "networked using libnetwork with Calico will not "
                            "invoke network cleanup during the normal "
                            "container lifecycle.")
            sys.exit(1)
        else:
            print_paragraph("Stopping node while host has active endpoints. "
                            "If this is in error, restart the node using the "
                            "'calicoctl node' command.")

    try:
        docker_client.stop("calico-node")
    except docker.errors.APIError as err:
        if err.response.status_code != 404:
            raise
    try:
        docker_client.stop("calico-libnetwork")
    except docker.errors.APIError as err:
        if err.response.status_code != 404:
            raise

    print "Node stopped"
def node_remove(remove_endpoints):
    """
    Remove a node from the Calico network.

    :param remove_endpoints: Whether the endpoint data should be forcibly
    removed.
    """
    if _container_running("calico-node") or \
       _container_running("calico-libnetwork"):
        print_paragraph("The node cannot be removed while it is running. "
                        "Please run 'calicoctl node stop' to stop the node "
                        "before removing it.")
        sys.exit(1)

    endpoints = client.get_endpoints(hostname=hostname)
    if endpoints and not remove_endpoints:
        print_paragraph("The node has active Calico endpoints so can't be "
                        "deleted. Force with --remove-endpoints")
        print_paragraph("Note that forcibly removing the node may leave some "
                        "workloads in an indeterminate networked state. If "
                        "this is in error, you may restart the node using the "
                        "'calicoctl node' command and clean up the workloads "
                        "in the normal way.")
        sys.exit(1)

    for endpoint in endpoints:
        remove_veth(endpoint.name)

    client.remove_host(hostname)

    print "Node configuration removed"
def node_remove(remove_endpoints, host): """ Remove a node from the Calico network. :param remove_endpoints: Whether the endpoint data should be forcibly removed. :param host: The hostname of the host whose node will be removed, or None if removing this host's node. :return: None. """ host_to_remove = host or hostname if host_to_remove == hostname and ( _container_running("calico-node") or _container_running("calico-libnetwork")): print_paragraph("The node cannot be removed while it is running. " "Please run 'calicoctl node stop' to stop the node " "before removing it.") sys.exit(1) endpoints = client.get_endpoints(hostname=host_to_remove) if endpoints and not remove_endpoints: print_paragraph("The node has active Calico endpoints so can't be " "deleted. Force with --remove-endpoints") print_paragraph("Note that forcible removing the node may leave some " "workloads in an indeterminate networked state. If " "this is in error, you may restart the node using the " "'calicoctl node' command and clean up the workloads " "in the normal way.") sys.exit(1) # Remove the veths, and release all IPs associated with the endpoints. To # release the IPs, we construct a set of all IP addresses across all # endpoints (this assumes the endpoint nets are all single IPs). ips = set() for endpoint in endpoints: remove_veth(endpoint.name) ips |= {net.ip for net in endpoint.ipv4_nets} ips |= {net.ip for net in endpoint.ipv6_nets} client.release_ips(ips) # Remove the IPAM host data. client.remove_ipam_host(host_to_remove) # If the host had an IPIP tunnel address, release it back to the IPAM pool # so that we don't leak it when we delete the config. raw_addr = client.get_per_host_config(host_to_remove, "IpInIpTunnelAddr") try: ip_addr = IPAddress(raw_addr) client.release_ips({ip_addr}) except (AddrFormatError, ValueError, TypeError): pass client.remove_per_host_config(host_to_remove, "IpInIpTunnelAddr") client.remove_host(host_to_remove) print "Node configuration removed"
def endpoint_profile_set(hostname, orchestrator_id, workload_id, endpoint_id, profile_names): """ Set the complete list of profiles for the container endpoint profile list. The hostname, orchestrator_id, workload_id, and endpoint_id are all optional parameters used to determine which endpoint is being targeted. The more parameters used, the faster the endpoint query will be. The query must be specific enough to match a single endpoint or it will fail. The profile list may not contain duplicate entries or invalid profile names. :param hostname: The host that the targeted endpoint resides on. :param orchestrator_id: The orchestrator that created the targeted endpoint. :param workload_id: The ID of workload which created the targeted endpoint. :param endpoint_id: The endpoint ID of the targeted endpoint. :param profile_names: The list of profile names to set on the targeted endpoint. :return: None """ # Validate the profile list. validate_profile_list(profile_names) try: client.set_profiles_on_endpoint(profile_names, hostname=hostname, orchestrator_id=orchestrator_id, workload_id=workload_id, endpoint_id=endpoint_id) print_paragraph("Profiles %s set for %s." % (", ".join(profile_names), endpoint_id)) except KeyError: print "Failed to set profiles for endpoint.\n" print_paragraph("Endpoint %s is unknown to Calico.\n" % endpoint_id) sys.exit(1)
def endpoint_profile_remove(hostname, orchestrator_id, workload_id, endpoint_id, profile_names): """ Remove a list of profiles from the endpoint profile list. The hostname, orchestrator_id, workload_id, and endpoint_id are all optional parameters used to determine which endpoint is being targeted. The more parameters used, the faster the endpoint query will be. The query must be specific enough to match a single endpoint or it will fail. The profile list may not contain duplicate entries, invalid profile names, or profiles that are not already in the containers list. :param hostname: The host that the targeted endpoint resides on. :param orchestrator_id: The orchestrator that created the targeted endpoint. :param workload_id: The ID of workload which created the targeted endpoint. :param endpoint_id: The endpoint ID of the targeted endpoint. :param profile_names: The list of profile names to remove from the targeted endpoint. :return: None """ # Validate the profile list. validate_profile_list(profile_names) try: client.remove_profiles_from_endpoint(profile_names, hostname=hostname, orchestrator_id=orchestrator_id, workload_id=workload_id, endpoint_id=endpoint_id) print_paragraph("Profiles %s removed." % (",".join(profile_names))) except KeyError: print "Failed to remove profiles from endpoint.\n" print_paragraph("Endpoint could not be found.\n") sys.exit(1) except ProfileNotInEndpoint, e: print_paragraph("Profile %s is not in endpoint profile " "list." % e.profile_name)
def container_add(container_id, ip, interface):
    """
    Add a container (on this host) to Calico networking with the given IP.

    :param container_id: The namespace path or the docker name/ID of the
    container.
    :param ip: An IPAddress object with the desired IP to assign.
    :param interface: The name of the interface in the container.
    """
    # The netns manipulations must be done as root.
    enforce_root()

    # TODO: This section is redundant in container_add_ip and elsewhere
    if container_id.startswith("/") and os.path.exists(container_id):
        # The ID is a path.  Don't do any docker lookups
        workload_id = escape_etcd(container_id)
        orchestrator_id = NAMESPACE_ORCHESTRATOR_ID
        namespace = netns.Namespace(container_id)
    else:
        info = get_container_info_or_exit(container_id)
        workload_id = info["Id"]
        orchestrator_id = DOCKER_ORCHESTRATOR_ID
        namespace = netns.PidNamespace(info["State"]["Pid"])

        # Check the container is actually running.
        if not info["State"]["Running"]:
            print_paragraph("%s is not currently running." % container_id)
            sys.exit(1)

        # We can't set up Calico if the container shares the host namespace.
        if info["HostConfig"]["NetworkMode"] == "host":
            print_paragraph("Can't add %s to Calico because it is "
                            "running NetworkMode = host." % container_id)
            sys.exit(1)

    # Check if the container already exists
    try:
        _ = client.get_endpoint(hostname=hostname,
                                orchestrator_id=orchestrator_id,
                                workload_id=workload_id)
    except KeyError:
        # Calico doesn't know about this container.  Continue.
        pass
    else:
        # Calico already set up networking for this container.  Since we got
        # called with an IP address, we shouldn't just silently exit, since
        # that would confuse the user: the container would not be reachable
        # on that IP address.
        print_paragraph("%s has already been configured with Calico "
                        "Networking." % container_id)
        sys.exit(1)

    ip, pool = get_ip_and_pool(ip)

    try:
        # The next hop IPs for this host are stored in etcd.
        next_hops = client.get_default_next_hops(hostname)
        next_hops[ip.version]
    except KeyError:
        print_paragraph("This node is not configured for IPv%d. "
                        "Is calico-node running?" % ip.version)
        unallocated_ips = client.release_ips({ip})
        if unallocated_ips:
            print_paragraph("Error during cleanup. %s was already "
                            "unallocated." % ip)
        sys.exit(1)

    # Get the next hop for the IP address.
    next_hop = next_hops[ip.version]

    network = IPNetwork(IPAddress(ip))
    ep = Endpoint(hostname=hostname,
                  orchestrator_id=DOCKER_ORCHESTRATOR_ID,
                  workload_id=workload_id,
                  endpoint_id=uuid.uuid1().hex,
                  state="active",
                  mac=None)
    if network.version == 4:
        ep.ipv4_nets.add(network)
        ep.ipv4_gateway = next_hop
    else:
        ep.ipv6_nets.add(network)
        ep.ipv6_gateway = next_hop

    # Create the veth, move into the container namespace, add the IP and
    # set up the default routes.
    netns.increment_metrics(namespace)
    netns.create_veth(ep.name, ep.temp_interface_name)
    netns.move_veth_into_ns(namespace, ep.temp_interface_name, interface)
    netns.add_ip_to_ns_veth(namespace, ip, interface)
    netns.add_ns_default_route(namespace, next_hop, interface)

    # Grab the MAC assigned to the veth in the namespace.
    ep.mac = netns.get_ns_veth_mac(namespace, interface)

    # Register the endpoint with Felix.
    client.set_endpoint(ep)

    # Let the caller know what endpoint was created.
    print_paragraph("IP %s added to %s" % (str(ip), container_id))
    return ep
import os
import sys

import docker
import docker.errors
from pycalico.ipam import IPAMClient
from pycalico.datastore import (ETCD_AUTHORITY_ENV,
                                ETCD_AUTHORITY_DEFAULT,
                                ETCD_SCHEME_ENV,
                                ETCD_SCHEME_DEFAULT,
                                ETCD_KEY_FILE_ENV,
                                ETCD_CERT_FILE_ENV,
                                ETCD_CA_CERT_FILE_ENV,
                                DataStoreError)
from utils import DOCKER_VERSION
from utils import print_paragraph
from pycalico.util import validate_hostname_port

# If an ETCD_AUTHORITY is specified in the environment variables, validate
# it.
etcd_authority = os.getenv(ETCD_AUTHORITY_ENV, ETCD_AUTHORITY_DEFAULT)
if etcd_authority and not validate_hostname_port(etcd_authority):
    print_paragraph("Invalid %s. It must take the form <address>:<port>. "
                    "Value provided is '%s'" % (ETCD_AUTHORITY_ENV,
                                                etcd_authority))
    sys.exit(1)

try:
    client = IPAMClient()
except DataStoreError as e:
    print_paragraph(e.message)
    sys.exit(1)

DOCKER_URL = os.getenv("DOCKER_HOST", "unix://var/run/docker.sock")
docker_client = docker.Client(version=DOCKER_VERSION, base_url=DOCKER_URL)
def container(arguments): """ Main dispatcher for container commands. Calls the corresponding helper function. :param arguments: A dictionary of arguments already processed through this file's docstring with docopt :return: None """ validate_arguments(arguments) try: if arguments.get("ip"): if arguments.get("add"): container_ip_add(arguments.get("<CONTAINER>"), arguments.get("<IP>"), arguments.get("--interface")) elif arguments.get("remove"): container_ip_remove(arguments.get("<CONTAINER>"), arguments.get("<IP>"), arguments.get("--interface")) else: if arguments.get("add"): container_add(arguments.get("<CONTAINER>"), arguments.get("<IP>"), arguments.get("--interface")) if arguments.get("remove"): container_remove(arguments.get("<CONTAINER>")) elif arguments.get("endpoint"): orchestrator_id, workload_id = \ lookup_workload(arguments.get("<CONTAINER>")) endpoint.endpoint_show(hostname, orchestrator_id, workload_id, None, True) elif arguments.get("profile"): orchestrator_id, workload_id = \ lookup_workload(arguments.get("<CONTAINER>")) if arguments.get("append"): endpoint.endpoint_profile_append(hostname, orchestrator_id, workload_id, None, arguments['<PROFILES>']) elif arguments.get("remove"): endpoint.endpoint_profile_remove(hostname, orchestrator_id, workload_id, None, arguments['<PROFILES>']) elif arguments.get("set"): endpoint.endpoint_profile_set(hostname, orchestrator_id, workload_id, None, arguments['<PROFILES>']) else: if arguments.get("add"): container_add(arguments.get("<CONTAINER>"), arguments.get("<IP>"), arguments.get("--interface")) if arguments.get("remove"): container_remove(arguments.get("<CONTAINER>")) except ConnectionError as e: # We hit a "Permission denied error (13) if the docker daemon # does not have sudo permissions if permission_denied_error(e): print_paragraph("Unable to run command. Re-run the " "command as root, or configure the docker " "group to run with sudo privileges (see docker " "installation guide for details).") else: print_paragraph("Unable to run docker commands. Is the docker " "daemon running?") sys.exit(1)
                    f.write("FILE, %s, %s\n" % (child.key, child.value))
        except EtcdException, e:
            print "Unable to dump etcd datastore"
            f.write("Unable to dump etcd datastore: %s" % e)

    # Create tar.
    tar_filename = datetime.strftime(datetime.today(),
                                     "diags-%d%m%y_%H%M%S.tar.gz")
    full_tar_path = os.path.join(temp_dir, tar_filename)
    with tarfile.open(full_tar_path, "w:gz") as tar:
        # pass in arcname, otherwise zip contains layers of subfolders
        tar.add(temp_dir, arcname="")

    print("\nDiags saved to %s\n" % (full_tar_path))
    print_paragraph("If required, you can upload the diagnostics bundle to a "
                    "file sharing service such as transfer.sh using curl or "
                    "similar. For example:")
    print("  curl --upload-file %s https://transfer.sh/%s" %
          (full_tar_path, os.path.basename(full_tar_path)))


def write_diags(comment, command):
    if comment:
        print comment

    # Strip out non letters and numbers from the command to form the filename
    filename = re.sub(r'[^a-zA-Z0-9 -]', "", command)

    # And substitute underscore for spaces
    filename = re.sub(r'\s', "_", filename)
def save_diags(log_dir): # Create temp directory temp_dir = tempfile.mkdtemp() temp_diags_dir = os.path.join(temp_dir, 'diagnostics') os.mkdir(temp_diags_dir) print("Using temp dir: %s" % temp_dir) # Write date to file with open(os.path.join(temp_diags_dir, 'date'), 'w') as f: f.write("DATE=%s" % datetime.strftime(datetime.today(), "%Y-%m-%d_%H-%M-%S")) # Write hostname to file with open(os.path.join(temp_diags_dir, 'hostname'), 'w') as f: f.write("%s" % socket.gethostname()) # Write netstat output to file with open(os.path.join(temp_diags_dir, 'netstat'), 'w') as f: try: print("Dumping netstat output") netstat = sh.Command._create("netstat") f.writelines( netstat( # Display all sockets (default: connected) all=True, # Don't resolve names numeric=True)) except sh.CommandNotFound as e: print "Missing command: %s" % e.message # Write routes print("Dumping routes") with open(os.path.join(temp_diags_dir, 'route'), 'w') as f: try: route = sh.Command._create("route") f.write("route --numeric\n") f.writelines(route(numeric=True)) f.write('\n') except sh.CommandNotFound as e: print "Missing command: %s" % e.message try: ip = sh.Command._create("ip") f.write("ip route\n") f.writelines(ip("route")) f.write('\n') f.write("ip -6 route\n") f.writelines(ip("-6", "route")) f.write('\n') except sh.CommandNotFound as e: print "Missing command: %s" % e.message # Dump iptables with open(os.path.join(temp_diags_dir, 'iptables'), 'w') as f: try: iptables_save = sh.Command._create("iptables-save") print("Dumping iptables") f.writelines(iptables_save()) except sh.CommandNotFound as e: print "Missing command: %s" % e.message # Dump ipset list # TODO: ipset might not be installed on the host. But we don't want to # gather the diags in the container because it might not be running... with open(os.path.join(temp_diags_dir, 'ipset'), 'w') as f: try: ipset = sh.Command._create("ipset") print("Dumping ipset") f.writelines(ipset("list")) except sh.CommandNotFound as e: print "Missing command: %s" % e.message except sh.ErrorReturnCode_1 as e: print "Error running ipset. Maybe you need to run as root." # Ask Felix to dump stats to its log file - ignore errors as the # calico-node might not be running subprocess.call( ["docker", "exec", "calico-node", "pkill", "-SIGUSR1", "felix"]) if os.path.isdir(log_dir): print("Copying Calico logs") # Skip the lock files as they can only be copied by root. copytree(log_dir, os.path.join(temp_diags_dir, "logs"), ignore=ignore_patterns('lock')) else: print('No logs found in %s; skipping log copying' % log_dir) print("Dumping datastore") # TODO: May want to move this into datastore.py as a dump-calico function try: datastore_client = DatastoreClient() datastore_data = datastore_client.etcd_client.read("/calico", recursive=True) with open(os.path.join(temp_diags_dir, 'etcd_calico'), 'w') as f: f.write("dir?, key, value\n") # TODO: python-etcd bug: Leaves show up twice in get_subtree(). for child in datastore_data.get_subtree(): if child.dir: f.write("DIR, %s,\n" % child.key) else: f.write("FILE, %s, %s\n" % (child.key, child.value)) except EtcdException: print "Unable to dump etcd datastore" # Create tar. 
tar_filename = datetime.strftime(datetime.today(), "diags-%d%m%y_%H%M%S.tar.gz") full_tar_path = os.path.join(temp_dir, tar_filename) with tarfile.open(full_tar_path, "w:gz") as tar: # pass in arcname, otherwise zip contains layers of subfolders tar.add(temp_dir, arcname="") print("\nDiags saved to %s\n" % (full_tar_path)) print_paragraph("If required, you can upload the diagnostics bundle to a " "file sharing service such as transfer.sh using curl or " "similar. For example:") print(" curl --upload-file %s https://transfer.sh/%s" % (full_tar_path, os.path.basename(full_tar_path)))
def node_start(node_image, log_dir, ip, ip6, as_num, detach, kubernetes): """ Create the calico-node container and establish Calico networking on this host. :param ip: The IPv4 address of the host. :param node_image: The calico-node image to use. :param ip6: The IPv6 address of the host (or None if not configured) :param as_num: The BGP AS Number to use for this node. If not specified the global default value will be used. :param detach: True to run in Docker's "detached" mode, False to run attached. :return: None. """ # Ensure log directory exists if not os.path.exists(log_dir): os.makedirs(log_dir) # Print warnings for any known system issues before continuing check_system(fix=False, quit_if_error=False) # Get IP address of host, if none was specified if not ip: ips = get_host_ips(exclude=["docker0"]) try: ip = ips.pop() except IndexError: print "Couldn't autodetect a management IP address. Please provide" \ " an IP by rerunning the command with the --ip=<IP_ADDRESS> flag." sys.exit(1) else: print "No IP provided. Using detected IP: %s" % ip # Verify that the chosen IP exists on the current host warn_if_unknown_ip(ip, ip6) # Warn if this hostname conflicts with an existing host warn_if_hostname_conflict(ip) # Install kubernetes plugin if kubernetes: try: # Attempt to install to the default kubernetes directory install_kubernetes(KUBERNETES_PLUGIN_DIR) except OSError: # Use the backup directory install_kubernetes(KUBERNETES_PLUGIN_DIR_BACKUP) # Set up etcd ipv4_pools = client.get_ip_pools(4) ipv6_pools = client.get_ip_pools(6) # Create default pools if required if not ipv4_pools: client.add_ip_pool(4, DEFAULT_IPV4_POOL) if not ipv6_pools: client.add_ip_pool(6, DEFAULT_IPV6_POOL) client.ensure_global_config() client.create_host(hostname, ip, ip6, as_num) try: docker_client.remove_container("calico-node", force=True) except docker.errors.APIError as err: if err.response.status_code != 404: raise etcd_authority = os.getenv(ETCD_AUTHORITY_ENV, ETCD_AUTHORITY_DEFAULT) etcd_authority_split = etcd_authority.split(':') if len(etcd_authority_split) != 2: print_paragraph("Invalid %s. Must take the form <address>:<port>. Value " \ "provided is '%s'" % (ETCD_AUTHORITY_ENV, etcd_authority)) sys.exit(1) etcd_authority_address = etcd_authority_split[0] etcd_authority_port = etcd_authority_split[1] # Always try to convert the address(hostname) to an IP. This is a noop if # the address is already an IP address. etcd_authority = '%s:%s' % (socket.gethostbyname(etcd_authority_address), etcd_authority_port) environment = [ "HOSTNAME=%s" % hostname, "IP=%s" % ip, "IP6=%s" % (ip6 or ""), "ETCD_AUTHORITY=%s" % etcd_authority, # etcd host:port "FELIX_ETCDADDR=%s" % etcd_authority, # etcd host:port ] binds = { "/proc": { "bind": "/proc_host", "ro": False }, log_dir: { "bind": "/var/log/calico", "ro": False }, "/run/docker/plugins": { "bind": "/usr/share/docker/plugins", "ro": False } } host_config = docker.utils.create_host_config( privileged=True, restart_policy={"Name": "Always"}, network_mode="host", binds=binds) _find_or_pull_node_image(node_image, docker_client) container = docker_client.create_container( node_image, name="calico-node", detach=True, environment=environment, host_config=host_config, volumes=["/proc_host", "/var/log/calico", "/usr/share/docker/plugins"]) cid = container["Id"] docker_client.start(container) print "Calico node is running with id: %s" % cid if not detach: _attach_and_stream(container)
def container(arguments): """ Main dispatcher for container commands. Calls the corresponding helper function. :param arguments: A dictionary of arguments already processed through this file's docstring with docopt :return: None """ validate_arguments(arguments) try: workload_id = None if "<CONTAINER>" in arguments: container_id = arguments.get("<CONTAINER>") if container_id.startswith("/") and os.path.exists(container_id): # The ID is a path. Don't do any docker lookups workload_id = escape_etcd(container_id) orchestrator_id = NAMESPACE_ORCHESTRATOR_ID else: info = get_container_info_or_exit(container_id) workload_id = info["Id"] orchestrator_id = DOCKER_ORCHESTRATOR_ID if arguments.get("ip"): if arguments.get("add"): container_ip_add(arguments.get("<CONTAINER>"), arguments.get("<IP>"), arguments.get("--interface")) elif arguments.get("remove"): container_ip_remove(arguments.get("<CONTAINER>"), arguments.get("<IP>"), arguments.get("--interface")) else: if arguments.get("add"): container_add(arguments.get("<CONTAINER>"), arguments.get("<IP>"), arguments.get("--interface")) if arguments.get("remove"): container_remove(arguments.get("<CONTAINER>")) elif arguments.get("endpoint"): endpoint.endpoint_show(hostname, orchestrator_id, workload_id, None, True) elif arguments.get("profile"): if arguments.get("append"): endpoint.endpoint_profile_append(hostname, orchestrator_id, workload_id, None, arguments['<PROFILES>']) elif arguments.get("remove"): endpoint.endpoint_profile_remove(hostname, orchestrator_id, workload_id, None, arguments['<PROFILES>']) elif arguments.get("set"): endpoint.endpoint_profile_set(hostname, orchestrator_id, workload_id, None, arguments['<PROFILES>']) else: if arguments.get("add"): container_add(arguments.get("<CONTAINER>"), arguments.get("<IP>"), arguments.get("--interface")) if arguments.get("remove"): container_remove(arguments.get("<CONTAINER>")) except ConnectionError as e: # We hit a "Permission denied error (13) if the docker daemon # does not have sudo permissions if permission_denied_error(e): print_paragraph("Unable to run command. Re-run the " "command as root, or configure the docker " "group to run with sudo privileges (see docker " "installation guide for details).") else: print_paragraph("Unable to run docker commands. Is the docker " "daemon running?") sys.exit(1)
def validate_arguments(arguments):
    """
    Validate argument values:
        <NAME>
        <VALUE>

    :param arguments: Docopt processed arguments
    """
    config_data = _get_config_data(arguments)
    name = arguments["<NAME>"]
    if name not in config_data:
        print_paragraph("The configuration '%s' is not recognized as a "
                        "valid option." % name)
        if arguments["--force"]:
            print_paragraph("The --force option is set.")
            return
        else:
            print_paragraph("Use the --force option to override.")
            sys.exit(1)

    value = arguments.get("<VALUE>")
    if value:
        _, valid_values = config_data[name]
        valid_values_re = re.compile(valid_values)
        if not valid_values_re.match(value):
            print_paragraph("The configuration value '%s' is not recognized "
                            "as a valid value." % value)
            if arguments["--force"]:
                print_paragraph("The --force option is set.")
                return
            else:
                print_paragraph("Use the --force option to override.")
                sys.exit(1)
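# Illustration (not part of the original module): a minimal, self-contained
# sketch of the validation pattern used above.  The mapping below is made up
# purely for the example -- in the real code it comes from
# _get_config_data(arguments), and each entry's second element is a regex
# describing the valid values for that option.
if __name__ == "__main__":
    import re

    example_config_data = {
        "logLevel": ("felix", r"^(none|debug|info|warning|error|critical)$"),
    }

    def _value_ok(name, value):
        """Return True if name is known and value matches its regex."""
        if name not in example_config_data:
            return False
        _, valid_values = example_config_data[name]
        return re.compile(valid_values).match(value) is not None

    print _value_ok("logLevel", "info")    # True
    print _value_ok("logLevel", "chatty")  # False
    print _value_ok("madeUpName", "x")     # False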
def validate_arguments(arguments): """ Validate argument values: <PROFILE> <SRCTAG> <SRCCIDR> <DSTTAG> <DSTCIDR> <ICMPTYPE> <ICMPCODE> <SRCPORTS> <DSTPORTS> Arguments not validated: <POSITION> :param arguments: Docopt processed arguments """ # Validate Profiles profile_ok = True if arguments.get("<PROFILE>") is not None: profile = arguments.get("<PROFILE>") profile_ok = validate_characters(profile) # Validate tags tag_src_ok = (arguments.get("<SRCTAG>") is None or validate_characters(arguments["<SRCTAG>"])) tag_dst_ok = (arguments.get("<DSTTAG>") is None or validate_characters(arguments["<DSTTAG>"])) # Validate IPs cidr_ok = True for arg in ["<SRCCIDR>", "<DSTCIDR>"]: if arguments.get(arg) is not None: cidr_ok = validate_cidr(arguments[arg]) if not cidr_ok: break icmp_ok = True for arg in ["<ICMPCODE>", "<ICMPTYPE>"]: if arguments.get(arg) is not None: icmp_ok = validate_icmp_type(arguments[arg]) if not icmp_ok: break ports_ok = True for arg in ["<SRCPORTS>", "<DSTPORTS>"]: if arguments.get(arg) is not None: ports_ok = validate_ports(arguments[arg]) if not ports_ok: break # Print error message if not profile_ok: print_paragraph("Profile names must be < 40 character long and can " "only contain numbers, letters, dots, dashes and " "underscores.") if not (tag_src_ok and tag_dst_ok): print_paragraph("Tags names can only contain numbers, letters, dots, " "dashes and underscores.") if not cidr_ok: print "Invalid CIDR specified." if not icmp_ok: print "Invalid ICMP type or ICMP code specified." if not ports_ok: print "Invalid SRCPORTS or DSTPORTS specified." # Exit if not valid if not (profile_ok and tag_src_ok and tag_dst_ok and cidr_ok and icmp_ok and ports_ok): sys.exit(1)
def validate_arguments(arguments): """ Validate argument values: <PROFILE> <SRCTAG> <SRCCIDR> <DSTTAG> <DSTCIDR> <ICMPTYPE> <ICMPCODE> <SRCPORTS> <DSTPORTS> Arguments not validated: <POSITION> :param arguments: Docopt processed arguments """ # Validate Profiles profile_ok = True if arguments.get("<PROFILE>") is not None: profile = arguments.get("<PROFILE>") profile_ok = validate_characters(profile) # Validate tags tag_src_ok = (arguments.get("<SRCTAG>") is None or validate_characters(arguments["<SRCTAG>"])) tag_dst_ok = (arguments.get("<DSTTAG>") is None or validate_characters(arguments["<DSTTAG>"])) # Validate IPs cidr_ok = True cidr_list = [] for arg in ["<SRCCIDR>", "<DSTCIDR>"]: if arguments.get(arg) is not None: cidr_list.append(arguments[arg]) cidr_ok = validate_cidr(arguments[arg]) if not cidr_ok: break icmp_ok = True for arg in ["<ICMPCODE>", "<ICMPTYPE>"]: if arguments.get(arg) is not None: icmp_ok = validate_icmp_type(arguments[arg]) if not icmp_ok: break ports_ok = True for arg in ["<SRCPORTS>", "<DSTPORTS>"]: if arguments.get(arg) is not None: ports_ok = validate_ports(arguments[arg]) if not ports_ok: break cidr_versions_ok = True if cidr_list: ip_version = None if arguments.get("icmp"): ip_version = 4 elif arguments.get("icmpv6"): ip_version = 6 cidr_versions_ok = validate_cidr_versions(cidr_list, ip_version=ip_version) # Print error message if not profile_ok: print_paragraph("Profile names must be < 40 character long and can " "only contain numbers, letters, dots, dashes and " "underscores.") if not (tag_src_ok and tag_dst_ok): print_paragraph("Tags names can only contain numbers, letters, dots, " "dashes and underscores.") if not cidr_ok: print "Invalid CIDR specified." if not icmp_ok: print "Invalid ICMP type or ICMP code specified." if not ports_ok: print "Invalid SRCPORTS or DSTPORTS specified." if not cidr_versions_ok: print "Invalid or unmatching IP versions for SRCCIDR/DSTCIDR." # Exit if not valid if not (profile_ok and tag_src_ok and tag_dst_ok and cidr_ok and icmp_ok and ports_ok and cidr_versions_ok): sys.exit(1)
# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import docker import docker.errors from pycalico.ipam import IPAMClient from pycalico.datastore import (ETCD_AUTHORITY_ENV, ETCD_AUTHORITY_DEFAULT, ETCD_SCHEME_ENV, ETCD_SCHEME_DEFAULT, ETCD_KEY_FILE_ENV, ETCD_CERT_FILE_ENV, ETCD_CA_CERT_FILE_ENV, DataStoreError) from utils import DOCKER_VERSION from utils import print_paragraph from pycalico.util import validate_hostname_port try: client = IPAMClient() except DataStoreError as e: print_paragraph(e.message) sys.exit(1) DOCKER_URL = os.getenv("DOCKER_HOST", "unix://var/run/docker.sock") docker_client = docker.Client(version=DOCKER_VERSION, base_url=DOCKER_URL)