Example #1
def save_diags(log_dir):
    # Create temp directory
    temp_dir = tempfile.mkdtemp()
    global temp_diags_dir
    temp_diags_dir = os.path.join(temp_dir, 'diagnostics')
    os.mkdir(temp_diags_dir)
    print("Using temp dir: %s" % temp_dir)

    write_diags(None, "date")
    write_diags(None, "hostname")
    write_diags("Dumping netstat", "netstat --all --numeric")
    write_diags("Dumping routes (IPv4)", "ip -4 route")
    write_diags("Dumping routes (IPv6)", "ip -6 route")
    write_diags("Dumping interface info (IPv4)", "ip -4 addr")
    write_diags("Dumping interface info (IPv6)", "ip -6 addr")
    write_diags("Dumping iptables (IPv4)", "iptables-save")
    write_diags("Dumping iptables (IPv6)", "ip6tables-save")
    write_diags("Dumping ipsets", "ipset list")
    # If running under rkt, get the journal for the calico/node container.
    write_diags("Copying journal for calico-node.service",
                "journalctl -u calico-node.service --no-pager")

    # Ask Felix to dump stats to its log file - ignore errors as the
    # calico/node container might not be running.  Gathering of the logs is
    # dependent on environment.
    print("Dumping felix stats")
    subprocess.call(["pkill", "-SIGUSR1", "felix"])

    # Otherwise, calico logs are in the log directory.
    if os.path.isdir(log_dir):
        print("Copying Calico logs")
        # Skip the lock files as they can only be copied by root.
        copytree(log_dir,
                 os.path.join(temp_diags_dir, "logs"),
                 ignore=ignore_patterns('lock'))
    else:
        print('No logs found in %s; skipping log copying' % log_dir)

    # Dump the contents of the etcd datastore.
    print("Dumping datastore")
    with DiagsErrorWriter(temp_diags_dir, 'etcd_calico') as f:
        try:
            datastore_client = DatastoreClient()
            datastore_data = datastore_client.etcd_client.read("/calico",
                                                               recursive=True)
            f.write("dir?, key, value\n")
            # TODO: python-etcd bug: Leaves show up twice in get_subtree().
            for child in datastore_data.get_subtree():
                if child.dir:
                    f.write("DIR,  %s,\n" % child.key)
                else:
                    f.write("FILE, %s, %s\n" % (child.key, child.value))
        except EtcdException as e:
            print("Unable to dump etcd datastore")
            f.write("Unable to dump etcd datastore: %s" % e)
Example #2
    def __init__(self, network_name):

        self._client = DatastoreClient()
        """
        DatastoreClient for access to the Calico datastore.
        """

        self.profile_name = network_name
        """
        Name of profile for attach to endpoint.
        """

        # Validate the given network name to make sure it is compatible with
        # Calico policy.
        if not validate_characters(network_name):
            raise ValueError(
                "Invalid characters detected in the given network "
                "name, %s. Only letters a-z, numbers 0-9, and "
                "symbols _.- are supported." % network_name)
Example #3
    def __init__(self):
        self._event_queue = Queue.Queue(maxsize=MAX_QUEUE_SIZE)
        """
        Queue to populate with events from API watches.
        """

        self.k8s_api = os.environ.get("K8S_API", DEFAULT_API)
        """
        Scheme, IP and port of the Kubernetes API.
        """

        self.auth_token = os.environ.get("K8S_AUTH_TOKEN", read_token_file())
        """
        Auth token to use when accessing the API.
        """
        _log.debug("Using auth token: %s", self.auth_token)

        self.ca_crt_exists = os.path.exists(CA_CERT_PATH)
        """
        True if a CA cert has been mounted by Kubernetes.
        """

        self._client = DatastoreClient()
        """
        Client for accessing the Calico datastore.
        """

        self._handlers = {}
        self.add_handler(RESOURCE_TYPE_NETWORK_POLICY, TYPE_ADDED,
                         add_update_network_policy)
        self.add_handler(RESOURCE_TYPE_NETWORK_POLICY, TYPE_DELETED,
                         delete_network_policy)
        self.add_handler(RESOURCE_TYPE_NAMESPACE, TYPE_ADDED,
                         add_update_namespace)
        self.add_handler(RESOURCE_TYPE_NAMESPACE, TYPE_DELETED,
                         delete_namespace)
        self.add_handler(RESOURCE_TYPE_POD, TYPE_ADDED, add_update_pod)
        self.add_handler(RESOURCE_TYPE_POD, TYPE_DELETED, delete_pod)
        """
Example #4
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import simplejson as json
from constants import *
from pycalico.datastore import DatastoreClient
from pycalico.datastore_datatypes import Rules, Rule

_log = logging.getLogger("__main__")
client = DatastoreClient()


def add_update_namespace(namespace):
    """
    Configures a Profile for the given Kubernetes namespace.
    """
    namespace_name = namespace["metadata"]["name"]
    _log.debug("Adding/updating namespace: %s", namespace_name)

    # Determine the profile name to create.
    profile_name = NS_PROFILE_FMT % namespace_name

    # Build the rules to use.
    rules = Rules(inbound_rules=[Rule(action="allow")],
                  outbound_rules=[Rule(action="allow")])
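NS_PROFILE_FMT comes from the constants module and is not shown in this excerpt. Assuming it is a plain %-format string, the profile name and rules above resolve roughly as in this illustrative sketch (the real format string may differ):

from pycalico.datastore_datatypes import Rules, Rule

NS_PROFILE_FMT = "ns.%s"   # assumed value, for illustration only
namespace = {"metadata": {"name": "default"}}

profile_name = NS_PROFILE_FMT % namespace["metadata"]["name"]
print(profile_name)        # -> "ns.default"

# Allow all traffic in both directions, mirroring the Rules built above.
rules = Rules(inbound_rules=[Rule(action="allow")],
              outbound_rules=[Rule(action="allow")])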
Example #5
    def __init__(self, network_config, env):
        self._client = DatastoreClient()
        """
        DatastoreClient for access to the Calico datastore.
        """

        # Parse CNI_ARGS into dictionary so we can extract values.
        cni_args = parse_cni_args(env.get(CNI_ARGS_ENV, ""))

        self.k8s_pod_name = cni_args.get(K8S_POD_NAME)
        """
        Name of Kubernetes pod if running under Kubernetes, else None.
        """

        self.k8s_namespace = cni_args.get(K8S_POD_NAMESPACE)
        """
        Name of Kubernetes namespace if running under Kubernetes, else None.
        """

        self.network_config = network_config
        """
        Network config as provided in the CNI network file passed in
        via stdin.
        """

        self.network_name = network_config["name"]
        """
        Name of the network from the provided network config file.
        """

        self.ipam_type = network_config["ipam"]["type"]
        """
        Type of IPAM to use, e.g. calico-ipam.
        """

        self.hostname = network_config.get("hostname", socket.gethostname())
        """
        The hostname to register endpoints under.
        """

        self.container_engine = get_container_engine(self.k8s_pod_name)
        """
        Chooses the correct container engine based on the given configuration.
        """

        self.ipam_env = env
        """
        Environment dictionary used when calling the IPAM plugin.
        """

        self.command = env[CNI_COMMAND_ENV]
        assert self.command in [CNI_CMD_DELETE, CNI_CMD_ADD], \
                "Invalid CNI command %s" % self.command
        """
        The command to execute for this plugin instance. Required.
        One of:
          - CNI_CMD_ADD
          - CNI_CMD_DELETE
        """

        self.container_id = env[CNI_CONTAINERID_ENV]
        """
        The container's ID in the containerizer. Required.
        """

        self.cni_netns = env[CNI_NETNS_ENV]
        """
        Relative path to the network namespace of this container.
        """

        self.interface = env[CNI_IFNAME_ENV]
        """
        Name of the interface to create within the container.
        """

        self.cni_path = env[CNI_PATH_ENV]
        """
        Path in which to search for CNI plugins.
        """

        self.running_under_k8s = self.k8s_namespace and self.k8s_pod_name
        if self.running_under_k8s:
            self.workload_id = "%s.%s" % (self.k8s_namespace,
                                          self.k8s_pod_name)
            self.orchestrator_id = "k8s"
        else:
            self.workload_id = self.container_id
            self.orchestrator_id = "cni"
        kubernetes_config = network_config.get("kubernetes", {})
        self.kubeconfig_path = kubernetes_config.get("kubeconfig")
        self.k8s_node_name = kubernetes_config.get("node_name",
                                                   socket.gethostname())
        """
        Configure orchestrator specific settings.
        workload_id: In Kubernetes, this is the pod's namespace and name.
                     Otherwise, this is the container ID.
        orchestrator_id: Either "k8s" or "cni".
        """

        # Ensure that the ipam_env CNI_ARGS contains the IgnoreUnknown=1 option
        # See https://github.com/appc/cni/pull/158
        # And https://github.com/appc/cni/pull/127
        self.ipam_env[CNI_ARGS_ENV] = 'IgnoreUnknown=1'
        if env.get(CNI_ARGS_ENV):
            # Append any existing args - if they are set.
            self.ipam_env[CNI_ARGS_ENV] += ";%s" % env.get(CNI_ARGS_ENV)

        self.policy_driver = get_policy_driver(self)
        """
Example #6
    def __init__(self):
        self._event_queue = Queue.Queue(maxsize=MAX_QUEUE_SIZE)
        """
        Queue to populate with events from API watches.
        """

        self.k8s_api = os.environ.get("K8S_API", DEFAULT_API)
        """
        Scheme, IP and port of the Kubernetes API.
        """

        self.auth_token = os.environ.get("K8S_AUTH_TOKEN", read_token_file())
        """
        Auth token to use when accessing the API.
        """
        _log.debug("Using auth token: %s", self.auth_token)

        self.ca_crt_exists = os.path.exists(CA_CERT_PATH)
        """
        True if a CA cert has been mounted by Kubernetes.
        """

        self._client = DatastoreClient()
        """
        Client for accessing the Calico datastore.
        """

        self._leader_election_url = os.environ.get("ELECTION_URL",
                                                   "http://127.0.0.1:4040/")
        """
        Use this URL to get leader election status from the sidecar container.
        """

        elect = os.environ.get("LEADER_ELECTION", "false")
        self._leader_elect = elect.lower() == "true"
        """
        Whether or not leader election is enabled.  If set to False, this
        policy controller will assume it is the only instance.
        """

        self._handlers = {}
        """
        Keeps track of which handlers to execute for various events.
        """

        # Handlers for NetworkPolicy events.
        self.add_handler(RESOURCE_TYPE_NETWORK_POLICY, TYPE_ADDED,
                         add_update_network_policy)
        self.add_handler(RESOURCE_TYPE_NETWORK_POLICY, TYPE_MODIFIED,
                         add_update_network_policy)
        self.add_handler(RESOURCE_TYPE_NETWORK_POLICY, TYPE_DELETED,
                         delete_network_policy)

        # Handlers for Namespace events.
        self.add_handler(RESOURCE_TYPE_NAMESPACE, TYPE_ADDED,
                         add_update_namespace)
        self.add_handler(RESOURCE_TYPE_NAMESPACE, TYPE_MODIFIED,
                         add_update_namespace)
        self.add_handler(RESOURCE_TYPE_NAMESPACE, TYPE_DELETED,
                         delete_namespace)

        # Handlers for Pod events.
        self.add_handler(RESOURCE_TYPE_POD, TYPE_ADDED, add_pod)
        self.add_handler(RESOURCE_TYPE_POD, TYPE_MODIFIED, update_pod)
        self.add_handler(RESOURCE_TYPE_POD, TYPE_DELETED, delete_pod)
Example #7
def save_diags(log_dir):
    # Create temp directory
    temp_dir = tempfile.mkdtemp()
    temp_diags_dir = os.path.join(temp_dir, 'diagnostics')
    os.mkdir(temp_diags_dir)
    print("Using temp dir: %s" % temp_dir)

    # Write date to file
    with open(os.path.join(temp_diags_dir, 'date'), 'w') as f:
        f.write("DATE=%s" %
                datetime.strftime(datetime.today(), "%Y-%m-%d_%H-%M-%S"))

    # Write hostname to file
    with open(os.path.join(temp_diags_dir, 'hostname'), 'w') as f:
        f.write("%s" % socket.gethostname())

    # Write netstat output to file
    with open(os.path.join(temp_diags_dir, 'netstat'), 'w') as f:
        try:
            print("Dumping netstat output")
            netstat = sh.Command._create("netstat")

            f.writelines(
                netstat(
                    # Display all sockets (default: connected)
                    all=True,
                    # Don't resolve names
                    numeric=True))

        except sh.CommandNotFound as e:
            print "Missing command: %s" % e.message

    # Write routes
    print("Dumping routes")
    with open(os.path.join(temp_diags_dir, 'route'), 'w') as f:
        try:
            route = sh.Command._create("route")
            f.write("route --numeric\n")
            f.writelines(route(numeric=True))
            f.write('\n')
        except sh.CommandNotFound as e:
            print "Missing command: %s" % e.message

        try:
            ip = sh.Command._create("ip")
            f.write("ip route\n")
            f.writelines(ip("route"))
            f.write('\n')

            f.write("ip -6 route\n")
            f.writelines(ip("-6", "route"))
            f.write('\n')
        except sh.CommandNotFound as e:
            print "Missing command: %s" % e.message

    # Dump iptables
    with open(os.path.join(temp_diags_dir, 'iptables'), 'w') as f:
        try:
            iptables_save = sh.Command._create("iptables-save")
            print("Dumping iptables")
            f.writelines(iptables_save())
        except sh.CommandNotFound as e:
            print "Missing command: %s" % e.message

    # Dump ipset list
    # TODO: ipset might not be installed on the host. But we don't want to
    # gather the diags in the container because it might not be running...
    with open(os.path.join(temp_diags_dir, 'ipset'), 'w') as f:
        try:
            ipset = sh.Command._create("ipset")
            print("Dumping ipset")
            f.writelines(ipset("list"))
        except sh.CommandNotFound as e:
            print "Missing command: %s" % e.message
        except sh.ErrorReturnCode_1 as e:
            print "Error running ipset. Maybe you need to run as root."

    # Ask Felix to dump stats to its log file - ignore errors as the
    # calico-node might not be running
    subprocess.call(
        ["docker", "exec", "calico-node", "pkill", "-SIGUSR1", "felix"])

    if os.path.isdir(log_dir):
        print("Copying Calico logs")
        # Skip the lock files as they can only be copied by root.
        copytree(log_dir,
                 os.path.join(temp_diags_dir, "logs"),
                 ignore=ignore_patterns('lock'))
    else:
        print('No logs found in %s; skipping log copying' % log_dir)

    print("Dumping datastore")
    # TODO: May want to move this into datastore.py as a dump-calico function
    try:
        datastore_client = DatastoreClient()
        datastore_data = datastore_client.etcd_client.read("/calico",
                                                           recursive=True)
        with open(os.path.join(temp_diags_dir, 'etcd_calico'), 'w') as f:
            f.write("dir?, key, value\n")
            # TODO: python-etcd bug: Leaves show up twice in get_subtree().
            for child in datastore_data.get_subtree():
                if child.dir:
                    f.write("DIR,  %s,\n" % child.key)
                else:
                    f.write("FILE, %s, %s\n" % (child.key, child.value))
    except EtcdException:
        print "Unable to dump etcd datastore"

    # Create tar.
    tar_filename = datetime.strftime(datetime.today(),
                                     "diags-%d%m%y_%H%M%S.tar.gz")
    full_tar_path = os.path.join(temp_dir, tar_filename)
    with tarfile.open(full_tar_path, "w:gz") as tar:
        # pass in arcname, otherwise zip contains layers of subfolders
        tar.add(temp_dir, arcname="")

    print("\nDiags saved to %s\n" % (full_tar_path))
    print_paragraph("If required, you can upload the diagnostics bundle to a "
                    "file sharing service such as transfer.sh using curl or "
                    "similar.  For example:")
    print("  curl --upload-file %s https://transfer.sh/%s" %
          (full_tar_path, os.path.basename(full_tar_path)))
Example #8
    def __init__(self, network_config, env):
        self.network_config = network_config
        """
        Network config as provided in the CNI network file passed in
        via stdin.
        """

        self.env = env
        """
        Copy of the environment variable dictionary. Contains CNI_* 
        variables.
        """

        self._client = DatastoreClient()
        """
        DatastoreClient for access to the Calico datastore.
        """

        self.command = env[CNI_COMMAND_ENV]
        """
        The command to execute for this plugin instance. Required. 
        One of:
          - CNI_CMD_ADD
          - CNI_CMD_DELETE
        """

        self.container_id = env[CNI_CONTAINERID_ENV]
        """
        The container's ID in the containerizer. Required.
        """

        self.cni_netns = env[CNI_NETNS_ENV]
        """
        Relative path to the network namespace of this container.
        """

        self.interface = env[CNI_IFNAME_ENV]
        """
        Name of the interface to create within the container.
        """

        self.cni_args = parse_cni_args(env[CNI_ARGS_ENV])
        """
        Dictionary of additional CNI arguments provided via
        the CNI_ARGS environment variable.
        """

        self.cni_path = env[CNI_PATH_ENV]
        """
        Path in which to search for CNI plugins.
        """

        self.network_name = network_config["name"]
        """
        Name of the network from the provided network config file.
        """

        self.ipam_result = None
        """
        Stores the output generated by the IPAM plugin.  This is printed
        to stdout at the end of execution.
        """

        self.policy_driver = self._get_policy_driver()
        """
        Chooses the correct policy driver based on the given configuration
        """

        self.container_engine = self._get_container_engine()
        """
Example #9
def save_diags(log_dir):
    # Create temp directory
    temp_dir = tempfile.mkdtemp()
    temp_diags_dir = os.path.join(temp_dir, 'diagnostics')
    os.mkdir(temp_diags_dir)
    print("Using temp dir: %s" % temp_dir)

    # Write date to file
    with DiagsErrorWriter(temp_diags_dir, 'date') as f:
        f.write("DATE=%s" %
                datetime.strftime(datetime.today(), "%Y-%m-%d_%H-%M-%S"))

    # Write hostname to file
    with DiagsErrorWriter(temp_diags_dir, 'hostname') as f:
        f.write("%s" % socket.gethostname())

    # Write netstat output to file
    with DiagsErrorWriter(temp_diags_dir, 'netstat') as f:
        try:
            print("Dumping netstat output")
            netstat = sh.Command._create("netstat")

            f.writelines(
                netstat(
                    # Display all sockets (default: connected)
                    all=True,
                    # Don't resolve names
                    numeric=True))

        except sh.CommandNotFound as e:
            print "  - Missing command: %s" % e.message
            f.writelines("Missing command: %s\n" % e.message)

    # Write routes
    print("Dumping routes")
    with DiagsErrorWriter(temp_diags_dir, 'route') as f:
        try:
            route = sh.Command._create("route")
            f.write("route --numeric\n")
            f.writelines(route(numeric=True))
            f.write('\n')
        except sh.CommandNotFound as e:
            print "  - Missing command: %s" % e.message
            f.writelines("Missing command: %s\n" % e.message)

        try:
            ip = sh.Command._create("ip")
            f.write("ip route\n")
            f.writelines(ip("route"))
            f.write('\n')

            f.write("ip -6 route\n")
            f.writelines(ip("-6", "route"))
            f.write('\n')
        except sh.CommandNotFound as e:
            print "  - Missing command: %s" % e.message
            f.writelines("Missing command: %s\n" % e.message)

    # Dump iptables
    with DiagsErrorWriter(temp_diags_dir, 'iptables') as f:
        try:
            iptables_save = sh.Command._create("iptables-save")
            print("Dumping iptables")
            f.writelines(iptables_save())
        except sh.CommandNotFound as e:
            print "  - Missing command: %s" % e.message
            f.writelines("Missing command: %s\n" % e.message)

    # Dump ipset list
    # TODO: ipset might not be installed on the host. But we don't want to
    # gather the diags in the container because it might not be running...
    with DiagsErrorWriter(temp_diags_dir, 'ipset') as f:
        try:
            ipset = sh.Command._create("ipset")
            print("Dumping ipset")
            f.writelines(ipset("list"))
        except sh.CommandNotFound as e:
            print "  - Missing command: %s" % e.message
            f.writelines("Missing command: %s\n" % e.message)
        except sh.ErrorReturnCode_1 as e:
            print "  - Error running ipset. Maybe you need to run as root."
            f.writelines("Error running ipset: %s\n" % e)

    # Ask Felix to dump stats to its log file - ignore errors as the
    # calico-node might not be running
    subprocess.call(
        ["docker", "exec", "calico-node", "pkill", "-SIGUSR1", "felix"])

    if os.path.isdir(log_dir):
        print("Copying Calico logs")
        # Skip the lock files as they can only be copied by root.
        copytree(log_dir,
                 os.path.join(temp_diags_dir, "logs"),
                 ignore=ignore_patterns('lock'))
    else:
        print('No logs found in %s; skipping log copying' % log_dir)

    print("Dumping datastore")
    # TODO: May want to move this into datastore.py as a dump-calico function
    with DiagsErrorWriter(temp_diags_dir, 'etcd_calico') as f:
        try:
            datastore_client = DatastoreClient()
            datastore_data = datastore_client.etcd_client.read("/calico",
                                                               recursive=True)
            f.write("dir?, key, value\n")
            # TODO: python-etcd bug: Leaves show up twice in get_subtree().
            for child in datastore_data.get_subtree():
                if child.dir:
                    f.write("DIR,  %s,\n" % child.key)
                else:
                    f.write("FILE, %s, %s\n" % (child.key, child.value))
        except EtcdException as e:
            print("Unable to dump etcd datastore")
            f.write("Unable to dump etcd datastore: %s" % e)
Example #10
def save_diags(log_dir, upload=False):
    # Create temp directory
    temp_dir = tempfile.mkdtemp()
    temp_diags_dir = os.path.join(temp_dir, 'diagnostics')
    os.mkdir(temp_diags_dir)
    print("Using temp dir: %s" % temp_dir)

    # Write date to file
    with open(os.path.join(temp_diags_dir, 'date'), 'w') as f:
        f.write("DATE=%s" %
                datetime.strftime(datetime.today(), "%Y-%m-%d_%H-%M-%S"))

    # Write hostname to file
    with open(os.path.join(temp_diags_dir, 'hostname'), 'w') as f:
        f.write("%s" % socket.gethostname())

    # Write netstat output to file
    with open(os.path.join(temp_diags_dir, 'netstat'), 'w') as f:
        try:
            print("Dumping netstat output")
            netstat = sh.Command._create("netstat")

            f.writelines(
                netstat(
                    # Display all sockets (default: connected)
                    all=True,
                    # Don't resolve names
                    numeric=True))

        except sh.CommandNotFound as e:
            print "Missing command: %s" % e.message

    # Write routes
    print("Dumping routes")
    with open(os.path.join(temp_diags_dir, 'route'), 'w') as f:
        try:
            route = sh.Command._create("route")
            f.write("route --numeric")
            f.writelines(route(numeric=True))
            f.write('\n')
        except sh.CommandNotFound as e:
            print "Missing command: %s" % e.message

        try:
            ip = sh.Command._create("ip")
            f.write("ip route")
            f.writelines(ip("route"))
            f.write('\n')

            f.write("ip -6 route")
            f.writelines(ip("-6", "route"))
            f.write('\n')
        except sh.CommandNotFound as e:
            print "Missing command: %s" % e.message

    # Dump iptables
    with open(os.path.join(temp_diags_dir, 'iptables'), 'w') as f:
        try:
            iptables_save = sh.Command._create("iptables-save")
            print("Dumping iptables")
            f.writelines(iptables_save())
        except sh.CommandNotFound as e:
            print "Missing command: %s" % e.message

    # Dump ipset list
    # TODO: ipset might not be installed on the host. But we don't want to
    # gather the diags in the container because it might not be running...
    with open(os.path.join(temp_diags_dir, 'ipset'), 'w') as f:
        try:
            ipset = sh.Command._create("ipset")
            print("Dumping ipset")
            f.writelines(ipset("list"))
        except sh.CommandNotFound as e:
            print "Missing command: %s" % e.message

    if os.path.isdir(log_dir):
        print("Copying Calico logs")
        copytree(log_dir, os.path.join(temp_diags_dir, "logs"))
    else:
        print('No logs found in %s; skipping log copying' % log_dir)

    print("Dumping datastore")
    # TODO: May want to move this into datastore.py as a dump-calico function
    try:
        datastore_client = DatastoreClient()
        datastore_data = datastore_client.etcd_client.read("/calico",
                                                           recursive=True)
        with open(os.path.join(temp_diags_dir, 'etcd_calico'), 'w') as f:
            f.write(str(datastore_data))
    except EtcdException:
        print "Unable to dump etcd datastore"

    # Create tar and upload
    tar_filename = datetime.strftime(datetime.today(),
                                     "diags-%d%m%y_%H%M%S.tar.gz")
    full_tar_path = os.path.join(temp_dir, tar_filename)
    with tarfile.open(full_tar_path, "w:gz") as tar:
        # pass in arcname, otherwise zip contains layers of subfolders
        tar.add(temp_dir, arcname="")

    print("Diags saved to %s" % (full_tar_path))

    if upload:
        upload_temp_diags(full_tar_path)
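upload_temp_diags() is not part of this excerpt. Going by the transfer.sh hint printed at the end of Example #7, a minimal sketch of an uploader (an assumption, not the original function) might be:

import os
import subprocess

def upload_temp_diags(diags_path):
    """Upload the diags bundle to transfer.sh and print the download URL.

    Hypothetical sketch; the real upload_temp_diags() may use another
    service or an HTTP library instead of shelling out to curl.
    """
    url = "https://transfer.sh/%s" % os.path.basename(diags_path)
    print("Uploading diags bundle to %s" % url)
    # Equivalent to: curl --upload-file <diags_path> <url>
    subprocess.call(["curl", "--upload-file", diags_path, url])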