Example #1
    def __produce_log_file(self, job_state):
        pod_r = Pod.objects(self._pykube_api).filter(selector="app=" +
                                                     job_state.job_id)
        log_string = ""
        for pod_obj in pod_r.response['items']:
            try:
                pod = Pod(self._pykube_api, pod_obj)
                log_string += "\n\n==== Pod " + pod.name + " log start ====\n\n"
                log_string += pod.logs(timestamps=True)
                log_string += "\n\n==== Pod " + pod.name + " log end   ===="
            except Exception as detail:
                log.info(
                    "Could not write log file for pod %s due to HTTPError %s",
                    pod_obj['metadata']['name'], detail)
        if isinstance(log_string, text_type):
            log_string = log_string.encode('utf8')

        logs_file_path = job_state.output_file
        try:
            with open(logs_file_path, mode="w") as logs_file:
                logs_file.write(log_string)
        except IOError as e:
            log.error("Couldn't produce log files for %s", job_state.job_id)
            log.exception(e)

        return logs_file_path
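A minimal, self-contained sketch of the pykube setup these snippets assume (the examples on this page receive an already-built client such as self._pykube_api; the kubeconfig path, namespace and "app=myjob" selector below are illustrative):

import os
import pykube
from pykube import Pod

# Build the HTTP client from a kubeconfig file (path is illustrative)
api = pykube.HTTPClient(pykube.KubeConfig.from_file(os.path.expanduser("~/.kube/config")))

# Label selectors can be passed either as a string ("app=myjob") or as a dict,
# as the examples on this page show
for pod in Pod.objects(api).filter(namespace="default", selector={"app": "myjob"}):
    print(pod.name, pod.obj["status"]["phase"])
    print(pod.logs(timestamps=True))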
Example #2
    def loadconfig(self):
        if self.api is not None:
            return

        logger.debug("Loading kubeconfig...")
        try:
            self.kubeconfig = KubeConfig.from_file(env("LOCAL_KUBECONFIG_PATH", "/azk/deploy/.kube/config"))
            self.api = HTTPClient(self.kubeconfig)
            self.namespace = env("KUBE_NAMESPACE")
            self.context = env("KUBE_CONTEXT")
            if self.context is None:
                if "current-context" in self.kubeconfig.doc:
                    self.context = self.kubeconfig.doc["current-context"]
                else:
                    logger.fatal("KUBE_CONTEXT in env is not set and current-context is not set in kubeconfig.")
                    exit(1)

            if self.context not in self.kubeconfig.contexts:
                logger.fatal("Context '" + str(self.context) + "' is not found in kubeconfig.")
                exit(1)

            self.kubeconfig.set_current_context(self.context)
            logger.debug("Testing connectivity...")
            if self.namespace is None and "namespace" in self.kubeconfig.contexts[self.context]:
                self.namespace = self.kubeconfig.contexts[self.context]["namespace"]
            if self.namespace is None:
                logger.fatal("KUBE_NAMESPACE is not set and there is no namespace set in kubeconfig context " + str(self.kubeconfig.current_context) + ".")
                exit(1)
            pods = Pod.objects(self.api).filter(namespace=self.namespace)
            logger.info("Currently " + str(len(pods)) + " pods in '" + self.namespace + "' namespace, kubernetes connection appears to be working.")
        except Exception as e:
            logger.fatal("Unable to load kubeconfig/connection failed, " + str(e.strerror))
            exit(1)
Example #3
    def __get_pods(self):
        self.__logger.info("checking __get_pods")
        pod_objs = Pod.objects(self.__kube_api) \
            .filter(namespace=self.job_namespace, selector="job-name=" + self.uu_name) \
            .response['items']
        return [Pod(self.__kube_api, p) for p in pod_objs]
Example #4
    def __track_pod(self, seen_events):
        """Poll pod status while active"""
        watch = Pod.objects(self.__kube_api).filter(selector="luigi_task_id=" +
                                                    self.job_uuid).watch()
        status = self.__get_pod_status()
        log = ''
        # Since we are tracking pod statuses, events have to be retrieved by a
        # separate query; seen events are stored to prevent double logging.
        if status == self.__RUNNING_STATUS:
            for event in watch:
                event_string = self.__get_pod_events(seen_events)
                log_string = "Kubernetes pod state: " + event.object.obj[
                    "status"]["phase"]
                if len(event_string) > 0:
                    log_string += ".\nKubernetes events: \n" + event_string
                self.log_event(
                    api.LogEntry(self.run_id, api.TaskStatus.RUNNING,
                                 log_string, self.__repr__(), self.uu_name))
                if self.__SUCCESS_STATUS in event.object.obj["status"][
                        "phase"]:
                    status = self.__SUCCESS_STATUS
                    log = event.object.logs()
                    break
                if self.__FAILURE_STATUS in event.object.obj["status"][
                        "phase"]:
                    status = self.__FAILURE_STATUS
                    log = event.object.logs()
                    break

        self.log_event(
            api.LogEntry(self.run_id, api.TaskStatus.RUNNING, log,
                         self.__repr__(), self.uu_name))
        log_path = os.path.join(
            self.pipeline_api.log_dir,
            "{}.{}.log".format(self.task_family, self.create_id()))
        with open(log_path, "a") as log_file:
            log_file.write(log)
        # in case we missed something during watching status
        event_string = self.__get_pod_events(seen_events)
        if len(event_string) > 0:
            event_string = "Kubernetes events: \n" + event_string
            self.log_event(
                api.LogEntry(self.run_id, api.TaskStatus.RUNNING, event_string,
                             self.__repr__(), self.uu_name))

        if status == self.__SUCCESS_STATUS:
            self.log_event(
                api.LogEntry(self.run_id, api.TaskStatus.RUNNING,
                             "Kubernetes pod succeeded: " + self.uu_name,
                             self.__repr__(), self.uu_name))
            self.signal_complete()
        elif status == self.__RUNNING_STATUS:
            self.__track_pod(seen_events)
        else:
            self.log_event(
                api.LogEntry(self.run_id, api.TaskStatus.RUNNING,
                             "Kubernetes pod failed: " + self.uu_name,
                             self.__repr__(), self.uu_name))
            raise RuntimeError("Kubernetes pod " + self.uu_name + " failed")
Example #5
def run(name, shell_path):
    for pod in Pod.objects(kube_api).filter(selector={'app': name}).iterator():
        if pod.ready:  # use the first ready pod, otherwise we use the last pod
            break

    click.echo("Running shell in pod `{}` in kube ctx `{}`".format(
               pod.name, kube_api.config.current_context))
    os.execve(get_executable_path('kubectl'), ["kubectl", "exec", "-i", "-t", pod.name, shell_path], os.environ)
Example #6
    def get_mongo_pods(self):
        pods = Pod.objects(self.k8s).filter(selector={
            'role': 'mongo',
            'app': self.app_name,
            'creator': self.creator_name
        })

        ready_pods = []
        for p in pods:
            if 'podIP' not in p.obj['status']:
                continue
            ready_pods.append(p)

        return ready_pods
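As an aside, pykube also exposes a ready property on Pod (used in Example #5), so a sketch of the same selection could rely on it instead of inspecting status.podIP directly (pods here is the query from the snippet above):

ready_pods = [p for p in pods if p.ready]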
Example #7
    def __job_failed_due_to_low_memory(self, job_state):
        """
        checks the state of the pod to see if it was killed
        for being out of memory (pod status OOMKilled). If that is the case
        marks the job for resubmission (resubmit logic is part of destinations).
        """

        pods = Pod.objects(self._pykube_api).filter(selector="app=" + job_state.job_id)
        pod = Pod(self._pykube_api, pods.response['items'][0])

        if pod.obj['status']['phase'] == "Failed" and \
                pod.obj['status']['containerStatuses'][0]['state']['terminated']['reason'] == "OOMKilled":
            return True

        return False
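A more defensive sketch of the same OOM check, tolerant of pods whose container status is missing or not yet terminated (the helper name is illustrative):

def pod_was_oom_killed(pod):
    status = pod.obj.get('status', {})
    if status.get('phase') != "Failed":
        return False
    for container_status in status.get('containerStatuses', []):
        terminated = container_status.get('state', {}).get('terminated') or {}
        if terminated.get('reason') == "OOMKilled":
            return True
    return False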
Example #9
    def __get_pod_status(self, pod_id):
        # Look for the required pod
        pod = Pod.objects(
            self.kube_api).filter(namespace=self.namespace).get(name=pod_id)
        Logger.info(str(pod), task_name=self.task_name)
        # Raise an exception if no such pod found
        if not pod:
            Logger.fail("Task failed to start: " + pod_id,
                        task_name=self.task_name)
            raise RuntimeError("Task with id {} not found".format(pod_id))
        # Figure out status and return it
        if self.__SUCCESS_STATUS in pod.obj["status"]["phase"]:
            return self.__SUCCESS_STATUS
        if self.__FAILURE_STATUS in pod.obj["status"]["phase"]:
            return self.__FAILURE_STATUS
        return self.__RUNNING_STATUS
Example #10
    def get_mongo_pods(self):
        pods = Pod.objects(self.k8s).filter(
            selector={
                'role': 'mongo',
                'app': self.app_name,
                'creator': self.creator_name
            }
        )

        ready_pods = []
        for p in pods:
            if 'podIP' not in p.obj['status']:
                continue
            ready_pods.append(p)

        return ready_pods
Example #11
    def track_pod(self, pod_id, seen_events):
        """Poll pod status while active"""
        watch = Pod.objects(self.kube_api).filter(field_selector={
            "metadata.name": pod_id
        }).watch()
        status = self.__get_pod_status(pod_id)
        log = ''
        # Since we are tracking pod statuses, events have to be retrieved by a
        # separate query; seen events are stored to prevent double logging.
        if status == self.__RUNNING_STATUS:
            for event in watch:
                event_string = self.__get_pod_events(pod_id, seen_events)
                log_string = "Tool state: " + event.object.obj["status"][
                    "phase"]
                if len(event_string) > 0:
                    log_string += ".\nTask events: \n" + event_string
                Logger.info(log_string, task_name=self.task_name)
                if self.__SUCCESS_STATUS in event.object.obj["status"][
                        "phase"]:
                    status = self.__SUCCESS_STATUS
                    log = event.object.logs()
                    break
                if self.__FAILURE_STATUS in event.object.obj["status"][
                        "phase"]:
                    status = self.__FAILURE_STATUS
                    log = event.object.logs()
                    break
        Logger.info(log, task_name=self.task_name)
        # in case we missed something during watching status
        event_string = self.__get_pod_events(pod_id, seen_events)
        if len(event_string) > 0:
            event_string = "Task events: \n" + event_string
            Logger.info(event_string, task_name=self.task_name)

        if status == self.__SUCCESS_STATUS:
            Logger.success("Task succeeded: {}".format(pod_id),
                           task_name=self.task_name)
        elif status == self.__RUNNING_STATUS:
            self.track_pod(pod_id, seen_events)
        else:
            Logger.fail("Task failed: {}.".format(pod_id),
                        task_name=self.task_name)
            raise RuntimeError("Task with id " + pod_id + " failed")
Example #12
    def __get_pod_status(self):
        # Look for the required pod
        pods = Pod.objects(self.__kube_api).filter(selector="luigi_task_id=" +
                                                   self.job_uuid)
        # Raise an exception if no such pod found
        if len(pods.response["items"]) == 0:
            self.log_event(
                api.LogEntry(self.run_id, api.TaskStatus.FAILURE,
                             "Kubernetes pod failed to raise: " + self.uu_name,
                             self.__repr__(), self.uu_name))
            raise RuntimeError("Kubernetes job " + self.uu_name + " not found")

        # Figure out status and return it
        pod = Pod(self.__kube_api, pods.response["items"][0])
        if self.__SUCCESS_STATUS in pod.obj["status"]["phase"]:
            return self.__SUCCESS_STATUS
        if self.__FAILURE_STATUS in pod.obj["status"]["phase"]:
            return self.__FAILURE_STATUS
        return self.__RUNNING_STATUS
Example #13
    def __job_failed_due_to_low_memory(self, job_state):
        """
        checks the state of the pod to see if it was killed
        for being out of memory (pod status OOMKilled). If that is the case
        marks the job for resubmission (resubmit logic is part of destinations).
        """

        pods = Pod.objects(self._pykube_api).filter(selector="app=" +
                                                    job_state.job_id)
        if pods is None or len(pods.response['items']) == 0:
            log.error("Cannot find API server or selector app={} did not match any pods".
                      format(str(job_state.job_id)))
            return False
        pod = Pod(self._pykube_api, pods.response['items'][0])

        if pod.obj['status']['phase'] == "Failed" and \
                pod.obj['status']['containerStatuses'][0]['state']['terminated']['reason'] == "OOMKilled":
            return True

        return False
Example #14
    def __produce_log_file(self, job_state):
        pod_r = Pod.objects(self._pykube_api).filter(selector="app=" + job_state.job_id)
        logs = ""
        for pod_obj in pod_r.response['items']:
            try:
                pod = Pod(self._pykube_api, pod_obj)
                logs += "\n\n==== Pod " + pod.name + " log start ====\n\n"
                logs += pod.logs(timestamps=True)
                logs += "\n\n==== Pod " + pod.name + " log end   ===="
            except Exception as detail:
                log.info("Could not write pod\'s " + pod_obj['metadata']['name'] +
                         " log file due to HTTPError " + str(detail))

        logs_file_path = job_state.output_file
        if isinstance(logs, text_type):
            logs = logs.encode('utf8')
        # logs is bytes at this point, so write the file in binary mode
        with open(logs_file_path, mode="wb") as logs_file:
            logs_file.write(logs)
        return logs_file_path
Example #15
def run(name, shell):
    for pod in Pod.objects(kube_api).filter(selector={'app': name}).iterator():
        if pod.ready:  # use the first ready pod, otherwise we use the last pod
            break

    def env_vars():
        d = {
            'KYBER_USER': pwd.getpwuid(os.getuid()).pw_name,
            'KYBER_POD': pod.name,
            'KYBER_APP': name,
            'KYBER_KUBECTL_CONTEXT': kube_api.config.current_context,
        }
        return " ".join(["{}={}".format(key, val) for key, val in d.items()])

    cmd = '{env} {shell}'.format(env=env_vars(), shell=shell)

    click.echo("Running shell in pod `{}` in kube ctx `{}`".format(
        pod.name, kube_api.config.current_context))
    os.execve(
        get_executable_path('kubectl'),
        ["kubectl", "exec", "-i", "-t", pod.name, '--', shell, '-c', cmd],
        os.environ)
Example #16
    def __produce_log_file(self, job_state):
        pod_r = Pod.objects(self._pykube_api).filter(selector="app=" +
                                                     job_state.job_id)
        logs = ""
        for pod_obj in pod_r.response['items']:
            try:
                pod = Pod(self._pykube_api, pod_obj)
                logs += "\n\n==== Pod " + pod.name + " log start ====\n\n"
                logs += pod.logs(timestamps=True)
                logs += "\n\n==== Pod " + pod.name + " log end   ===="
            except Exception as detail:
                log.info("Could not write pod\'s " +
                         pod_obj['metadata']['name'] +
                         " log file due to HTTPError " + str(detail))

        logs_file_path = job_state.output_file
        if isinstance(logs, text_type):
            logs = logs.encode('utf8')
        # logs is bytes at this point, so write the file in binary mode
        with open(logs_file_path, mode="wb") as logs_file:
            logs_file.write(logs)
        return logs_file_path
Example #17
def deploy(ctx, persistent_volume, load_balancer, public_hostname):
    """Deploy OpenShift to the cluster."""
    if not ctx.init_with_checks():
        print("Failed cursory checks, exiting.")
        exit(1)

    if ctx.consider_openshift_deployed:
        print("I think OpenShift is already deployed. Use undeploy first to remove it before installing it again.")
        print("Consider if you really need a full redeploy. You can update without re-deploying!")
        exit(1)

    print()
    if "openshift" in ctx.namespace_names or "openshift-origin" in ctx.namespace_names:
        print("The namespaces 'openshift' and/or 'openshift-origin' exist, this indicates a potentially existing/broken install.")
        if ctx.auto_confirm:
            print("Auto confirm (-y) option set, clearing existing installation.")
        else:
            print("Really consider the decision you're about to make.")
            if not click.confirm("Do you want to clear the existing installation?"):
                print("Okay, cancelling.")
                exit(1)

        # Handle oddities with finalizers?
        # todo: delete "openshift" and "openshift-infra" namespaces
        ctx.delete_namespace_byname("openshift-origin")
        time.sleep(1)

    ctx.temp_dir = tempfile.mkdtemp()
    print("Preparing to execute deploy...")
    print("Deploy temp dir: " + ctx.temp_dir)

    # Setup the deploy state namespace
    ctx.cleanup_osdeploy_namespace()
    ctx.create_osdeploy_namespace()

    # Check the persistentvolume exists
    if ctx.find_persistentvolume(persistent_volume) is None:
        print(" [!] persistentvolume with name " + persistent_volume + " does not exist. Did you create it?")
        exit(1)

    # Grab the service account key
    servicekey_pod = ctx.create_servicekey_pod()

    # Get the key
    ctx.service_cert = ctx.observe_servicekey_pod(servicekey_pod)

    # Kill the pod
    servicekey_pod.delete()

    # Save the key temporarily
    with open(ctx.temp_dir + "/serviceaccounts.public.key", 'w') as f:
        f.write(ctx.service_cert)

    # Create the namespaces
    ctx.create_namespace("openshift-origin")

    # Create the service
    if load_balancer:
        print("Will use load balancer type service.")
    else:
        print("Will use node port type service.")
    os_service = ctx.create_os_service(load_balancer)

    # Wait for it to be ready if it's a load balancer
    if load_balancer:
        print("Waiting for service load balancer IP to be allocated...")
        ctx.wait_for_loadbalancer(os_service)
    else:
        os_service.reload()

    internal_os_ip = os_service.obj["spec"]["clusterIP"]
    ctx.os_internal_ip = internal_os_ip

    external_is_hostname = False
    if load_balancer:
        # Only a load balancer service gets an external ingress allocated
        tmp = os_service.obj["status"]["loadBalancer"]["ingress"][0]
        if "hostname" in tmp:
            external_os_ip = tmp["hostname"]
            external_is_hostname = True
        else:
            external_os_ip = tmp["ip"]
        print("External OpenShift IP: " + external_os_ip)
    else:
        external_os_ip = internal_os_ip
        print("External OpenShift IP: nodes (node port)")
    ctx.os_external_ip = external_os_ip

    print("Internal OpenShift IP: " + internal_os_ip)

    if public_hostname is not None:
        print("You need to DNS map like this:")
        if external_is_hostname:
            print(public_hostname + ".\t300\tIN\tCNAME\t" + external_os_ip)
        else:
            print(public_hostname + ".\t300\tIN\tA\t" + external_os_ip)
        ctx.os_external_ip = public_hostname

    # Create a 'secret' containing the script to run to config.
    create_config_script = (resource_string(ctx.scripts_resource, 'create-config.sh'))

    # Build the secret
    create_config_secret_kv = {"create-config.sh": create_config_script}
    create_config_secret = ctx.build_secret("create-config-script", "openshift-deploy", create_config_secret_kv)
    create_config_secret.create()

    # Build the kubeconfig secret
    kubeconfig_secret_kv = {"kubeconfig": yaml.dump(ctx.config.doc).encode('ascii')}
    kubeconfig_secret = ctx.build_secret("kubeconfig", "openshift-deploy", kubeconfig_secret_kv)
    kubeconfig_secret.create()

    # Generate the openshift config by running a temporary pod on the cluster
    print("Generating openshift config via cluster...")
    conf_pod = ctx.build_config_pod(ctx.os_version)
    conf_pod.create()
    with open(ctx.temp_dir + "/config_bundle.tar.gz", 'wb') as f:
        conf_bundle = ctx.observe_config_pod(conf_pod)
        conf_bundle_data = base64.b64decode(conf_bundle)
        f.write(conf_bundle_data)
    conf_pod.delete()

    # Extract
    tar = tarfile.open(ctx.temp_dir + "/config_bundle.tar.gz")
    tar.extractall(ctx.temp_dir + "/config/")
    tar.close()

    # Move kubeconfig in
    with open(ctx.temp_dir + "/config/external-master.kubeconfig", 'w') as f:
        f.write(yaml.dump(ctx.config.doc))

    # Delete tarfile
    os.remove(ctx.temp_dir + "/config_bundle.tar.gz")

    # Do some processing on the master-config yaml
    conf = None
    with open(ctx.temp_dir + '/config/master-config.yaml') as f:
        conf = f.read()
    conf = ctx.fix_master_config(conf)

    # Write the serviceaccounts file again
    with open(ctx.temp_dir + "/config/serviceaccounts.public.key", 'w') as f:
        f.write(ctx.service_cert)

    # Write the fixed master config
    with open(ctx.temp_dir + "/config/master-config.yaml", 'w') as f:
        f.write(conf)

    # Allow the user to edit the openshift config last second
    print("Generated updated master-config.yaml.")
    if ctx.auto_confirm:
        print("Auto confirm (-y) option set, skipping master-config.yaml edit opportunity.")
    else:
        if click.confirm("Do you want to edit master-config.yaml?"):
            call([EDITOR, ctx.temp_dir + "/config/master-config.yaml"])

    # Cleanup a bit
    kubeconfig_secret.delete()
    create_config_secret.delete()

    # Serialize the config to a secret
    openshift_config_kv = {}
    for filen in os.listdir(ctx.temp_dir + "/config"):
        with open(ctx.temp_dir + "/config/" + filen, 'rb') as f:
            openshift_config_kv[filen] = f.read()
    openshift_config_secret = ctx.build_secret("openshift-config", "openshift-origin", openshift_config_kv)

    # Save the secret
    openshift_config_secret.create()

    # Starting etcd setup... build PersistentVolumeClaim
    etcd_pvc = ctx.build_pvc("openshift-etcd1", "openshift-origin", "2Gi")
    etcd_pvc.create()

    # Create the etcd controller
    etcd_rc = ctx.build_etcd_rc("openshift-etcd1")
    etcd_svc = ctx.build_etcd_service()

    print("Creating etcd service...")
    etcd_svc.create()

    print("Creating etcd controller...")
    etcd_rc.create()

    print("Waiting for etcd pod to be created...")
    etcd_pod = None
    # Wait for the pod to exist
    while etcd_pod is None:
        etcd_pods = Pod.objects(ctx.api).filter(selector={"app": "etcd"}, namespace="openshift-origin").response["items"]
        if len(etcd_pods) < 1:
            time.sleep(0.5)
            continue
        etcd_pod = Pod(ctx.api, etcd_pods[0])

    # Wait for it to run
    ctx.wait_for_pod_running(etcd_pod)

    # Create the controller config
    print("Creating openshift replication controller...")
    openshift_rc = ctx.build_openshift_rc(ctx.os_version)
    openshift_rc.create()

    print("Waiting for openshift pod to be created...")
    openshift_pod = None
    # Wait for the pod to exist
    while openshift_pod is None:
        pods = Pod.objects(ctx.api).filter(namespace="openshift-origin", selector={"app": "openshift"}).response["items"]
        if len(pods) < 1:
            time.sleep(0.5)
            continue
        openshift_pod = Pod(ctx.api, pods[0])

    # Wait for it to run
    ctx.wait_for_pod_running(openshift_pod)

    print()
    print(" == OpenShift Deployed ==")
    print("External IP: " + ctx.os_external_ip)

    ctx.fetch_namespaces()
    ctx.cleanup_osdeploy_namespace()
    shutil.rmtree(ctx.temp_dir)
Example #18
    def __get_pods(self):
        pod_objs = Pod.objects(self.__kube_api, namespace=self.kubernetes_namespace) \
            .filter(selector="job-name=" + self.uu_name) \
            .response['items']
        return [Pod(self.__kube_api, p) for p in pod_objs]
Example #19
                edge_service_port = edge_kube_service_object_metadata['labels'][EDGE_SVC_PORT_LABEL]

        if not edge_service_external_ip:
                edge_service_external_ip = edge_kube_service_object['spec']['externalIPs'][0]
        if not edge_service_port:
                edge_service_port = edge_kube_service_object['ports'][0]['nodePort']
        print('EDGE service port: ' + str(edge_service_port))
        print('EDGE service ip: ' + edge_service_external_ip)

# From each pod with "job-type=Service"  we shall take:
# -- PodIP
# -- PodID
# -- N entries by a template
# --- svc-port-N
# --- svc-path-N
pods = Pod.objects(kube_api).filter(selector={'job-type': 'Service'})\
                            .filter(field_selector={"status.phase": "Running"})

services_list = {}
for pod_spec in pods.response['items']:
        pod_id = pod_spec['metadata']['name']
        pod_ip = pod_spec['status']['podIP']
        pod_run_id = pod_spec['metadata']['labels']['runid']

        if not pod_run_id:
                print('RunID not found for pod: ' + pod_id + ', skipping')
                continue

        services_list.update(get_service_list(pod_id, pod_run_id, pod_ip))

print('Found ' + str(len(services_list)) + ' running PODs with job-type: Service')
Example #20
    def get_pod(self, run_id):
        pods = Pod.objects(self.__kube_api).filter(selector={'runid': run_id})
        if len(pods.response['items']) == 0:
            return None
        else:
            return PodModel(pods.response['items'][0], run_id)
Example #21
async def get_resource_logs(request, session):
    cluster = request.app[CLUSTER_MANAGER].get(request.match_info["cluster"])
    namespace = request.match_info.get("namespace")
    plural = request.match_info["plural"]
    name = request.match_info["name"]
    tail_lines = int(request.rel_url.query.get("tail_lines") or 200)
    clazz = await cluster.resource_registry.get_class_by_plural_name(
        plural, namespaced=True)
    query = wrap_query(clazz.objects(cluster.api), request, session)
    if namespace:
        query = query.filter(namespace=namespace)
    resource = await kubernetes.get_by_name(query, name)

    if resource.kind == "Pod":
        pods = [resource]
    elif resource.obj.get("spec", {}).get("selector", {}).get("matchLabels"):
        query = wrap_query(Pod.objects(cluster.api), request, session).filter(
            namespace=namespace,
            selector=resource.obj["spec"]["selector"]["matchLabels"],
        )
        pods = await kubernetes.get_list(query)
    else:
        raise web.HTTPNotFound(text="Resource has no logs")

    logs = []

    show_container_logs = request.app[CONFIG].show_container_logs
    if show_container_logs:
        for pod in pods:
            color = pod_color(pod.name)
            for container in pod.obj["spec"]["containers"]:
                container_log = await kubernetes.logs(
                    pod,
                    container=container["name"],
                    timestamps=True,
                    tail_lines=tail_lines,
                )
                for line in container_log.split("\n"):
                    # this is a hacky way to determine whether it's a multi-line log message
                    # (our current year of the timestamp starts with "20"..)
                    if line.startswith("20") or not logs:
                        logs.append((line, pod.name, color, container["name"]))
                    else:
                        logs[-1] = (
                            logs[-1][0] + "\n" + line,
                            pod.name,
                            color,
                            container["name"],
                        )

    logs.sort()

    return {
        "cluster": cluster.name,
        "namespace": namespace,
        "plural": plural,
        "resource": resource,
        "tail_lines": tail_lines,
        "pods": pods,
        "logs": logs,
        "show_container_logs": show_container_logs,
    }
Example #22
async def get_resource_view(request, session):
    cluster = request.app[CLUSTER_MANAGER].get(request.match_info["cluster"])
    namespace = request.match_info.get("namespace")
    plural = request.match_info["plural"]
    name = request.match_info["name"]
    params = request.rel_url.query
    view = params.get("view")
    clazz = await cluster.resource_registry.get_class_by_plural_name(
        plural, namespaced=bool(namespace))
    query = wrap_query(clazz.objects(cluster.api), request, session)
    if namespace:
        query = query.filter(namespace=namespace)
    resource = await kubernetes.get_by_name(query, name)

    if resource.kind == "Secret" and not request.app[CONFIG].show_secrets:
        # mask out all secret values, but still show keys
        for key in resource.obj["data"].keys():
            resource.obj["data"][
                key] = "**SECRET-CONTENT-HIDDEN-BY-KUBE-WEB-VIEW**"
        # the secret data is also leaked in annotations ("last-applied-configuration")
        # => hide annotations
        resource.metadata["annotations"] = {
            "annotations-hidden": "by-kube-web-view"
        }

    if params.get("download") == "yaml":
        return await download_yaml(request, resource)

    owners = []
    for ref in resource.metadata.get("ownerReferences", []):
        owner_class = await cluster.resource_registry.get_class_by_api_version_kind(
            ref["apiVersion"], ref["kind"], namespaced=bool(namespace))
        owners.append({"name": ref["name"], "class": owner_class})

    selector = field_selector = None
    if resource.kind == "Node":
        field_selector = {"spec.nodeName": resource.name}
    elif resource.obj.get("spec", {}).get("selector", {}).get("matchLabels"):
        # e.g. Deployment, DaemonSet, ..
        selector = resource.obj["spec"]["selector"]["matchLabels"]
    elif resource.obj.get("spec", {}).get("selector"):
        # e.g. Service
        selector = resource.obj["spec"]["selector"]

    if selector or field_selector:
        query = wrap_query(Pod.objects(cluster.api), request,
                           session).filter(namespace=namespace or pykube.all)

        if selector:
            query = query.filter(selector=selector)
        if field_selector:
            query = query.filter(field_selector=field_selector)

        table = await kubernetes.get_table(query)
        sort_table(table, params.get("sort"))
        table.obj["cluster"] = cluster
    else:
        table = None

    field_selector = {
        "involvedObject.name": resource.name,
        "involvedObject.namespace": namespace or "",
        "involvedObject.kind": resource.kind,
        "involvedObject.uid": resource.metadata["uid"],
    }
    events = await kubernetes.get_list(
        wrap_query(Event.objects(cluster.api), request,
                   session).filter(namespace=namespace or pykube.all,
                                   field_selector=field_selector))

    if resource.kind == "Namespace":
        namespace = resource.name

    return {
        "cluster": cluster.name,
        "namespace": namespace,
        "plural": plural,
        "resource": resource,
        "owners": owners,
        "view": view,
        "table": table,
        "events": events,
        "get_cell_class": get_cell_class,
    }
Example #23
def find_pod_object_by_name(pykube_api, job_name, namespace=None):
    return Pod.objects(pykube_api).filter(selector="job-name=" + job_name,
                                          namespace=namespace)
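Hypothetical usage of the helper above, assuming an existing pykube client (pykube_api) and an illustrative job name; the returned query exposes both iteration over Pod objects and the raw API response:

pods = find_pod_object_by_name(pykube_api, "galaxy-abc123", namespace="default")
for item in pods.response['items']:
    print(item['metadata']['name'], item['status']['phase'])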
Example #24
    def load_from_kube(self, kube):
        self.pods = Pod.objects(kube).filter(namespace=self.namespace)
        self.services = Service.objects(kube).filter(namespace=self.namespace)
        self.replication_controllers = ReplicationController.objects(kube).filter(namespace=self.namespace)
        logging.debug("Loaded from kube " + str(len(self.pods)) + " pods, " + str(len(self.services)) + " services, and " + str(len(self.replication_controllers)) + " rcs.")
Example #25
def editconfig(ctx):
    """Interactively edits master-config.yaml"""
    ctx.temp_dir = tempfile.mkdtemp()
    if ctx.auto_confirm:
        print(
            "Note: -y option is not supported for purely interactive commands."
        )
        ctx.auto_confirm = False

    if not ctx.init_with_checks():
        print("Failed cursory checks, exiting.")
        exit(1)

    if not ctx.consider_openshift_deployed:
        print(
            "I think OpenShift is not yet deployed. Use deploy first to create it."
        )
        exit(1)

    old_secret = ctx.fetch_config_to_dir(ctx.temp_dir)
    mc_path = ctx.temp_dir + "/master-config.yaml"
    if not os.path.exists(mc_path):
        print(
            "Fetched config files but they don't contain master-config.yaml, something's wrong. Try getconfig."
        )
        shutil.rmtree(ctx.temp_dir)
        exit(1)

    last_mtime = os.path.getmtime(mc_path)
    print("Config files are at: " + ctx.temp_dir)
    print("Feel free to edit as you will...")
    print("Launching editor...")
    call([EDITOR, mc_path])
    now_mtime = os.path.getmtime(mc_path)
    if now_mtime == last_mtime:
        print("No changes made, exiting.")
        shutil.rmtree(ctx.temp_dir)
        exit(0)

    if not click.confirm("Do you want to upload the changed config files?"):
        print("Okay, cancelling.")
        shutil.rmtree(ctx.temp_dir)
        exit(0)

    print("Preparing to upload config files...")
    # Serialize the config to a secret
    openshift_config_kv = {}
    for filen in os.listdir(ctx.temp_dir):
        with open(ctx.temp_dir + "/" + filen, 'rb') as f:
            openshift_config_kv[filen] = f.read()
    openshift_config_secret = ctx.build_secret("openshift-config",
                                               "openshift-origin",
                                               openshift_config_kv)
    openshift_config_secret._original_obj = old_secret.obj

    print("Attempting to patch secret...")
    openshift_config_secret.update()
    print("Updates applied.")

    if not click.confirm(
            "Do you want to restart openshift to apply the changes?"):
        print("Okay, I'm done. Have a nice day!")
        shutil.rmtree(ctx.temp_dir)
        exit(0)

    print("Restarting openshift pod...")
    try:
        pods = Pod.objects(ctx.api).filter(namespace="openshift-origin",
                                           selector={
                                               "app": "openshift"
                                           }).response["items"]
        if len(pods) >= 1:
            openshift_pod = Pod(ctx.api, pods[0])
            print("Deleting pod " + openshift_pod.obj["metadata"]["name"] +
                  "...")
            openshift_pod.delete()
    except Exception:
        print(
            "Something went wrong restarting openshift, do it yourself please!"
        )

    shutil.rmtree(ctx.temp_dir)
Example #26
def deploy(ctx, persistent_volume, load_balancer, public_hostname,
           create_volume, master_config_override, server_key):
    """Deploy OpenShift to the cluster."""
    if not load_balancer and public_hostname is not None:
        print(
            "You must specify --load-balancer with --public-hostname, I can't map a public hostname without a load balancer."
        )
        exit(1)

    if not ctx.init_with_checks():
        print("Failed cursory checks, exiting.")
        exit(1)

    if ctx.consider_openshift_deployed:
        print(
            "I think OpenShift is already deployed. Use undeploy first to remove it before installing it again."
        )
        print(
            "Consider if you really need a full redeploy. You can update without re-deploying!"
        )
        exit(1)

    print()
    if "openshift" in ctx.namespace_names or "openshift-origin" in ctx.namespace_names:
        print(
            "The namespaces 'openshift' and/or 'openshift-origin' exist, this indicates a potentially existing/broken install."
        )
        if ctx.auto_confirm:
            print(
                "Auto confirm (-y) option set, clearing existing installation."
            )
        else:
            print("Really consider the decision you're about to make.")
            if not click.confirm(
                    "Do you want to clear the existing installation?"):
                print("Okay, cancelling.")
                exit(1)

        # Handle oddities with finalizers?
        # todo: delete "openshift" and "openshift-infra" namespaces
        ctx.delete_namespace_byname("openshift-origin")
        time.sleep(1)

    ctx.temp_dir = tempfile.mkdtemp()
    print("Preparing to execute deploy...")
    print("Deploy temp dir: " + ctx.temp_dir)

    # Setup the deploy state namespace
    ctx.cleanup_osdeploy_namespace()
    ctx.create_osdeploy_namespace()

    # Check the persistentvolume exists
    if not create_volume and ctx.find_persistentvolume(
            persistent_volume) is None:
        print(" [!] persistentvolume with name " + persistent_volume +
              " does not exist. Did you create it?")
        exit(1)

    # Create the namespaces
    ctx.create_namespace("openshift-origin")

    # Create the service
    if load_balancer:
        print("Will use load balancer type service.")
    else:
        print("Will use node port type service.")
    os_service = ctx.create_os_service(load_balancer)

    # Wait for it to be ready if it's a load balancer
    if load_balancer:
        print("Waiting for service load balancer IP to be allocated...")
        ctx.wait_for_loadbalancer(os_service)
    else:
        os_service.reload()

    internal_os_ip = os_service.obj["spec"]["clusterIP"]
    ctx.os_internal_ip = internal_os_ip

    if load_balancer:
        tmp = os_service.obj["status"]["loadBalancer"]["ingress"][0]
        external_os_ip = None
        external_is_hostname = False
        if "hostname" in tmp:
            external_os_ip = tmp["hostname"]
            external_is_hostname = True
        else:
            external_os_ip = tmp["ip"]
        ctx.os_external_ip = external_os_ip
        print("External OpenShift IP: " + external_os_ip)
    else:
        external_os_ip = internal_os_ip
        print("External OpenShift IP: nodes (node port)")

    print("Internal OpenShift IP: " + internal_os_ip)

    if public_hostname is not None:
        print("You need to DNS map like this:")
        if external_is_hostname:
            print(public_hostname + ".\t300\tIN\tCNAME\t" + external_os_ip)
        else:
            print(public_hostname + ".\t300\tIN\tA\t" + external_os_ip)
        ctx.os_external_ip = public_hostname

    # Create a 'secret' containing the script to run to config.
    create_config_script = (resource_string(ctx.scripts_resource,
                                            'create-config.sh'))

    # Build the secret
    create_config_secret_kv = {"create-config.sh": create_config_script}
    create_config_secret = ctx.build_secret("create-config-script",
                                            "openshift-deploy",
                                            create_config_secret_kv)
    create_config_secret.create()

    # Build the kubeconfig secret
    kubeconfig_secret_kv = {
        "kubeconfig": yaml.dump(ctx.config.doc).encode('ascii')
    }
    kubeconfig_secret = ctx.build_secret("kubeconfig", "openshift-deploy",
                                         kubeconfig_secret_kv)
    kubeconfig_secret.create()

    # Generate the openshift config by running a temporary pod on the cluster
    print("Generating openshift config via cluster...")
    conf_pod = ctx.build_config_pod(ctx.os_version)
    conf_pod.create()
    with open(ctx.temp_dir + "/config_bundle.tar.gz", 'wb') as f:
        conf_bundle = ctx.observe_config_pod(conf_pod)
        conf_bundle_data = base64.b64decode(conf_bundle)
        f.write(conf_bundle_data)
    conf_pod.delete()

    # Extract
    tar = tarfile.open(ctx.temp_dir + "/config_bundle.tar.gz")
    tar.extractall(ctx.temp_dir + "/config/")
    tar.close()

    # Move kubeconfig in
    with open(ctx.temp_dir + "/config/external-master.kubeconfig", 'w') as f:
        f.write(yaml.dump(ctx.config.doc))

    # Delete tarfile
    os.remove(ctx.temp_dir + "/config_bundle.tar.gz")

    # Do some processing on the master-config yaml
    conf = None
    with open(ctx.temp_dir + '/config/master-config.yaml') as f:
        conf = yaml.safe_load(f)
    conf = ctx.fix_master_config(conf)

    # Write the serviceaccounts file again
    with open(server_key, 'r') as fs:
        with open(ctx.temp_dir + "/config/serviceaccounts.public.key",
                  'w') as fd:
            fd.write(fs.read())

    # Load patches if needed
    master_config_override_kv = None
    if master_config_override is not None:
        print("Loading " + master_config_override + "...")
        with open(master_config_override, 'r') as f:
            master_config_override_kv = yaml.safe_load(f)
        conf = deepupdate(conf, master_config_override_kv)

    # Write the fixed master config
    with open(ctx.temp_dir + "/config/master-config.yaml", 'w') as f:
        f.write(yaml.dump(conf, default_flow_style=False))

    # Allow the user to edit the openshift config last second
    print("Generated updated master-config.yaml.")
    if ctx.auto_confirm:
        print(
            "Auto confirm (-y) option set, skipping master-config.yaml edit opportunity."
        )
    else:
        if click.confirm("Do you want to edit master-config.yaml?"):
            call([EDITOR, ctx.temp_dir + "/config/master-config.yaml"])

    # Cleanup a bit
    kubeconfig_secret.delete()
    create_config_secret.delete()

    # Serialize the config to a secret
    openshift_config_kv = {}
    for filen in os.listdir(ctx.temp_dir + "/config"):
        with open(ctx.temp_dir + "/config/" + filen, 'rb') as f:
            openshift_config_kv[filen] = f.read()
    openshift_config_secret = ctx.build_secret("openshift-config",
                                               "openshift-origin",
                                               openshift_config_kv)

    # Save the secret
    openshift_config_secret.create()

    # Starting etcd setup... build PersistentVolumeClaim
    etcd_pvc = ctx.build_pvc("openshift-etcd1", "openshift-origin", "2Gi",
                             create_volume)
    etcd_pvc.create()

    # Create the etcd controller
    etcd_rc = ctx.build_etcd_rc("openshift-etcd1")
    etcd_svc = ctx.build_etcd_service()

    print("Creating etcd service...")
    etcd_svc.create()

    print("Creating etcd controller...")
    etcd_rc.create()

    print("Waiting for etcd pod to be created...")
    etcd_pod = None
    # Wait for the pod to exist
    while etcd_pod is None:
        etcd_pods = Pod.objects(ctx.api).filter(
            selector={
                "app": "etcd"
            }, namespace="openshift-origin").response["items"]
        if len(etcd_pods) < 1:
            time.sleep(0.5)
            continue
        etcd_pod = Pod(ctx.api, etcd_pods[0])

    # Wait for it to run
    ctx.wait_for_pod_running(etcd_pod)

    # Create the controller config
    print("Creating openshift replication controller...")
    openshift_rc = ctx.build_openshift_rc(ctx.os_version)
    openshift_rc.create()

    print("Waiting for openshift pod to be created...")
    openshift_pod = None
    # Wait for the pod to exist
    while openshift_pod is None:
        pods = Pod.objects(ctx.api).filter(namespace="openshift-origin",
                                           selector={
                                               "app": "openshift"
                                           }).response["items"]
        if len(pods) < 1:
            time.sleep(0.5)
            continue
        openshift_pod = Pod(ctx.api, pods[0])

    # Wait for it to run
    ctx.wait_for_pod_running(openshift_pod)

    print()
    print(" == OpenShift Deployed ==")
    print("External IP: " + ctx.os_external_ip)

    ctx.fetch_namespaces()
    ctx.cleanup_osdeploy_namespace()
    shutil.rmtree(ctx.temp_dir)
Example #27
    def __get_pods(self):
        pod_objs = Pod.objects(self.__kube_api) \
            .filter(selector="job-name=" + self.uu_name) \
            .response['items']
        return [Pod(self.__kube_api, p) for p in pod_objs]
Example #28
def editconfig(ctx):
    """Interactively edits master-config.yaml"""
    ctx.temp_dir = tempfile.mkdtemp()
    if ctx.auto_confirm:
        print("Note: -y option is not supported for purely interactive commands.")
        ctx.auto_confirm = False

    if not ctx.init_with_checks():
        print("Failed cursory checks, exiting.")
        exit(1)

    if not ctx.consider_openshift_deployed:
        print("I think OpenShift is not yet deployed. Use deploy first to create it.")
        exit(1)

    old_secret = ctx.fetch_config_to_dir(ctx.temp_dir)
    mc_path = ctx.temp_dir + "/master-config.yaml"
    if not os.path.exists(mc_path):
        print("Fetched config files but they don't contain master-config.yaml, something's wrong. Try getconfig.")
        shutil.rmtree(ctx.temp_dir)
        exit(1)

    last_mtime = os.path.getmtime(mc_path)
    print("Config files are at: " + ctx.temp_dir)
    print("Feel free to edit as you will...")
    print("Launching editor...")
    call([EDITOR, mc_path])
    now_mtime = os.path.getmtime(mc_path)
    if now_mtime == last_mtime:
        print("No changes made, exiting.")
        shutil.rmtree(ctx.temp_dir)
        exit(0)

    if not click.confirm("Do you want to upload the changed config files?"):
        print("Okay, cancelling.")
        shutil.rmtree(ctx.temp_dir)
        exit(0)

    print("Preparing to upload config files...")
    # Serialize the config to a secret
    openshift_config_kv = {}
    for filen in os.listdir(ctx.temp_dir):
        with open(ctx.temp_dir + "/" + filen, 'rb') as f:
            openshift_config_kv[filen] = f.read()
    openshift_config_secret = ctx.build_secret("openshift-config", "openshift-origin", openshift_config_kv)
    openshift_config_secret._original_obj = old_secret.obj

    print("Attempting to patch secret...")
    openshift_config_secret.update()
    print("Updates applied.")

    if not click.confirm("Do you want to restart openshift to apply the changes?"):
        print("Okay, I'm done. Have a nice day!")
        shutil.rmtree(ctx.temp_dir)
        exit(0)

    print("Restarting openshift pod...")
    try:
        pods = Pod.objects(ctx.api).filter(namespace="openshift-origin", selector={"app": "openshift"}).response["items"]
        if len(pods) >= 1:
            openshift_pod = Pod(ctx.api, pods[0])
            print("Deleting pod " + openshift_pod.obj["metadata"]["name"] + "...")
            openshift_pod.delete()
    except Exception:
        print("Something went wrong restarting openshift, do it yourself please!")

    shutil.rmtree(ctx.temp_dir)