# Example 1
def check_status_exec_profiler(app_name):
    """Print every execution-profiler task pod that is not running.

    Arguments:
        app_name {string} -- application name used as the pod label prefix

    Returns:
        bool -- ``True`` if all checked task pods are in the ``Running``
        phase, ``False`` otherwise.
    """
    jupiter_config.set_globals()

    # Load the kubernetes instance configuration (e.g. admin.conf);
    # the config file path is set in jupiter_config.py.
    # NOTE(review): `path1` is not defined in this function -- presumably a
    # module-level variable; confirm it is set before this is called.
    dag_info = utilities.k8s_read_config(path1)
    dag = dag_info[1]

    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)

    # We have defined the namespace for deployments in jupiter_config
    namespace = jupiter_config.EXEC_NAMESPACE

    # Handle to the k8s core API (the only API this check needs).
    core_v1_api = client.CoreV1Api()

    result = True
    for key in dag:
        # Kubernetes labels identify the pods that belong to each task.
        label = "app=" + app_name + "-" + key

        # NOTE(review): `taskmap` is not defined in this function either;
        # presumably a module-level map of task metadata -- confirm.
        if not taskmap[key][1]:
            resp = core_v1_api.list_namespaced_pod(namespace,
                                                   label_selector=label)
            # Report the first matching pod if it is not yet running.
            if resp.items:
                pod = resp.items[0]
                if pod.status.phase != "Running":
                    print("Pod ", key, "status:", pod.status.phase)
                    result = False

    if result:
        print("All systems GOOOOO!!")
    else:
        print("Wait before trying again!!!!!!")
    return result
# Example 2
def export_log(namespace):
    """Dump the logs of every pod in *namespace* to per-pod ``.log`` files.

    Shells out to ``kubectl logs`` and redirects each pod's output to
    ``<results_path>/<pod name>.log``.
    """
    pods = core_v1_api.list_namespaced_pod(namespace)
    for pod in pods.items or []:
        pod_name = pod.metadata.name
        log_file = '%s/%s.log' % (results_path, pod_name)
        os.system('kubectl logs -n%s %s > %s' % (namespace, pod_name, log_file))
# Example 3
def check_status_exec_profiler_workers():
    """Print every execution-profiler worker that is not running.

    Returns:
        bool -- ``True`` if all worker pods are in the ``Running`` phase,
        ``False`` otherwise.
    """
    jupiter_config.set_globals()

    # Load the list of worker nodes.
    path1 = jupiter_config.HERE + 'nodes.txt'
    nodes = k8s_get_nodes(path1)

    # Load the kubernetes instance configuration (e.g. admin.conf);
    # the config file path is set in jupiter_config.py.
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)

    # We have defined the namespace for deployments in jupiter_config
    namespace = jupiter_config.EXEC_NAMESPACE

    # Handle to the k8s core API (the only API this check needs).
    core_v1_api = client.CoreV1Api()

    result = True
    for key in nodes:
        # The home node runs no exec-profiler worker, so skip it.
        if key == "home":
            continue

        # Kubernetes label identifying this node's exec-profiler pod.
        label = "app=" + key + "exec_profiler"

        resp = core_v1_api.list_namespaced_pod(namespace,
                                               label_selector=label)
        # Report the first matching pod if it is not yet running.
        if resp.items:
            pod = resp.items[0]
            if pod.status.phase != "Running":
                print("Pod Not Running", key)
                result = False

    if result:
        print("All systems GOOOOO!!")
    else:
        print("Wait before trying again!!!!")

    return result
# Example 4
def check_workers_running(app_config, namespace):
    """Checks if all worker tasks are up and running.

    Arguments:
        app_config {app_config_parser.AppConfig} -- app config object
        namespace {string} -- k8s namespace of execution profiler

    Returns:
        bool -- True if all workers are running, False if not.
    """
    # Load kube config before executing k8s client API calls.
    config.load_kube_config(config_file=jupiter_config.get_kubeconfig())
    core_v1_api = client.CoreV1Api()

    result = True
    for node in app_config.node_map():
        if node.startswith('home'):
            # ignore checking on home status
            continue

        # Kubernetes label identifying this node's profiler pod.
        label = "app=" + app_config.app_name + '-' + node

        resp = core_v1_api.list_namespaced_pod(namespace, label_selector=label)
        # Report the first matching pod if it is not yet in the Running phase.
        if resp.items:
            pod = resp.items[0]
            if pod.status.phase != "Running":
                log.debug(
                    "Execution Profiler pod not yet running on {}".format(
                        node))
                result = False

    if result is True:
        log.info("All drupe profiler workers successfully running.")

    return result
# Example 5
def _delete_circe_resources(name, pod_deleted_msg, namespace,
                            extensions_v1_beta1_api, core_v1_api,
                            v1_delete_options):
    """Delete the k8s deployment, replicaset, pod, and service for *name*.

    The deletion follows this particular order for a proper removal.

    Arguments:
        name {string} -- task (or ``'home'``) name; also the service name
        pod_deleted_msg {string} -- printf-style message used when a pod
            is deleted (takes one ``%s`` for the delete status)
        namespace {string} -- k8s namespace of the deployment
        extensions_v1_beta1_api -- ``client.ExtensionsV1beta1Api`` handle
        core_v1_api -- ``client.CoreV1Api`` handle
        v1_delete_options -- ``client.V1DeleteOptions`` instance
    """
    # First check if there is a deployment existing with this name.
    resp = None
    try:
        resp = extensions_v1_beta1_api.read_namespaced_deployment(
            name, namespace)
    except ApiException:
        print("No Such Deployment Exists")

    # If a deployment with this name exists in the namespace, delete it.
    if resp:
        del_resp = extensions_v1_beta1_api.delete_namespaced_deployment(
            name, namespace, v1_delete_options)
        print("Deployment '%s' Deleted. status='%s'" %
              (name, str(del_resp.status)))

    # Kubernetes labels (app=<name>) identify the replicaset associated
    # to each task; delete any replicaset in our namespace carrying it.
    label = "app=" + name
    resp = extensions_v1_beta1_api.list_replica_set_for_all_namespaces(
        label_selector=label)
    for rs in resp.items:
        if rs.metadata.namespace == namespace:
            del_resp = extensions_v1_beta1_api.delete_namespaced_replica_set(
                rs.metadata.name, namespace, v1_delete_options)
            print("Replicaset '%s' Deleted. status='%s'" %
                  (name, str(del_resp.status)))

    # Check if there is a pod still running under the same label.
    # NOTE(review): only the first matching pod is deleted, mirroring the
    # original behavior -- confirm a task never has more than one pod.
    resp = core_v1_api.list_namespaced_pod(namespace, label_selector=label)
    if resp.items:
        del_resp = core_v1_api.delete_namespaced_pod(
            resp.items[0].metadata.name, namespace, v1_delete_options)
        print(pod_deleted_msg % str(del_resp.status))

    # Check if there is a service running with this name; if so, kill it.
    resp = None
    try:
        resp = core_v1_api.read_namespaced_service(name, namespace)
    except ApiException:
        print("Exception Occurred")
    if resp:
        del_resp = core_v1_api.delete_namespaced_service(name, namespace)
        print("Service Deleted. status='%s'" % str(del_resp.status))


def delete_all_circe():
    """Tear down all CIRCE deployments.

    Loops through the tasks in the dag (plus the home node) and deletes
    the respective k8s deployment, replicaset, pods, and service, in the
    order required for a proper removal.  You can always check whether a
    service/pod/deployment is still running afterwards via kubectl:
        kubectl get svc -n "namespace name"
        kubectl get deployment -n "namespace name"
        kubectl get replicaset -n "namespace name"
        kubectl get pod -n "namespace name"
    """
    jupiter_config.set_globals()

    # Load the task graph.
    path1 = jupiter_config.APP_PATH + 'configuration.txt'
    dag_info = k8s_read_config(path1)
    dag = dag_info[1]

    # Load the kubernetes instance configuration (e.g. admin.conf);
    # the config file path is set in jupiter_config.py.
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)

    # We have defined the namespace for deployments in jupiter_config
    namespace = jupiter_config.DEPLOYMENT_NAMESPACE

    # Get proper handles or pointers to the k8-python tool.
    extensions_v1_beta1_api = client.ExtensionsV1beta1Api()
    v1_delete_options = client.V1DeleteOptions()
    core_v1_api = client.CoreV1Api()

    # Tear down every task's resources, then the home node's.
    for key in dag:
        _delete_circe_resources(key, "Pod Deleted. status='%s'", namespace,
                                extensions_v1_beta1_api, core_v1_api,
                                v1_delete_options)

    _delete_circe_resources('home', "Home pod Deleted. status='%s'",
                            namespace, extensions_v1_beta1_api, core_v1_api,
                            v1_delete_options)
# Example 6
def delete_all_stream(app_name):
    """Tear down all CIRCE deployments.
    """
    jupiter_config.set_globals()

    # Load the task graph and the node/datasource lists.
    # NOTE: the dag itself is not used below; it is loaded for parity with
    # the other teardown helpers.
    path1 = jupiter_config.APP_PATH + 'configuration.txt'
    dag_info = utilities.k8s_read_config(path1)
    dag = dag_info[1]
    path2 = jupiter_config.HERE + 'nodes.txt'
    node_list, homes, datasources = utilities.k8s_get_all_elements(path2)

    logging.debug('Starting to teardown the datasources')

    # Load the kubernetes instance configuration (e.g. admin.conf);
    # the config file path is set in jupiter_config.py.
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)

    # Namespace for deployments, as defined in jupiter_config.
    namespace = jupiter_config.DEPLOYMENT_NAMESPACE

    # Handles to the k8s python client APIs.
    extensions_v1_beta1_api = client.ExtensionsV1beta1Api()
    v1_delete_options = client.V1DeleteOptions()
    core_v1_api = client.CoreV1Api()

    for datasource in datasources:
        logging.debug('Data source information')

        # Name of the stream deployment for this data source.
        home_name = app_name + "-stream" + datasource
        logging.debug(home_name)

        # Delete the stream deployment, if one exists.
        deployment = None
        try:
            deployment = extensions_v1_beta1_api.read_namespaced_deployment(
                home_name, namespace)
        except ApiException:
            logging.debug("No Such Deplyment Exists")
        if deployment:
            del_resp_0 = extensions_v1_beta1_api.delete_namespaced_deployment(
                home_name, namespace, v1_delete_options)
            logging.debug("Deployment '%s' Deleted. status='%s'" %
                          (home_name, str(del_resp_0.status)))

        # Delete any replicaset carrying this data source's label; the
        # kubernetes label identifies the replicaset of each task.
        label = "app=" + app_name + "-stream" + datasource
        replicasets = extensions_v1_beta1_api.list_namespaced_replica_set(
            label_selector=label, namespace=namespace)
        for rs in replicasets.items:
            if rs.metadata.namespace == namespace:
                del_resp_1 = extensions_v1_beta1_api.delete_namespaced_replica_set(
                    rs.metadata.name, namespace, v1_delete_options)
                logging.debug("Relicaset '%s' Deleted. status='%s'" %
                              (home_name, str(del_resp_1.status)))

        # Delete the first pod still running under the same label, if any.
        pods = core_v1_api.list_namespaced_pod(namespace,
                                               label_selector=label)
        if pods.items:
            del_resp_2 = core_v1_api.delete_namespaced_pod(
                pods.items[0].metadata.name, namespace, v1_delete_options)
            logging.debug("Home pod Deleted. status='%s'" %
                          str(del_resp_2.status))

        # Delete the service with the same name, if it is running.
        service = None
        try:
            service = core_v1_api.read_namespaced_service(
                home_name, namespace=namespace)
        except ApiException:
            logging.debug("Exception Occurred")
        if service:
            del_resp_2 = core_v1_api.delete_namespaced_service(
                home_name, namespace, v1_delete_options)
            logging.debug("Service Deleted. status='%s'" %
                          str(del_resp_2.status))