Example #1
def get_all_execs(app_name):
    """
        This loads all of the service IPs of the execution profiler deployments.
    """
    jupiter_config.set_globals()

    mapping = {}
    """
        This loads the node list in use
    """
    path1 = jupiter_config.HERE + 'nodes.txt'
    nodes = utilities.k8s_get_nodes(path1)
    """
        This loads the kubernetes instance configuration.
        In our case this is stored in admin.conf.
        You should set the config file path in the jupiter_config.py file.
    """
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)
    """
        Look up the execution profiler service for the home node and record
        its cluster IP.
        You can always check which services are running after running this
        script via kubectl, e.g.:
            kubectl get svc -n "namespace name"
    """

    # We have defined the namespace for deployments in jupiter_config
    namespace = jupiter_config.EXEC_NAMESPACE

    # Get a handle to the Kubernetes Python client API.
    api_2 = client.CoreV1Api()

    # First check if there is an existing execution profiler service with
    # the name = app_name-home in the respective namespace
    key = 'home'
    pod_name = app_name + "-home"

    resp = None

    try:
        resp = api_2.read_namespaced_service(pod_name, namespace)
    except ApiException as e:
        logging.debug("Exception occurred: %s", e)
    # if the service exists, record its cluster IP
    if resp:
        logging.debug(resp.spec.cluster_ip)
        mapping[key] = resp.spec.cluster_ip

    # At this point mapping holds the cluster IP of the home service (if it was found)
    return mapping
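
A minimal, self-contained sketch of the lookup pattern used above: read a named Service and return its cluster IP. This is illustrative only; get_cluster_ip and its arguments are hypothetical and not part of Jupiter.

from kubernetes import client, config
from kubernetes.client.rest import ApiException

def get_cluster_ip(service_name, namespace, kubeconfig_path):
    """Return the cluster IP of a named service, or None if it is absent."""
    # Load the cluster configuration from a kubeconfig file (hypothetical path)
    config.load_kube_config(config_file=kubeconfig_path)
    core_v1 = client.CoreV1Api()
    try:
        svc = core_v1.read_namespaced_service(service_name, namespace)
        return svc.spec.cluster_ip
    except ApiException:
        return None  # the service does not exist (or the API call failed)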
Example #2
def check_status_profilers():
    """Verify that all the network profilers have been deployed and are up in the system.
    """
    jupiter_config.set_globals()

    path1 = jupiter_config.HERE + 'nodes.txt'
    nodes = utilities.k8s_get_nodes(path1)
    """
        This loads the kubernetes instance configuration.
        In our case this is stored in admin.conf.
        You should set the config file path in the jupiter_config.py file.
    """
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)
    # We have defined the namespace for deployments in jupiter_config
    namespace = jupiter_config.PROFILER_NAMESPACE

    # Get a handle to the Kubernetes Python client API.
    core_v1_api = client.CoreV1Api()

    result = True
    for key in nodes:

        # Check if the profiler pod for this node is running by using the
        # label app={key}profiler. Kubernetes labels are used to identify
        # the pods associated with each node's profiler.
        label = "app=" + key + "profiler"

        resp = None

        resp = core_v1_api.list_namespaced_pod(namespace, label_selector=label)
        # if a matching pod exists, verify that it has reached the Running phase
        if resp.items:
            pod = resp.items[0]
            if pod.status.phase != "Running":
                print("Pod Not Running", key)
                result = False


    if result:
        print("All systems GOOOOO!!")
    else:
        print("Wait before trying again!!!!")

    return result
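
A readiness probe like check_status_profilers() is usually polled until it passes; Example #3 below does this with an unbounded loop. A hypothetical wrapper with a deadline, assuming only that the check returns a boolean, might look like this:

import time

def wait_until_ready(check, timeout=600, interval=30):
    # Poll check() every `interval` seconds until it passes or `timeout` expires
    deadline = time.time() + timeout
    while time.time() < deadline:
        if check():
            return True
        time.sleep(interval)
    return False

# e.g. wait_until_ready(check_status_profilers)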
Example #3
def k8s_profiler_scheduler():
    """
        Deploy DRUPE in the system. 
    """
    jupiter_config.set_globals()
    """
        This loads the node list
    """
    home_ips = ''
    home_ids = ''
    nexthost_ips = ''
    nexthost_names = ''
    path2 = jupiter_config.HERE + 'nodes.txt'
    nodes = utilities.k8s_get_nodes(path2)
    """
        This loads the kubernetes instance configuration.
        In our case this is stored in admin.conf.
        You should set the config file path in the jupiter_config.py file.
    """
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)
    """
        We have defined the namespace for deployments in jupiter_config
    """
    namespace = jupiter_config.PROFILER_NAMESPACE
    """
        Get handles to the Kubernetes Python client APIs.
    """
    api = client.CoreV1Api()
    k8s_beta = client.ExtensionsV1beta1Api()

    service_ips = {}
    pprint(nodes)

    """
        Loop through the list of nodes and create all profiler-related k8s
        deployments, replicasets, pods, and services.
        You can always check if a service/pod/deployment is running after
        running this script via kubectl commands, e.g.:
            kubectl get svc -n "namespace name"
            kubectl get deployment -n "namespace name"
            kubectl get replicaset -n "namespace name"
            kubectl get pod -n "namespace name"
    """
    for i in nodes:
        if i.startswith('home'):
            home_body = write_profiler_service_specs(name=i,
                                                     label=i + "profiler")
            try:
                ser_resp = api.create_namespaced_service(namespace, home_body)
                print("Home service created. status = '%s'" % str(ser_resp.status))
                resp = api.read_namespaced_service(i, namespace)
                service_ips[i] = resp.spec.cluster_ip
                home_ids = home_ids + ':' + i
                home_ips = home_ips + ':' + service_ips[i]
            except ApiException as e:
                print(e)
                print("Exception Occurred")

    print('Home Profilers were created successfully!')
    print(service_ips)
    print(home_ips)

    for i in nodes:
        """
            Generate the yaml description of the required service for each node's profiler
        """
        if i.startswith('home'):
            continue
        body = write_profiler_service_specs(name=i, label=i + "profiler")

        # Call the Kubernetes API to create the service

        try:
            ser_resp = api.create_namespaced_service(namespace, body)
            print("Service created. status = '%s'" % str(ser_resp.status))
            print(i)
            resp = api.read_namespaced_service(i, namespace)
            # record the cluster IP of the newly created service
            service_ips[i] = resp.spec.cluster_ip
            nexthost_ips = nexthost_ips + ':' + service_ips[i]
            nexthost_names = nexthost_names + ':' + i
        except ApiException as e:
            print(e)
            print("Exception Occurred")

    print('Worker Profilers were created successfully!')
    print(service_ips)
    print(nexthost_ips)
    print(nexthost_names)

    for i in nodes:

        """
            We check whether the node is the home (scheduler) node.
            Since we do not run any task on the scheduler, we do not run a
            worker profiler on it either.
        """
        if i.startswith('home'):
            continue

        """
            Generate the yaml description of the required deployment for the profilers
        """
        dep = write_profiler_specs(name=i,
                                   label=i + "profiler",
                                   image=jupiter_config.PROFILER_WORKER_IMAGE,
                                   host=nodes[i][0],
                                   dir='{}',
                                   all_node=nexthost_names,
                                   all_node_ips=nexthost_ips,
                                   serv_ip=service_ips[i],
                                   home_ips=home_ips,
                                   home_ids=home_ids)
        # Call the Kubernetes API to create the deployment
        resp = k8s_beta.create_namespaced_deployment(body=dep,
                                                     namespace=namespace)
        print("Deployment created. status ='%s'" % str(resp.status))

    while True:
        if check_status_profilers():
            break
        time.sleep(30)

    for i in nodes:
        if i.startswith('home'):
            # have to somehow make sure that the worker nodes are on and working by this time
            home_dep = write_profiler_specs(
                name=i,
                label=i + "profiler",
                image=jupiter_config.PROFILER_HOME_IMAGE,
                host=jupiter_config.HOME_NODE,
                dir='{}',
                all_node=nexthost_names,
                all_node_ips=nexthost_ips,
                serv_ip=service_ips[i],
                home_ips=home_ips,
                home_ids=home_ids)
            resp = k8s_beta.create_namespaced_deployment(body=home_dep,
                                                         namespace=namespace)
            print("Home deployment created. status = '%s'" % str(resp.status))

            pprint(service_ips)

    return service_ips
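
write_profiler_service_specs() and write_profiler_specs() are defined elsewhere in Jupiter and are not shown here. Purely as an illustration of the kind of body that create_namespaced_service() expects, a service spec can be built from the client models directly; make_service_body and the port number below are placeholders, not DRUPE's real values.

from kubernetes import client

def make_service_body(name, label, port=5000):
    # Build a V1Service whose selector matches pods labeled app=<label>
    return client.V1Service(
        api_version="v1",
        kind="Service",
        metadata=client.V1ObjectMeta(name=name),
        spec=client.V1ServiceSpec(
            selector={"app": label},
            ports=[client.V1ServicePort(port=port)],
        ),
    )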
Example #4
def delete_all_profilers():
    """Tear down all DRUPE deployments.
    """

    jupiter_config.set_globals()
    """
        This loads the node list in use
    """
    path1 = jupiter_config.HERE + 'nodes.txt'
    nodes = utilities.k8s_get_nodes(path1)
    """
        This loads the kubernetes instance configuration.
        In our case this is stored in admin.conf.
        You should set the config file path in the jupiter_config.py file.
    """
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)
    """
        Loop through the list of nodes and delete all profiler-related k8s
        deployments, replicasets, pods, and services.
        The deletion should follow this particular order for a proper removal.
        You can always check if a service/pod/deployment is running after
        running this script via kubectl commands, e.g.:
            kubectl get svc -n "namespace name"
            kubectl get deployment -n "namespace name"
            kubectl get replicaset -n "namespace name"
            kubectl get pod -n "namespace name"
    """
    # We have defined the namespace for deployments in jupiter_config
    namespace = jupiter_config.PROFILER_NAMESPACE

    # Get handles to the Kubernetes Python client APIs.
    api = client.ExtensionsV1beta1Api()
    body = client.V1DeleteOptions()

    for key in nodes:

        # First check if there is an existing profiler deployment with
        # the name = key in the respective namespace
        resp = None
        try:
            resp = api.read_namespaced_deployment(key, namespace)
        except ApiException as e:
            print("Exception Occurred")

        # if a deployment with the name = key exists in the namespace, delete it
        if resp:
            del_resp_0 = api.delete_namespaced_deployment(key, namespace, body)
            print("Deployment '%s' Deleted. status='%s'" %
                  (key, str(del_resp_0.status)))

        # Check if there is a replicaset running by using the label
        # "app={key}profiler", e.g., "app=node1profiler".
        # Kubernetes labels are used to identify the replicaset associated
        # with each node's profiler.
        label = "app=" + key + "profiler"
        resp = api.list_namespaced_replica_set(label_selector=label,
                                               namespace=namespace)
        # if a replicaset exists, delete it
        for i in resp.items:
            if i.metadata.namespace == namespace:
                del_resp_1 = api.delete_namespaced_replica_set(
                    i.metadata.name, namespace, body)
                print("Replicaset '%s' Deleted. status='%s'" %
                      (key, str(del_resp_1.status)))

        # Check if there is a pod still running by using the same label
        api_2 = client.CoreV1Api()
        resp = api_2.list_namespaced_pod(namespace, label_selector=label)
        # if a pod is running, delete it
        if resp.items:
            del_resp_2 = api_2.delete_namespaced_pod(
                resp.items[0].metadata.name, namespace, body)
            print("Pod Deleted. status='%s'" % str(del_resp_2.status))

        # Check if there is a service running with name = key
        resp = None
        try:
            resp = api_2.read_namespaced_service(key, namespace)
        except ApiException as e:
            print("Exception Occurred")
        # if a service is running, kill it
        if resp:
            del_resp_2 = api_2.delete_namespaced_service(key, namespace, body)
            print("Service Deleted. status='%s'" % str(del_resp_2.status))
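
The per-node teardown above always follows the same order: deployment, then replicaset, then pod, then service. As a sketch only (not part of Jupiter, and assuming the same ExtensionsV1beta1 API generation used above), that order can be captured in one hypothetical helper:

from kubernetes import client
from kubernetes.client.rest import ApiException

def teardown(name, label, namespace):
    ext_api = client.ExtensionsV1beta1Api()
    core_api = client.CoreV1Api()
    body = client.V1DeleteOptions()
    # 1. delete the deployment (by name)
    try:
        ext_api.delete_namespaced_deployment(name, namespace, body)
    except ApiException:
        pass  # no such deployment
    # 2. delete any replicasets (by label)
    for rs in ext_api.list_namespaced_replica_set(
            namespace, label_selector=label).items:
        ext_api.delete_namespaced_replica_set(rs.metadata.name, namespace, body)
    # 3. delete any pods (by label)
    for pod in core_api.list_namespaced_pod(
            namespace, label_selector=label).items:
        core_api.delete_namespaced_pod(pod.metadata.name, namespace, body)
    # 4. delete the service (by name)
    try:
        core_api.delete_namespaced_service(name, namespace, body)
    except ApiException:
        pass  # no such service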
Example #5
def delete_all_exec(app_name):
    """Tear down all execution profiler deployments.
    """
    jupiter_config.set_globals()
    """
        This loads the task graph
    """
    path1 = jupiter_config.APP_PATH + 'configuration.txt'
    dag_info = utilities.k8s_read_config(path1)
    dag = dag_info[1]
    """
        This loads the kubernetes instance configuration.
        In our case this is stored in admin.conf.
        You should set the config file path in the jupiter_config.py file.
    """
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)

    # We have defined the namespace for deployments in jupiter_config
    namespace = jupiter_config.EXEC_NAMESPACE

    # Get handles to the Kubernetes Python client APIs.
    extensions_v1_beta1_api = client.ExtensionsV1beta1Api()
    v1_delete_options = client.V1DeleteOptions()
    core_v1_api = client.CoreV1Api()
    """
        Loop through the list of tasks in the DAG and delete the respective
        k8s deployments, replicasets, pods, and services.
        The deletion should follow this particular order for a proper removal.
        You can always check if a service/pod/deployment is running after
        running this script via kubectl commands, e.g.:
            kubectl get svc -n "namespace name"
            kubectl get deployment -n "namespace name"
            kubectl get replicaset -n "namespace name"
            kubectl get pod -n "namespace name"
    """
    for key, value in dag.items():

        # First check if there is a deployment existing with
        # the name = app_name-key in the respective namespace
        print(key)
        print(value)

        pod_name = app_name + "-" + key

        resp = None
        try:
            resp = extensions_v1_beta1_api.read_namespaced_deployment(
                pod_name, namespace)
        except ApiException as e:
            print("No Such Deployment Exists")

        # if a deployment with the name = key exists in the namespace, delete it
        if resp:
            del_resp_0 = extensions_v1_beta1_api.delete_namespaced_deployment(
                pod_name, namespace, v1_delete_options)
            print("Deployment '%s' Deleted. status='%s'" %
                  (key, str(del_resp_0.status)))

        # Check if there is a replicaset running by using the label app={app_name}-{key}.
        # Kubernetes labels are used to identify the replicaset associated with each task.
        label = "app=" + app_name + "-" + key
        resp = extensions_v1_beta1_api.list_namespaced_replica_set(
            label_selector=label, namespace=namespace)
        # if a replicaset exists, delete it
        for i in resp.items:
            if i.metadata.namespace == namespace:
                del_resp_1 = extensions_v1_beta1_api.delete_namespaced_replica_set(
                    i.metadata.name, namespace, v1_delete_options)
                print("Replicaset '%s' Deleted. status='%s'" %
                      (key, str(del_resp_1.status)))

        # Check if there is a pod still running by using the same label
        resp = core_v1_api.list_namespaced_pod(namespace, label_selector=label)
        # if a pod is running, delete it
        if resp.items:
            del_resp_2 = core_v1_api.delete_namespaced_pod(
                resp.items[0].metadata.name, namespace, v1_delete_options)
            print("Pod Deleted. status='%s'" % str(del_resp_2.status))

        # Check if there is a service running with name = app_name-key
        resp = None
        try:
            resp = core_v1_api.read_namespaced_service(pod_name, namespace)
        except ApiException as e:
            print("Exception Occurred")
        # if a service is running, kill it
        if resp:
            del_resp_2 = core_v1_api.delete_namespaced_service(
                pod_name, namespace, v1_delete_options)
            print("Service Deleted. status='%s'" % str(del_resp_2.status))

        # At this point you should not have any of the related services, pods, or deployments running
    # end of the per-task loop

    home_name = app_name + '-home'
    # delete the home deployment and service
    resp = None
    try:
        resp = extensions_v1_beta1_api.read_namespaced_deployment(
            home_name, namespace)
    except ApiException as e:
        print("No Such Deployment Exists")

    # if home exists, delete it
    if resp:
        del_resp_0 = extensions_v1_beta1_api.delete_namespaced_deployment(
            home_name, namespace, v1_delete_options)
        print("Deployment '%s' Deleted. status='%s'" %
              ('home', str(del_resp_0.status)))

    # Check if there is a replicaset running by using the label app={app_name}-home.
    # Kubernetes labels are used to identify the replicaset associated with each task.
    label = "app=" + app_name + "-home"
    resp = extensions_v1_beta1_api.list_namespaced_replica_set(
        label_selector=label, namespace=namespace)
    # if a replicaset exists, delete it
    for i in resp.items:
        if i.metadata.namespace == namespace:
            del_resp_1 = extensions_v1_beta1_api.delete_namespaced_replica_set(
                i.metadata.name, namespace, v1_delete_options)
            print("Replicaset '%s' Deleted. status='%s'" %
                  ('home', str(del_resp_1.status)))

    # Check if there is a pod still running by using the same label
    resp = core_v1_api.list_namespaced_pod(namespace, label_selector=label)
    # if a pod is running, delete it
    if resp.items:
        del_resp_2 = core_v1_api.delete_namespaced_pod(
            resp.items[0].metadata.name, namespace, v1_delete_options)
        print("Home pod Deleted. status='%s'" % str(del_resp_2.status))

    # Check if there is a service running with name = app_name-home
    resp = None
    try:
        resp = core_v1_api.read_namespaced_service(home_name, namespace)
    except ApiException as e:
        print("Exception Occurred")
    # if a service is running, kill it
    if resp:
        del_resp_2 = core_v1_api.delete_namespaced_service(
            home_name, namespace, v1_delete_options)
        print("Service Deleted. status='%s'" % str(del_resp_2.status))
    """
        This loads the node list in use
    """
    path1 = jupiter_config.HERE + 'nodes.txt'
    nodes = utilities.k8s_get_nodes(path1)
    """
        This loads the kubernetes instance configuration.
        In our case this is stored in admin.conf.
        You should set the config file path in the jupiter_config.py file.
    """
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)
    """
        Loop through the list of nodes and delete all execution profiler related
        k8s deployments, replicasets, pods, and services.
        The deletion should follow this particular order for a proper removal.
        You can always check if a service/pod/deployment is running after
        running this script via kubectl commands, e.g.:
            kubectl get svc -n "namespace name"
            kubectl get deployment -n "namespace name"
            kubectl get replicaset -n "namespace name"
            kubectl get pod -n "namespace name"
    """
    # We have defined the namespace for deployments in jupiter_config
    namespace = jupiter_config.EXEC_NAMESPACE

    # Get handles to the Kubernetes Python client APIs.
    api = client.ExtensionsV1beta1Api()
    body = client.V1DeleteOptions()

    for key in nodes:

        # First check if there is an existing profiler deployment with
        # the name = app_name-key in the respective namespace
        pod_name = app_name + "-" + key
        resp = None
        try:
            resp = api.read_namespaced_deployment(pod_name, namespace)
        except ApiException as e:
            print("Exception Occurred")

        # if a deployment with the name = key exists in the namespace, delete it
        if resp:
            del_resp_0 = api.delete_namespaced_deployment(
                pod_name, namespace, body)
            print("Deployment '%s' Deleted. status='%s'" %
                  (key, str(del_resp_0.status)))

        # Check if there is a replicaset running by using the label
        # "app={app_name}-{key}exec_profiler".
        # Kubernetes labels are used to identify the replicaset associated
        # with each node's execution profiler.
        label = "app=" + app_name + '-' + key + "exec_profiler"
        resp = api.list_namespaced_replica_set(label_selector=label,
                                               namespace=namespace)
        # if a replicaset exists, delete it
        for i in resp.items:
            if i.metadata.namespace == namespace:
                del_resp_1 = api.delete_namespaced_replica_set(
                    i.metadata.name, namespace, body)
                print("Replicaset '%s' Deleted. status='%s'" %
                      (key, str(del_resp_1.status)))

        # Check if there is a pod still running by using the same label
        api_2 = client.CoreV1Api()
        resp = api_2.list_namespaced_pod(namespace, label_selector=label)
        # if a pod is running, delete it
        if resp.items:
            del_resp_2 = api_2.delete_namespaced_pod(
                resp.items[0].metadata.name, namespace, body)
            print("Pod Deleted. status='%s'" % str(del_resp_2.status))

        # Check if there is a service running with name = app_name-key
        resp = None
        try:
            resp = api_2.read_namespaced_service(pod_name, namespace)
        except ApiException as e:
            print("Exception Occurred")
        # if a service is running, kill it
        if resp:
            del_resp_2 = api_2.delete_namespaced_service(
                pod_name, namespace, v1_delete_options)
            print("Service Deleted. status='%s'" % str(del_resp_2.status))
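
The five examples are separate scripts within Jupiter. Purely as an illustration of how they fit together, a hypothetical driver (with 'myapp' as a placeholder application name, and assuming the functions are importable) could run:

if __name__ == '__main__':
    k8s_profiler_scheduler()           # Example #3: deploy the DRUPE profilers
    if check_status_profilers():       # Example #2: verify they are up
        print(get_all_execs('myapp'))  # Example #1: exec profiler service IPs, if deployed
    delete_all_exec('myapp')           # Example #5: tear down execution profilers
    delete_all_profilers()             # Example #4: tear down the DRUPE profilers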