Пример #1
0
def check_status_circe_computing(app_name):
    """Check that every CIRCE computing pod for ``app_name`` is running.

    Prints the node key of each pod that is not in the ``Running`` phase.

    Args:
        app_name (str): application name; pods are labelled
            ``app=<app_name>-<node>``.

    Returns:
        bool: ``True`` if all computing pods are running, ``False`` otherwise.
    """

    jupiter_config.set_globals()

    path1 = jupiter_config.HERE + 'nodes.txt'
    nodes, homes = utilities.k8s_get_nodes_worker(path1)

    sys.path.append(jupiter_config.CIRCE_PATH)

    # Load the kubernetes instance configuration (e.g. admin.conf); the
    # config file path is set in jupiter_config.py.
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)

    # The namespace for deployments is defined in jupiter_config.
    namespace = jupiter_config.DEPLOYMENT_NAMESPACE

    # Handle to the k8s core API (pods live in the core group).
    core_v1_api = client.CoreV1Api()

    result = True
    for key in nodes:
        # Check if there is a replicaset running by using the label
        # app=<app_name>-<key>; kubernetes labels identify the replicaset
        # associated with each task.
        label = "app=" + app_name + '-' + key

        resp = core_v1_api.list_namespaced_pod(namespace, label_selector=label)
        if resp.items:
            pod = resp.items[0]
            if pod.status.phase != "Running":
                print("Pod Not Running", key)
                result = False

    if result:
        print("All the computing nodes GOOOOO!!")
    else:
        print("Wait before trying again!!!!")

    return result
Пример #2
0
def check_status_waves(app_name):
    """Verify that all WAVE home and worker pods are deployed and running.

    Prints the node key of each pod that is not in the ``Running`` phase.

    Args:
        app_name (str): application name; pods are labelled
            ``app=<app_name>_wave_<node>``.

    Returns:
        bool: ``True`` if every WAVE pod is running, ``False`` otherwise.
    """
    jupiter_config.set_globals()

    # Load the node lists in use.
    path1 = jupiter_config.HERE + 'nodes.txt'
    nodes, homes = utilities.k8s_get_nodes_worker(path1)
    pprint(nodes)

    # Load the kubernetes instance configuration (e.g. admin.conf); the
    # config file path is set in jupiter_config.py.
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)

    # The namespace for mapper deployments is defined in jupiter_config.
    namespace = jupiter_config.MAPPER_NAMESPACE

    # Handle to the k8s core API (pods live in the core group).
    core_v1_api = client.CoreV1Api()

    result = True
    for key in nodes:
        # kubernetes labels identify the replicaset associated to each task.
        label = "app=%s_wave_" % (app_name) + key

        resp = core_v1_api.list_namespaced_pod(namespace, label_selector=label)
        if resp.items:
            pod = resp.items[0]
            if pod.status.phase != "Running":
                print("Pod Not Running", key)
                result = False

    if result:
        print("All systems GOOOOO!!")
    else:
        print("Wait before trying again!!!!")

    return result
Пример #3
0
def k8s_pricing_circe_scheduler(dag_info, temp_info, profiler_ips,
                                execution_ips, app_name):
    """
    Deploy CIRCE (pricing variant) in the system.

    Creates kubernetes services for the home node(s), every task
    controller and every computing node, then starts the deployments in
    stages — computing nodes first, then task controllers, then the home
    node — blocking between stages until the pods report ``Running``.

    Args:
        dag_info : DAG info and mapping; ``dag_info[0]`` is the first
            task, ``dag_info[1]`` the DAG dict, ``dag_info[2]`` a
            task-to-node mapping
        temp_info : schedule information; ``temp_info[2]`` maps tasks to
            their hosts
        profiler_ips : IPs of network profilers, keyed by node name
        execution_ips : IPs of execution profilers; must contain 'home'
        app_name (str): application name used to prefix pod/service names
    """
    print('INPUT PROFILERS')
    print(profiler_ips)
    jupiter_config.set_globals()

    sys.path.append(jupiter_config.CIRCE_PATH)

    # Shared with other helpers in this module via module-level globals.
    global configs, taskmap, path1

    path1 = jupiter_config.HERE + 'nodes.txt'
    nodes, homes = utilities.k8s_get_nodes_worker(path1)
    pprint(nodes)

    # taskname_map / exec_profiler come from the application's config.json.
    configs = json.load(open(jupiter_config.APP_PATH + 'scripts/config.json'))
    taskmap = configs["taskname_map"]
    executionmap = configs["exec_profiler"]
    """
        This loads the kubernetes instance configuration.
        In our case this is stored in admin.conf.
        You should set the config file path in the jupiter_config.py file.
    """
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)
    """
        We have defined the namespace for deployments in jupiter_config
    """
    namespace = jupiter_config.DEPLOYMENT_NAMESPACE
    """
        Get proper handles or pointers to the k8-python tool to call different functions.
    """
    api = client.CoreV1Api()
    k8s_beta = client.ExtensionsV1beta1Api()

    #get DAG and home machine info
    first_task = dag_info[0]
    dag = dag_info[1]
    hosts = temp_info[2]
    # mapping = [task+":"+dag_info[2][task] for task in dag_info[2].keys()]
    # mapping_str = "#".join(mapping)
    service_ips = {}
    #list of all service IPs including home and task controllers
    computing_service_ips = {}
    all_profiler_ips = ''
    all_profiler_nodes = ''

    print('-------- First create the home node service')
    """
        First create the home node's service.
    """

    for key in homes:
        print(key)
        all_profiler_ips = all_profiler_ips + ':' + profiler_ips[key]
        all_profiler_nodes = all_profiler_nodes + ':' + key
        home_name = app_name + "-" + key
        home_body = write_circe_service_specs(name=home_name)
        ser_resp = api.create_namespaced_service(namespace, home_body)
        print("Home service created. status = '%s'" % str(ser_resp.status))

        try:
            resp = api.read_namespaced_service(home_name, namespace)
        except ApiException as e:
            print("Exception Occurred")

        # NOTE(review): if read_namespaced_service raised above, ``resp``
        # is unbound (or stale from a previous iteration) — confirm this
        # failure mode is acceptable.
        service_ips[key] = resp.spec.cluster_ip
    """
        Iterate through the list of tasks and run the related k8 deployment, replicaset, pod, and service on the respective node.
        You can always check if a service/pod/deployment is running after running this script via kubectl command.
        E.g., 
            kubectl get svc -n "namespace name"
            kubectl get deployement -n "namespace name"
            kubectl get replicaset -n "namespace name"
            kubectl get pod -n "namespace name"
    """

    print('-------- Create task controllers service')
    """
        Create task controllers' service (all the tasks)
    """

    for key, value in dag.items():

        task = key
        print('---')
        print(task)

        # all_current_nodes = all_current_nodes + task + ":" + currentnodes + "!"
        """
            Generate the yaml description of the required service for each task
        """
        pod_name = app_name + "-" + task

        body = write_circe_service_specs(name=pod_name)

        # Call the Kubernetes API to create the service
        ser_resp = api.create_namespaced_service(namespace, body)
        print("Service created. status = '%s'" % str(ser_resp.status))

        try:
            resp = api.read_namespaced_service(pod_name, namespace)
        except ApiException as e:
            print("Exception Occurred")

        # print resp.spec.cluster_ip
        service_ips[task] = resp.spec.cluster_ip

    # ':'-joined lists of every service IP / name (home + controllers).
    all_node_ips = ':'.join(service_ips.values())
    all_node = ':'.join(service_ips.keys())

    print(all_node)
    print('-------- Create computing nodes service')
    """
        Create computing nodes' service
    """

    for node in nodes:
        """
            Generate the yaml description of the required service for each computing node
        """

        pod_name = app_name + "-" + node
        body = write_circe_service_specs(name=pod_name)

        # Call the Kubernetes API to create the service
        ser_resp = api.create_namespaced_service(namespace, body)
        print("Service created. status = '%s'" % str(ser_resp.status))

        try:
            resp = api.read_namespaced_service(pod_name, namespace)
        except ApiException as e:
            print("Exception Occurred")

        # print resp.spec.cluster_ip
        computing_service_ips[node] = resp.spec.cluster_ip
        all_profiler_ips = all_profiler_ips + ':' + profiler_ips[node]
        all_profiler_nodes = all_profiler_nodes + ':' + node

    all_computing_ips = ':'.join(computing_service_ips.values())
    all_computing_nodes = ':'.join(computing_service_ips.keys())

    # all_profiler_ips = all_profiler_ips[1:]
    # all_profiler_nodes = all_profiler_nodes[1:]

    # print(all_computing_nodes)
    # print(all_computing_ips)
    """
    Start circe
    """

    print('---------  Start computing nodes')
    """
        Start computing nodes
    """

    # Space-separated "home:ip" pairs, sorted by node name for a
    # deterministic environment value.
    home_nodes = {}
    for key in homes:
        home_nodes[key] = service_ips[key]

    home_nodes_str = ' '.join('{0}:{1}'.format(key, val)
                              for key, val in sorted(home_nodes.items()))

    for i in nodes:

        # print nodes[i][0]
        """
            We check whether the node is a home / master.
            We do not run the controller on the master.
        """
        """
            Generate the yaml description of the required deployment for WAVE workers
        """
        pod_name = app_name + "-" + i
        #print(pod_name)
        dep = write_circe_computing_specs(
            name=pod_name,
            label=pod_name,
            image=jupiter_config.WORKER_COMPUTING_IMAGE,
            host=nodes[i][0],
            all_node=all_node,
            node_name=i,
            all_node_ips=all_node_ips,
            all_computing_nodes=all_computing_nodes,
            all_computing_ips=all_computing_ips,
            self_ip=computing_service_ips[i],
            profiler_ip=profiler_ips[i],
            all_profiler_ips=all_profiler_ips,
            all_profiler_nodes=all_profiler_nodes,
            execution_home_ip=execution_ips['home'],
            home_node_ip=home_nodes_str,
            child=jupiter_config.HOME_CHILD)
        #pprint(dep)
        # # Call the Kubernetes API to create the deployment
        resp = k8s_beta.create_namespaced_deployment(body=dep,
                                                     namespace=namespace)
        print("Deployment created. status ='%s'" % str(resp.status))

    # Block until every computing pod reports Running (poll every 30 s).
    while 1:
        if check_status_circe_computing(app_name):
            break
        time.sleep(30)

    print('--------- Start task controllers')
    """
        Start task controllers (DAG)
    """

    print(dag_info)
    print(service_ips)

    for key, value in dag.items():

        task = key
        nexthosts = ''
        next_svc = ''
        """
            We inject the host info for the child task via an environment variable valled CHILD_NODES to each pod/deployment.
            We perform it by concatenating the child-hosts via delimeter ':'
            For example if the child nodes are k8node1 and k8node2, we will set CHILD_NODES=k8node1:k8node2
            Note that the k8node1 and k8node2 in the example are the unique node ids of the kubernets cluster nodes.
        """

        inputnum = str(value[0])
        flag = str(value[1])

        # value[2:] are this task's children: collect their hosts...
        for i in range(2, len(value)):
            if i != 2:
                nexthosts = nexthosts + ':'
            nexthosts = nexthosts + str(hosts.get(value[i])[0])

        # ...and their service IPs, ':'-separated in the same order.
        for i in range(2, len(value)):
            if i != 2:
                next_svc = next_svc + ':'
            next_svc = next_svc + str(service_ips.get(value[i]))
        # print("NEXT HOSTS")
        # print(nexthosts)
        # print("NEXT SVC")
        # print(next_svc)

        #Generate the yaml description of the required deployment for each task

        # print('------------- Retrieve node ')
        # print(task)
        # print(dag_info[2][task])

        pod_name = app_name + "-" + task

        # Pick the spec writer by task class: DAG controller, non-DAG
        # controller, or non-DAG worker (from config.json's maps).
        if taskmap[key][1] and executionmap[key]:  #DAG
            print('--------- Start task controllers DAG')
            dep = write_circe_controller_specs(
                flag=str(flag),
                inputnum=str(inputnum),
                name=pod_name,
                node_name=hosts.get(task)[1],
                image=jupiter_config.WORKER_CONTROLLER_IMAGE,
                child=nexthosts,
                child_ips=next_svc,
                host=hosts.get(task)[1],
                dir='{}',
                home_node_ip=home_nodes_str,
                node_id=dag_info[2][task],
                own_ip=service_ips[key],
                task_name=task,
                all_node=all_node,
                all_node_ips=all_node_ips,
                first_task=jupiter_config.HOME_CHILD,
                all_computing_nodes=all_computing_nodes,
                all_computing_ips=all_computing_ips)
        elif taskmap[key][1] and not executionmap[key]:  #nonDAG controllers:
            print('--------- Start task controllers nonDAG')
            #Generate the yaml description of the required deployment for each task
            dep = write_circe_nondag_specs(
                flag=str(flag),
                inputnum=str(inputnum),
                name=pod_name,
                node_name=hosts.get(task)[1],
                image=jupiter_config.NONDAG_CONTROLLER_IMAGE,
                child=nexthosts,
                task_name=task,
                child_ips=next_svc,
                host=hosts.get(task)[1],
                dir='{}',
                home_node_ip=home_nodes_str,
                own_ip=service_ips[task],
                all_node=all_node,
                all_node_ips=all_node_ips,
                all_computing_nodes=all_computing_nodes,
                all_computing_ips=all_computing_ips,
                node_id=dag_info[2][key])
        else:
            print('--------- Start nonDAG workers')
            dep = write_circe_specs_non_dag_tasks(
                flag=str(flag),
                inputnum=str(inputnum),
                name=pod_name,
                node_name=task,
                image=jupiter_config.NONDAG_WORKER_IMAGE,
                child=nexthosts,
                host=hosts.get(task)[1],
                child_ips=next_svc,
                task_name=task,
                home_node_ip=home_nodes_str,
                own_ip=service_ips[key],
                all_node=all_node,
                all_node_ips=all_node_ips,
                all_computing_nodes=all_computing_nodes,
                all_computing_ips=all_computing_ips,
                node_id=dag_info[2][key])
            pprint(dep)

        pprint(dep)
        # # Call the Kubernetes API to create the deployment
        resp = k8s_beta.create_namespaced_deployment(body=dep,
                                                     namespace=namespace)
        print("Deployment created. status = '%s'" % str(resp.status))

    # Block until every controller pod reports Running (poll every 30 s).
    while 1:
        if check_status_circe_controller(dag, app_name):
            break
        time.sleep(30)

    print('-------- Start home node')

    for key in homes:
        home_name = app_name + "-" + key
        home_dep = write_circe_home_specs(
            name=home_name,
            image=jupiter_config.PRICING_HOME_IMAGE,
            host=jupiter_config.HOME_NODE,
            child=jupiter_config.HOME_CHILD,
            child_ips=service_ips.get(jupiter_config.HOME_CHILD),
            all_computing_nodes=all_computing_nodes,
            all_computing_ips=all_computing_ips,
            all_node=all_node,
            all_node_ips=all_node_ips,
            profiler_ip=profiler_ips[key],
            all_profiler_ips=all_profiler_ips,
            all_profiler_nodes=all_profiler_nodes,
            dir='{}')
        resp = k8s_beta.create_namespaced_deployment(body=home_dep,
                                                     namespace=namespace)
        print("Home deployment created. status = '%s'" % str(resp.status))

    pprint(service_ips)
Пример #4
0
def k8s_exec_scheduler(app_name):
    """
    Deploy the execution profiler in the system.

    Creates the home service, a service per non-DAG task, and a service
    per worker node; deploys the non-DAG task pods and the per-node
    profiler pods; waits until they are running; finally deploys the
    home controller.

    Args:
        app_name (str): application name used to prefix pod/service names.

    Returns:
        dict: service name -> cluster IP for 'home' and every task.
    """
    jupiter_config.set_globals()

    # Shared with other helpers in this module via module-level globals.
    global configs, taskmap, path1, path2

    configs = json.load(open(jupiter_config.APP_PATH + 'scripts/config.json'))
    taskmap = configs["taskname_map"]

    path1 = jupiter_config.APP_PATH + 'configuration.txt'
    path2 = jupiter_config.HERE + 'nodes.txt'

    dag_info = utilities.k8s_read_dag(path1)
    """
        This loads the kubernetes instance configuration.
        In our case this is stored in admin.conf.
        You should set the config file path in the jupiter_config.py file.
    """
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)
    """
        We have defined the namespace for deployments in jupiter_config
    """
    namespace = jupiter_config.EXEC_NAMESPACE
    """
        Get proper handles or pointers to the k8-python tool to call different functions.
    """
    api = client.CoreV1Api()
    k8s_beta = client.ExtensionsV1beta1Api()

    #get DAG and home machine info
    first_task = dag_info[0]
    dag = dag_info[1]
    # hosts
    service_ips = {}
    #list of all service IPs
    """
        First create the home node's service.
    """

    home_name = app_name + '-home'
    home_body = write_exec_service_specs_home(name=home_name)

    ser_resp = api.create_namespaced_service(namespace, home_body)
    print("Home service created. status = '%s'" % str(ser_resp.status))

    try:
        resp = api.read_namespaced_service(home_name, namespace)
    except ApiException as e:
        print("Exception Occurred")

    # NOTE(review): if read_namespaced_service raised above, ``resp`` is
    # unbound here — confirm this failure mode is acceptable.
    service_ips['home'] = resp.spec.cluster_ip
    """
        Iterate through the list of tasks and run the related k8 deployment, replicaset, pod, and service on the respective node.
        You can always check if a service/pod/deployment is running after running this script via kubectl command.
        E.g.,
            kubectl get svc -n "namespace name"
            kubectl get deployement -n "namespace name"
            kubectl get replicaset -n "namespace name"
            kubectl get pod -n "namespace name"
    """

    for key, value in dag.items():

        task = key
        nexthosts = ''
        """
            Generate the yaml description of the required service for each task
        """
        pod_name = app_name + '-' + task
        # Only non-DAG tasks (taskname_map flag False) get their own
        # service; DAG tasks reuse the home service IP.
        if taskmap[key][1] == False:
            body = write_exec_service_specs_home(name=pod_name)

            # Call the Kubernetes API to create the service
            ser_resp = api.create_namespaced_service(namespace, body)
            print("Service created. status = '%s'" % str(ser_resp.status))

            try:
                resp = api.read_namespaced_service(pod_name, namespace)
            except ApiException as e:
                print("Exception Occurred")

            # print resp.spec.cluster_ip
            service_ips[task] = resp.spec.cluster_ip
        else:
            service_ips[task] = service_ips['home']

    # ':'-joined lists of every service IP / name (home + tasks).
    all_node_ips = ':'.join(service_ips.values())
    all_node = ':'.join(service_ips.keys())
    print(all_node)

    #nodes = utilities.k8s_get_nodes(path2)
    nodes, homes = utilities.k8s_get_nodes_worker(path2)
    allprofiler_ips = ''
    allprofiler_names = ''

    for i in nodes:
        """
            Generate the yaml description of the required service for each task
        """
        pod_name = app_name + '-' + i
        body = write_exec_service_specs(name=pod_name,
                                        label=pod_name + "exec_profiler")

        # Call the Kubernetes API to create the service

        try:
            ser_resp = api.create_namespaced_service(namespace, body)
            print("Service created. status = '%s'" % str(ser_resp.status))
            print(i)
            resp = api.read_namespaced_service(pod_name, namespace)
        except ApiException as e:
            print("Exception Occurred")

        # print resp.spec.cluster_ip
        allprofiler_ips = allprofiler_ips + ':' + resp.spec.cluster_ip
        allprofiler_names = allprofiler_names + ':' + i
    """
    All services have started for CIRCE and deployment is yet to begin
    In the meantime, start dft_coded_detector services and their deployments
    """
    """
    Start circe
    """
    for key, value in dag.items():

        task = key
        nexthosts = ''
        next_svc = ''
        pod_name = app_name + '-' + task
        """
            We inject the host info for the child task via an environment variable valled CHILD_NODES to each pod/deployment.
            We perform it by concatenating the child-hosts via delimeter ':'
            For example if the child nodes are k8node1 and k8node2, we will set CHILD_NODES=k8node1:k8node2
            Note that the k8node1 and k8node2 in the example are the unique node ids of the kubernets cluster nodes.
        """
        inputnum = str(value[0])
        flag = str(value[1])
        nexthosts = nexthosts + 'test'

        # value[2:] are this task's children; join their service IPs.
        for i in range(2, len(value)):
            if i != 2:
                next_svc = next_svc + ':'
            next_svc = next_svc + str(service_ips.get(value[i]))
        print("NEXT HOSTS")
        print(nexthosts)
        print("NEXT SVC")
        print(next_svc)

        if taskmap[key][1] == False:
            #Generate the yaml description of the required deployment for each task
            # TODO-check
            dep = write_exec_specs_non_dag_tasks(
                flag=str(flag),
                inputnum=str(inputnum),
                name=pod_name,
                node_name=task,
                image=jupiter_config.WORKER_IMAGE,
                child=nexthosts,
                child_ips=next_svc,
                task_name=task,
                home_node_ip=service_ips.get("home"),
                own_ip=service_ips[key],
                all_node=all_node,
                all_node_ips=all_node_ips)
            pprint(dep)

            # # Call the Kubernetes API to create the deployment
            resp = k8s_beta.create_namespaced_deployment(body=dep,
                                                         namespace=namespace)
            print("Deployment created. status = '%s'" % str(resp.status))

    for i in nodes:

        # print nodes[i][0]
        """
            We check whether the node is a scheduler.
            Since we do not run any task on the scheduler, we donot run any profiler on it as well.
        """
        #if i != 'home':
        """
            Generate the yaml description of the required deployment for the profiles
        """
        #print(i)
        pod_name = app_name + '-' + i
        dep = write_exec_specs(name=pod_name,
                               label=pod_name + "exec_profiler",
                               node_name=i,
                               image=jupiter_config.EXEC_WORKER_IMAGE,
                               host=nodes[i][0],
                               home_node_ip=service_ips['home'],
                               all_node=all_node,
                               all_node_ips=all_node_ips)
        # # pprint(dep)
        # # Call the Kubernetes API to create the deployment
        resp = k8s_beta.create_namespaced_deployment(body=dep,
                                                     namespace=namespace)
        print("Deployment created. status ='%s'" % str(resp.status))
    """
        Check if all the tera detectors are running
    """
    # Poll every 30 s until the profiler and worker pods report Running.
    while 1:
        if check_status_exec_profiler(app_name):
            break
        time.sleep(30)

    while 1:
        if check_status_exec_profiler_workers(app_name):
            break
        time.sleep(30)

    # NOTE(review): flag, inputnum, nexthosts and next_svc below are the
    # leftover values from the LAST iteration of the dag loop above (and
    # are unbound if the dag is empty) — confirm this is intended.
    task = 'home'
    key = 'home'
    home_name = app_name + '-home'
    home_dep = write_exec_specs_home_control(
        flag=str(flag),
        inputnum=str(inputnum),
        name=home_name,
        node_name=home_name,
        task_name=task,
        image=jupiter_config.EXEC_HOME_IMAGE,
        child=nexthosts,
        child_ips=next_svc,
        host=jupiter_config.HOME_NODE,
        dir='{}',
        home_node_ip=service_ips.get("home"),
        own_ip=service_ips[key],
        all_node=all_node,
        all_node_ips=all_node_ips,
        allprofiler_ips=allprofiler_ips,
        allprofiler_names=allprofiler_names)

    resp = k8s_beta.create_namespaced_deployment(body=home_dep,
                                                 namespace=namespace)
    print("Home deployment created. status = '%s'" % str(resp.status))

    # pprint(service_ips)

    return (service_ips)
Пример #5
0
def k8s_wave_scheduler(profiler_ips, app_name):
    """
    Deploy WAVE in the system.

    Creates the home service and one service per worker node, deploys
    the WAVE worker pods, waits until they are running, then deploys the
    WAVE home pod.

    Args:
        profiler_ips (dict): profiler IPs keyed by node name; must
            contain 'home' and every worker node.
        app_name (str): application name used to prefix pod/service names.
    """
    jupiter_config.set_globals()
    """
        This loads the node list
    """
    all_profiler_ips = ''
    nexthost_ips = ''
    nexthost_names = ''
    path2 = jupiter_config.HERE + 'nodes.txt'
    nodes, homes = utilities.k8s_get_nodes_worker(path2)
    pprint(nodes)
    """
        This loads the kubernetes instance configuration.
        In our case this is stored in admin.conf.
        You should set the config file path in the jupiter_config.py file.
    """
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)
    """
        We have defined the namespace for deployments in jupiter_config
    """
    namespace = jupiter_config.MAPPER_NAMESPACE
    """
        Get proper handles or pointers to the k8-python tool to call different functions.
    """
    api = client.CoreV1Api()
    k8s_beta = client.ExtensionsV1beta1Api()

    service_ips = {}
    """
        Loop through the list of nodes and run all WAVE related k8 deployment, replicaset, pods, and service.
        You can always check if a service/pod/deployment is running after running this script via kubectl command.
        E.g., 
            kubectl get svc -n "namespace name"
            kubectl get deployement -n "namespace name"
            kubectl get replicaset -n "namespace name"
            kubectl get pod -n "namespace name"
    """
    home_name = app_name + '-home'
    home_label = app_name + '-wave_home'
    home_body = write_wave_service_specs(name=home_name, label=home_label)
    ser_resp = api.create_namespaced_service(namespace, home_body)
    print("Home service created. status = '%s'" % str(ser_resp.status))

    try:
        resp = api.read_namespaced_service(home_name, namespace)
    except ApiException as e:
        print("Exception Occurred")

    # NOTE(review): if read_namespaced_service raised above, ``resp`` is
    # unbound here — confirm this failure mode is acceptable.
    service_ips['home'] = resp.spec.cluster_ip
    home_ip = service_ips['home']

    for i in nodes:
        """
            Generate the yaml description of the required service for each task
        """
        if i != 'home':
            pod_name = app_name + '-' + i
            pod_label = app_name + '-wave_' + i
            body = write_wave_service_specs(name=pod_name, label=pod_label)

            # Call the Kubernetes API to create the service

            try:
                ser_resp = api.create_namespaced_service(namespace, body)
                print("Service created. status = '%s'" % str(ser_resp.status))
                print(i)
                resp = api.read_namespaced_service(pod_name, namespace)
            except ApiException as e:
                print("Exception Occurred")

            # print resp.spec.cluster_ip
            service_ips[i] = resp.spec.cluster_ip
            # Accumulate ':'-separated IP/name/profiler lists for the
            # worker environment.
            nexthost_ips = nexthost_ips + ':' + service_ips[i]
            nexthost_names = nexthost_names + ':' + i
            all_profiler_ips = all_profiler_ips + ':' + profiler_ips[i]
    print(service_ips)
    print(nexthost_ips)
    print(nexthost_names)

    print("####################################")
    print(profiler_ips)
    print("####################################")
    print(all_profiler_ips)

    # Space-separated "home:ip" profiler pairs, sorted by node name.
    home_profiler_ips = {}
    for key in homes:
        home_profiler_ips[key] = profiler_ips[key]

    home_profiler_str = ' '.join(
        '{0}:{1}'.format(key, val)
        for key, val in sorted(home_profiler_ips.items()))

    for i in nodes:

        # print nodes[i][0]
        """
            We check whether the node is a home / master.
            We do not run the controller on the master.
        """
        if i != 'home':
            """
                Generate the yaml description of the required deployment for WAVE workers
            """
            pod_name = app_name + '-' + i
            label_name = app_name + '-wave_' + i
            dep = write_wave_specs(name=pod_name,
                                   label=label_name,
                                   image=jupiter_config.WAVE_WORKER_IMAGE,
                                   host=nodes[i][0],
                                   all_node=nexthost_names,
                                   all_node_ips=nexthost_ips,
                                   self_name=i,
                                   home_ip=home_ip,
                                   home_name=home_name,
                                   serv_ip=service_ips[i],
                                   profiler_ip=profiler_ips[i],
                                   all_profiler_ips=all_profiler_ips,
                                   home_profiler_ip=home_profiler_str)
            # # pprint(dep)
            # # Call the Kubernetes API to create the deployment
            resp = k8s_beta.create_namespaced_deployment(body=dep,
                                                         namespace=namespace)
            print("Deployment created. status ='%s'" % str(resp.status))

    # have to somehow make sure that the worker nodes are on and working by this time

    # Poll every 30 s until all WAVE pods report Running.
    while 1:
        if check_status_waves(app_name):
            break
        time.sleep(30)

    home_name = app_name + '-home'
    label_name = app_name + '-wave_home'
    print(profiler_ips)
    print(service_ips)
    home_dep = write_wave_specs(name=home_name,
                                label=label_name,
                                image=jupiter_config.WAVE_HOME_IMAGE,
                                host=jupiter_config.HOME_NODE,
                                all_node=nexthost_names,
                                all_node_ips=nexthost_ips,
                                self_name='home',
                                home_ip=home_ip,
                                home_name=home_name,
                                serv_ip=service_ips['home'],
                                profiler_ip=profiler_ips['home'],
                                all_profiler_ips=all_profiler_ips,
                                home_profiler_ip=home_profiler_str)
    resp = k8s_beta.create_namespaced_deployment(body=home_dep,
                                                 namespace=namespace)
    print("Home deployment created. status = '%s'" % str(resp.status))

    pprint(service_ips)
Пример #6
0
def k8s_heft_scheduler(profiler_ips, ex_profiler_ips, node_names, app_name):
    """Deploy the HEFT mapper in the system.

    Creates the home node's service and then the HEFT home deployment,
    passing network- and execution-profiler IPs to the container spec.

    Args:
        profiler_ips (dict): network profiler IPs keyed by node name
            (includes the home node(s)).
        ex_profiler_ips (dict): execution profiler IPs; must contain 'home'.
        node_names: node names forwarded verbatim to the HEFT specs.
        app_name (str): application name used to prefix k8s object names.
    """
    jupiter_config.set_globals()

    # Load the worker / home node lists.
    path2 = jupiter_config.HERE + 'nodes.txt'
    nodes, homes = utilities.k8s_get_nodes_worker(path2)

    # Load the kubernetes instance configuration (e.g. admin.conf); the
    # config file path is set in jupiter_config.py.
    config.load_kube_config(config_file=jupiter_config.KUBECONFIG_PATH)

    # The namespace for mapper deployments is defined in jupiter_config.
    namespace = jupiter_config.MAPPER_NAMESPACE

    # Handles to the k8-python tool to call different functions.
    api = client.CoreV1Api()
    k8s_beta = client.ExtensionsV1beta1Api()

    service_ips = {}

    # Create the home node's service first so its cluster IP can be
    # handed to the HEFT deployment.
    home_name = app_name + '-home'

    home_body = write_heft_service_specs(name=home_name, label=home_name)
    ser_resp = api.create_namespaced_service(namespace, home_body)
    print("Home service created. status = '%s'" % str(ser_resp.status))

    try:
        resp = api.read_namespaced_service(home_name, namespace)
    except ApiException:
        print("Exception Occurred")

    service_ips[home_name] = resp.spec.cluster_ip
    home_ip = service_ips[home_name]

    # Split profiler IPs into worker-node and home-node groups.
    node_profiler_ips = profiler_ips.copy()
    home_profiler_ips = {}
    for key in homes:
        print(key)
        home_profiler_ips[key] = profiler_ips[key]
        del node_profiler_ips[key]

    # Serialize each group as space-separated "node:ip" pairs, sorted by
    # node name for a deterministic environment value.
    profiler_ips_str = ' '.join('{0}:{1}'.format(key, val)
                                for key, val in sorted(node_profiler_ips.items()))
    home_profiler_str = ' '.join('{0}:{1}'.format(key, val)
                                 for key, val in sorted(home_profiler_ips.items()))

    home_dep = write_heft_specs(name=home_name, label=home_name,
                                image=jupiter_config.HEFT_IMAGE,
                                host=jupiter_config.HOME_NODE,
                                node_names=node_names,
                                home_ip=home_ip,
                                profiler_ips=profiler_ips_str,
                                execution_home_ip=ex_profiler_ips['home'],
                                home_profiler_ip=home_profiler_str)
    resp = k8s_beta.create_namespaced_deployment(body=home_dep,
                                                 namespace=namespace)
    print("Home deployment created. status = '%s'" % str(resp.status))

    pprint(service_ips)