def update_pod_with_init_container(pod, cmd, cmk_img, cmk_img_pol, args):
    container_template = k8s.get_container_template()
    container_template["image"] = cmk_img
    container_template["imagePullPolicy"] = cmk_img_pol
    container_template["args"][0] = args
    # Each container name should be distinct within a Pod.
    container_template["name"] = cmd
    pod_init_containers_list = []

    version = util.parse_version(k8s.get_kubelet_version(None))

    if version >= util.parse_version("v1.7.0"):
        pod["spec"]["initContainers"] = [container_template]
    else:

        init_containers_key = "pod.beta.kubernetes.io/init-containers"

        if init_containers_key in pod["metadata"]["annotations"]:
            init_containers = \
                pod["metadata"]["annotations"][init_containers_key]
            pod_init_containers_list = json.loads(init_containers)

        pod_init_containers_list.append(container_template)
        pod["metadata"]["annotations"][init_containers_key] = \
            json.dumps(pod_init_containers_list)
Example #2
def update_pod_with_init_container(pod, cmd, cmk_img, cmk_img_pol, args):
    container_template = k8s.get_container_template()
    container_template["image"] = cmk_img
    container_template["imagePullPolicy"] = cmk_img_pol
    container_template["args"][0] = args
    # Each container name should be distinct within a Pod.
    container_template["name"] = cmd
    # Note(balajismaniam): Downward API for spec.nodeName doesn't seem to
    # work with init-containers. Removing it as a work-around. Needs further
    # investigation.
    container_template["env"].pop()
    pod_init_containers_list = []

    version = util.parse_version(k8s.get_kubelet_version(None))

    if version >= util.parse_version("v1.7.0"):
        pod["spec"]["initContainers"] = [container_template]
    else:

        init_containers_key = "pod.beta.kubernetes.io/init-containers"

        if init_containers_key in pod["metadata"]["annotations"]:
            init_containers = \
                pod["metadata"]["annotations"][init_containers_key]
            pod_init_containers_list = json.loads(init_containers)

        pod_init_containers_list.append(container_template)
        pod["metadata"]["annotations"][init_containers_key] = \
            json.dumps(pod_init_containers_list)
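For orientation, a minimal, self-contained sketch of the two pod shapes the helper above produces. The pod skeleton and container dict are hypothetical stand-ins for k8s.get_pod_template() and k8s.get_container_template(), not the project's real templates.

import json

# Hypothetical minimal skeletons (assumption: the real objects come from
# k8s.get_pod_template() and k8s.get_container_template()).
init_container = {"name": "init", "image": "cmk:v1",
                  "args": ["/cmk/cmk.py init"]}
pod_new = {"spec": {}, "metadata": {"annotations": {}}}
pod_old = {"spec": {}, "metadata": {"annotations": {}}}

# Kubernetes >= 1.7.0: init containers are a first-class pod spec field.
pod_new["spec"]["initContainers"] = [init_container]

# Older clusters: the same list is JSON-encoded into a beta annotation.
key = "pod.beta.kubernetes.io/init-containers"
pod_old["metadata"]["annotations"][key] = json.dumps([init_container])

print(pod_new["spec"]["initContainers"][0]["name"])                    # init
print(json.loads(pod_old["metadata"]["annotations"][key])[0]["name"])  # init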
Example #3
def cluster_init(host_list, all_hosts, cmd_list, cmk_img, cmk_img_pol,
                 conf_dir, install_dir, num_exclusive_cores, num_shared_cores,
                 pull_secret, serviceaccount, exclusive_mode, shared_mode,
                 namespace, excl_non_isolcpus):

    logging.info("Used ServiceAccount: {}".format(serviceaccount))
    cmk_node_list = get_cmk_node_list(host_list, all_hosts)
    logging.debug("CMK node list: {}".format(cmk_node_list))

    cmk_cmd_list = [cmd.strip() for cmd in cmd_list.split(',')]
    logging.debug("CMK command list: {}".format(cmk_cmd_list))

    # Check if all the flag values passed are valid.
    # Check if cmk_cmd_list is valid.
    valid_cmd_list = ["init", "discover", "install", "reconcile", "nodereport"]
    for cmk_cmd in cmk_cmd_list:
        if cmk_cmd not in valid_cmd_list:
            raise RuntimeError(
                "CMK command should be one of {}".format(valid_cmd_list))
    if "init" in cmk_cmd_list and cmk_cmd_list[0] != "init":
        raise RuntimeError("init command should be run and listed first.")

    # Check if cmk_img_pol is valid.
    valid_img_pol_list = ["Never", "IfNotPresent", "Always"]
    if cmk_img_pol not in valid_img_pol_list:
        raise RuntimeError(
            "Image pull policy should be one of {}".format(valid_img_pol_list))

    # Check if num_exclusive_cores and num_shared_cores are positive integers.
    if not num_exclusive_cores.isdigit():
        raise RuntimeError("num_exclusive_cores cores should be a positive "
                           "integer.")
    if not num_shared_cores.isdigit():
        raise RuntimeError("num_shared_cores cores should be a positive "
                           "integer.")

    # Split the cmk_cmd_list based on whether each cmd should be run as a
    # one-shot job or as a long-running daemon.
    cmd_init_list = ["init", "discover", "install"]
    cmk_cmd_init_list = [cmd for cmd in cmk_cmd_list if cmd in cmd_init_list]
    cmk_cmd_list = [cmd for cmd in cmk_cmd_list if cmd not in cmd_init_list]

    # Run the pods based on the cmk_cmd_init_list and cmk_cmd_list with
    # provided options.
    if cmk_cmd_init_list:
        run_pods(None, cmk_cmd_init_list, cmk_img, cmk_img_pol, conf_dir,
                 install_dir, num_exclusive_cores, num_shared_cores,
                 cmk_node_list, pull_secret, serviceaccount, shared_mode,
                 exclusive_mode, namespace, excl_non_isolcpus)
    if cmk_cmd_list:
        run_pods(cmk_cmd_list, None, cmk_img, cmk_img_pol, conf_dir,
                 install_dir, num_exclusive_cores, num_shared_cores,
                 cmk_node_list, pull_secret, serviceaccount, shared_mode,
                 exclusive_mode, namespace, excl_non_isolcpus)

    # Run the mutating webhook admission controller on supported clusters.
    version = util.parse_version(k8s.get_kube_version(None))
    if version >= util.parse_version("v1.9.0"):
        deploy_webhook(namespace, conf_dir, install_dir, serviceaccount,
                       cmk_img)
Example #4
def discover(namespace, no_taint=False):

    version = util.parse_version(k8s.get_kube_version(None))
    if version == util.parse_version("v1.8.0"):
        logging.fatal("K8s 1.8.0 is not supported. Update K8s to "
                      "version >=1.8.1 or rollback to previous versions")
        sys.exit(1)

    if version >= util.parse_version("v1.8.1"):
        # Patch the node with the appropriate CMK ER.
        logging.debug("Patching the node with the appropriate CMK ER.")
        add_node_er(namespace)
    else:
        # Patch the node with the appropriate CMK OIR.
        logging.debug("Patching the node with the appropriate CMK OIR.")
        add_node_oir(namespace)

    # Add appropriate CMK label to the node.
    logging.debug("Adding appropriate CMK label to the node.")
    add_node_label()

    # Add appropriate CMK taint to the node.
    if not no_taint:
        logging.debug("Adding appropriate CMK taint to the node.")
        add_node_taint()
Example #5
def add_node_taint():
    node_name = os.getenv("NODE_NAME")
    try:
        node_resp = get_k8s_node(node_name)
    except K8sApiException as err:
        logging.error("Exception when getting the node obj: {}".format(err))
        logging.error(ABORTING_DISCOVER)
        sys.exit(1)

    version = util.parse_version(k8s.get_kube_version(None))
    node_taints_list = []
    node_taints = []

    if version >= util.parse_version("v1.7.0"):
        node_taints = node_resp["spec"]["taints"]
        if node_taints:
            node_taints_list = node_taints
        patch_path = "/spec/taints"
    else:
        node_taint_key = "scheduler.alpha.kubernetes.io/taints"
        if node_taint_key in node_resp["metadata"]["annotations"]:
            node_taints = node_resp["metadata"]["annotations"][node_taint_key]
        patch_path = "/metadata/annotations/scheduler.alpha.kubernetes.io~1taints"  # noqa: E501
        if node_taints:
            node_taints_list = json.loads(node_taints)

    # Filter existing "cmk" taint, if it exists.
    node_taints_list = [t for t in node_taints_list if t["key"] != "cmk"]

    node_taints_list.append({
        "key": "cmk",
        "value": "true",
        "effect": "NoSchedule"
    })

    if version >= util.parse_version("v1.7.0"):
        value = node_taints_list
    else:
        value = json.dumps(node_taints_list)

    # See: https://tools.ietf.org/html/rfc6902#section-4.1
    patch_body = [
        {
            "op": "add",
            "path": patch_path,
            "value": value
        }
    ]

    try:
        patch_k8s_node(patch_body)
    except K8sApiException as err:
        logging.error("Exception when tainting the node: {}".format(err))
        logging.error(ABORTING_DISCOVER)
        sys.exit(1)
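An aside on the escaped patch path used in the pre-1.7 branch above: JSON Pointer (RFC 6901) requires "~" inside a reference token to be written as "~0" and "/" as "~1", which is why the annotation key shows up as scheduler.alpha.kubernetes.io~1taints. A tiny illustration:

# JSON Pointer (RFC 6901) token escaping: "~" -> "~0" first, then "/" -> "~1".
key = "scheduler.alpha.kubernetes.io/taints"
token = key.replace("~", "~0").replace("/", "~1")
assert token == "scheduler.alpha.kubernetes.io~1taints"

# Prints: /metadata/annotations/scheduler.alpha.kubernetes.io~1taints
print("/metadata/annotations/" + token)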
Example #6
def create_ds(config, spec, ns_name, version):
    if version >= util.parse_version(VERSION_NAME):
        k8s_api = apps_api_client_from_config(config)
        return k8s_api.create_namespaced_daemon_set(ns_name, spec)
    else:
        k8s_api = extensions_client_from_config(config)
        return k8s_api.create_namespaced_daemon_set(ns_name, spec)
Example #7
File: k8s.py  Project: lamtov/lamtv10_ops
def delete_ds(config,
              version,
              ds_name,
              ns_name="default",
              body=V1DeleteOptions()):
    k8s_api_core = client_from_config(config)

    if version >= util.parse_version("v1.9.0"):
        k8s_api_apps = apps_api_client_from_config(config)
        k8s_api_apps.delete_namespaced_daemon_set(ds_name,
                                                  ns_name,
                                                  grace_period_seconds=0,
                                                  orphan_dependents=False)
    else:
        k8s_api_ext = extensions_client_from_config(config)
        k8s_api_ext.delete_namespaced_daemon_set(ds_name,
                                                 ns_name,
                                                 grace_period_seconds=0,
                                                 orphan_dependents=False)

    # Pods in the DaemonSet have a fixed label, so we use a label selector.
    data = k8s_api_core.list_namespaced_pod(
        ns_name, label_selector="app={}".format(ds_name)).to_dict()
    # There should be only one pod
    for pod in data["items"]:
        logging.debug("Removing pod \"{}\"".format(pod["metadata"]["name"]))
        delete_pod(None, pod["metadata"]["name"], ns_name)
    return
Example #8
def test_k8s_create_ds_apps():
    mock = MagicMock()
    with patch('intel.k8s.apps_api_client_from_config',
               MagicMock(return_value=mock)):
        k8s.create_ds(None, "test_dsspec", "test_namespace",
                      util.parse_version("v1.9.0"))
        called_methods = mock.method_calls
        assert len(called_methods) == 1
        assert called_methods[0][0] == "create_namespaced_daemon_set"
Example #9
def test_k8s_delete_ds():
    mock_core = MagicMock()
    mock_ext = MagicMock()
    with patch('intel.k8s.client_from_config',
               MagicMock(return_value=mock_core)), \
        patch('intel.k8s.extensions_client_from_config',
              MagicMock(return_value=mock_ext)):
        k8s.delete_ds(None, util.parse_version("v1.8.0"), "fake_ds")
        method_calls_core = mock_core.method_calls
        method_calls_ext = mock_ext.method_calls
        assert method_calls_ext[0][0] == 'delete_namespaced_daemon_set'
        assert method_calls_core[0][0] == 'list_namespaced_pod'
Example #10
def test_parse_version_fail():
    with pytest.raises(ValueError):
        util.parse_version("invalid_version")

    with pytest.raises(ValueError):
        util.parse_version("a1.2.3")

    with pytest.raises(ValueError):
        util.parse_version("v-1.6.0")
Example #11
def ds_from(pod, version):
    ds_template = {}
    if version >= util.parse_version(VERSION_NAME):
        ds_template = {
            "apiVersion": "apps/v1",
            "kind": "DaemonSet",
            "metadata": {
                "name": pod["metadata"]["name"].replace("pod", "ds")
            },
            "spec": {
                "selector": {
                    "matchLabels": {
                        "app": pod["metadata"]["name"].replace("pod", "ds")
                    }
                },
                "template": {
                    "metadata": {
                        "labels": {
                            "app":
                                pod["metadata"]["name"].replace("pod", "ds")
                        }
                    },
                    "spec": pod["spec"]
                }
            }
        }
    # For k8s versions older than 1.9.0, use the extensions/v1beta1 API.
    else:
        ds_template = {
            "apiVersion": "extensions/v1beta1",
            "kind": "DaemonSet",
            "metadata": {
                "name": pod["metadata"]["name"].replace("pod", "ds")
            },
            "spec": {
                "template": {
                    "metadata": {
                        "labels": {
                            "app":
                                pod["metadata"]["name"].replace("pod", "ds")
                        }
                    },
                    "spec": pod["spec"]
                }
            }
        }
    return ds_template
Example #12
def run_cmd_pods(cmd_list, cmd_init_list, cmk_img, cmk_img_pol, conf_dir,
                 install_dir, num_exclusive_cores, num_shared_cores,
                 cmk_node_list, pull_secret, serviceaccount, shared_mode,
                 exclusive_mode, namespace):
    pod = k8s.get_pod_template()
    if pull_secret:
        update_pod_with_pull_secret(pod, pull_secret)
    if cmd_list:
        update_pod(pod, "Always", conf_dir, install_dir, serviceaccount)
        version = util.parse_version(k8s.get_kubelet_version(None))
        if version >= util.parse_version("v1.7.0"):
            pod["spec"]["tolerations"] = [{
                "operator": "Exists"}]
        for cmd in cmd_list:
            args = ""
            if cmd == "reconcile":
                args = "/cmk/cmk.py isolate --pool=infra /cmk/cmk.py -- reconcile --interval=5 --publish"  # noqa: E501
            elif cmd == "nodereport":
                args = "/cmk/cmk.py isolate --pool=infra /cmk/cmk.py -- node-report --interval=5 --publish"  # noqa: E501

            update_pod_with_container(pod, cmd, cmk_img, cmk_img_pol, args)
    elif cmd_init_list:
        update_pod(pod, "Never", conf_dir, install_dir, serviceaccount)
        for cmd in cmd_init_list:
            args = ""
            if cmd == "init":
                args = ("/cmk/cmk.py init --num-exclusive-cores={} "
                        "--num-shared-cores={} --shared-mode={} "
                        "--exclusive-mode={}")\
                    .format(num_exclusive_cores, num_shared_cores, shared_mode,
                            exclusive_mode)
                # If init is the only cmd in cmd_init_list, it should be run
                # as a regular container, since spec.containers is a required
                # field. Otherwise, it should be run as an init-container.
                if len(cmd_init_list) == 1:
                    update_pod_with_container(pod, cmd, cmk_img,
                                              cmk_img_pol, args)
                else:
                    update_pod_with_init_container(pod, cmd, cmk_img,
                                                   cmk_img_pol, args)
            else:
                if cmd == "discover":
                    args = "/cmk/cmk.py discover"
                elif cmd == "install":
                    args = "/cmk/cmk.py install"
                update_pod_with_container(pod, cmd, cmk_img, cmk_img_pol,
                                          args)

    for node_name in cmk_node_list:
        if cmd_list:
            update_pod_with_node_details(pod, node_name, cmd_list)
            daemon_set = k8s.ds_from(pod=pod)
        elif cmd_init_list:
            update_pod_with_node_details(pod, node_name, cmd_init_list)

        try:
            if cmd_list:
                cr_pod_resp = k8s.create_ds(None, daemon_set, namespace)
                logging.debug("Response while creating ds for {} command(s): "
                              "{}".format(cmd_list, cr_pod_resp))
            elif cmd_init_list:
                cr_pod_resp = k8s.create_pod(None, pod, namespace)
                logging.debug("Response while creating pod for {} command(s): "
                              "{}".format(cmd_init_list, cr_pod_resp))
        except K8sApiException as err:
            if cmd_list:
                logging.error("Exception when creating pod for {} command(s): "
                              "{}".format(cmd_list, err))
            elif cmd_init_list:
                logging.error("Exception when creating pod for {} command(s): "
                              "{}".format(cmd_init_list, err))
            logging.error("Aborting cluster-init ...")
            sys.exit(1)
Example #13
def test_parse_version_success():
    assert util.parse_version("1") == util.parse_version("1.0.0")
    assert util.parse_version("v2") > util.parse_version("v1.0.0")
    assert util.parse_version("v10.0.0") > util.parse_version("v2.0.0")
    assert util.parse_version("v1.10.0") > util.parse_version("v1.9.0")
    assert util.parse_version("v1.10.11") > util.parse_version("v1.9.2")
    assert util.parse_version("v1.18.11") == util.parse_version("v1.18.11")
    assert util.parse_version("v1.9.11-dirty") > util.parse_version("v1.5.2")
    assert util.parse_version("v1.1-dirty") > util.parse_version("v1")
    assert util.parse_version("v1.10.0-rc.1") > util.parse_version("v1.9")
    assert (util.parse_version("v1.12.0") ==
            util.parse_version("v1.12.0-alpha.0.2126+c9e1abca8c6d24-dirty"))