def step8_pod_use_default_pool_burstable():
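    """Deploy a pod with CPU and memory requests but no pool annotation.

    The pod must get its CPUs from the default pool and run with the
    Burstable QoS class.
    """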
    memory_request = "500Mi"
    cpu_request = "250m"
    try:
        common_utils.helm_install(
            chart_name="default/cpu-pooling-default1",
            release_name="cpu-pooling",
            values=
            "registry_url={reg_url},nodename={node_name},mem_request={mem},"
            "cpu_request={cpu}".format(reg_url=reg,
                                       node_name=nodename,
                                       mem=memory_request,
                                       cpu=cpu_request))
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod7,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=90)

        allowed_cpu_for_pod = common_utils.get_cpu_allowed_list_from_pod(
            cpu_pooling_pod7['obj_name'])
        default_pool = cpupools[nodename]['default']
        if not common_utils.allowed_cpus_is_in_cpu_pool(
                allowed_cpu_for_pod, default_pool):
            raise Exception(
                '{pod} did not allocate CPU from the default pool!'.format(
                    pod=cpu_pooling_pod7['obj_name']))
        check_qos_of_pod(cpu_pooling_pod7['obj_name'], "Burstable")
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod7,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=60)


def step2_exclusive_and_shared():
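    """Deploy a pod that requests CPUs from both the exclusive and the shared
    pool; its allowed CPU list must be a subset of the union of the two pools.
    """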
    try:
        common_utils.helm_install(
            chart_name="default/cpu-pooling-mix2",
            release_name="cpu-pooling",
            values="registry_url={reg_url}".format(reg_url=reg))

        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod6,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=90)
        allowed_cpu_for_pod = common_utils.get_cpu_allowed_list_from_pod(
            cpu_pooling_pod6['obj_name'])
        requested_cpupool = cpupools[nodename]['exclusive_caas'] + cpupools[
            nodename]['shared_caas']
        if not common_utils.allowed_cpus_is_in_cpu_pool(
                allowed_cpu_for_pod, requested_cpupool):
            raise Exception(
                '{pod} did not allocate CPUs from the {req_pool} pool!'.format(
                    pod=cpu_pooling_pod6['obj_name'],
                    req_pool=requested_cpupool))
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod6,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=90)


def step5_annotation_without_cpus():
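    """Deploy a chart whose CPU pool annotation is missing the 'cpus' field;
    the replicaset description must report that the field is mandatory.
    """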
    try:
        common_utils.helm_install(
            chart_name="default/cpu-pooling-annotation3",
            release_name="cpu-pooling",
            values="registry_url={reg_url}".format(reg_url=reg))
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod11,
            tester_function=common_utils.test_kubernetes_object_available,
            timeout=30,
            delay=3)

        result = ex.execute_unix_command(
            'kubectl describe replicasets {0}'.format(
                cpu_pooling_pod11['obj_name']))

        error = "'cpus' field is mandatory in annotation"

        if error not in result:
            raise Exception(
                'Replicaset description does not contain the expected error! '
                + result)
        else:
            logger.info(error)
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod11,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=60)


def step6_request_for_default_pool():
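    """Request the nokia.k8s.io/default resource directly; the request cannot
    be satisfied, so the pod must stay Pending with an 'Insufficient
    nokia.k8s.io/default' event in its description.
    """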
    try:
        common_utils.helm_install(
            chart_name="default/cpu-pooling-default2",
            release_name="cpu-pooling",
            values="registry_url={reg_url}".format(reg_url=reg))
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod8,
            expected_result="1",
            filter=r'(Pending)\s*[0]',
            timeout=30,
            delay=3)
        error = "Insufficient nokia.k8s.io/default"
        result = ex.execute_unix_command(
            'kubectl describe pod {podname}'.format(
                podname=cpu_pooling_pod8['obj_name']))

        if error not in result:
            raise Exception(
                'Pod description does not contain the expected error! ' +
                result)
        else:
            logger.info(error)
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod8,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=60)


def step3_more_replicas_than_cpus():
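    """Deploy one replica more than the exclusive pool has CPUs; the surplus
    pod must stay Pending with an 'Insufficient nokia.k8s.io/exclusive_caas'
    event.
    """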
    num_of_replicas = len(cpupools[nodename]['exclusive_caas'])
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive2", release_name="cpu-pooling",
                                  values="registry_url={reg_url},nodename={node_name},replicas={cpus}"
                                  .format(reg_url=reg, cpus=num_of_replicas+1, node_name=nodename))
        cpu_pooling_pod2['obj_count'] = str(num_of_replicas)
        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod2,
                                                    expected_result="1",
                                                    filter=r'(Pending)\s*[0]',
                                                    timeout=90,
                                                    delay=3)
        result = ex.execute_unix_command('kubectl describe pod {podname}'.format(podname=cpu_pooling_pod2['obj_name']))
        error = 'Insufficient nokia.k8s.io/exclusive_caas'

        if error not in result:
            raise Exception('Pod description does not contain the expected error! ' + result)
        else:
            logger.info(error)
    finally:
        cpu_pooling_pod2['obj_count'] = "1"

        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=90)


def step14():
    """Static IP, dynamic IP allocation, and no IP in the same pod.

    Check that the IPs whose allocation failed in step 13 can be allocated
    again.
    """
    install_chart(danmnet_pods12)
    install_chart(danmnet_pods14)
    common_utils.test_kubernetes_object_quality(
        kube_object=danmnet_pods12,
        expected_result=danmnet_pods12['obj_count'],
        filter=r'(Running)\s*[0]',
        timeout=90)
    pod_list = get_pod_list(danmnet_pods12)
    alloc_pool = get_alloc_pool('cnet_pod6', network_attach_properties,
                                'clusternetwork')
    danmnet_pods12['ip_list'] = get_pod_ips(pod_list, if_name='eth1')
    check_dynamic_ips(alloc_pool, danmnet_pods12['ip_list'])
    danmnet_pods12['ip_list'] = get_pod_ips(pod_list, if_name='eth0')
    if IPAddress(danmnet_pods12['ip_list'][0]) != IPAddress('10.10.0.250'):
        raise Exception("static ip in pod danmnet-pods12 is not as expected")

    common_utils.test_kubernetes_object_quality(
        kube_object=danmnet_pods14,
        expected_result=danmnet_pods14['obj_count'],
        filter=r'(Running)\s*[0]',
        timeout=90)
    pod_list = get_pod_list(danmnet_pods14)
    danmnet_pods14['ip_list'] = get_pod_ips(pod_list, if_name='eth2')
    if IPAddress(danmnet_pods14['ip_list'][0]) != IPAddress('10.10.0.254'):
        raise Exception("static ip in pod danmnet-pods14 is not as expected")
    common_utils.helm_delete("danmnet-pods12")
    common_utils.helm_delete("danmnet-pods14")
    common_utils.check_kubernetes_object(
        kube_object=danmnet_pods14,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=20)
    check_dep_count(danmnet_pods12["namespace"], exp_count=0)


def step6_more_cpu_annotation_than_request():
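    """Annotate more exclusive CPUs than the resource request asks for; the
    replicaset description must report the request/annotation mismatch.
    """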
    annotation_cpu = 2
    request_cpu = 1
    cpu_pooling_pod2['obj_type'] = 'replicaset'
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive2", release_name="cpu-pooling",
                                  values="registry_url={url},nodename={node_name},proc_req={proc},pool_req={req}"
                                  .format(url=reg, proc=annotation_cpu, req=request_cpu, node_name=nodename))
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
                                             tester_function=common_utils.test_kubernetes_object_available,
                                             timeout=10,
                                             delay=3)
        result = ex.execute_unix_command('kubectl describe replicaset {0}'.format(cpu_pooling_pod2['obj_name']))
        error = 'Exclusive CPU requests {req} do not match to annotation {proc}'.format(req=request_cpu,
                                                                                        proc=annotation_cpu)

        if error not in result:
            raise Exception('Replicaset description does not contain the expected error! ' + result)
        else:
            logger.info(error)
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=90)
        cpu_pooling_pod2['obj_type'] = 'pod'


def step1_with_two_process():
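    """Run a pod with two processes; both must be pinned to exclusive CPU
    cores, and each process must get its own core.
    """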
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive3",
                                  release_name="cpu-pooling",
                                  values="registry_url=" + reg + ",nodename=" +
                                  nodename)
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod3,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=10)

        exclusive_cpus = cpupools[nodename]['exclusive_caas']

        proc1_cpu, proc2_cpu = get_cpu_core_of_processes(
            cpu_pooling_pod3['obj_name'], "dumb-init -c sleep 1000")
        if proc1_cpu not in exclusive_cpus:
            raise Exception(
                '{pod}: Proc1 running on non exclusive cpu core {cpu}!'.format(
                    pod=cpu_pooling_pod3['obj_name'], cpu=proc1_cpu))
        if proc2_cpu not in exclusive_cpus:
            raise Exception(
                '{pod}: Proc2 running on non exclusive cpu core {cpu}!'.format(
                    pod=cpu_pooling_pod3['obj_name'], cpu=proc2_cpu))
        if proc1_cpu == proc2_cpu:
            raise Exception(
                '{pod}: Two processes use same cpu core: {cpu}!'.format(
                    pod=cpu_pooling_pod3['obj_name'], cpu=proc2_cpu))
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod3,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=60)


def step2_check_pv_retaining():
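    """Delete the storage test release, wait for the pod to disappear, then
    reinstall the chart so the retained persistent volume can be verified.
    """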
    common_utils.helm_delete("storage-test")
    common_utils.check_kubernetes_object(
        kube_object=pv_test_pod,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=90)
    _install_storage_test_helm_chart()
    pabot.release_lock("pv_test_ip")


def step14_check_realloc_ips_of_prev_step_with_dynamic_and_none_ip_alloc():
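    """Re-run the dynamic and no-IP allocations of the previous step; the
    pods' interfaces must get addresses from the expected tenant network
    allocation pools, then clean up both releases.
    """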
    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod12",
                              release_name="tenantnetwork-attach-pod12")
    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod14",
                              release_name="tenantnetwork-attach-pod14")
    common_utils.test_kubernetes_object_quality(
        kube_object=tennet_pod12,
        expected_result=tennet_pod12['obj_count'],
        filter=r'(Running)\s*[0]',
        timeout=90)
    pod_list = danm_utils.get_pod_list(tennet_pod12)
    alloc_pool = danm_utils.get_alloc_pool("tennet_attach_01",
                                           tenantnetwork_attach_properties,
                                           "tenantnetwork")
    danm_utils.check_dynamic_ips(
        alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='tnet_1'))
    alloc_pool = danm_utils.get_alloc_pool("tennet_attach_05",
                                           tenantnetwork_attach_properties,
                                           "tenantnetwork")
    danm_utils.check_dynamic_ips(
        alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='eth0'))

    common_utils.test_kubernetes_object_quality(
        kube_object=tennet_pod14,
        expected_result=tennet_pod14['obj_count'],
        filter=r'(Running)\s*[0]',
        timeout=90)
    pod_list = danm_utils.get_pod_list(tennet_pod14)
    # danm_utils.check_dynamic_ips(alloc_pool, [tennet_pod14['ip_list'][2]])
    danm_utils.check_dynamic_ips(
        alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='tnet5'))
    alloc_pool = danm_utils.get_alloc_pool("tennet_attach_06",
                                           tenantnetwork_attach_properties,
                                           "tenantnetwork")
    danm_utils.check_dynamic_ips(
        alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='eth0'))
    alloc_pool = danm_utils.get_alloc_pool("tennet_attach_01",
                                           tenantnetwork_attach_properties,
                                           "tenantnetwork")
    danm_utils.check_dynamic_ips(
        alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='tnet_2'))
    common_utils.helm_delete("tenantnetwork-attach-pod12")
    common_utils.helm_delete("tenantnetwork-attach-pod14")
    common_utils.check_kubernetes_object(
        kube_object=tennet_pod12,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=20)
    common_utils.check_kubernetes_object(
        kube_object=tennet_pod14,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=20)
    danm_utils.check_dep_count(tennet_pod12["namespace"], exp_count=0)


def step3_check_static_ip_shortage():
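    """Pods requesting already-allocated static IPs must stay in
    ContainerCreating; free the addresses by deleting the earlier releases.
    """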
    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod3",
                              release_name="tenantnetwork-attach-pod3")
    common_utils.test_kubernetes_object_quality(
        kube_object=tennet_pod3,
        expected_result=tennet_pod3['obj_count'],
        filter=r'(ContainerCreating)\s*[0]',
        timeout=30)
    common_utils.helm_delete("tenantnetwork-attach-pod2")
    common_utils.check_kubernetes_object(
        kube_object=tennet_pod2,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=60)
    common_utils.helm_delete("tenantnetwork-attach-pod1")


def step4_check_attach_in_kubesystem_namespace():
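    """Attach a pod to a tenant network in the kube-system namespace; the pod
    must run with a dynamic IP from the tennet_attach_02 allocation pool.
    """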
    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod4",
                              release_name="tenantnetwork-attach-pod4")
    common_utils.test_kubernetes_object_quality(
        kube_object=tennet_pod4,
        expected_result=tennet_pod4['obj_count'],
        filter=r'(Running)\s*[0]',
        timeout=60,
        delay=10)
    alloc_pool = danm_utils.get_alloc_pool("tennet_attach_02",
                                           tenantnetwork_attach_properties,
                                           'tenantnetwork')
    danm_utils.check_dynamic_ips(alloc_pool, tennet_pod4['ip_list'])
    common_utils.helm_delete(release_name="tenantnetwork-attach-pod4")


def step3():
    """danmnet_pods3 pods are stuck in ContainerCreating because their static
    IPs are already allocated; free the addresses by deleting danmnet-pods1
    and danmnet-pods2.
    """
    common_utils.test_kubernetes_object_quality(
        kube_object=danmnet_pods3,
        expected_result=danmnet_pods3['obj_count'],
        filter=r'(ContainerCreating)\s*[0]',
        timeout=90)
    # Delete danmnet_pods1, danmnet_pods2
    common_utils.helm_delete("danmnet-pods2")
    common_utils.check_kubernetes_object(
        kube_object=danmnet_pods2,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=90)
    common_utils.helm_delete("danmnet-pods1")


def step1_check_default_pool_cpu_node_capacity():
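    """Probe the default pool boundary: a request equal to the node's CPU
    capacity must be schedulable, while a request 10m above it must leave the
    pod Pending.
    """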
    node_cpu_capacity = get_node_cpu_capacity(nodename)
    cpu_request = "{0}m".format(node_cpu_capacity)
    try:
        common_utils.helm_install(
            chart_name="default/cpu-pooling-default1",
            release_name="cpu-pooling",
            values=
            "registry_url={reg_url},nodename={node_name},cpu_request={cpu},cpu_limit={cpu}"
            .format(reg_url=reg, node_name=nodename, cpu=cpu_request))
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod7,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=90)
        logger.info(
            "Default pool allocation succeeded with the maximum allocatable CPUs!"
        )
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod7,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=60)

        cpu_request = "{0}m".format(node_cpu_capacity + 10)
        common_utils.helm_install(
            chart_name="default/cpu-pooling-default1",
            release_name="cpu-pooling",
            values=
            "registry_url={reg_url},nodename={node_name},cpu_request={cpu},cpu_limit={cpu}"
            .format(reg_url=reg, node_name=nodename, cpu=cpu_request))
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod7,
            expected_result="1",
            filter=r'(Pending)\s*[0]',
            timeout=90,
            delay=3)
        logger.info(
            "Default pool allocation failed as expected when requesting more CPU than allocatable!"
        )
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod7,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=60)


def step9_1_exclusive_1_shared():
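    """Deploy a pod with one exclusive and one shared CPU; it must reach the
    Running state.
    """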
    try:
        common_utils.helm_install(
            chart_name="default/cpu-pooling-mix1",
            release_name="cpu-pooling",
            values="registry_url={reg_url},nodename={node_name}".format(
                reg_url=reg, node_name=nodename))
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod5,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=90)
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod5,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=90)


def step2_with_annotation():
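    """Deploy an annotated pod requesting exclusive CPUs; its allowed CPU
    list must come from the exclusive pool.
    """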
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive2", release_name="cpu-pooling",
                                  values="registry_url={reg_url},nodename={node_name}".format(reg_url=reg,
                                                                                              node_name=nodename))
        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod2,
                                                    expected_result="1",
                                                    filter=r'(Running)\s*[0]',
                                                    timeout=90)

        allowed_cpu_for_pod = common_utils.get_cpu_allowed_list_from_pod(cpu_pooling_pod2['obj_name'])
        exclusive_cpus = cpupools[nodename]['exclusive_caas']
        if not common_utils.allowed_cpus_is_in_cpu_pool(allowed_cpu_for_pod, exclusive_cpus):
            raise Exception('{pod} did not allocate CPU from the exclusive pool!'.format(pod=cpu_pooling_pod2['obj_name']))
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=90)


def step_7_allocate_all_exclusive_and_new_one_start_running_after_needed_resource_is_freed_up():
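    """Allocate the entire exclusive pool with one release; a second release
    requesting an exclusive CPU must stay Pending until the first one is
    deleted, after which its pod must start Running.
    """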
    max_exclusive_pool_size = len(cpupools[nodename]['exclusive_caas'])
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive2", release_name="cpu-pooling1",
                                  values="registry_url={reg_url},nodename={node_name},proc_req={cpus},pool_req={cpus}"
                                  .format(reg_url=reg, cpus=max_exclusive_pool_size, node_name=nodename))
        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod2,
                                                    expected_result="1",
                                                    filter=r'(Running)\s*[0]',
                                                    timeout=90)
        logger.info("Allocation of all exclusive CPU successfull!")

        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive1", release_name="cpu-pooling2",
                                  values="registry_url={reg_url},nodename={node_name}".format(reg_url=reg,
                                                                                              node_name=nodename))
        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod1,
                                                    expected_result="1",
                                                    filter=r'(Pending)\s*[0]',
                                                    timeout=90,
                                                    delay=3)
        logger.info("Try to allocate more exclusive CPU -> Pod in Pending!")
        common_utils.helm_delete("cpu-pooling1")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=90)
        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod1,
                                                    expected_result="1",
                                                    filter=r'(Running)\s*[0]',
                                                    timeout=90)

    finally:
        if common_utils.helm_list("cpu-pooling1") != "0":
            common_utils.helm_delete("cpu-pooling1")
        common_utils.helm_delete("cpu-pooling2")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod1,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=90)
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=90)


def step12_none_ip_pod_restart_loop():
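    """A pod requesting no IP ('none') keeps restarting and stays in
    ContainerCreating; afterwards delete the remaining tenantnetwork-attach
    releases.
    """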
    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod10",
                              release_name="tenantnetwork-attach-pod10")
    common_utils.test_kubernetes_object_quality(
        kube_object=tennet_pod10,
        expected_result=tennet_pod10['obj_count'],
        filter=r'(ContainerCreating)\s*[0]',
        timeout=90)
    common_utils.helm_delete("tenantnetwork-attach-pod3")
    common_utils.helm_delete("tenantnetwork-attach-pod5")
    common_utils.helm_delete("tenantnetwork-attach-pod6")
    common_utils.helm_delete("tenantnetwork-attach-pod7")
    common_utils.helm_delete("tenantnetwork-attach-pod8")
    common_utils.helm_delete("tenantnetwork-attach-pod9")
    common_utils.helm_delete("tenantnetwork-attach-pod10")


def step12():
    """Pods requesting no IP ('none') keep restarting and stay in
    ContainerCreating; afterwards delete all danmnet releases and verify the
    cleanup.
    """
    common_utils.test_kubernetes_object_quality(
        kube_object=danmnet_pods10,
        expected_result=danmnet_pods10['obj_count'],
        filter=r'(ContainerCreating)\s*[0]',
        timeout=90)
    common_utils.helm_delete("danmnet-pods3")
    common_utils.helm_delete("danmnet-pods4")
    common_utils.helm_delete("danmnet-pods5")
    common_utils.helm_delete("danmnet-pods6")
    common_utils.helm_delete("danmnet-pods7")
    common_utils.helm_delete("danmnet-pods8")
    common_utils.helm_delete("danmnet-pods9")
    common_utils.helm_delete("danmnet-pods10")
    common_utils.check_kubernetes_object(
        kube_object=danmnet_pods_all,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=20)
    check_dep_count(danmnet_pods1["namespace"], exp_count=0)


def Teardown():
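    """Delete the remaining danmnet releases and the manifests created for
    the network attach test.
    """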
    common_utils.helm_delete("danmnet-pods12")
    common_utils.helm_delete("danmnet-pods14")
    danm_utils.delete_resources_by_manifest_path(
        "/tmp/network-attach-test/templates/")


def step10_cpu_allowed_list_set_after_test_pod_deployed():
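    """Verify that the cpu-setter daemonset adjusts a running pod's allowed
    CPU list.

    Save and delete the daemonset, deploy an exclusive-pool pod, then restore
    the daemonset and check that the pod's allowed CPU list changes to cores
    from the exclusive pool; the finally block restores the setter even on
    failure.
    """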
    cpu_setter_deleted = False
    try:
        cpu_pooling_setter["obj_count"] = ex.execute_unix_command(
            "kubectl get pod --all-namespaces | "
            "grep setter | wc -l")
        ex.execute_unix_command(
            "kubectl get ds -n kube-system cpu-setter -o yaml")
        ex.execute_unix_command(
            "kubectl get ds -n kube-system cpu-setter -o yaml > setter.yaml")
        ex.execute_unix_command("kubectl delete ds -n kube-system cpu-setter")

        cpu_setter_deleted = True

        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_setter,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=90)

        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive1",
                                  release_name="cpu-pooling",
                                  values="registry_url=" + reg + ",nodename=" +
                                  nodename)
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod1,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=90)

        allowed_cpus_for_pod_before = common_utils.get_cpu_allowed_list_from_pod(
            cpu_pooling_pod1['obj_name'])

        ex.execute_unix_command("kubectl create -f setter.yaml")

        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_setter,
            expected_result=cpu_pooling_setter["obj_count"],
            filter=r'(Running)\s*[0]',
            timeout=90)
        cpu_setter_deleted = False
        allowed_cpus_for_pod_after = common_utils.get_cpu_allowed_list_from_pod(
            cpu_pooling_pod1['obj_name'])
        exclusive_cpus = cpupools[nodename]['exclusive_caas']
        if not common_utils.allowed_cpus_is_in_cpu_pool(
                allowed_cpus_for_pod_after, exclusive_cpus):
            raise Exception(
                '{pod} did not allocate CPU from the exclusive pool!'.format(
                    pod=cpu_pooling_pod1['obj_name']))
        if set(allowed_cpus_for_pod_before) == set(allowed_cpus_for_pod_after):
            raise Exception(
                'Allowed CPU list did not change after the setter was redeployed!'
            )
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod1,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=90)
        setter_count = ex.execute_unix_command(
            "kubectl get pod --all-namespaces | grep setter | wc -l")
        if cpu_setter_deleted:
            if setter_count != "0":
                search_cmd = "kubectl get pod -n kube-system |grep setter | awk '{print $1}'"
                del_cmd = "kubectl -n kube-system delete pod --grace-period=0 --force --wait=false"

                ex.execute_unix_command(
                    "for i in `{search}`; do {delete} $i; done".format(
                        search=search_cmd, delete=del_cmd))
                common_utils.check_kubernetes_object(
                    kube_object=cpu_pooling_setter,
                    tester_function=(
                        common_utils.test_kubernetes_object_not_available),
                    timeout=90)
            ex.execute_unix_command("kubectl create -f setter.yaml")

            common_utils.test_kubernetes_object_quality(
                kube_object=cpu_pooling_setter,
                expected_result=cpu_pooling_setter["obj_count"],
                filter=r'(Running)\s*[0]',
                timeout=90)