def step2_exclusive_and_shared():
    """Deploy a pod drawing CPUs from both the exclusive and the shared pool,
    then verify its allowed CPU list stays inside the union of those pools."""
    try:
        chart_values = "registry_url={reg_url}".format(reg_url=reg)
        common_utils.helm_install(
            chart_name="default/cpu-pooling-mix2",
            release_name="cpu-pooling",
            values=chart_values)
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod6,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=90)
        pod_cpus = common_utils.get_cpu_allowed_list_from_pod(
            cpu_pooling_pod6['obj_name'])
        # The pod may run on any CPU belonging to either pool.
        combined_pool = (cpupools[nodename]['exclusive_caas'] +
                         cpupools[nodename]['shared_caas'])
        if not common_utils.allowed_cpus_is_in_cpu_pool(pod_cpus,
                                                        combined_pool):
            raise Exception(
                '{pod} not allocate CPUs from {req_pool} pool!'.format(
                    pod=cpu_pooling_pod6['obj_name'],
                    req_pool=combined_pool))
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod6,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=90)
def step8_pod_use_default_pool_burstable():
    """Deploy a pod with memory/cpu requests only (no pool annotation) and
    verify it lands on the default pool with Burstable QoS class."""
    memory_request = "500Mi"
    cpu_request = "250m"
    try:
        common_utils.helm_install(
            chart_name="default/cpu-pooling-default1",
            release_name="cpu-pooling",
            values="registry_url={reg_url},nodename={node_name},mem_request={mem},"
                   "cpu_request={cpu}".format(reg_url=reg,
                                              node_name=nodename,
                                              mem=memory_request,
                                              cpu=cpu_request))
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod7,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=90)
        pod_cpus = common_utils.get_cpu_allowed_list_from_pod(
            cpu_pooling_pod7['obj_name'])
        if not common_utils.allowed_cpus_is_in_cpu_pool(
                pod_cpus, cpupools[nodename]['default']):
            raise Exception('{pod} not allocate CPU from default pool!'.format(
                pod=cpu_pooling_pod7['obj_name']))
        # Requests without limits -> the pod must be classified as Burstable.
        check_qos_of_pod(cpu_pooling_pod7['obj_name'], "Burstable")
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod7,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=60)
def step6_more_cpu_annotation_than_request():
    """Request fewer exclusive CPUs than the pool annotation declares and
    expect the replicaset to report the mismatch instead of starting."""
    annotation_cpu = 2
    request_cpu = 1
    # The error surfaces on the replicaset, so inspect that object type.
    cpu_pooling_pod2['obj_type'] = 'replicaset'
    try:
        common_utils.helm_install(
            chart_name="default/cpu-pooling-exclusive2",
            release_name="cpu-pooling",
            values="registry_url={url},nodename={node_name},proc_req={proc},pool_req={req}"
            .format(url=reg, proc=annotation_cpu, req=request_cpu,
                    node_name=nodename))
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod2,
            tester_function=common_utils.test_kubernetes_object_available,
            timeout=10,
            delay=3)
        expected_error = 'Exclusive CPU requests {req} do not match to annotation {proc}'.format(
            req=request_cpu, proc=annotation_cpu)
        describe_output = ex.execute_unix_command(
            'kubectl describe replicaset {0}'.format(
                cpu_pooling_pod2['obj_name']))
        if expected_error in describe_output:
            logger.info(expected_error)
        else:
            raise Exception(
                'Replicaset description does not contain expected error! -'
                + describe_output)
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod2,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=90)
        # Restore the object type for the following test steps.
        cpu_pooling_pod2['obj_type'] = 'pod'
def step6():
    """Verify danmnet_pods4 is gone and its endpoints were released."""
    released_ips = danmnet_pods4['ip_list']
    common_utils.check_kubernetes_object(
        kube_object=danmnet_pods4,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=90)
    check_danmnet_endpoints(danmnet_pods4, 'test-net2', released_ips)
def step5_annotation_without_cpus():
    """A CPU-pool annotation lacking the mandatory 'cpus' field must be
    rejected, and the replicaset must carry the validation error."""
    try:
        common_utils.helm_install(
            chart_name="default/cpu-pooling-annotation3",
            release_name="cpu-pooling",
            values="registry_url={reg_url}".format(reg_url=reg))
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod11,
            tester_function=common_utils.test_kubernetes_object_available,
            timeout=30,
            delay=3)
        expected_error = "'cpus' field is mandatory in annotation"
        describe_output = ex.execute_unix_command(
            'kubectl describe replicasets {0}'.format(
                cpu_pooling_pod11['obj_name']))
        if expected_error in describe_output:
            logger.info(expected_error)
        else:
            raise Exception(
                'Replicaset description does not contain expected error! -'
                + describe_output)
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod11,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=60)
def step6_request_for_default_pool():
    """Request more default-pool CPU than is available and expect the pod to
    stay Pending with an insufficient-resource event.

    Raises:
        Exception: if the pod description lacks the expected scheduling error.
    """
    try:
        common_utils.helm_install(
            chart_name="default/cpu-pooling-default2",
            release_name="cpu-pooling",
            values="registry_url={reg_url}".format(reg_url=reg))
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod8,
            expected_result="1",
            filter=r'(Pending)\s*[0]',
            timeout=30,
            delay=3)
        error = "Insufficient nokia.k8s.io/default"
        result = ex.execute_unix_command(
            'kubectl describe pod {podname}'.format(
                podname=cpu_pooling_pod8['obj_name']))
        if error not in result:
            # Bug fix: the object described above is a pod, but the original
            # message misleadingly said "Replicaset description".
            raise Exception(
                'Pod description does not contain expected error! -' + result)
        logger.info(error)
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod8,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=60)
def step3_more_replicas_than_cpus():
    """Scale the exclusive-pool replicaset to one replica more than there are
    exclusive CPUs; the surplus replica must stay Pending with an
    insufficient-resource event.

    Raises:
        Exception: if the pending pod's description lacks the expected error.
    """
    num_of_replicas = len(cpupools[nodename]['exclusive_caas'])
    try:
        common_utils.helm_install(
            chart_name="default/cpu-pooling-exclusive2",
            release_name="cpu-pooling",
            values="registry_url={reg_url},nodename={node_name},replicas={cpus}"
            .format(reg_url=reg, cpus=num_of_replicas + 1, node_name=nodename))
        # Only as many replicas as there are exclusive CPUs can run.
        cpu_pooling_pod2['obj_count'] = str(num_of_replicas)
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod2,
            expected_result="1",
            filter=r'(Pending)\s*[0]',
            timeout=90,
            delay=3)
        result = ex.execute_unix_command(
            'kubectl describe pod {podname}'.format(
                podname=cpu_pooling_pod2['obj_name']))
        error = 'Insufficient nokia.k8s.io/exclusive_caas'
        if error not in result:
            # Bug fix: a pod is described above, but the original message
            # misleadingly said "Replicaset description".
            raise Exception(
                'Pod description does not contain expected error! -' + result)
        logger.info(error)
    finally:
        cpu_pooling_pod2['obj_count'] = "1"
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod2,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=90)
def step14():
    """Static, dynamic and 'none' ip allocation in the same pod; the ips that
    failed to release in step 13 must be allocatable again."""
    install_chart(danmnet_pods12)
    install_chart(danmnet_pods14)
    common_utils.test_kubernetes_object_quality(
        kube_object=danmnet_pods12,
        expected_result=danmnet_pods12['obj_count'],
        filter=r'(Running)\s*[0]',
        timeout=90)
    pods = get_pod_list(danmnet_pods12)
    dynamic_pool = get_alloc_pool('cnet_pod6', network_attach_properties,
                                  'clusternetwork')
    danmnet_pods12['ip_list'] = get_pod_ips(pods, if_name='eth1')
    check_dynamic_ips(dynamic_pool, danmnet_pods12['ip_list'])
    danmnet_pods12['ip_list'] = get_pod_ips(pods, if_name='eth0')
    if IPAddress(danmnet_pods12['ip_list'][0]) != IPAddress('10.10.0.250'):
        raise Exception("static ip in pod danmnet-pods12 is not as expected")
    common_utils.test_kubernetes_object_quality(
        kube_object=danmnet_pods14,
        expected_result=danmnet_pods14['obj_count'],
        filter=r'(Running)\s*[0]',
        timeout=90)
    pods = get_pod_list(danmnet_pods14)
    danmnet_pods14['ip_list'] = get_pod_ips(pods, if_name='eth2')
    if IPAddress(danmnet_pods14['ip_list'][0]) != IPAddress('10.10.0.254'):
        raise Exception("static ip in pod danmnet-pods14 is not as expected")
    common_utils.helm_delete("danmnet-pods12")
    common_utils.helm_delete("danmnet-pods14")
    common_utils.check_kubernetes_object(
        kube_object=danmnet_pods14,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=20)
    check_dep_count(danmnet_pods12["namespace"], exp_count=0)
def step1_with_two_process():
    """Run a pod whose two processes must each be pinned to a distinct
    exclusive CPU core."""
    try:
        common_utils.helm_install(
            chart_name="default/cpu-pooling-exclusive3",
            release_name="cpu-pooling",
            values="registry_url=" + reg + ",nodename=" + nodename)
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod3,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=10)
        exclusive_pool = cpupools[nodename]['exclusive_caas']
        cpu_a, cpu_b = get_cpu_core_of_processes(
            cpu_pooling_pod3['obj_name'], "dumb-init -c sleep 1000")
        if cpu_a not in exclusive_pool:
            raise Exception(
                '{pod}: Proc1 running on non exclusive cpu core {cpu}!'.format(
                    pod=cpu_pooling_pod3['obj_name'], cpu=cpu_a))
        if cpu_b not in exclusive_pool:
            raise Exception(
                '{pod}: Proc2 running on non exclusive cpu core {cpu}!'.format(
                    pod=cpu_pooling_pod3['obj_name'], cpu=cpu_b))
        # Exclusive pinning means the two processes cannot share a core.
        if cpu_a == cpu_b:
            raise Exception(
                '{pod}: Two processes use same cpu core: {cpu}!'.format(
                    pod=cpu_pooling_pod3['obj_name'], cpu=cpu_b))
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod3,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=60)
def step6_check_step4_deletion_success():
    """Verify tennet_pod4 is gone and its danmnet endpoints were removed."""
    common_utils.check_kubernetes_object(
        kube_object=tennet_pod4,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=60)
    danm_utils.check_danmnet_endpoints_deleted(tennet_pod4,
                                               'tennet_attach_02',
                                               tenantnetwork_attach_properties,
                                               tennet_pod4['ip_list'])
def step2_check_pv_retaining():
    """Delete the storage-test release, wait until its pod is gone, reinstall
    the chart, then release the shared pv_test_ip lock."""
    common_utils.helm_delete("storage-test")
    common_utils.check_kubernetes_object(
        kube_object=pv_test_pod,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=90)
    _install_storage_test_helm_chart()
    pabot.release_lock("pv_test_ip")
def setup():
    """Install the custom-metrics chart and wait for the podinfo pod."""
    # flags = ["--horizontal-pod-autoscaler-downscale-stabilization=10s", "--horizontal-pod-autoscaler-sync-period=10s"]
    # common_utils.modify_static_pod_config(common_utils.add_flag_to_command, "cm.yml", flags)
    chart_values = "registry_url={reg_url}".format(reg_url=reg)
    common_utils.helm_install(chart_name="default/custom-metrics",
                              release_name="podinfo",
                              values=chart_values)
    common_utils.check_kubernetes_object(
        kube_object=podinfo_pod,
        tester_function=common_utils.test_kubernetes_object_available,
        additional_filter="Running",
        timeout=90)
def step14_check_realloc_ips_of_prev_step_with_dynamic_and_none_ip_alloc():
    """Re-deploy pods 12 and 14 after step 13's failed attachments and verify
    the previously used dynamic ips can be allocated again from their pools.

    NOTE(review): `alloc_pool` is reused across checks; the tnet5 check for
    tennet_pod14 runs against the pool fetched last before it
    (tennet_attach_05) — confirm this ordering is deliberate.
    """
    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod12",
                              release_name="tenantnetwork-attach-pod12")
    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod14",
                              release_name="tenantnetwork-attach-pod14")
    common_utils.test_kubernetes_object_quality(
        kube_object=tennet_pod12,
        expected_result=tennet_pod12['obj_count'],
        filter=r'(Running)\s*[0]',
        timeout=90)
    pod_list = danm_utils.get_pod_list(tennet_pod12)
    # tnet_1 ips must come from tennet_attach_01's allocation pool.
    alloc_pool = danm_utils.get_alloc_pool("tennet_attach_01",
                                           tenantnetwork_attach_properties,
                                           "tenantnetwork")
    danm_utils.check_dynamic_ips(
        alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='tnet_1'))
    # eth0 of tennet_pod12 is attached to tennet_attach_05.
    alloc_pool = danm_utils.get_alloc_pool("tennet_attach_05",
                                           tenantnetwork_attach_properties,
                                           "tenantnetwork")
    danm_utils.check_dynamic_ips(
        alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='eth0'))
    common_utils.test_kubernetes_object_quality(
        kube_object=tennet_pod14,
        expected_result=tennet_pod14['obj_count'],
        filter=r'(Running)\s*[0]',
        timeout=90)
    pod_list = danm_utils.get_pod_list(tennet_pod14)
    # danm_utils.check_dynamic_ips(alloc_pool, [tennet_pod14['ip_list'][2]])
    # tnet5 is checked against the pool fetched above (tennet_attach_05).
    danm_utils.check_dynamic_ips(
        alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='tnet5'))
    alloc_pool = danm_utils.get_alloc_pool("tennet_attach_06",
                                           tenantnetwork_attach_properties,
                                           "tenantnetwork")
    danm_utils.check_dynamic_ips(
        alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='eth0'))
    alloc_pool = danm_utils.get_alloc_pool("tennet_attach_01",
                                           tenantnetwork_attach_properties,
                                           "tenantnetwork")
    danm_utils.check_dynamic_ips(
        alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='tnet_2'))
    # Cleanup: remove both releases and verify nothing is left behind.
    common_utils.helm_delete("tenantnetwork-attach-pod12")
    common_utils.helm_delete("tenantnetwork-attach-pod14")
    common_utils.check_kubernetes_object(
        kube_object=tennet_pod12,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=20)
    common_utils.check_kubernetes_object(
        kube_object=tennet_pod14,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=20)
    danm_utils.check_dep_count(tennet_pod12["namespace"], exp_count=0)
def step2_check_scale_out_custom():
    """Start the http traffic generator and expect the custom-metric HPA to
    scale the target out to three replicas."""
    common_utils.helm_install(
        chart_name="default/http-traffic-gen",
        release_name="http-traffic-gen",
        values="registry_url={reg_url}".format(reg_url=reg))
    common_utils.check_kubernetes_object(
        kube_object=http_traffic_gen,
        tester_function=common_utils.test_kubernetes_object_available,
        additional_filter="Running",
        timeout=45)
    # Scaling can take a while, hence the generous timeout.
    check_scaling(3, 1000)
def _install_storage_test_helm_chart():
    """Install the storage-test chart matching the node layout (worker vs.
    OAM-only stack) and wait for its pod to reach Running state."""
    if stack_infos.get_worker_nodes():
        chart = "default/storage-test-worker"
    else:
        chart = "default/storage-test-oam"
    common_utils.helm_install(chart_name=chart, release_name="storage-test")
    common_utils.wait_if_pressure()
    common_utils.check_kubernetes_object(
        kube_object=pv_test_pod,
        tester_function=common_utils.test_kubernetes_object_available,
        additional_filter="Running",
        timeout=60)
def install_charts():
    """Install the PVC chart, wait for the claim to bind, install the storage
    test chart, and record the bound PV's name in the module global."""
    common_utils.helm_install(chart_name="default/persistentvolume-claim",
                              release_name="pvc")
    common_utils.wait_if_pressure()
    common_utils.check_kubernetes_object(
        kube_object=pv_test_pvc,
        tester_function=common_utils.test_kubernetes_object_available,
        additional_filter="Bound",
        timeout=90)
    _install_storage_test_helm_chart()
    global pv_name  # pylint: disable=global-statement
    # Third column of `kubectl get pvc` is the bound volume's name.
    pv_name = execute.execute_unix_command(
        "kubectl get pvc | grep pvc- | awk {'print$3'}")
def step3_check_static_ip_shortage():
    """Pod3 must get stuck in ContainerCreating while its static ips are held
    by earlier releases; then delete those releases to free the ips."""
    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod3",
                              release_name="tenantnetwork-attach-pod3")
    common_utils.test_kubernetes_object_quality(
        kube_object=tennet_pod3,
        expected_result=tennet_pod3['obj_count'],
        filter=r'(ContainerCreating)\s*[0]',
        timeout=30)
    common_utils.helm_delete("tenantnetwork-attach-pod2")
    common_utils.check_kubernetes_object(
        kube_object=tennet_pod2,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=60)
    common_utils.helm_delete("tenantnetwork-attach-pod1")
def step3():
    """danmnet_pods3 cannot run while its static ips are already allocated;
    delete the releases that hold them (danmnet-pods1/2)."""
    common_utils.test_kubernetes_object_quality(
        kube_object=danmnet_pods3,
        expected_result=danmnet_pods3['obj_count'],
        filter=r'(ContainerCreating)\s*[0]',
        timeout=90)
    common_utils.helm_delete("danmnet-pods2")
    common_utils.check_kubernetes_object(
        kube_object=danmnet_pods2,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=90)
    common_utils.helm_delete("danmnet-pods1")
def setup():
    """Deploy php-apache, tune the HPA controller flags, then start the load
    generator and wait for both pods to run."""
    common_utils.helm_install(
        chart_name="default/php-apache",
        release_name="crf01",
        values="registry_url={reg_url}".format(reg_url=reg))
    common_utils.check_kubernetes_object(
        kube_object=php_apache_pod,
        tester_function=common_utils.test_kubernetes_object_available,
        additional_filter="Running",
        timeout=90)
    hpa_flags = ["--horizontal-pod-autoscaler-downscale-stabilization=10s",
                 "--horizontal-pod-autoscaler-sync-period=10s"]
    common_utils.modify_static_pod_config(common_utils.add_flag_to_command,
                                          "cm.yml", hpa_flags)
    common_utils.helm_install(chart_name="default/load-generator-for-apache",
                              release_name="load")
    common_utils.check_kubernetes_object(
        kube_object=load_generator_for_apache,
        tester_function=common_utils.test_kubernetes_object_available,
        additional_filter="Running",
        timeout=60)
def step1_check_default_pool_cpu_node_capacity():
    """Default pool boundary check: requesting exactly the node's allocatable
    CPU must run; requesting 10m more must leave the pod Pending."""
    node_cpu_capacity = get_node_cpu_capacity(nodename)
    cpu_request = "{0}m".format(node_cpu_capacity)
    try:
        chart_values = (
            "registry_url={reg_url},nodename={node_name},cpu_request={cpu},cpu_limit={cpu}"
            .format(reg_url=reg, node_name=nodename, cpu=cpu_request))
        common_utils.helm_install(
            chart_name="default/cpu-pooling-default1",
            release_name="cpu-pooling",
            values=chart_values)
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod7,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=90)
        logger.info(
            "Default pool allocation successfull with maximum allocatable cpus!"
        )
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod7,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=60)
        # Ten millicores above node capacity can never be scheduled.
        cpu_request = "{0}m".format(node_cpu_capacity + 10)
        chart_values = (
            "registry_url={reg_url},nodename={node_name},cpu_request={cpu},cpu_limit={cpu}"
            .format(reg_url=reg, node_name=nodename, cpu=cpu_request))
        common_utils.helm_install(
            chart_name="default/cpu-pooling-default1",
            release_name="cpu-pooling",
            values=chart_values)
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod7,
            expected_result="1",
            filter=r'(Pending)\s*[0]',
            timeout=90,
            delay=3)
        logger.info(
            "Default pool allocation failed with more cpu than allocatable as expected!"
        )
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod7,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=60)
def step9_1_exclusive_1_shared():
    """A pod requesting one exclusive plus one shared CPU must reach Running."""
    try:
        chart_values = "registry_url={reg_url},nodename={node_name}".format(
            reg_url=reg, node_name=nodename)
        common_utils.helm_install(
            chart_name="default/cpu-pooling-mix1",
            release_name="cpu-pooling",
            values=chart_values)
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod5,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=90)
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod5,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=90)
def step2_with_annotation():
    """A pod annotated for the exclusive pool must only run on exclusive CPUs."""
    try:
        common_utils.helm_install(
            chart_name="default/cpu-pooling-exclusive2",
            release_name="cpu-pooling",
            values="registry_url={reg_url},nodename={node_name}".format(
                reg_url=reg, node_name=nodename))
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod2,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=90)
        pod_cpus = common_utils.get_cpu_allowed_list_from_pod(
            cpu_pooling_pod2['obj_name'])
        if not common_utils.allowed_cpus_is_in_cpu_pool(
                pod_cpus, cpupools[nodename]['exclusive_caas']):
            raise Exception(
                '{pod} not allocate CPU from exclusive pool!'.format(
                    pod=cpu_pooling_pod2['obj_name']))
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod2,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=90)
def step12():
    """The 'none'-ip pod keeps restarting in ContainerCreating; afterwards
    tear down every danmnet release and verify full cleanup."""
    common_utils.test_kubernetes_object_quality(
        kube_object=danmnet_pods10,
        expected_result=danmnet_pods10['obj_count'],
        filter=r'(ContainerCreating)\s*[0]',
        timeout=90)
    for release in ("danmnet-pods3", "danmnet-pods4", "danmnet-pods5",
                    "danmnet-pods6", "danmnet-pods7", "danmnet-pods8",
                    "danmnet-pods9", "danmnet-pods10"):
        common_utils.helm_delete(release)
    common_utils.check_kubernetes_object(
        kube_object=danmnet_pods_all,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=20)
    check_dep_count(danmnet_pods1["namespace"], exp_count=0)
def step5_check_static_ip_alloc_static_routes_success_after_purge():
    """After pod1 is purged, pod3 must own its requested static ips, carry
    the expected static routes, and have pod-to-pod connectivity."""
    common_utils.check_kubernetes_object(
        kube_object=tennet_pod1,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=60)
    common_utils.test_kubernetes_object_quality(
        kube_object=tennet_pod3,
        expected_result=tennet_pod3['obj_count'],
        filter=r'(Running)\s*[0]',
        timeout=60)
    pods = danm_utils.get_pod_list(tennet_pod3)
    if set(danm_utils.get_pod_ips(pods)) != set(tennet_pod3['ip_list']):
        raise Exception(
            "Static ip allocation for tenantnetwork-attach-pod3 was unsuccessful!"
        )
    logger.info("Static ips allocated successfully!")
    danm_utils.check_static_routes(pods, 'tennet_attach_01',
                                   tenantnetwork_attach_properties)
    # Probe connectivity from two different pods of the set.
    danm_utils.check_connectivity(pods, list(pods)[0], tennet_pod3['ip_list'])
    danm_utils.check_connectivity(pods, list(pods)[3], tennet_pod3['ip_list'])
def step5():
    """danmnet_pods1/2 must be purged and their static ips successfully
    reallocated to danmnet_pods3, with routes and connectivity intact."""
    common_utils.check_kubernetes_object(
        kube_object=danmnet_pods1,
        tester_function=common_utils.test_kubernetes_object_not_available,
        timeout=90)
    common_utils.test_kubernetes_object_quality(
        kube_object=danmnet_pods3,
        expected_result=danmnet_pods3['obj_count'],
        filter=r'(Running)\s*[0]',
        timeout=60)
    pods = get_pod_list(danmnet_pods3)
    if set(get_pod_ips(pods, skip_restarts=True)) != set(static_ips):
        raise Exception(
            "Static ip allocation for danmnet-pods3 was not successful!")
    logger.info("Static ip allocation for danmnet-pods3 was successful")
    check_static_routes(pods, 'cnet_pod1')
    # Probe connectivity from two different pods of the set.
    check_connectivity(pods, list(pods)[0], static_ips)
    check_connectivity(pods, list(pods)[3], static_ips)
def step_7_allocate_all_exclusive_and_new_one_start_running_after_needed_resource_is_freed_up():
    """Fill the whole exclusive pool with one release, verify a second release
    stays Pending, then free the pool and verify the pending pod starts."""
    pool_size = len(cpupools[nodename]['exclusive_caas'])
    try:
        common_utils.helm_install(
            chart_name="default/cpu-pooling-exclusive2",
            release_name="cpu-pooling1",
            values="registry_url={reg_url},nodename={node_name},proc_req={cpus},pool_req={cpus}"
            .format(reg_url=reg, cpus=pool_size, node_name=nodename))
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod2,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=90)
        logger.info("Allocation of all exclusive CPU successfull!")
        common_utils.helm_install(
            chart_name="default/cpu-pooling-exclusive1",
            release_name="cpu-pooling2",
            values="registry_url={reg_url},nodename={node_name}".format(
                reg_url=reg, node_name=nodename))
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod1,
            expected_result="1",
            filter=r'(Pending)\s*[0]',
            timeout=90,
            delay=3)
        logger.info("Try to allocate more exclusive CPU -> Pod in Pending!")
        # Freeing the exclusive CPUs lets the pending pod get scheduled.
        common_utils.helm_delete("cpu-pooling1")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod2,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=90)
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod1,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=90)
    finally:
        # The first release may already be gone when the test succeeded.
        if common_utils.helm_list("cpu-pooling1") != "0":
            common_utils.helm_delete("cpu-pooling1")
        common_utils.helm_delete("cpu-pooling2")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod1,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=90)
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod2,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=90)
def step13_check_invalid_net_attach_and_successful_damnep_ip_release_after_retries(
):
    """Repeatedly create pods with invalid network attachments and verify the
    danmep/ip allocations are fully released after every failed attempt."""
    # Snapshot the allocation counters to compare against after the retries.
    tnet1_alloc_before = danm_utils.get_alloc_value(
        'tennet_attach_01', tenantnetwork_attach_properties, "tenantnetwork")
    tnet5_alloc_before = danm_utils.get_alloc_value(
        'tennet_attach_05', tenantnetwork_attach_properties, "tenantnetwork")
    tnet6_alloc_before = danm_utils.get_alloc_value(
        'tennet_attach_06', tenantnetwork_attach_properties, "tenantnetwork")
    common_utils.get_helm_chart_content("default/tenantnetwork-attach-pod11")
    common_utils.get_helm_chart_content("default/tenantnetwork-attach-pod13")
    # Substitute the registry url placeholder in the fetched templates.
    execute.execute_unix_command(
        "sed -i 's/{{ .Values.registry_url }}/" + reg + "/g' " +
        "/tmp/tenantnetwork-attach-pod11/templates/tennet_pod_11.yaml")
    execute.execute_unix_command(
        "sed -i 's/{{ .Values.registry_url }}/" + reg + "/g' " +
        "/tmp/tenantnetwork-attach-pod13/templates/tennet_pod_13.yaml")
    # Ten create/delete cycles: creation must fail (pods stuck in
    # ContainerCreating) and each deletion must clean up completely.
    for _ in range(0, 10):
        danm_utils.create_resources_from_fetched_chart_templates(
            "/tmp/tenantnetwork-attach-pod11/templates")
        danm_utils.create_resources_from_fetched_chart_templates(
            "/tmp/tenantnetwork-attach-pod13/templates")
        common_utils.test_kubernetes_object_quality(
            kube_object=tennet_pod11,
            expected_result=tennet_pod11['obj_count'],
            filter=r'(ContainerCreating)\s*[0]',
            timeout=40)
        common_utils.test_kubernetes_object_quality(
            kube_object=tennet_pod13,
            expected_result=tennet_pod13['obj_count'],
            filter=r'(ContainerCreating)\s*[0]',
            timeout=40)
        danm_utils.delete_resources_by_manifest_path(
            "/tmp/tenantnetwork-attach-pod11/templates")
        danm_utils.delete_resources_by_manifest_path(
            "/tmp/tenantnetwork-attach-pod13/templates")
        common_utils.check_kubernetes_object(
            kube_object=tennet_pod11,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=40)
        common_utils.check_kubernetes_object(
            kube_object=tennet_pod13,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=40)
    # The allocation counters must be unchanged after all the retries.
    tnet1_alloc_after = danm_utils.get_alloc_value(
        'tennet_attach_01', tenantnetwork_attach_properties, 'tenantnetwork')
    tnet5_alloc_after = danm_utils.get_alloc_value(
        'tennet_attach_05', tenantnetwork_attach_properties, 'tenantnetwork')
    tnet6_alloc_after = danm_utils.get_alloc_value(
        'tennet_attach_06', tenantnetwork_attach_properties, 'tenantnetwork')
    if tnet1_alloc_before != tnet1_alloc_after:
        raise Exception(
            "allocation value in tennet_attach_01 is not as expected")
    if tnet5_alloc_before != tnet5_alloc_after:
        raise Exception(
            "allocation value in tennet_attach_05 is not as expected")
    if tnet6_alloc_before != tnet6_alloc_after:
        raise Exception(
            "allocation value in tennet_attach_06 is not as expected")
    danm_utils.check_dep_count('default', exp_count=0)
def step13():
    # danmnet_pods11, danmnet_pods13 has invalid networks attached hance the pod creation will fail,
    # checking if danmnet endpoints, ips are cleared after several unsuccessful pod creations
    alloc_before_cnet_pod5 = get_alloc_value('cnet_pod5',
                                             network_attach_properties,
                                             'clusternetwork')
    alloc_before_cnet_pod6 = get_alloc_value('cnet_pod6',
                                             network_attach_properties,
                                             'clusternetwork')
    # Fetch both charts and substitute the registry url in their templates.
    common_utils.get_helm_chart_content('default/' +
                                        danmnet_pods11['obj_name'])
    execute.execute_unix_command("sed -i 's/{{ .Values.registry_url }}/" +
                                 reg + "/g' " + "/tmp/" +
                                 danmnet_pods11['obj_name'] + "/templates/" +
                                 danmnet_pods11['obj_name'] + ".yaml")
    common_utils.get_helm_chart_content('default/' +
                                        danmnet_pods13['obj_name'])
    execute.execute_unix_command("sed -i 's/{{ .Values.registry_url }}/" +
                                 reg + "/g' " + "/tmp/" +
                                 danmnet_pods13['obj_name'] + "/templates/" +
                                 danmnet_pods13['obj_name'] + ".yaml")
    # Snapshot the flannel ip reservation directory for later comparison.
    command = "ls -rt /var/lib/cni/networks/cbr0/ | wc -l"
    ip_count_before = execute.execute_unix_command_as_root(command)
    command = "ls -rt /var/lib/cni/networks/cbr0/"
    cbr0_content1 = execute.execute_unix_command_as_root(command)
    # Ten create/delete cycles; every creation is expected to fail.
    for _ in range(0, 10):
        # danmnet_pods11 creation fails
        command = "kubectl create -f /tmp/" + danmnet_pods11[
            'obj_name'] + "/templates"
        execute.execute_unix_command_as_root(command)
        # danmnet_pods13 creation fails
        command = "kubectl create -f /tmp/" + danmnet_pods13[
            'obj_name'] + "/templates"
        execute.execute_unix_command_as_root(command)
        common_utils.test_kubernetes_object_quality(
            kube_object=danmnet_pods11,
            expected_result=danmnet_pods11['obj_count'],
            filter=r'(ContainerCreating)\s*[0]',
            timeout=40)
        common_utils.test_kubernetes_object_quality(
            kube_object=danmnet_pods13,
            expected_result=danmnet_pods13['obj_count'],
            filter=r'(ContainerCreating)\s*[0]',
            timeout=40)
        command = "kubectl delete -f /tmp/" + danmnet_pods11[
            'obj_name'] + "/templates"
        execute.execute_unix_command_as_root(command)
        command = "kubectl delete -f /tmp/" + danmnet_pods13[
            'obj_name'] + "/templates"
        execute.execute_unix_command_as_root(command)
        common_utils.check_kubernetes_object(
            kube_object=danmnet_pods11,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=40)
        common_utils.check_kubernetes_object(
            kube_object=danmnet_pods13,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=40)
    # All flannel ip reservations must be released after the retries.
    check_danm_count(ip_count_before, cbr0_content1, 0)
    logger.info("All flannel ips are cleared")
    alloc_after_cnet_pod5 = get_alloc_value('cnet_pod5',
                                            network_attach_properties,
                                            'clusternetwork')
    alloc_after_cnet_pod6 = get_alloc_value('cnet_pod6',
                                            network_attach_properties,
                                            'clusternetwork')
    if alloc_after_cnet_pod6 != alloc_before_cnet_pod6:
        raise Exception("allocation value in cnet-pod6 is not as expected")
    if alloc_after_cnet_pod5 != alloc_before_cnet_pod5:
        raise Exception("allocation value in cnet-pod5 is not as expected")
    check_dep_count('default', exp_count=0)
def step10_cpu_allowed_list_set_after_test_pod_deployed():
    """Delete the cpu-setter daemonset, deploy an exclusive-pool test pod,
    re-create the setter and verify it rewrites the pod's allowed CPU list."""
    # Tracks whether the daemonset is currently deleted so the finally block
    # knows whether it still has to restore it.
    cpu_setter_deleted = False
    try:
        # Remember how many setter pods exist so re-deployment can be verified.
        cpu_pooling_setter["obj_count"] = ex.execute_unix_command(
            "kubectl get pod --all-namespaces | "
            "grep setter | wc -l")
        ex.execute_unix_command(
            "kubectl get ds -n kube-system cpu-setter -o yaml")
        # Save the daemonset manifest so it can be restored later.
        ex.execute_unix_command(
            "kubectl get ds -n kube-system cpu-setter -o yaml > setter.yaml")
        ex.execute_unix_command("kubectl delete ds -n kube-system cpu-setter")
        cpu_setter_deleted = True
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_setter,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=90)
        common_utils.helm_install(
            chart_name="default/cpu-pooling-exclusive1",
            release_name="cpu-pooling",
            values="registry_url=" + reg + ",nodename=" + nodename)
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_pod1,
            expected_result="1",
            filter=r'(Running)\s*[0]',
            timeout=90)
        # Allowed CPU list while the setter is absent.
        allowed_cpus_for_pod_before = common_utils.get_cpu_allowed_list_from_pod(
            cpu_pooling_pod1['obj_name'])
        # Restore the setter daemonset from the saved manifest.
        ex.execute_unix_command("kubectl create -f setter.yaml")
        common_utils.test_kubernetes_object_quality(
            kube_object=cpu_pooling_setter,
            expected_result=cpu_pooling_setter["obj_count"],
            filter=r'(Running)\s*[0]',
            timeout=90)
        cpu_setter_deleted = False
        allowed_cpus_for_pod_after = common_utils.get_cpu_allowed_list_from_pod(
            cpu_pooling_pod1['obj_name'])
        exclusive_cpus = cpupools[nodename]['exclusive_caas']
        if not common_utils.allowed_cpus_is_in_cpu_pool(
                allowed_cpus_for_pod_after, exclusive_cpus):
            raise Exception(
                '{pod} not allocate CPU from exclusive pool!'.format(
                    pod=cpu_pooling_pod1['obj_name']))
        # The setter must have changed the pod's allowed CPU list.
        if set(allowed_cpus_for_pod_before) == set(allowed_cpus_for_pod_after):
            raise Exception(
                'Allocated CPUs before setter deployed is equal with CPU set after deploy!'
            )
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(
            kube_object=cpu_pooling_pod1,
            tester_function=common_utils.test_kubernetes_object_not_available,
            timeout=90)
        setter_count = ex.execute_unix_command(
            "kubectl get pod --all-namespaces | grep setter | wc -l")
        # If the setter was deleted but never restored, force-clean any
        # leftover setter pods and re-create the daemonset from the manifest.
        if cpu_setter_deleted:
            if setter_count != "0":
                search_cmd = "kubectl get pod -n kube-system |grep setter | awk '{print $1}'"
                del_cmd = "kubectl -n kube-system delete pod --grace-period=0 --force --wait=false"
                ex.execute_unix_command(
                    "for i in `{search}`; do {delete} $i; done".format(
                        search=search_cmd, delete=del_cmd))
                common_utils.check_kubernetes_object(
                    kube_object=cpu_pooling_setter,
                    tester_function=common_utils.
                    test_kubernetes_object_not_available,
                    timeout=90)
            ex.execute_unix_command("kubectl create -f setter.yaml")
            common_utils.test_kubernetes_object_quality(
                kube_object=cpu_pooling_setter,
                expected_result=cpu_pooling_setter["obj_count"],
                filter=r'(Running)\s*[0]',
                timeout=90)