def test_get_deployment():
    p = Pod()
    p.metadata_name = "test-pod-1"
    n = Node()
    l1 = Label("a:b")
    l1.key = "a"
    l1.value = "b"
    l2 = Label("c:d")
    l2.key = "c"
    l2.value = "d"
    n.metadata_labels.add(l1)
    n.metadata_labels.add(l2)
    d = Deployment()
    d.metadata_name = "dep-test1"
    d.podList.add(p)
    p.hasDeployment = True
    rs = ReplicaSet()
    rs.metadata_name = "rs-test1"
    rs.metadata_ownerReferences__name = "dep-test1"
    # typically, you can find the correct ReplicaSet by ownerReferences
    # TODO: create a utility function to do that (see the sketch below)
    print(move_pod_with_deployment_script_simple(p, n, [d, n, p, rs]))
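# The TODO above asks for a utility that resolves a Deployment's ReplicaSet via
# ownerReferences. A minimal sketch is given below; it is a hypothetical helper
# (not an existing utility in this suite) and assumes the
# metadata_ownerReferences__name / metadata_name attributes compare as plain
# values, as they do when set directly in these tests.
def find_replicaset_for_deployment(deployment, state_objects):
    """Return the first ReplicaSet whose ownerReferences name matches the Deployment, or None."""
    for obj in state_objects:
        if isinstance(obj, ReplicaSet) and \
                obj.metadata_ownerReferences__name == deployment.metadata_name:
            return obj
    return None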
def test_stub_completion():
    # create elementary cluster
    # render cluster
    # update with rendered cluster
    # add stub policy
    # run, get solution (empty?)
    k = KubernetesCluster()
    nodes = []
    pods = []
    node_item = Node()
    node_item.metadata_name = "node 1"
    node_item.cpuCapacity = 25
    node_item.memCapacity = 25
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    # Service to detect eviction
    s1 = Service()
    s1.metadata_name = "test-service"
    s1.amountOfActivePods = 0
    s1.isSearched = True
    d = Deployment()
    d.spec_replicas = 6
    d.NumberOfPodsOnSameNodeForDeployment = 4
    pod = build_running_pod_with_d(1, 2, 2, node_item, d, None, s1, pods)
    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)
    update(''.join(yamlState))
    cluster = next(filter(lambda x: isinstance(x, GlobalVar), kalc_state_objects))
    cluster.policy.stub = True
    run()
def prepare_test_single_node_dies_2pod_killed_service_outage():
    # Initialize scheduler, globalVar
    k = KubernetesCluster()
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    # initial node state
    n = Node()
    n.cpuCapacity = 5
    n.memCapacity = 5
    # Create running pods
    pod_running_1 = build_running_pod(1, 2, 2, n)
    pod_running_2 = build_running_pod(2, 2, 2, n)
    ## Set consumption as expected
    n.currentFormalCpuConsumption = 4
    n.currentFormalMemConsumption = 4
    n.amountOfActivePods = 2
    # Service to detect eviction
    s = Service()
    s.metadata_name = "test-service"
    s.amountOfActivePods = 2
    s.status = STATUS_SERV["Started"]
    # our service has multiple pods, but we are detecting a pods-pending issue
    # remove the service as we are detecting the service outage by the bug above
    pod_running_1.targetService = s
    pod_running_2.targetService = s
    pod_running_1.hasService = True
    pod_running_2.hasService = True
    k.state_objects.extend([n, pod_running_1, pod_running_2, s])
    # print_objects(k.state_objects)
    return k, globalVar, n
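# A minimal sketch of how this fixture might be consumed, assuming the caller
# builds the state and uses the HypothesisysNode goal class seen elsewhere in
# this suite; the test body below is illustrative only.
def test_single_node_dies_2pod_killed_service_outage_sketch():
    k, globalVar, n = prepare_test_single_node_dies_2pod_killed_service_outage()
    k._build_state()
    p = HypothesisysNode(k.state_objects)
    p.run()          # search for an action sequence reproducing the outage
    print_plan(p)    # inspect the resulting plan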
def test_get_fullscript():
    k = KubernetesCluster()
    p = Pod()
    p.status = STATUS_POD["Running"]
    p.metadata_name = "test-pod-1"
    p.cpuRequest = 2
    p.memRequest = 2
    n_orig = Node("orgi")
    n = Node()
    p.nodeSelectorList.add(n)
    n_orig.metadata_name = "ORIG"
    n_orig.currentFormalMemConsumption = 5
    n_orig.currentFormalCpuConsumption = 5
    n.status = STATUS_NODE["Active"]
    n.cpuCapacity = 10
    n.memCapacity = 10
    l1 = Label("a:b")
    l1.key = "a"
    l1.value = "b"
    l2 = Label("c:d")
    l2.key = "c"
    l2.value = "d"
    n.metadata_labels.add(l1)
    n.metadata_labels.add(l2)
    d = Deployment()
    d.metadata_name = "dep-test1"
    d.podList.add(p)
    p.hasDeployment = True
    p.atNode = n_orig
    rs = ReplicaSet()
    rs.metadata_name = "rs-test1"
    rs.metadata_ownerReferences__name = "dep-test1"
    # typically, you can find the correct ReplicaSet by ownerReferences
    # TODO: create a utility function to do that
    k.state_objects.extend([d, n, p, rs])
    prob = Balance_pods_and_drain_node(k.state_objects)
    s = k.scheduler
    g = k.globalvar
    prob.MoveRunningPodToAnotherNode(p, n_orig, n, s, g)
    assert len(prob.script)
def test_convert_node_problem():
    # Initialize scheduler, globalVar
    k = KubernetesCluster()
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    # initial node state
    n = Node()
    n.cpuCapacity = 5
    n.memCapacity = 5
    # Create running pods
    pod_running_1 = build_running_pod(1, 2, 2, n)
    pod_running_2 = build_running_pod(2, 2, 2, n)
    ## Set consumption as expected
    n.currentFormalCpuConsumption = 4
    n.currentFormalMemConsumption = 4
    n.amountOfActivePods = 2
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"
    # Service to detect eviction
    s = Service()
    s.metadata_name = "test-service"
    s.amountOfActivePods = 2
    s.status = STATUS_SERV["Started"]
    # our service has multiple pods, but we are detecting a pods-pending issue
    # remove the service as we are detecting the service outage by the bug above
    pod_running_1.targetService = s
    pod_running_2.targetService = s
    pod_running_1.hasService = True
    pod_running_2.hasService = True
    pod_running_1.priorityClass = pc
    pod_running_2.priorityClass = pc
    d = Deployment()
    d.spec_replicas = 2
    d.amountOfActivePods = 2
    pod_running_1.hasDeployment = True
    pod_running_2.hasDeployment = True
    d.podList.add(pod_running_1)
    d.podList.add(pod_running_2)
    k.state_objects.extend([n, pod_running_1, pod_running_2, s, d, pc])
    k2 = KubernetesCluster()
    for y in convert_space_to_yaml(k.state_objects, wrap_items=True):
        # print(y)
        k2.load(y)
    k2._build_state()
    # TODO: test node outage exclusion
def prepare_affinity_test_8_pods_on_3_nodes_with_6_antiaffinity_pods():
    # Initialize scheduler, globalVar
    k = KubernetesCluster()
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    globalVar.block_policy_calculated = False
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []
    services = []
    deployments = []
    # Service to detect eviction
    s1 = Service()
    s1.metadata_name = "test-service"
    s1.amountOfActivePods = 0
    s1.isSearched = True
    services.append(s1)
    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    services.append(s2)
    # create the Deployment that we're going to detect the failure of...
    d = Deployment()
    d.spec_replicas = 6
    d.NumberOfPodsOnSameNodeForDeployment = 4
    deployments.append(d)
    d2 = Deployment()
    d2.spec_replicas = 2
    d2.NumberOfPodsOnSameNodeForDeployment = 2
    deployments.append(d2)
    node_item = Node()
    node_item.metadata_name = "node 1"
    node_item.cpuCapacity = 25
    node_item.memCapacity = 25
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    pod = build_running_pod_with_d(0, 2, 2, node_item, d, None, s1, pods)
    pod = build_running_pod_with_d(1, 2, 2, node_item, d, None, s1, pods)
    pod = build_running_pod_with_d(2, 2, 2, node_item, d, None, None, pods)
    pod = build_running_pod_with_d(3, 2, 2, node_item, None, None, None, pods)
    pod = build_running_pod_with_d(4, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(5, 2, 2, node_item, None, None, s1, pods)
    node_item = Node()
    node_item.metadata_name = "node 2"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    pod = build_running_pod_with_d(12, 2, 2, node_item, d2, None, s1, pods)
    pod = build_running_pod_with_d(13, 2, 2, node_item, d2, None, s1, pods)
    node_item = Node()
    node_item.metadata_name = "node 3"
    node_item.cpuCapacity = 4
    node_item.memCapacity = 4
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    pod = build_running_pod_with_d(16, 2, 2, node_item, None, None, None, pods)
    node_item = Node()
    node_item.metadata_name = "node 4"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    node_item = Node()
    node_item.metadata_name = "node 5"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    node_item = Node()
    node_item.metadata_name = "node 6"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    for node in nodes:
        globalVar.amountOfNodes += 1
        for pod in pods:
            if not pod.nodeSelectorSet:
                pod.nodeSelectorList.add(node)
        for node2 in nodes:
            if node != node2:
                node2.different_than.add(node)
    # pods[0].antiaffinity_set = True
    # pods[0].podsMatchedByAntiaffinity.add(pods[1])
    # pods[0].podsMatchedByAntiaffinity.add(pods[2])
    # pods[0].podsMatchedByAntiaffinity.add(pods[12])
    # pods[0].podsMatchedByAntiaffinity_length = 3
    # pods[0].target_number_of_antiaffinity_pods = 3
    # pods[1].antiaffinity_set = True
    # pods[1].podsMatchedByAntiaffinity.add(pods[0])
    # pods[1].podsMatchedByAntiaffinity.add(pods[2])
    # pods[1].podsMatchedByAntiaffinity.add(pods[12])
    # pods[1].podsMatchedByAntiaffinity_length = 3
    # pods[1].target_number_of_antiaffinity_pods = 3
    # pods[2].antiaffinity_set = True
    # pods[2].podsMatchedByAntiaffinity.add(pods[1])
    # pods[2].podsMatchedByAntiaffinity.add(pods[0])
    # pods[2].podsMatchedByAntiaffinity.add(pods[12])
    # pods[2].podsMatchedByAntiaffinity_length = 3
    # pods[2].target_number_of_antiaffinity_pods = 3
    # pods[12].antiaffinity_set = True
    # pods[12].podsMatchedByAntiaffinity.add(pods[1])
    # pods[12].podsMatchedByAntiaffinity.add(pods[2])
    # pods[12].podsMatchedByAntiaffinity.add(pods[0])
    # pods[12].podsMatchedByAntiaffinity_length = 3
    # pods[12].target_number_of_antiaffinity_pods = 3
    # nodes[2].isSearched = True
    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"
    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s1, s2])
    k.state_objects.extend(deployments)
    create_objects = []
    k._build_state()
    globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar.target_DeploymentsWithAntiaffinity_length = 1
    globalVar.maxNumberOfPodsOnSameNodeForDeployment = 10
    globalVar.target_amountOfPodsWithAntiaffinity = 3

    class Antiaffinity_check_k1(Antiaffinity_check_with_limited_number_of_pods_with_add_node):
        pass
    p = Antiaffinity_check_k1(k.state_objects)
    Antiaffinity_check_k1.__name__ = inspect.stack()[0].function
    test_case = StateSet()
    test_case.scheduler = scheduler
    test_case.globalVar = globalVar
    test_case.pods = pods
    test_case.nodes = nodes
    services = [s1, s2]
    test_case.services = services
    test_case.deployments = deployments
    # print_objects(k.state_objects)
    return k, p, test_case
def test_stub_completion():
    # Initialize scheduler, globalVar
    k = KubernetesCluster()
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    globalVar.block_policy_calculated = False
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []
    services = []
    # Service to detect eviction
    s1 = Service()
    s1.metadata_name = "test-service"
    s1.amountOfActivePods = 0
    s1.isSearched = True
    services.append(s1)
    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    services.append(s2)
    # create the Deployment that we're going to detect the failure of...
    d = Deployment()
    d.spec_replicas = 6
    d.NumberOfPodsOnSameNodeForDeployment = 4
    d2 = Deployment()
    d2.spec_replicas = 2
    d2.NumberOfPodsOnSameNodeForDeployment = 1
    node_item = Node()
    node_item.metadata_name = "node 1"
    node_item.cpuCapacity = 25
    node_item.memCapacity = 25
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    pod = build_running_pod_with_d(1, 2, 2, node_item, d, None, s1, pods)
    pod = build_running_pod_with_d(2, 2, 2, node_item, d, None, s1, pods)
    pod = build_running_pod_with_d(3, 2, 2, node_item, d, None, None, pods)
    pod = build_running_pod_with_d(4, 2, 2, node_item, d2, None, None, pods)
    pod = build_running_pod_with_d(5, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(6, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(7, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(8, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(9, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(10, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(11, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(12, 2, 2, node_item, None, None, s2, pods)
    node_item = Node()
    node_item.metadata_name = "node 2"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    pod = build_running_pod_with_d(13, 2, 2, node_item, d, None, s1, pods)
    pod = build_running_pod_with_d(14, 2, 2, node_item, d2, None, s1, pods)
    pod = build_running_pod_with_d(15, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(16, 2, 2, node_item, None, None, s2, pods)
    node_item = Node()
    node_item.metadata_name = "node 3"
    node_item.cpuCapacity = 4
    node_item.memCapacity = 4
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    pod = build_running_pod_with_d(17, 2, 2, node_item, d, None, None, pods)
    node_item = Node()
    node_item.metadata_name = "node 4"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)
    node_item = Node()
    node_item.metadata_name = "node 5"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)
    node_item = Node()
    node_item.metadata_name = "node 6"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)
    for node in nodes:
        globalVar.amountOfNodes += 1
        for pod in pods:
            if not pod.nodeSelectorSet:
                pod.nodeSelectorList.add(node)
        for node2 in nodes:
            if node != node2:
                node2.different_than.add(node)
    pods[0].antiaffinity_set = True
    pods[0].podsMatchedByAntiaffinity.add(pods[1])
    pods[0].podsMatchedByAntiaffinity.add(pods[2])
    pods[0].podsMatchedByAntiaffinity.add(pods[12])
    pods[0].podsMatchedByAntiaffinity_length = 3
    pods[0].target_number_of_antiaffinity_pods = 3
    pods[1].antiaffinity_set = True
    pods[1].podsMatchedByAntiaffinity.add(pods[0])
    pods[1].podsMatchedByAntiaffinity.add(pods[2])
    pods[1].podsMatchedByAntiaffinity.add(pods[12])
    pods[1].podsMatchedByAntiaffinity_length = 3
    pods[1].target_number_of_antiaffinity_pods = 3
    pods[2].antiaffinity_set = True
    pods[2].podsMatchedByAntiaffinity.add(pods[1])
    pods[2].podsMatchedByAntiaffinity.add(pods[0])
    pods[2].podsMatchedByAntiaffinity.add(pods[12])
    pods[2].podsMatchedByAntiaffinity_length = 3
    pods[2].target_number_of_antiaffinity_pods = 3
    pods[12].antiaffinity_set = True
    pods[12].podsMatchedByAntiaffinity.add(pods[1])
    pods[12].podsMatchedByAntiaffinity.add(pods[2])
    pods[12].podsMatchedByAntiaffinity.add(pods[0])
    pods[12].podsMatchedByAntiaffinity_length = 3
    pods[12].target_number_of_antiaffinity_pods = 3
    nodes[2].isSearched = True
    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"
    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s1, s2])
    create_objects = []
    # k._build_state()
    yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)
    print("LEN", len(yamlState))
    print(''.join(yamlState))
    update(''.join(yamlState))
def prepare_test_29_many_pods_not_enough_capacity_for_service(
        nodes_amount, node_capacity, pod2_amount, pod0_amount, pod2_2_amount, pod3_amount):
    # Initialize scheduler, globalVar
    k = KubernetesCluster()
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []
    # Service to detect eviction
    s = Service()
    s.metadata_name = "test-service"
    s.amountOfActivePods = 0
    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    # create the Deployment that we're going to detect the failure of...
    d = Deployment()
    d.spec_replicas = 2
    pod_id = 1
    for i in range(nodes_amount):
        node_item = Node("node" + str(i))
        node_item.cpuCapacity = node_capacity
        node_item.memCapacity = node_capacity
        node_item.isNull = False
        node_item.status = STATUS_NODE["Active"]
        nodes.append(node_item)
        for j in range(pod2_amount):
            pod_running_2 = build_running_pod_with_d(pod_id, 2, 2, node_item, None, None)
            pod_id += 1
            pod_running_2.hasService = True
            pods.append(pod_running_2)
            node_item.amountOfActivePods += 1
            s.podList.add(pod_running_2)
            s.amountOfActivePods += 1
        for j in range(pod0_amount):
            pod_running_0 = build_running_pod_with_d(pod_id, 0, 0, node_item, None, None)
            pod_id += 1
            pods.append(pod_running_0)
            node_item.amountOfActivePods += 1
        for j in range(pod2_2_amount):
            pod_running_2 = build_running_pod_with_d(pod_id, 2, 2, node_item, None, None)
            pod_id += 1
            pod_running_2.hasService = True
            pods.append(pod_running_2)
            node_item.amountOfActivePods += 1
            s.podList.add(pod_running_2)
            s.amountOfActivePods += 1
        for j in range(pod3_amount):
            pod_running_2 = build_running_pod_with_d(pod_id, 2, 2, nodes[0], None, None)
            pod_id += 1
            pod_running_2.hasService = True
            pods.append(pod_running_2)
            nodes[0].amountOfActivePods += 1
            s2.podList.add(pod_running_2)
            s2.amountOfActivePods += 1
    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"
    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s, s2])
    create_objects = []
    k2 = reload_cluster_from_yaml(k, create_objects)
    k._build_state()

    class NewGoal_k1(CheckNodeOutage):
        pass
    p = NewGoal_k1(k.state_objects)

    class NewGoal_k2(CheckNodeOutage):
        pass
    p2 = NewGoal_k2(k2.state_objects)
    assert_conditions = ["MarkServiceOutageEvent",
                         "Mark_node_outage_event"]
    not_assert_conditions = []
    return k, k2, p, p2
def prepare_many_pods_without_yaml(
        nodes_amount, node_capacity, pod2_amount, pod0_amount, pod2_2_amount, pod3_amount):
    # Initialize scheduler, globalVar
    k = KubernetesCluster()
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []
    # Service to detect eviction
    s = Service()
    s.metadata_name = "test-service"
    s.amountOfActivePods = 0
    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    # create the Deployment that we're going to detect the failure of...
    d = Deployment()
    d.spec_replicas = 2
    pod_id = 0
    for i in range(nodes_amount):
        node_item = Node("node" + str(i))
        node_item.metadata_name = "node" + str(i)
        node_item.cpuCapacity = node_capacity
        node_item.memCapacity = node_capacity
        node_item.isNull = False
        node_item.status = STATUS_NODE["Active"]
        nodes.append(node_item)
        for j in range(pod2_amount):
            pod_running_2 = build_running_pod_with_d(pod_id, 2, 2, node_item, None, None)
            pod_id += 1
            pod_running_2.hasService = True
            pods.append(pod_running_2)
            node_item.amountOfActivePods += 1
            s.podList.add(pod_running_2)
            s.amountOfActivePods += 1
            s.status = STATUS_SERV["Started"]
        for j in range(pod0_amount):
            pod_running_0 = build_running_pod_with_d(pod_id, 0, 0, node_item, None, None)
            pod_id += 1
            pods.append(pod_running_0)
            node_item.amountOfActivePods += 1
        for j in range(pod2_2_amount):
            pod_running_2 = build_running_pod_with_d(pod_id, 2, 2, node_item, None, None)
            pod_id += 1
            pods.append(pod_running_2)
            node_item.amountOfActivePods += 1
        for j in range(pod3_amount):
            pod_running_2 = build_running_pod_with_d(pod_id, 2, 2, nodes[0], None, None)
            pod_id += 1
            pod_running_2.hasService = True
            pods.append(pod_running_2)
            nodes[0].amountOfActivePods += 1
            s2.podList.add(pod_running_2)
            s2.amountOfActivePods += 1
    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"
    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s, s2])
    create_objects = []
    k._build_state()
    globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))

    class HypothesisysNode_k1(HypothesisysNode):
        pass
    p = HypothesisysNode_k1(k.state_objects)
    HypothesisysNode_k1.__name__ = inspect.stack()[0].function
    assert_conditions = ["MarkServiceOutageEvent",
                         "Mark_node_outage_event"]
    not_assert_conditions = []
    print_objects(k.state_objects)
    test_case = StateSet()
    test_case.scheduler = scheduler
    test_case.globalVar = globalVar
    test_case.pods = pods
    test_case.nodes = nodes
    services = [s, s2]
    test_case.services = services
    return k, p, test_case
def test_1():
    # Initialize scheduler, globalVar
    k = KubernetesCluster()
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []
    # Service to detect eviction
    s1 = Service()
    s1.metadata_name = "test-service"
    s1.amountOfActivePods = 0
    s1.antiaffinity = True
    services = []
    services.append(s1)
    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    # create the Deployment that we're going to detect the failure of...
    d = Deployment()
    d.spec_replicas = 2
    node_item = Node()
    node_item.metadata_name = "node 1"
    node_item.cpuCapacity = 10
    node_item.memCapacity = 10
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    pod = build_running_pod_with_d(1, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(2, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(3, 2, 2, node_item, None, None, None, pods)
    pod = build_running_pod_with_d(4, 2, 2, node_item, None, None, None, pods)
    node_item = Node()
    node_item.metadata_name = "node 2"
    node_item.cpuCapacity = 10
    node_item.memCapacity = 10
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    pod = build_running_pod_with_d(5, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(7, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(8, 2, 2, node_item, None, None, s2, pods)
    node_item = Node()
    node_item.metadata_name = "node 3"
    node_item.cpuCapacity = 4
    node_item.memCapacity = 4
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    pod = build_running_pod_with_d(9, 2, 2, node_item, None, None, None, pods)
    pod = build_running_pod_with_d(6, 2, 2, node_item, None, None, None, pods)
    node_item = Node()
    node_item.metadata_name = "node 4"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)
    node_item = Node()
    node_item.metadata_name = "node 5"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)
    for node in nodes:
        for pod in pods:
            if not pod.nodeSelectorSet:
                pod.nodeSelectorList.add(node)
        for node2 in nodes:
            if node != node2:
                node2.different_than.add(node)
    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"
    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s1, s2])
    create_objects = []
    k._build_state()
    globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))

    class Antiaffinity_implement_k1(Antiaffinity_implement):
        def goal(self):
            assert services[0].antiaffinity_prefered_policy_met == True
    p = Antiaffinity_implement_k1(k.state_objects)
    Antiaffinity_implement_k1.__name__ = inspect.stack()[0].function
    assert_conditions = ["manually_initiate_killing_of_podt",
                         "Not_at_same_node"]
    not_assert_conditions = []
    print_objects(k.state_objects)
    test_case = StateSet()
    test_case.scheduler = scheduler
    test_case.globalVar = globalVar
    test_case.pods = pods
    test_case.nodes = nodes
    services = [s1, s2]
    test_case.services = services
    assert_brake = checks_assert_conditions_in_one_mode(
        k, p, assert_conditions, not_assert_conditions, "functional test", DEBUG_MODE)
def test_node_killer_pod_with_service():
    # value ranges: start, stop, step
    node_amount_range = range(2, 5, 2)
    pod_amount_range = range(16, 61, 1)
    per_node_capacity_range = range(20, 41, 10)
    search = True
    assert_brake = False
    csvfile = open("{0}_{1}.csv".format(inspect.stack()[1].function, sha[:7]), 'w')
    csvwriter = csv.writer(csvfile, delimiter=';')
    for node_capacity in per_node_capacity_range:
        for node_amount in node_amount_range:
            for pod_amount in pod_amount_range:
                if pod_amount > (node_amount * node_capacity):
                    continue
                # Initialize scheduler, globalVar
                start = time.time()
                k = KubernetesCluster()
                scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
                # initial node state
                i = 0
                j = 0
                nodes = []
                pods_running = []
                high = PriorityClass()
                high.priority = 10
                high.metadata_name = "high"
                # low = PriorityClass()
                # low.priority = 0
                # low.metadata_name = "low"
                s = Service()
                s.metadata_name = "test-service"
                s.amountOfActivePods = 0
                s.status = STATUS_SERV["Started"]
                s.isSearched = True
                isSearched = True
                pod_id = 0
                for i in range(node_amount):
                    node_item = Node("node" + str(i))
                    node_item.cpuCapacity = node_capacity
                    node_item.memCapacity = node_capacity
                    node_item.isNull = False
                    node_item.status = STATUS_NODE["Active"]
                    node_item.isSearched = isSearched
                    isSearched = False
                    nodes.append(node_item)
                node_counter = 0
                for j in range(pod_amount):
                    node_item = nodes[node_counter]
                    if node_item.currentFormalCpuConsumption == node_capacity:
                        break
                    pod_running = Pod()
                    pod_running.metadata_name = "pod_prio_0_{0}_{1}".format(i, j)
                    pod_running.cpuRequest = 1
                    pod_running.memRequest = 1
                    pod_running.atNode = node_item
                    pod_running.status = STATUS_POD["Running"]
                    pod_running.hasDeployment = False
                    pod_running.hasService = False
                    pod_running.hasDaemonset = False
                    pod_running.priorityClass = high
                    pod_running.hasService = True
                    pods_running.append(pod_running)
                    # node_item.podList.add(pod_running)
                    node_item.currentFormalCpuConsumption += 1
                    node_item.currentFormalMemConsumption += 1
                    node_item.amountOfActivePods += 1
                    s.podList.add(pod_running)
                    s.amountOfActivePods += 1
                    node_counter += 1
                    if node_counter == len(nodes):
                        node_counter = 0
                k.state_objects.extend(nodes)
                k.state_objects.extend(pods_running)
                # k.state_objects.extend([low])
                k.state_objects.append(high)
                k.state_objects.append(s)
                k._build_state()
                print("(node_capacity * (node_amount - 1))(",
                      (node_capacity * (node_amount - 1)), ")<(", pod_amount, ")pod_amount")
                if (node_capacity * (node_amount - 1)) < pod_amount:
                    task_type = "no-outage"
                else:
                    task_type = "NodeOutageFinished"
                print("check break node_amount {0} with capacity {1} pod amount {2}".format(
                    node_amount, node_capacity, pod_amount))
                print("-------------------")
                print_objects(k.state_objects)
                GenClass = type("{0}_{1}_{2}_{3}".format(
                    inspect.stack()[1].function, node_amount, pod_amount, sha[:7]),
                    (HypothesisysNode,), {})
                p = GenClass(k.state_objects)
                try:
                    p.run(timeout=1000,
                          sessionName=f"gen_test_{node_capacity}_{node_amount}_{pod_amount}_L{LIN_COUNT}")
                except Exception as e:
                    print("run break exception is \n", e)
                    assert False
                # print_plan(p)
                end = time.time()
                print("-------------------")
                print("timer :", int(end - start))
                if p.plan is not None:
                    csvwriter.writerow([node_amount, node_capacity, pod_amount,
                                        int(end - start), "ok"])
                else:
                    csvwriter.writerow([node_amount, node_capacity, pod_amount,
                                        int(end - start), "empty_plan"])
                csvfile.flush()
                print("-------------------")
def Add_node(self, node: Node, globalVar: GlobalVar):
    assert globalVar.add_node_enabled == True
    assert node.status == STATUS_NODE["New"]
    node.status = STATUS_NODE["Active"]
    globalVar.amountOfNodes += 1
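# A minimal sketch of exercising the Add_node action directly, following the
# direct-invocation pattern used by test_1_1pod_2nodes_Service_outage below.
# It assumes Add_node is available on the HypothesisysNode problem class and
# that its preconditions (add_node_enabled set, a node in the "New" state)
# are satisfied; the test name and setup are illustrative only.
def test_add_node_sketch():
    k = KubernetesCluster()
    globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    globalVar.add_node_enabled = True
    n = Node()
    n.metadata_name = "new-node"
    n.status = STATUS_NODE["New"]
    k.state_objects.append(n)
    k._build_state()
    p = HypothesisysNode(k.state_objects)
    p.Add_node(n, globalVar)  # preconditions hold, so the action applies
    assert n.status == STATUS_NODE["Active"]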
def test_1_1pod_2nodes_Service_outage():
    # Initialize scheduler, globalVar
    k = KubernetesCluster()
    globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []
    # Service to detect eviction
    s = Service()
    s.metadata_name = "test-service"
    s.amountOfActivePods = 0
    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    # create the Deployment that we're going to detect the failure of...
    pod_id = 1
    node_1 = Node("node 1")
    node_1.cpuCapacity = 4
    node_1.memCapacity = 4
    node_1.isNull = False
    node_1.status = STATUS_NODE["Active"]
    node_2 = Node("node 2")
    node_2.cpuCapacity = 4
    node_2.memCapacity = 4
    node_2.isNull = False
    node_2.status = STATUS_NODE["Active"]
    pod_running_1 = build_running_pod_with_d(pod_id, 2, 2, node_1, None, None)
    pod_running_1.hasService = True
    node_1.amountOfActivePods += 1
    s.podList.add(pod_running_1)
    s.amountOfActivePods += 1
    s.status = STATUS_SERV["Started"]
    # k.state_objects += [node_1, node_2, pod_running_1, s]
    k.state_objects += [
        node_1, node_2, pod_running_1, s,
        STATUS_POD["Pending"], STATUS_POD["Killing"], STATUS_POD["Running"]
    ]
    create_objects = []
    k._build_state()

    class HypothesisysNode_k1(HypothesisysNode):
        pass
    p = HypothesisysNode_k1(k.state_objects)
    HypothesisysNode_k1.__name__ = inspect.stack()[0].function
    not_assert_conditions = []
    print_objects(k.state_objects)
    p.Initiate_node_outage(node_1, globalVar)
    # p.Initiate_killing_of_Pod_because_of_node_outage(node_1, pod_running_1, globalVar)
    # p.KillPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNull(pod_running_1, node_1, s, scheduler)
    # p.NodeOutageFinished(node_1, globalVar)
    # p.Mark_node_outage_event(node_1, globalVar)
    # p.SelectNode(pod_running_1, node_2, globalVar)
    # p.StartPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNull(pod_running_1, node_2, scheduler, s, globalVar)
    # p.SchedulerCleaned(scheduler, globalVar)
    print(" >> changed state << ")
    print_objects(k.state_objects)
    p.run()
    print(" >> after << ")
    print_objects(k.state_objects)
    print_plan(p)