Example #1
def test_stub_completion():
    # create elementary cluster
    # render cluster
    # update with rendered cluster
    # add stub policy
    # run, get solution (empty?)
    k = KubernetesCluster()

    nodes = []
    pods = []
    node_item = Node()
    node_item.metadata_name = "node 1"
    node_item.cpuCapacity = 25
    node_item.memCapacity = 25
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    # Service to detect eviction
    s1 = Service()
    s1.metadata_name = "test-service"
    s1.amountOfActivePods = 0
    s1.isSearched = True
    d = Deployment()
    d.spec_replicas = 6
    d.NumberOfPodsOnSameNodeForDeployment = 4
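    # build_running_pod_with_d appears to take (id, cpuRequest, memRequest,
    # node, deployment, daemonset, service, pod_list) and append the new
    # Running pod to pod_list; the daemonset slot is unused (None) here.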
    pod = build_running_pod_with_d(1, 2, 2, node_item, d, None, s1, pods)
    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)
    update(''.join(yamlState))
    cluster = next(
        filter(lambda x: isinstance(x, GlobalVar), kalc_state_objects))
    cluster.policy.stub = True
    run()
Example #2
def prepare_affinity_test_8_pods_on_3_nodes_with_6_antiaffinity_pods():
    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    globalVar.block_policy_calculated = False
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []
    services = []
    deployments = []
    
    # Service to detect eviction
    s1 = Service()
    s1.metadata_name = "test-service"
    s1.amountOfActivePods = 0
    s1.isSearched = True
    services.append(s1)

    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    services.append(s2)
    
    # create the Deployment whose failure we're going to detect...
    d = Deployment()
    d.spec_replicas = 6    
    d.NumberOfPodsOnSameNodeForDeployment = 4
    deployments.append(d)
    d2 = Deployment()
    d2.spec_replicas = 2    
    d2.NumberOfPodsOnSameNodeForDeployment = 2
    deployments.append(d2)
    node_item = Node()
    node_item.metadata_name = "node 1"
    node_item.cpuCapacity = 25
    node_item.memCapacity = 25
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(0, 2, 2, node_item, d, None, s1, pods)
    pod = build_running_pod_with_d(1, 2, 2, node_item, d, None, s1, pods)
    pod = build_running_pod_with_d(2, 2, 2, node_item, d, None, None, pods)
    pod = build_running_pod_with_d(3, 2, 2, node_item, None, None, None, pods)
    pod = build_running_pod_with_d(4, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(5, 2, 2, node_item, None, None, s1, pods)

    node_item = Node()
    node_item.metadata_name = "node 2"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(12, 2, 2, node_item, d2, None, s1, pods)
    pod = build_running_pod_with_d(13, 2, 2, node_item, d2, None, s1, pods)

    node_item = Node()
    node_item.metadata_name = "node 3"
    node_item.cpuCapacity = 4
    node_item.memCapacity = 4
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(16, 2, 2, node_item, None, None, None, pods)

    node_item = Node()
    node_item.metadata_name = "node 4"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    node_item = Node()
    node_item.metadata_name = "node 5"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    node_item = Node()
    node_item.metadata_name = "node 6"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

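    # Wire up the topology: count the nodes, allow every pod without an
    # explicit nodeSelector to land on any node, and mark each pair of
    # nodes as distinct for the placement constraints.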
    for node in nodes:
        globalVar.amountOfNodes += 1
        for pod in pods:
            if not pod.nodeSelectorSet: pod.nodeSelectorList.add(node)
        for node2 in nodes:
            if node != node2:
                node2.different_than.add(node)

#     pods[0].antiaffinity_set = True
#     pods[0].podsMatchedByAntiaffinity.add(pods[1])
#     pods[0].podsMatchedByAntiaffinity.add(pods[2])
#     pods[0].podsMatchedByAntiaffinity.add(pods[12])
#     pods[0].podsMatchedByAntiaffinity_length = 3
#     pods[0].target_number_of_antiaffinity_pods = 3

#     pods[1].antiaffinity_set = True
#     pods[1].podsMatchedByAntiaffinity.add(pods[0])
#     pods[1].podsMatchedByAntiaffinity.add(pods[2])
#     pods[1].podsMatchedByAntiaffinity.add(pods[12])
#     pods[1].podsMatchedByAntiaffinity_length = 3
#     pods[1].target_number_of_antiaffinity_pods = 3

#     pods[2].antiaffinity_set = True
#     pods[2].podsMatchedByAntiaffinity.add(pods[1])
#     pods[2].podsMatchedByAntiaffinity.add(pods[0])
#     pods[2].podsMatchedByAntiaffinity.add(pods[12])
#     pods[2].podsMatchedByAntiaffinity_length = 3
#     pods[2].target_number_of_antiaffinity_pods = 3

#     pods[12].antiaffinity_set = True
#     pods[12].podsMatchedByAntiaffinity.add(pods[1])
#     pods[12].podsMatchedByAntiaffinity.add(pods[2])
#     pods[12].podsMatchedByAntiaffinity.add(pods[0])
#     pods[12].podsMatchedByAntiaffinity_length = 3
#     pods[12].target_number_of_antiaffinity_pods = 3
    
#     nodes[2].isSearched = True
    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"

    
    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s1, s2])
    k.state_objects.extend(deployments)
    create_objects = []
    k._build_state()
    globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar.target_DeploymentsWithAntiaffinity_length = 1
    globalVar.maxNumberOfPodsOnSameNodeForDeployment = 10
    globalVar.target_amountOfPodsWithAntiaffinity = 3
    
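    # Rename the check subclass after the enclosing function so that solver
    # output can be traced back to this test scenario.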
    class Antiaffinity_check_k1(Antiaffinity_check_with_limited_number_of_pods_with_add_node):
        pass

    p = Antiaffinity_check_k1(k.state_objects)
    Antiaffinity_check_k1.__name__ = inspect.stack()[0].function
    test_case = StateSet()
    test_case.scheduler = scheduler
    test_case.globalVar = globalVar
    test_case.pods = pods
    test_case.nodes = nodes
    services = [s1, s2]
    test_case.services = services
    test_case.deployments = deployments
#     print_objects(k.state_objects)
    return k, p, test_case
Example #3
def test_stub_completion():

    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar = next(
        filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    globalVar.block_policy_calculated = False
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []
    services = []

    # Service to detect eviction
    s1 = Service()
    s1.metadata_name = "test-service"
    s1.amountOfActivePods = 0
    s1.isSearched = True
    services.append(s1)

    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    services.append(s2)

    # create the Deployment whose failure we're going to detect...
    d = Deployment()
    d.spec_replicas = 6
    d.NumberOfPodsOnSameNodeForDeployment = 4
    d2 = Deployment()
    d2.spec_replicas = 2
    d2.NumberOfPodsOnSameNodeForDeployment = 1
    node_item = Node()
    node_item.metadata_name = "node 1"
    node_item.cpuCapacity = 25
    node_item.memCapacity = 25
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(1, 2, 2, node_item, d, None, s1, pods)
    pod = build_running_pod_with_d(2, 2, 2, node_item, d, None, s1, pods)
    pod = build_running_pod_with_d(3, 2, 2, node_item, d, None, None, pods)
    pod = build_running_pod_with_d(4, 2, 2, node_item, d2, None, None, pods)
    pod = build_running_pod_with_d(5, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(6, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(7, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(8, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(9, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(10, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(11, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(12, 2, 2, node_item, None, None, s2, pods)

    node_item = Node()
    node_item.metadata_name = "node 2"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(13, 2, 2, node_item, d, None, s1, pods)
    pod = build_running_pod_with_d(14, 2, 2, node_item, d2, None, s1, pods)
    pod = build_running_pod_with_d(15, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(16, 2, 2, node_item, None, None, s2, pods)

    node_item = Node()
    node_item.metadata_name = "node 3"
    node_item.cpuCapacity = 4
    node_item.memCapacity = 4
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(17, 2, 2, node_item, d, None, None, pods)

    node_item = Node()
    node_item.metadata_name = "node 4"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)

    node_item = Node()
    node_item.metadata_name = "node 5"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)

    node_item = Node()
    node_item.metadata_name = "node 6"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)

    for node in nodes:
        globalVar.amountOfNodes += 1
        for pod in pods:
            if not pod.nodeSelectorSet: pod.nodeSelectorList.add(node)
        for node2 in nodes:
            if node != node2:
                node2.different_than.add(node)

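    # Pods 0, 1, 2 and 12 form one mutual anti-affinity group: each lists the
    # other three as matches and targets all three landing on different nodes.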
    pods[0].antiaffinity_set = True
    pods[0].podsMatchedByAntiaffinity.add(pods[1])
    pods[0].podsMatchedByAntiaffinity.add(pods[2])
    pods[0].podsMatchedByAntiaffinity.add(pods[12])
    pods[0].podsMatchedByAntiaffinity_length = 3
    pods[0].target_number_of_antiaffinity_pods = 3

    pods[1].antiaffinity_set = True
    pods[1].podsMatchedByAntiaffinity.add(pods[0])
    pods[1].podsMatchedByAntiaffinity.add(pods[2])
    pods[1].podsMatchedByAntiaffinity.add(pods[12])
    pods[1].podsMatchedByAntiaffinity_length = 3
    pods[1].target_number_of_antiaffinity_pods = 3

    pods[2].antiaffinity_set = True
    pods[2].podsMatchedByAntiaffinity.add(pods[1])
    pods[2].podsMatchedByAntiaffinity.add(pods[0])
    pods[2].podsMatchedByAntiaffinity.add(pods[12])
    pods[2].podsMatchedByAntiaffinity_length = 3
    pods[2].target_number_of_antiaffinity_pods = 3

    pods[12].antiaffinity_set = True
    pods[12].podsMatchedByAntiaffinity.add(pods[1])
    pods[12].podsMatchedByAntiaffinity.add(pods[2])
    pods[12].podsMatchedByAntiaffinity.add(pods[0])
    pods[12].podsMatchedByAntiaffinity_length = 3
    pods[12].target_number_of_antiaffinity_pods = 3

    nodes[2].isSearched = True
    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"

    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s1, s2])
    create_objects = []
    # k._build_state()

    yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)
    print("LEN", len(yamlState))
    print(''.join(yamlState))
    update(''.join(yamlState))
Example #4
def test_7_3nodes_and_needed3nodes_suggests_movement_of_pods():
    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []
    services = []

    # Service to detect eviction
    s1 = Service()
    s1.metadata_name = "test-service"
    s1.amountOfActivePods = 0
    s1.antiaffinity = True
    s1.targetAmountOfPodsOnDifferentNodes = 3
    s1.isSearched = True
    services.append(s1)

    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    services.append(s2)

    # create the Deployment whose failure we're going to detect...
    d = Deployment()
    d.spec_replicas = 2
    node_item = Node()
    node_item.metadata_name = "node 1"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(1, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(2, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(3, 2, 2, node_item, None, None, None, pods)
    pod = build_running_pod_with_d(4, 2, 2, node_item, None, None, None, pods)

    node_item = Node()
    node_item.metadata_name = "node 2"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(5, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(6, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(7, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(8, 2, 2, node_item, None, None, s2, pods)

    node_item = Node()
    node_item.metadata_name = "node 3"
    node_item.cpuCapacity = 4
    node_item.memCapacity = 4
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(9, 2, 2, node_item, None, None, None, pods)

    node_item = Node()
    node_item.metadata_name = "node 4"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    node_item = Node()
    node_item.metadata_name = "node 5"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    node_item = Node()
    node_item.metadata_name = "node 6"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)

    for node in nodes:
        for pod in pods:
            if not pod.nodeSelectorSet: pod.nodeSelectorList.add(node)
        for node2 in nodes:
            if node != node2:
                node2.different_than.add(node)

    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"

    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s1, s2])
    create_objects = []
    k._build_state()
    globalVar = next(
        filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))

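    # Goal: the solver should reach a state in which the searched service's
    # preferred anti-affinity policy is satisfied.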
    class Antiaffinity_prefered_with_add_node_k1(
            Antiaffinity_prefered_with_add_node):
        def goal(self):
            assert services[0].antiaffinity_prefered_policy_met == True

    p = Antiaffinity_prefered_with_add_node_k1(k.state_objects)
    Antiaffinity_prefered_with_add_node_k1.__name__ = inspect.stack()[0].function
    assert_conditions = ["manually_initiate_killing_of_podt",
                         "Not_at_same_node",
                         "Add_node"]
    not_assert_conditions = []
    print_objects(k.state_objects)
    test_case = StateSet()
    test_case.scheduler = scheduler
    test_case.globalVar = globalVar
    test_case.pods = pods
    test_case.nodes = nodes
    services = [s1, s2]
    test_case.services = services
    assert_brake = checks_assert_conditions_in_one_mode(
        k, p, assert_conditions, not_assert_conditions, "functional test",
        DEBUG_MODE)
Example #5
def test_node_killer_pod_with_service():
    #   value                       start  stop   step
    node_amount_range =       range(2,     5,     2)
    pod_amount_range =        range(16,    61,    1)
    per_node_capacity_range = range(20,    41,    10)

    search = True

    assert_brake = False

    csvfile = open("{0}_{1}.csv".format(inspect.stack()[1].function, sha[:7]), 'w')
    csvwriter = csv.writer(csvfile, delimiter=';')

    for node_capacity in per_node_capacity_range:
        for node_amount in node_amount_range:
            for pod_amount in pod_amount_range:
                if pod_amount > (node_amount * node_capacity): continue
                # Initialize scheduler, globalvar
                start = time.time()
                k = KubernetesCluster()
                scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
                # initial node state
                i = 0
                j = 0
                nodes = []
                pods_running = []
                high = PriorityClass()
                high.priority = 10
                high.metadata_name = "high"
                # low = PriorityClass()
                # low.priority = 0
                # low.metadata_name = "low"
                s = Service()
                s.metadata_name = "test-service"
                s.amountOfActivePods = 0
                s.status = STATUS_SERV["Started"]
                s.isSearched = True
                isSearched = True
                pod_id=0
                for i in range(node_amount):
                    node_item = Node("node"+str(i))
                    node_item.cpuCapacity = node_capacity
                    node_item.memCapacity = node_capacity
                    node_item.isNull = False
                    node_item.status = STATUS_NODE["Active"]
                    
                    node_item.isSearched = isSearched
                    isSearched = False
                    nodes.append(node_item)
                node_counter = 0
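                # Distribute the running pods round-robin over the nodes,
                # updating each node's formal CPU/memory bookkeeping and the
                # service's active-pod count as we go.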
                for j in range(pod_amount):
                    node_item = nodes[node_counter]
                    if node_item.currentFormalCpuConsumption == node_capacity:
                        break
                    pod_running = Pod()
                    pod_running.metadata_name = "pod_prio_0_{0}_{1}".format(i,j)
                    pod_running.cpuRequest = 1
                    pod_running.memRequest = 1
                    pod_running.atNode = node_item
                    pod_running.status = STATUS_POD["Running"]
                    pod_running.hasDeployment = False
                    pod_running.hasService = True
                    pod_running.hasDaemonset = False
                    pod_running.priorityClass = high
                    pods_running.append(pod_running)
                    # node_item.podList.add(pod_running)
                    node_item.currentFormalCpuConsumption += 1
                    node_item.currentFormalMemConsumption += 1
                    node_item.amountOfActivePods += 1
                    s.podList.add(pod_running)
                    s.amountOfActivePods += 1
                    node_counter += 1
                    if node_counter == len(nodes):
                        node_counter=0

                k.state_objects.extend(nodes)
                k.state_objects.extend(pods_running)
                # k.state_objects.extend([low])
                k.state_objects.append(high)
                k.state_objects.append(s)
                k._build_state()
                
                print("(node_capacity * (node_amount - 1))(",(node_capacity * (node_amount - 1)), ")<(", pod_amount,")pod_amount")

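                # Classify the scenario: if the surviving nodes cannot hold
                # every pod, a node outage cannot complete ("no-outage");
                # otherwise the outage should finish.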
                if (node_capacity * (node_amount - 1)) < pod_amount:
                    task_type = "no-outage"
                else:
                    task_type = "NodeOutageFinished"

                print("check break node_amount {0} with capacity {1} pod amount {2}".format( node_amount, node_capacity,pod_amount))
                print("-------------------")
                print_objects(k.state_objects)


                GenClass = type("{0}_{1}_{2}_{3}".format(inspect.stack()[1].function, node_amount, pod_amount, sha[:7]),(HypothesisysNode,),{})

                p = GenClass(k.state_objects)

                try:
                    p.run(timeout=1000, sessionName=f"gen_test_{node_capacity}_{node_amount}_{pod_amount}_L{LIN_COUNT}")
                except Exception as e:
                    print("run break exception is \n",e)
                    assert False
                # print_plan(p)
                end = time.time()
                print("-------------------")
                print("timer :", int(end - start))
                if p.plan is not None:
                    csvwriter.writerow([node_amount, node_capacity, pod_amount, int(end - start), "ok"])
                else:
                    csvwriter.writerow([node_amount, node_capacity, pod_amount, int(end - start), "empty_plan"])
                csvfile.flush()
                print("-------------------")