Example #1
def test_9_121pods():
    nodes_amount = 7
    nodes_capacity = 40
    pods_running_on_each_node_with_req_2_mem_2_cpu_s1 = 15
    pods_running_on_each_node_with_req_0_mem_0_cpu_s1 = 0
    pods_running_on_each_node_with_req_2_mem_2_cpu_no_serv = 2
    pods_running_on_node0_with_req_2_mem_2_cpu_s1 = 1

    k, p, test_case = prepare_many_pods_without_yaml(
        nodes_amount,
        nodes_capacity,
        pods_running_on_each_node_with_req_2_mem_2_cpu_s1,
        pods_running_on_each_node_with_req_0_mem_0_cpu_s1,
        pods_running_on_each_node_with_req_2_mem_2_cpu_no_serv,
        pods_running_on_node0_with_req_2_mem_2_cpu_s1)
    assert_conditions = ["Service_outage_hypothesis",
                         "Remove_pod_from_the_queue"]
    not_assert_conditions = []

    for node_item in test_case.nodes:
        for service_item in test_case.services:
            print(" -------------->>>> test for " + str(node_item) + "  " + str(service_item) + "  <<<<-------------")
            node_item.isSearched = True
            service_item.isSearched = True
            assert_brake = checks_assert_conditions_in_one_mode(k, p, assert_conditions, not_assert_conditions, "functional test", DEBUG_MODE)
            print_objects(k.state_objects)
            node_item.isSearched = False
            service_item.isSearched = False
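The sweep above focuses the solver on one (node, service) pair per run by toggling isSearched flags. A minimal sketch of that pattern extracted into a reusable helper, assuming the same checks_assert_conditions_in_one_mode API as the tests above; the name sweep_search_pairs is hypothetical:

def sweep_search_pairs(k, p, test_case, assert_conditions,
                       not_assert_conditions, debug_mode):
    # Mark exactly one (node, service) pair as the search target, run the
    # functional check, then unmark the pair so the next run starts clean.
    for node_item in test_case.nodes:
        for service_item in test_case.services:
            node_item.isSearched = True
            service_item.isSearched = True
            try:
                checks_assert_conditions_in_one_mode(
                    k, p, assert_conditions, not_assert_conditions,
                    "functional test", debug_mode)
                print_objects(k.state_objects)
            finally:
                # Reset even if the check raises.
                node_item.isSearched = False
                service_item.isSearched = False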
Example #2
def test_2_3pods_NO_Service_outage():
    k, p, test_case = prepare_many_pods_without_yaml(2,6,1,0,0,1)
    assert_conditions = ["StartPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNull",\
                "SchedulerCleaned"]
    not_assert_conditions = ["Service_outage_hypothesis",\
                        "Remove_pod_from_the_queue"]
    assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
    print_objects(k.state_objects)
Example #3
def test_2_3pods_NO_Service_outage():
    k, p, test_case = prepare_many_pods_without_yaml(2, 6, 1, 0, 0, 1)
    assert_conditions = ["Remove_pod_from_the_cluster_IF_service_isnotnull_IF_is_last_for_service",\
                "SchedulerCleaned"]
    not_assert_conditions = ["Service_outage_hypothesis",\
                        "Remove_pod_from_the_queue"]
    assert_brake = checks_assert_conditions_in_one_mode(
        k, p, assert_conditions, not_assert_conditions, "functional test",
        DEBUG_MODE)
    print_objects(k.state_objects)
Example #4
def test_7_11pods():
    nodes_amount = 2
    nodes_capacity = 17
    pods_running_on_2_nodes_with_req_2_mem_2_cpu_s1 = 5
    pods_running_on_2_nodes_with_req_0_mem_0_cpu_s1 = 0
    pods_running_on_2_nodes_with_req_2_mem_2_cpu_no_serv = 2
    pods_running_on_node0_with_req_2_mem_2_cpu_s1 = 1

    k, p, test_case = prepare_many_pods_without_yaml(
        nodes_amount,
        nodes_capacity,
        pods_running_on_2_nodes_with_req_2_mem_2_cpu_s1,
        pods_running_on_2_nodes_with_req_0_mem_0_cpu_s1,
        pods_running_on_2_nodes_with_req_2_mem_2_cpu_no_serv,
        pods_running_on_node0_with_req_2_mem_2_cpu_s1)
    assert_conditions = ["Service_outage_hypothesis",
                         "Remove_pod_from_the_queue"]
    not_assert_conditions = []

    # ----  model test start ---- 
    # p.Initiate_node_outage_searched(test_case.nodes[0], test_case.globalVar)
    # p.Initiate_killing_of_Pod_because_of_node_outage(test_case.nodes[0], test_case.pods[0], test_case.globalVar)
    # p.Initiate_killing_of_Pod_because_of_node_outage(test_case.nodes[0], test_case.pods[1], test_case.globalVar)
    # p.Initiate_killing_of_Pod_because_of_node_outage(test_case.nodes[0], test_case.pods[2], test_case.globalVar)
    # p.Initiate_killing_of_Pod_because_of_node_outage(test_case.nodes[0], test_case.pods[3], test_case.globalVar)
    # p.Initiate_killing_of_Pod_because_of_node_outage(test_case.nodes[0], test_case.pods[4], test_case.globalVar)
    # p.Initiate_killing_of_Pod_because_of_node_outage(test_case.nodes[0], test_case.pods[5], test_case.globalVar)
    # p.Initiate_killing_of_Pod_because_of_node_outage(test_case.nodes[0], test_case.pods[6], test_case.globalVar)
    # p.Initiate_killing_of_Pod_because_of_node_outage(test_case.nodes[0], test_case.pods[14], test_case.globalVar)
    # p.KillPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNull(test_case.pods[0], test_case.nodes[0], test_case.services[0], test_case.scheduler)
    # p.KillPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNull(test_case.pods[1], test_case.nodes[0], test_case.services[0], test_case.scheduler)
    # p.KillPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNull(test_case.pods[2], test_case.nodes[0], test_case.services[0], test_case.scheduler)
    # p.KillPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNull(test_case.pods[3], test_case.nodes[0], test_case.services[0], test_case.scheduler)
    # p.KillPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNull(test_case.pods[4], test_case.nodes[0], test_case.services[0], test_case.scheduler)
    # p.KillPod_IF_Deployment_isNUll_Service_isNull_Daemonset_isNull(test_case.pods[5], test_case.nodes[0], test_case.scheduler)
    # p.KillPod_IF_Deployment_isNUll_Service_isNull_Daemonset_isNull(test_case.pods[6], test_case.nodes[0], test_case.scheduler)
    # p.KillPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNull(test_case.pods[14], test_case.nodes[0], test_case.services[0], test_case.scheduler)
    # p.NodeOutageFinished(test_case.nodes[0], test_case.globalVar)
    # p.SelectNode(test_case.pods[0], test_case.nodes[1], test_case.globalVar)
    # p.SelectNode(test_case.pods[5], test_case.nodes[1], test_case.globalVar)
    # p.StartPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNull(test_case.nodes[1], test_case.pods[0], test_case.scheduler, test_case.services[0], test_case.globalVar)
    # p.StartPod_IF_Deployment_isNUll_Service_isNull_Daemonset_isNull(test_case.nodes[1], test_case.pods[5], test_case.scheduler, test_case.globalVar)


    # ---- model test end ----- 


    for node_item in test_case.nodes:
        for service_item in test_case.services:
            print(" -------------->>>> test for " + str(node_item) + "  " + str(service_item) + "  <<<<-------------")
            node_item.isSearched = True
            service_item.isSearched = True
            assert_brake = checks_assert_conditions_in_one_mode(k, p, assert_conditions, not_assert_conditions, "functional test", DEBUG_MODE)
            print_objects(k.state_objects)
            node_item.isSearched = False
            service_item.isSearched = False
Example #5
def test_anyservice_interrupted_fromfiles():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    mark_excluded_service(k.state_objects)
    p = AnyServiceInterrupted(k.state_objects)
    print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_anyservice_interrupted_fromfiles")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
Example #6
def test_anydeployment_interrupted_fromfiles():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DEPLOYMENT).read())
    k._build_state()
    mark_excluded_service(k.state_objects)
    print("------Objects before solver processing------")
    print_objects(k.state_objects)
    p = NodeInterupted(k.state_objects)
    p.run(timeout=6600, sessionName="test_anydeployment_interrupted_fromfiles")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print("------Objects after solver processing------")
    print(Scenario(p.plan).asyaml())
    print_objects(k.state_objects)
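Examples #5 and #6 repeat the same load, _build_state, solve, report skeleton. A minimal sketch of that skeleton as a single driver, assuming every property class takes state_objects and exposes run(timeout=..., sessionName=...) plus a plan attribute as above; solve_and_report is a hypothetical name:

def solve_and_report(property_cls, cluster_dir, resource_file, session_name):
    # Load a cluster dump plus one changed resource, build the model state,
    # and fail loudly when the solver returns no plan.
    k = KubernetesCluster()
    k.load_dir(cluster_dir)
    k.create_resource(open(resource_file).read())
    k._build_state()
    mark_excluded_service(k.state_objects)
    p = property_cls(k.state_objects)
    p.run(timeout=6600, sessionName=session_name)
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
    return k, p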
Example #7
def checks_assert_conditions(k, k2, p, p2, assert_conditions, not_assert_conditions, debug_mode):
    test_assert_brake = False
    print_yamls = False
    if debug_mode > 0:
        print("--print_objects--")    
        print("--k1--")    
        print_objects(k.state_objects)
        print("--k2--")
        print_objects(k2.state_objects)
        if print_yamls:
            print("--print_yamls--")    
            print("--k1--")    
            print_objects_from_yaml(k)
            print("--k2--")    
            print_objects_from_yaml(k2)
            print("---yaml diff ---")
            compare_yaml_files(k,k2)
        
        print("--functional test--")    
    
    test_mode = "functional test"
    try:
        test_assert_brake = checks_assert_conditions_in_one_mode(k2, p2, assert_conditions, not_assert_conditions, test_mode, debug_mode)
    except Exception as e:
        print(e)
        test_assert_brake = True

    if debug_mode == 1:
        test_mode = "loading test"

        print("plan from yaml :")
        print_plan(p2)
    
    if test_assert_brake and debug_mode == 2:
        test_mode = "functional test"
        try:
            test_assert_brake = checks_assert_conditions_in_one_mode(k, p, assert_conditions, not_assert_conditions, test_mode, debug_mode)
        except Exception as e:
            print(e)
            print("plan orig :")
            print_plan(p)
            raise Exception("###  Error loading data   ####")
    assert test_assert_brake
Example #8
def run_dir_wo_cli(DUMP_local, CHANGE_local):
    k = KubernetesCluster()
    if DUMP_local is not None:
        for dump_item in DUMP_local:
            k.load_dir(dump_item)
    if CHANGE_local is not None:
        for change_item in CHANGE_local:
            k.create_resource(open(change_item).read())
    k._build_state()
    p = OptimisticRun(k.state_objects)
    print("#### run_wo_cli:")
    print("#### print_objects before run: #####")
    print_objects(k.state_objects)

    p.run(timeout=999000, sessionName="test_OptimisticRun")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)

    print("#### print_objects after run: ######")
    print_objects(k.state_objects)
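A usage sketch for run_dir_wo_cli; both paths are hypothetical placeholders:

# Both arguments are optional lists; pass None to skip that stage.
run_dir_wo_cli(DUMP_local=["./tests/cluster-dump"],
               CHANGE_local=["./tests/new-deployment.yaml"])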
Example #9
def test_OptimisticRun():
    k = KubernetesCluster()
    k.load(open(NODE1).read())
    k.load(open(NODE2).read())
    k.load(open(PODS).read())
    k.load(open(PODS_BIG).read())
    
    # k.load(open(PODS_PENDING).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())
    k.create_resource(open(DEPLOYMENT_NEW).read())
    k._build_state()
    p = OptimisticRun(k.state_objects) # self.scheduler.status == STATUS_SCHED["Clean"]
    # print_objects(k.state_objects)
    print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_OptimisticRun")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
    print_objects(k.state_objects)
Example #10
def run_wo_cli_step1(DUMP_local, CHANGE_local):
    k = KubernetesCluster()
    if DUMP_local is not None:
        for dump_item in DUMP_local:
            k.load(open(dump_item).read())
    if CHANGE_local is not None:
        for change_item in CHANGE_local:
            k.create_resource(open(change_item).read())
    k._build_state()
    pod_running = next(filter(lambda x: isinstance(x, Pod) and x.status == STATUS_POD["Running"], k.state_objects))
    class NewGoal(OptimisticRun):
        goal = lambda self: pod_running.status == STATUS_POD["Killing"]
    p = NewGoal(k.state_objects)
    print("#### run_wo_cli:")
    print("#### print_objects before run: #### ")
    print_objects(k.state_objects)

    p.run(timeout=999000, sessionName="test_OptimisticRun")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)

    print("#### print_objects after run: ####")
    print_objects(k.state_objects)
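The lambda-valued goal above captures pod_running from the enclosing function. Inside run_wo_cli_step1 the subclass could equally be written with a method override, matching the def goal(self) style of Example #11; a sketch under that assumption:

    class NewGoal(OptimisticRun):
        def goal(self):
            # Same condition as the lambda, written as a method body.
            return pod_running.status == STATUS_POD["Killing"]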
Example #11
def test_1():
    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []

    # Service to detect eviction
    s1 = Service()
    s1.metadata_name = "test-service"
    s1.amountOfActivePods = 0
    s1.antiaffinity = True
    services = []
    services.append(s1)
    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    # create Deployment that we're going to detect failure of...
    d = Deployment()
    d.spec_replicas = 2
    node_item = Node()
    node_item.metadata_name = "node 1"
    node_item.cpuCapacity = 10
    node_item.memCapacity = 10
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(1, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(2, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(3, 2, 2, node_item, None, None, None, pods)
    pod = build_running_pod_with_d(4, 2, 2, node_item, None, None, None, pods)

    node_item = Node()
    node_item.metadata_name = "node 2"
    node_item.cpuCapacity = 10
    node_item.memCapacity = 10
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(5, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(7, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(8, 2, 2, node_item, None, None, s2, pods)

    node_item = Node()
    node_item.metadata_name = "node 3"
    node_item.cpuCapacity = 4
    node_item.memCapacity = 4
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(9, 2, 2, node_item, None, None, None, pods)
    pod = build_running_pod_with_d(6, 2, 2, node_item, None, None, None, pods)

    node_item = Node()
    node_item.metadata_name = "node 4"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)

    node_item = Node()
    node_item.metadata_name = "node 5"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)
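    # Cross-link the topology: any pod without an explicit nodeSelector may
    # schedule on every node, and each node pair is marked distinct so the
    # antiaffinity constraints can compare placements.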
    for node in nodes:
        for pod in pods:
            if not pod.nodeSelectorSet: pod.nodeSelectorList.add(node)
        for node2 in nodes:
            if node != node2:
                node2.different_than.add(node)
    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"

    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s1, s2])
    create_objects = []
    k._build_state()
    globalVar = next(
        filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))

    class Antiaffinity_implement_k1(Antiaffinity_implement):
        def goal(self):
            assert services[0].antiaffinity_prefered_policy_met == True

    p = Antiaffinity_implement_k1(k.state_objects)
    Antiaffinity_implement_k1.__name__ = inspect.stack()[0].function
    assert_conditions = ["manually_initiate_killing_of_podt",\
                        "Not_at_same_node"]
    not_assert_conditions = []
    print_objects(k.state_objects)
    test_case = StateSet()
    test_case.scheduler = scheduler
    test_case.globalVar = globalVar
    test_case.pods = pods
    test_case.nodes = nodes
    services = [s1, s2]
    test_case.services = services
    assert_brake = checks_assert_conditions_in_one_mode(
        k, p, assert_conditions, not_assert_conditions, "functional test",
        DEBUG_MODE)
Example #12
def test_node_killer_pod_with_service():
    #                             start  stop  step
    node_amount_range       = range(2,   5,    2)
    pod_amount_range        = range(16,  61,   1)
    per_node_capacity_range = range(20,  41,   10)

    search = True

    assert_brake = False

    csvfile = open("{0}_{1}.csv".format(inspect.stack()[1].function, sha[:7]), 'w')
    csvwriter = csv.writer(csvfile, delimiter=';')

    for node_capacity in per_node_capacity_range:
        for node_amount in node_amount_range:
            for pod_amount in pod_amount_range:
                if pod_amount > (node_amount * node_capacity) : continue
                # Initialize scheduler, globalvar
                start = time.time()
                k = KubernetesCluster()
                scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
                # initial node state
                i = 0
                j = 0
                nodes = []
                pods_running = []
                high = PriorityClass()
                high.priority = 10
                high.metadata_name = "high"
                # low = PriorityClass()
                # low.priority = 0
                # low.metadata_name = "low"
                s = Service()
                s.metadata_name = "test-service"
                s.amountOfActivePods = 0
                s.status = STATUS_SERV["Started"]
                s.isSearched = True
                isSearched = True
                pod_id = 0
                for i in range(node_amount):
                    node_item = Node("node"+str(i))
                    node_item.cpuCapacity = node_capacity
                    node_item.memCapacity = node_capacity
                    node_item.isNull = False
                    node_item.status = STATUS_NODE["Active"]
                    
                    node_item.isSearched = isSearched
                    isSearched = False
                    nodes.append(node_item)
                node_counter = 0
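                # Place pods round-robin across the nodes, bumping each
                # node's formal CPU/mem consumption and active-pod count;
                # stop early if the next node is already at CPU capacity.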
                for j in range(pod_amount):
                    node_item = nodes[node_counter]
                    if node_item.currentFormalCpuConsumption == node_capacity:
                        break
                    pod_running = Pod()
                    pod_running.metadata_name = "pod_prio_0_{0}_{1}".format(i,j)
                    pod_running.cpuRequest = 1
                    pod_running.memRequest = 1
                    pod_running.atNode = node_item
                    pod_running.status = STATUS_POD["Running"]
                    pod_running.hasDeployment = False
                    pod_running.hasService = False
                    pod_running.hasDaemonset = False
                    pod_running.priorityClass = high
                    pod_running.hasService = True
                    pods_running.append(pod_running)
                    # node_item.podList.add(pod_running)
                    node_item.currentFormalCpuConsumption += 1
                    node_item.currentFormalMemConsumption += 1
                    node_item.amountOfActivePods += 1
                    s.podList.add(pod_running)
                    s.amountOfActivePods += 1
                    node_counter += 1
                    if node_counter == len(nodes):
                        node_counter = 0

                k.state_objects.extend(nodes)
                k.state_objects.extend(pods_running)
                # k.state_objects.extend([low])
                k.state_objects.append(high)
                k.state_objects.append(s)
                k._build_state()
                
                print("(node_capacity * (node_amount - 1))(",(node_capacity * (node_amount - 1)), ")<(", pod_amount,")pod_amount")

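                # With one node lost, the survivors offer
                # node_capacity * (node_amount - 1) units of capacity; if
                # that is less than pod_amount, the outage cannot finish
                # with every pod rescheduled.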
                if (node_capacity * (node_amount - 1)) < pod_amount:
                    task_type = "no-outage"
                else:
                    task_type = "NodeOutageFinished"

    
                print("check break node_amount {0} with capacity {1} pod amount {2}".format( node_amount, node_capacity,pod_amount))
                print("-------------------")
                print_objects(k.state_objects)


                GenClass = type("{0}_{1}_{2}_{3}".format(inspect.stack()[1].function, node_amount, pod_amount, sha[:7]),(HypothesisysNode,),{})

                p = GenClass(k.state_objects)

                try:
                    p.run(timeout=1000, sessionName=f"gen_test_{node_capacity}_{node_amount}_{pod_amount}_L{LIN_COUNT}")
                except Exception as e:
                    print("run break exception is \n",e)
                    assert False
                # print_plan(p)
                end = time.time()
                print("-------------------")
                print("timer :", int(end - start))
                if p.plan != None:
                    csvwriter.writerow([node_amount, node_capacity, pod_amount, int(end - start), "ok"])
                else:
                    csvwriter.writerow([node_amount, node_capacity, pod_amount, int(end - start), "empty_plan"])
                csvfile.flush()
                print("-------------------")
Example #13
def test_load_deployment():
    k = KubernetesCluster()
    k.load(
        open("./tests/client-cases/criticalhopmanifest_prefixed.yaml").read())
    k._build_state()
    print_objects(k.state_objects)
Example #14
def print_objects_compare(k,k2):
    print("---originaly-generated---")
    print_objects(k.state_objects)
    print("---loaded-from-yaml----")
    print_objects(k2.state_objects)
Example #15
def prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(
        nodes_amount, node_capacity, pod2_amount, pod0_amount, pod2_2_amount,
        pod3_amount):
    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []
    
    # Service to detect eviction
    s = Service()
    s.metadata_name = "test-service"
    s.amountOfActivePods = 0

    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    # create Deployment that we're going to detect failure of...
    d = Deployment()
    d.spec_replicas = 2    
    pod_id = 1
    for i in range(nodes_amount):
        node_item = Node("node" + str(i))
        node_item.cpuCapacity = node_capacity
        node_item.memCapacity = node_capacity
        node_item.isNull = False
        node_item.status = STATUS_NODE["Active"]
        nodes.append(node_item)

        for j in range(pod2_amount):
            pod_running_2 = build_running_pod_with_d(pod_id, 2, 2, node_item, None, None)
            pod_id += 1
            pod_running_2.hasService = True
            pods.append(pod_running_2)
            node_item.amountOfActivePods += 1
            s.podList.add(pod_running_2)
            s.amountOfActivePods += 1

        for j in range(pod0_amount):
            pod_running_0 = build_running_pod_with_d(pod_id, 0, 0, node_item, None, None)
            pod_id += 1
            pods.append(pod_running_0)
            node_item.amountOfActivePods += 1

        for j in range(pod2_2_amount):
            pod_running_2 = build_running_pod_with_d(pod_id, 2, 2, node_item, None, None)
            pod_id += 1
            pod_running_2.hasService = True
            pods.append(pod_running_2)
            node_item.amountOfActivePods += 1
            s.podList.add(pod_running_2)
            s.amountOfActivePods += 1

    for j in range(pod3_amount):
        pod_running_2 = build_running_pod_with_d(pod_id, 2, 2, nodes[0], None, None)
        pod_id += 1
        pod_running_2.hasService = True
        pods.append(pod_running_2)
        nodes[0].amountOfActivePods += 1  # these pods run on nodes[0], not the last node_item
        s2.podList.add(pod_running_2)
        s2.amountOfActivePods += 1
    
    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"

    
    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s, s2 ])
    create_objects = []
    k._build_state()
    class NewGoal_k1(CheckNodeOutage):
        pass
    p = NewGoal_k1(k.state_objects)
    NewGoal_k1.__name__ = inspect.stack()[0].function
    assert_conditions = ["MarkServiceOutageEvent",\
                        "Mark_node_outage_event"]
    not_assert_conditions = []
    print_objects(k.state_objects)
    return k, p
Example #16
def prepare_many_pods_without_yaml(nodes_amount, node_capacity, pod2_amount,
                                   pod0_amount, pod2_2_amount, pod3_amount):
    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []

    # Service to detect eviction
    s = Service()
    s.metadata_name = "test-service"
    s.amountOfActivePods = 0

    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    # create Deployment that we're going to detect failure of...
    d = Deployment()
    d.spec_replicas = 2
    pod_id = 0
    for i in range(nodes_amount):
        node_item = Node("node" + str(i))
        node_item.metadata_name = "node" + str(i)
        node_item.cpuCapacity = node_capacity
        node_item.memCapacity = node_capacity
        node_item.isNull = False
        node_item.status = STATUS_NODE["Active"]
        nodes.append(node_item)

        for j in range(pod2_amount):
            pod_running_2 = build_running_pod_with_d(pod_id, 2, 2, node_item,
                                                     None, None)
            pod_id += 1
            pod_running_2.hasService = True
            pods.append(pod_running_2)
            node_item.amountOfActivePods += 1
            s.podList.add(pod_running_2)
            s.amountOfActivePods += 1
            s.status = STATUS_SERV["Started"]

        for j in range(pod0_amount):
            pod_running_0 = build_running_pod_with_d(pod_id, 0, 0, node_item,
                                                     None, None)
            pod_id += 1
            pods.append(pod_running_0)
            node_item.amountOfActivePods += 1

        for j in range(pod2_2_amount):
            pod_running_2 = build_running_pod_with_d(pod_id, 2, 2, node_item,
                                                     None, None)
            pod_id += 1
            pod_running_2.hasService = True
            pods.append(pod_running_2)
            node_item.amountOfActivePods += 1
            s2.podList.add(pod_running_2)
            s2.amountOfActivePods += 1
            s2.status = STATUS_SERV["Started"]

    for j in range(pod3_amount):
        pod_running_2 = build_running_pod_with_d(pod_id, 2, 2, nodes[0], None,
                                                 None)
        pod_id += 1
        pod_running_2.hasService = True
        pods.append(pod_running_2)
        nodes[0].amountOfActivePods += 1
        s2.podList.add(pod_running_2)
        s2.amountOfActivePods += 1

    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"

    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s, s2])
    create_objects = []
    k._build_state()
    globalVar = next(
        filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))

    class HypothesisysNodeAndService_k1(HypothesisysNodeAndService):
        pass

    p = HypothesisysNodeAndService_k1(k.state_objects)
    HypothesisysNodeAndService_k1.__name__ = inspect.stack()[0].function
    print_objects(k.state_objects)
    test_case = StateSet()
    test_case.scheduler = scheduler
    test_case.globalVar = globalVar
    test_case.pods = pods
    test_case.nodes = nodes
    services = [s, s2]
    test_case.services = services
    return k, p, test_case
Example #17
def test_1_1pod_2nodes_Service_outage():
    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    globalVar = next(
        filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []

    # Service to detect eviction
    s = Service()
    s.metadata_name = "test-service"
    s.amountOfActivePods = 0

    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    # create Deployment that we're going to detect failure of...
    pod_id = 1
    node_1 = Node("node 1")
    node_1.cpuCapacity = 4
    node_1.memCapacity = 4
    node_1.isNull = False
    node_1.status = STATUS_NODE["Active"]

    node_2 = Node("node 2")
    node_2.cpuCapacity = 4
    node_2.memCapacity = 4
    node_2.isNull = False
    node_2.status = STATUS_NODE["Active"]
    pod_running_1 = build_running_pod_with_d(pod_id, 2, 2, node_1, None, None)
    pod_running_1.hasService = True
    node_1.amountOfActivePods += 1
    s.podList.add(pod_running_1)
    s.amountOfActivePods += 1
    s.status = STATUS_SERV["Started"]

    # k.state_objects += [node_1,node_2,pod_running_1, s]
    k.state_objects += [
        node_1, node_2, pod_running_1, s, STATUS_POD["Pending"],
        STATUS_POD["Killing"], STATUS_POD["Running"]
    ]
    create_objects = []
    k._build_state()

    class HypothesisysNode_k1(HypothesisysNode):
        pass

    p = HypothesisysNode_k1(k.state_objects)
    HypothesisysNode_k1.__name__ = inspect.stack()[0].function
    not_assert_conditions = []
    print_objects(k.state_objects)
    p.Initiate_node_outage(node_1, globalVar)
    # p.Initiate_killing_of_Pod_because_of_node_outage(node_1,pod_running_1,globalVar)
    # p.KillPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNull(pod_running_1,node_1,s,scheduler)
    # p.NodeOutageFinished(node_1,globalVar)
    # p.Mark_node_outage_event(node_1,globalVar)
    # p.SelectNode(pod_running_1,node_2,globalVar)
    # p.StartPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNull(pod_running_1,node_2,scheduler,s,globalVar)
    # p.SchedulerCleaned(scheduler, globalVar)
    print("                       >> changed state <<  ")
    print_objects(k.state_objects)

    p.run()
    print("                      >> after <<         ")
    print_objects(k.state_objects)
    print_plan(p)