Code example #1
def test_get_deployment():
    p = Pod()
    p.metadata_name = "test-pod-1"
    n = Node()
    l1 = Label("a:b")
    l1.key = "a"
    l1.value = "b"
    l2 = Label("c:d")
    l2.key = "c"
    l2.value = "d"
    n.metadata_labels.add(l1)
    n.metadata_labels.add(l2)

    d = Deployment()
    d.metadata_name = "dep-test1"
    d.podList.add(p)
    p.hasDeployment = True

    rs = ReplicaSet()
    rs.metadata_name = "rs-test1"
    rs.metadata_ownerReferences__name = "dep-test1"
    # typically, you can find the correct ReplicaSet by ownerReferences
    # TODO: create a utility function to do that

    print(move_pod_with_deployment_script_simple(p, n, [d, n, p, rs]))
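The TODO above asks for a helper that resolves a Deployment's ReplicaSet through its ownerReferences. A minimal sketch, assuming only attributes already used in these examples (ReplicaSet.metadata_ownerReferences__name and Deployment.metadata_name); the helper name is hypothetical, not part of kalc:

def find_replicaset_for_deployment(deployment, state_objects):
    # Hypothetical helper: return the first ReplicaSet whose ownerReferences
    # name matches the Deployment's metadata_name, or None if nothing matches.
    for obj in state_objects:
        if isinstance(obj, ReplicaSet) and \
                obj.metadata_ownerReferences__name == deployment.metadata_name:
            return obj
    return None

# e.g. find_replicaset_for_deployment(d, [d, n, p, rs]) would return rs above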
Code example #2
File: test_policy_stub.py  Project: KellyGriffin/kalc
def test_stub_completion():
    # create elementary cluster
    # render cluster
    # update with rendered cluster
    # add stub policy
    # run, get solution (empty?)
    k = KubernetesCluster()

    nodes = []
    pods = []
    node_item = Node()
    node_item.metadata_name = "node 1"
    node_item.cpuCapacity = 25
    node_item.memCapacity = 25
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    # Service to detect eviction
    s1 = Service()
    s1.metadata_name = "test-service"
    s1.amountOfActivePods = 0
    s1.isSearched = True
    d = Deployment()
    d.spec_replicas = 6
    d.NumberOfPodsOnSameNodeForDeployment = 4
    pod = build_running_pod_with_d(1, 2, 2, node_item, d, None, s1, pods)
    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)
    update(''.join(yamlState))
    cluster = next(
        filter(lambda x: isinstance(x, GlobalVar), kalc_state_objects))
    cluster.policy.stub = True
    run()
Code example #3
def test_convert_node_problem():
    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar = next(
        filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    # initial node state
    n = Node()
    n.cpuCapacity = 5
    n.memCapacity = 5
    # Create running pods
    pod_running_1 = build_running_pod(1, 2, 2, n)
    pod_running_2 = build_running_pod(2, 2, 2, n)

    ## Set consumption as expected
    n.currentFormalCpuConsumption = 4
    n.currentFormalMemConsumption = 4
    n.amountOfActivePods = 2

    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"

    # Service to detect eviction
    s = Service()
    s.metadata_name = "test-service"
    s.amountOfActivePods = 2
    s.status = STATUS_SERV["Started"]

    # our service has multiple pods, but we are detecting a pods-pending issue
    # remove the service, as we are detecting the service outage via the bug above
    pod_running_1.targetService = s
    pod_running_2.targetService = s
    pod_running_1.hasService = True
    pod_running_2.hasService = True
    pod_running_1.priorityClass = pc
    pod_running_2.priorityClass = pc

    d = Deployment()
    d.spec_replicas = 2
    d.amountOfActivePods = 2
    pod_running_1.hasDeployment = True
    pod_running_2.hasDeployment = True
    d.podList.add(pod_running_1)
    d.podList.add(pod_running_2)

    k.state_objects.extend([n, pod_running_1, pod_running_2, s, d, pc])
    k2 = KubernetesCluster()
    for y in convert_space_to_yaml(k.state_objects, wrap_items=True):
        # print(y)
        k2.load(y)
    k2._build_state()


# TODO: test node outage exclusion
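As written, test_convert_node_problem rebuilds the state but asserts nothing about it. A hedged closing check, reusing the prepare_yamllist_for_diff comparison shown in code example #6 below (assuming that helper is importable in this test module), could be:

    yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)
    yamlState2 = convert_space_to_yaml(k2.state_objects, wrap_items=True)
    # Sketch only: the round-tripped state should match the original, ignoring generated names
    assert prepare_yamllist_for_diff(yamlState, ignore_names=True) == \
           prepare_yamllist_for_diff(yamlState2, ignore_names=True)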
Code example #4
def test_get_fullscript():
    k = KubernetesCluster()
    p = Pod()
    p.status = STATUS_POD["Running"]
    p.metadata_name = "test-pod-1"
    p.cpuRequest = 2
    p.memRequest = 2
    n_orig = Node("orig")
    n = Node()
    p.nodeSelectorList.add(n)
    n_orig.metadata_name = "ORIG"
    n_orig.currentFormalMemConsumption = 5
    n_orig.currentFormalCpuConsumption = 5
    n.status = STATUS_NODE["Active"]
    n.cpuCapacity = 10
    n.memCapacity = 10
    l1 = Label("a:b")
    l1.key = "a"
    l1.value = "b"
    l2 = Label("c:d")
    l2.key = "c"
    l2.value = "d"
    n.metadata_labels.add(l1)
    n.metadata_labels.add(l2)

    d = Deployment()
    d.metadata_name = "dep-test1"
    d.podList.add(p)
    p.hasDeployment = True
    p.atNode = n_orig

    rs = ReplicaSet()
    rs.metadata_name = "rs-test1"
    rs.metadata_ownerReferences__name = "dep-test1"
    # typically, you can find the correct ReplicaSet by ownerReferences
    # TODO: create a utility function to do that

    k.state_objects.extend([d, n, p, rs])

    prob = Balance_pods_and_drain_node(k.state_objects)
    s = k.scheduler
    g = k.globalvar
    prob.MoveRunningPodToAnotherNode(p, n_orig, n, s, g)

    assert len(prob.script)
Code example #5
def prepare_test_single_node_dies_2pod_killed_deployment_outage():
    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar = next(
        filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    # initial node state
    n = Node()
    n.cpuCapacity = 5
    n.memCapacity = 5
    # Create running pods
    pod_running_1 = build_running_pod(1, 2, 2, n)
    pod_running_2 = build_running_pod(2, 2, 2, n)

    ## Set consumption as expected
    n.currentFormalCpuConsumption = 4
    n.currentFormalMemConsumption = 4
    n.amountOfActivePods = 2

    # Service to detect eviction
    s = Service()
    s.metadata_name = "test-service"
    s.amountOfActivePods = 2
    s.status = STATUS_SERV["Started"]

    # our service has multiple pods, but we are detecting a pods-pending issue
    # remove the service, as we are detecting the service outage via the bug above
    pod_running_1.targetService = s
    pod_running_2.targetService = s
    pod_running_1.hasService = True
    pod_running_2.hasService = True

    d = Deployment()
    d.spec_replicas = 2
    d.amountOfActivePods = 2
    pod_running_1.hasDeployment = True
    pod_running_2.hasDeployment = True
    d.podList.add(pod_running_1)
    d.podList.add(pod_running_2)
    k.state_objects.extend([n, pod_running_1, pod_running_2, s, d])
    return k, globalVar
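The prepare_* helpers above only assemble state and return it. A hypothetical caller, using only calls that appear elsewhere in these examples (k._build_state as in code example #3, print_objects as in code example #10), might consume the result like this:

k, globalVar = prepare_test_single_node_dies_2pod_killed_deployment_outage()
k._build_state()                # resolve references between the assembled objects
print_objects(k.state_objects)  # dump the resulting cluster state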
Code example #6
File: test_cyclic_load.py  Project: KellyGriffin/kalc
def test_cyclic_create():
    k, globalVar, n = prepare_test_single_node_dies_2pod_killed_service_outage()
    yamlStateBeforeCreate = convert_space_to_yaml(k.state_objects, wrap_items=True)

    pod_pending_1 = build_pending_pod(3,2,2,n)

    dnew = Deployment()
    dnew.amountOfActivePods = 0
    dnew.spec_replicas = 1
    dnew.podList.add(pod_pending_1) # important to add as we extract status, priority spec from pod

    snew = Service()
    snew.metadata_name = "test-service-new"
    snew.amountOfActivePods = 0
    pod_pending_1.targetService = snew

    create_objects = [dnew, snew]
    yamlCreate = convert_space_to_yaml(create_objects, wrap_items=False, load_logic_support=False)

    # snew.status = STATUS_SERV["Started"]
    k.state_objects.extend([pod_pending_1, dnew, snew])
    yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)

    k2 = KubernetesCluster()
    for y in yamlStateBeforeCreate: 
        # print(y)
        k2.load(y)
    for y in yamlCreate:
        k2.load(y, mode=KubernetesCluster.CREATE_MODE)
    k2._build_state()
    globalVar = k2.state_objects[1]
    # print("--- RUN 2 ---")
    
    yamlState2 = convert_space_to_yaml(k2.state_objects, wrap_items=True)
    # for y in yamlState2:
        # print(y)

    assert prepare_yamllist_for_diff(yamlState, ignore_names=True) == \
                    prepare_yamllist_for_diff(yamlState2, ignore_names=True) 

    
Code example #7
def prepare_affinity_test_8_pods_on_3_nodes_with_6_antiaffinity_pods():
    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    globalVar.block_policy_calculated = False
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []
    services = []
    deployments = []
    
    # Service to detect eviction
    s1 = Service()
    s1.metadata_name = "test-service"
    s1.amountOfActivePods = 0
    s1.isSearched = True
    services.append(s1)

    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    services.append(s2)
    
    # create the Deployment whose failure we're going to detect...
    d = Deployment()
    d.spec_replicas = 6    
    d.NumberOfPodsOnSameNodeForDeployment = 4
    deployments.append(d)
    d2 = Deployment()
    d2.spec_replicas = 2    
    d2.NumberOfPodsOnSameNodeForDeployment = 2
    deployments.append(d2)
    node_item = Node()
    node_item.metadata_name = "node 1"
    node_item.cpuCapacity = 25
    node_item.memCapacity = 25
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(0,2,2,node_item,d,None,s1,pods)
    pod = build_running_pod_with_d(1,2,2,node_item,d,None,s1,pods)
    pod = build_running_pod_with_d(2,2,2,node_item,d,None,None,pods)
    pod = build_running_pod_with_d(3,2,2,node_item,None,None,None,pods)
    pod = build_running_pod_with_d(4,2,2,node_item,None,None,s1,pods)
    pod = build_running_pod_with_d(5,2,2,node_item,None,None,s1,pods)


         
    node_item = Node()
    node_item.metadata_name = "node 2"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(12,2,2,node_item,d2,None,s1,pods)
    pod = build_running_pod_with_d(13,2,2,node_item,d2,None,s1,pods)

    
    node_item = Node()
    node_item.metadata_name = "node 3"
    node_item.cpuCapacity = 4
    node_item.memCapacity = 4
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(16,2,2,node_item,None,None,None,pods)

    node_item = Node()
    node_item.metadata_name = "node 4"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    


    node_item = Node()
    node_item.metadata_name = "node 5"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    


    node_item = Node()
    node_item.metadata_name = "node 6"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)
    

    for node in nodes:
        globalVar.amountOfNodes += 1
        for pod in pods:
            if not pod.nodeSelectorSet: pod.nodeSelectorList.add(node)
        for node2 in nodes:
            if node != node2:
                node2.different_than.add(node)

#     pods[0].antiaffinity_set = True
#     pods[0].podsMatchedByAntiaffinity.add(pods[1])
#     pods[0].podsMatchedByAntiaffinity.add(pods[2])
#     pods[0].podsMatchedByAntiaffinity.add(pods[12])
#     pods[0].podsMatchedByAntiaffinity_length = 3
#     pods[0].target_number_of_antiaffinity_pods = 3

#     pods[1].antiaffinity_set = True
#     pods[1].podsMatchedByAntiaffinity.add(pods[0])
#     pods[1].podsMatchedByAntiaffinity.add(pods[2])
#     pods[1].podsMatchedByAntiaffinity.add(pods[12])
#     pods[1].podsMatchedByAntiaffinity_length = 3
#     pods[1].target_number_of_antiaffinity_pods = 3

#     pods[2].antiaffinity_set = True
#     pods[2].podsMatchedByAntiaffinity.add(pods[1])
#     pods[2].podsMatchedByAntiaffinity.add(pods[0])
#     pods[2].podsMatchedByAntiaffinity.add(pods[12])
#     pods[2].podsMatchedByAntiaffinity_length = 3
#     pods[2].target_number_of_antiaffinity_pods = 3

#     pods[12].antiaffinity_set = True
#     pods[12].podsMatchedByAntiaffinity.add(pods[1])
#     pods[12].podsMatchedByAntiaffinity.add(pods[2])
#     pods[12].podsMatchedByAntiaffinity.add(pods[0])
#     pods[12].podsMatchedByAntiaffinity_length = 3
#     pods[12].target_number_of_antiaffinity_pods = 3
    
#     nodes[2].isSearched = True
    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"

    
    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s1, s2 ])
    k.state_objects.extend(deployments)
    create_objects = []
    k._build_state()
    globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar.target_DeploymentsWithAntiaffinity_length = 1
    globalVar.maxNumberOfPodsOnSameNodeForDeployment = 10
    globalVar.target_amountOfPodsWithAntiaffinity = 3
    
    
    class Antiaffinity_check_k1(Antiaffinity_check_with_limited_number_of_pods_with_add_node):
        pass

    p = Antiaffinity_check_k1(k.state_objects)
    Antiaffinity_check_k1.__name__ = inspect.stack()[0].function
    test_case = StateSet()
    test_case.scheduler = scheduler
    test_case.globalVar = globalVar
    test_case.pods = pods
    test_case.nodes = nodes
    services = [s1,s2]
    test_case.services = services
    test_case.deployments = deployments
#     print_objects(k.state_objects)
    return k, p, test_case
Code example #8
def test_stub_completion():

    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar = next(
        filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    globalVar.block_policy_calculated = False
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []
    services = []

    # Service to detect eviction
    s1 = Service()
    s1.metadata_name = "test-service"
    s1.amountOfActivePods = 0
    s1.isSearched = True
    services.append(s1)

    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    services.append(s2)

    # create the Deployment whose failure we're going to detect...
    d = Deployment()
    d.spec_replicas = 6
    d.NumberOfPodsOnSameNodeForDeployment = 4
    d2 = Deployment()
    d2.spec_replicas = 2
    d2.NumberOfPodsOnSameNodeForDeployment = 1
    node_item = Node()
    node_item.metadata_name = "node 1"
    node_item.cpuCapacity = 25
    node_item.memCapacity = 25
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(1, 2, 2, node_item, d, None, s1, pods)
    pod = build_running_pod_with_d(2, 2, 2, node_item, d, None, s1, pods)
    pod = build_running_pod_with_d(3, 2, 2, node_item, d, None, None, pods)
    pod = build_running_pod_with_d(4, 2, 2, node_item, d2, None, None, pods)
    pod = build_running_pod_with_d(5, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(6, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(7, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(8, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(9, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(10, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(11, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(12, 2, 2, node_item, None, None, s2, pods)

    node_item = Node()
    node_item.metadata_name = "node 2"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(13, 2, 2, node_item, d, None, s1, pods)
    pod = build_running_pod_with_d(14, 2, 2, node_item, d2, None, s1, pods)
    pod = build_running_pod_with_d(15, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(16, 2, 2, node_item, None, None, s2, pods)

    node_item = Node()
    node_item.metadata_name = "node 3"
    node_item.cpuCapacity = 4
    node_item.memCapacity = 4
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(17, 2, 2, node_item, d, None, None, pods)

    node_item = Node()
    node_item.metadata_name = "node 4"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)

    node_item = Node()
    node_item.metadata_name = "node 5"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)

    node_item = Node()
    node_item.metadata_name = "node 6"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)

    for node in nodes:
        globalVar.amountOfNodes += 1
        for pod in pods:
            if not pod.nodeSelectorSet: pod.nodeSelectorList.add(node)
        for node2 in nodes:
            if node != node2:
                node2.different_than.add(node)

    pods[0].antiaffinity_set = True
    pods[0].podsMatchedByAntiaffinity.add(pods[1])
    pods[0].podsMatchedByAntiaffinity.add(pods[2])
    pods[0].podsMatchedByAntiaffinity.add(pods[12])
    pods[0].podsMatchedByAntiaffinity_length = 3
    pods[0].target_number_of_antiaffinity_pods = 3

    pods[1].antiaffinity_set = True
    pods[1].podsMatchedByAntiaffinity.add(pods[0])
    pods[1].podsMatchedByAntiaffinity.add(pods[2])
    pods[1].podsMatchedByAntiaffinity.add(pods[12])
    pods[1].podsMatchedByAntiaffinity_length = 3
    pods[1].target_number_of_antiaffinity_pods = 3

    pods[2].antiaffinity_set = True
    pods[2].podsMatchedByAntiaffinity.add(pods[1])
    pods[2].podsMatchedByAntiaffinity.add(pods[0])
    pods[2].podsMatchedByAntiaffinity.add(pods[12])
    pods[2].podsMatchedByAntiaffinity_length = 3
    pods[2].target_number_of_antiaffinity_pods = 3

    pods[12].antiaffinity_set = True
    pods[12].podsMatchedByAntiaffinity.add(pods[1])
    pods[12].podsMatchedByAntiaffinity.add(pods[2])
    pods[12].podsMatchedByAntiaffinity.add(pods[0])
    pods[12].podsMatchedByAntiaffinity_length = 3
    pods[12].target_number_of_antiaffinity_pods = 3

    nodes[2].isSearched = True
    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"

    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s1, s2])
    create_objects = []
    # k._build_state()

    yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)
    print("LEN", len(yamlState))
    print(''.join(yamlState))
    update(''.join(yamlState))
Code example #9
def prepare_test_29_many_pods_not_enough_capacity_for_service(nodes_amount,node_capacity,pod2_amount,pod0_amount,pod2_2_amount,pod3_amount):
    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []
    
    # Service to detect eviction
    s = Service()
    s.metadata_name = "test-service"
    s.amountOfActivePods = 0

    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    # create the Deployment whose failure we're going to detect...
    d = Deployment()
    d.spec_replicas = 2    
    pod_id = 1
    for i in range(nodes_amount):
        node_item = Node("node"+str(i))
        node_item.cpuCapacity = node_capacity
        node_item.memCapacity = node_capacity
        node_item.isNull = False
        node_item.status = STATUS_NODE["Active"]
        nodes.append(node_item)
        
        for j in range(pod2_amount):
            pod_running_2 = build_running_pod_with_d(pod_id,2,2,node_item,None,None)
            pod_id += 1
            pod_running_2.hasService = True
            pods.append(pod_running_2)
            node_item.amountOfActivePods += 1
            s.podList.add(pod_running_2)
            s.amountOfActivePods +=1

        for j in range(pod0_amount):
            pod_running_0 = build_running_pod_with_d(pod_id,0,0,node_item,None,None)
            pod_id += 1
            pods.append(pod_running_0)
            node_item.amountOfActivePods += 1

        for j in range(pod2_2_amount):
            pod_running_2 = build_running_pod_with_d(pod_id,2,2,node_item,None,None)
            pod_id += 1
            pod_running_2.hasService = True
            pods.append(pod_running_2)
            node_item.amountOfActivePods += 1
            s.podList.add(pod_running_2)
            s.amountOfActivePods +=1

    for j in range(pod3_amount):
        pod_running_2 = build_running_pod_with_d(pod_id,2,2,nodes[0],None,None)
        pod_id += 1
        pod_running_2.hasService = True
        pods.append(pod_running_2)
        nodes[0].amountOfActivePods += 1
        s2.podList.add(pod_running_2)
        s2.amountOfActivePods +=1
    
    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"

    
    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s, s2 ])
    create_objects = []
    k2 = reload_cluster_from_yaml(k,create_objects)
    k._build_state()
    class NewGoal_k1(CheckNodeOutage):
        pass
    p = NewGoal_k1(k.state_objects)
    class NewGoal_k2(CheckNodeOutage):
        pass
    p2 = NewGoal_k2(k2.state_objects)
    assert_conditions = ["MarkServiceOutageEvent",\
                        "Mark_node_outage_event"]
    not_assert_conditions = []
    return k, k2, p , p2
Code example #10
def prepare_many_pods_without_yaml(nodes_amount,node_capacity,pod2_amount,pod0_amount,pod2_2_amount,pod3_amount):
    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []
    
    # Service to detect eviction
    s = Service()
    s.metadata_name = "test-service"
    s.amountOfActivePods = 0

    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    # create the Deployment whose failure we're going to detect...
    d = Deployment()
    d.spec_replicas = 2    
    pod_id = 0
    for i in range(nodes_amount):
        node_item = Node("node"+str(i))
        node_item.metadata_name = "node"+str(i)
        node_item.cpuCapacity = node_capacity
        node_item.memCapacity = node_capacity
        node_item.isNull = False
        node_item.status = STATUS_NODE["Active"]
        nodes.append(node_item)
        
        for j in range(pod2_amount):
            pod_running_2 = build_running_pod_with_d(pod_id,2,2,node_item,None,None)
            pod_id += 1
            pod_running_2.hasService = True
            pods.append(pod_running_2)
            node_item.amountOfActivePods += 1
            s.podList.add(pod_running_2)
            s.amountOfActivePods += 1
            s.status = STATUS_SERV["Started"]

        for j in range(pod0_amount):
            pod_running_0 = build_running_pod_with_d(pod_id,0,0,node_item,None,None)
            pod_id += 1
            pods.append(pod_running_0)
            node_item.amountOfActivePods += 1

        for j in range(pod2_2_amount):
            pod_running_2 = build_running_pod_with_d(pod_id,2,2,node_item,None,None)
            pod_id += 1
            pods.append(pod_running_2)
            node_item.amountOfActivePods += 1


    for j in range(pod3_amount):
        pod_running_2 = build_running_pod_with_d(pod_id,2,2,nodes[0],None,None)
        pod_id += 1
        pod_running_2.hasService = True
        pods.append(pod_running_2)
        nodes[0].amountOfActivePods += 1
        s2.podList.add(pod_running_2)
        s2.amountOfActivePods +=1
    
    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"

    
    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s, s2 ])
    create_objects = []
    k._build_state()
    globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    class HypothesisysNode_k1(HypothesisysNode):
        pass
    p = HypothesisysNode_k1(k.state_objects)
    HypothesisysNode_k1.__name__ = inspect.stack()[0].function
    assert_conditions = ["MarkServiceOutageEvent",\
                        "Mark_node_outage_event"]
    not_assert_conditions = []
    print_objects(k.state_objects)
    test_case = StateSet()
    test_case.scheduler = scheduler
    test_case.globalVar = globalVar
    test_case.pods = pods
    test_case.nodes = nodes
    services = [s,s2]
    test_case.services = services
    return k, p, test_case
Code example #11
def test_1():
    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    # initial node state
    i = 0
    j = 0
    nodes = []
    pods = []

    # Service to detect eviction
    s1 = Service()
    s1.metadata_name = "test-service"
    s1.amountOfActivePods = 0
    s1.antiaffinity = True
    services = []
    services.append(s1)
    s2 = Service()
    s2.metadata_name = "test-service2"
    s2.amountOfActivePods = 0
    # create the Deployment whose failure we're going to detect...
    d = Deployment()
    d.spec_replicas = 2
    node_item = Node()
    node_item.metadata_name = "node 1"
    node_item.cpuCapacity = 10
    node_item.memCapacity = 10
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(1, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(2, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(3, 2, 2, node_item, None, None, None, pods)
    pod = build_running_pod_with_d(4, 2, 2, node_item, None, None, None, pods)

    node_item = Node()
    node_item.metadata_name = "node 2"
    node_item.cpuCapacity = 10
    node_item.memCapacity = 10
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(5, 2, 2, node_item, None, None, s1, pods)
    pod = build_running_pod_with_d(7, 2, 2, node_item, None, None, s2, pods)
    pod = build_running_pod_with_d(8, 2, 2, node_item, None, None, s2, pods)

    node_item = Node()
    node_item.metadata_name = "node 3"
    node_item.cpuCapacity = 4
    node_item.memCapacity = 4
    node_item.isNull = False
    node_item.status = STATUS_NODE["Active"]
    nodes.append(node_item)

    pod = build_running_pod_with_d(9, 2, 2, node_item, None, None, None, pods)
    pod = build_running_pod_with_d(6, 2, 2, node_item, None, None, None, pods)

    node_item = Node()
    node_item.metadata_name = "node 4"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)

    node_item = Node()
    node_item.metadata_name = "node 5"
    node_item.cpuCapacity = 8
    node_item.memCapacity = 8
    node_item.isNull = False
    node_item.status = STATUS_NODE["New"]
    nodes.append(node_item)
    for node in nodes:
        for pod in pods:
            if not pod.nodeSelectorSet: pod.nodeSelectorList.add(node)
        for node2 in nodes:
            if node != node2:
                node2.different_than.add(node)
    # priority for pod-to-evict
    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"

    k.state_objects.extend(nodes)
    k.state_objects.extend(pods)
    k.state_objects.extend([pc, s1, s2])
    create_objects = []
    k._build_state()
    globalVar = next(
        filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))

    class Antiaffinity_implement_k1(Antiaffinity_implement):
        def goal(self):
            assert services[0].antiaffinity_prefered_policy_met == True

    p = Antiaffinity_implement_k1(k.state_objects)
    Antiaffinity_implement_k1.__name__ = inspect.stack()[0].function
    assert_conditions = ["manually_initiate_killing_of_podt",\
                        "Not_at_same_node"]
    not_assert_conditions = []
    print_objects(k.state_objects)
    test_case = StateSet()
    test_case.scheduler = scheduler
    test_case.globalVar = globalVar
    test_case.pods = pods
    test_case.nodes = nodes
    services = [s1, s2]
    test_case.services = services
    assert_brake = checks_assert_conditions_in_one_mode(
        k, p, assert_conditions, not_assert_conditions, "functional test",
        DEBUG_MODE)