Code Example #1
def test_get_deployment():
    p = Pod()
    p.metadata_name = "test-pod-1"
    n = Node()
    l1 = Label("a:b")
    l1.key = "a"
    l1.value = "b"
    l2 = Label("c:d")
    l2.key = "c"
    l2.value = "b"
    n.metadata_labels.add(l1)
    n.metadata_labels.add(l2)

    d = Deployment()
    d.metadata_name = "dep-test1"
    d.podList.add(p)
    p.hasDeployment = True

    rs = ReplicaSet()
    rs.metadata_name = "rs-test1"
    rs.metadata_ownerReferences__name = "dep-test1"
    # typically, you can find the correct ReplicaSet by its ownerReferences
    # TODO: create a utility function to do that

    print(move_pod_with_deployment_script_simple(p, n, [d, n, p, rs]))
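The TODO above asks for a lookup utility. A minimal sketch of such a helper follows, assuming the flat state-object list and the metadata_ownerReferences__name attribute used in these examples; the name find_replicaset_for_deployment is hypothetical, not an existing kalc API:

from typing import List, Optional

def find_replicaset_for_deployment(deployment: Deployment,
                                   state_objects: List[object]) -> Optional[ReplicaSet]:
    # Hypothetical helper (per the TODO above): return the first ReplicaSet
    # whose ownerReferences name matches the Deployment's metadata_name,
    # or None when no such ReplicaSet exists among the state objects.
    for obj in state_objects:
        if isinstance(obj, ReplicaSet) \
                and obj.metadata_ownerReferences__name == deployment.metadata_name:
            return obj
    return None

With such a helper, the test could resolve the ReplicaSet as find_replicaset_for_deployment(d, [d, n, p, rs]) instead of matching names by hand.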
Code Example #2
def build_running_pod(podName, cpuRequest, memRequest, atNode):
    pod_running_1 = Pod()
    pod_running_1.metadata_name = "pod"+str(podName)
    pod_running_1.cpuRequest = cpuRequest
    pod_running_1.memRequest = memRequest
    pod_running_1.atNode = atNode
    pod_running_1.status = STATUS_POD["Running"]
    pod_running_1.hasDeployment = False
    pod_running_1.hasService = False
    pod_running_1.hasDaemonset = False
    return pod_running_1
Code Example #3
def Set_antiaffinity_between_pods_of_deployment(self, pod1: Pod, pod2: Pod,
                                                deployment: Deployment,
                                                globalVar: GlobalVar):
    if pod1 not in pod2.podsMatchedByAffinity and pod2 not in pod1.podsMatchedByAffinity:
        assert pod1 in deployment.podList
        assert pod2 in deployment.podList
        assert deployment.NumberOfPodsOnSameNodeForDeployment == globalVar.maxNumberOfPodsOnSameNodeForDeployment
        pod1.antiaffinity_set = True
        pod1.podsMatchedByAffinity.add(pod2)
        pod1.podsMatchedByAffinity_length += 1
        pod2.antiaffinity_set = True
        pod2.podsMatchedByAffinity.add(pod1)
        pod2.podsMatchedByAffinity_length += 1
Code Example #4
def register(self):
    Service.register_property(name="antiaffinity",
                              type=bool,
                              default=False)
    Service.register_property(name="antiaffinity_prefered_policy_met",
                              type=bool,
                              default=False)
    Service.register_property(name="targetAmountOfPodsOnDifferentNodes",
                              type=int,
                              default=-1)
    Pod.register_property(name="not_on_same_node",
                          type=Set[Pod],
                          default=None)
Code Example #5
def mark_antiaffinity_met_because_all_antiaffinity_pods_are_matched_and_those_that_cant_dont_suite(
        self, pod: Pod, globalVar: GlobalVar):
    assert pod.calc_antiaffinity_pods_list_length == pod.target_number_of_antiaffinity_pods
    assert pod.antiaffinity_set == True
    # assert globalVar.block_policy_calculated == True
    pod.antiaffinity_met = True
    globalVar.block_policy_calculated = True
Code Example #6
def mark_affinity_met_because_all_affinity_pods_are_matched(
        self, pod: Pod, globalVar: GlobalVar):
    assert pod.calc_affinity_pods_list_length == pod.podsMatchedByAffinity_length
    assert pod.affinity_set == True
    # assert globalVar.block_policy_calculated == True
    pod.affinity_met = True
    globalVar.block_policy_calculated = True
Code Example #7
def build_running_pod(podName, cpuRequest, memRequest, atNode):
    pod_running_1 = Pod()
    pod_running_1.metadata_name = "pod" + str(podName)
    pod_running_1.cpuRequest = cpuRequest
    pod_running_1.memRequest = memRequest
    pod_running_1.atNode = atNode
    pod_running_1.status = STATUS_POD["Running"]
    return pod_running_1
Code Example #8
def test_get_fullscript():
    k = KubernetesCluster()
    p = Pod()
    p.status = STATUS_POD["Running"]
    p.metadata_name = "test-pod-1"
    p.cpuRequest = 2
    p.memRequest = 2
    n_orig = Node("orig")
    n = Node()
    p.nodeSelectorList.add(n)
    n_orig.metadata_name = "ORIG"
    n_orig.currentFormalMemConsumption = 5
    n_orig.currentFormalCpuConsumption = 5
    n.status = STATUS_NODE["Active"]
    n.cpuCapacity = 10
    n.memCapacity = 10
    l1 = Label("a:b")
    l1.key = "a"
    l1.value = "b"
    l2 = Label("c:d")
    l2.key = "c"
    l2.value = "b"
    n.metadata_labels.add(l1)
    n.metadata_labels.add(l2)

    d = Deployment()
    d.metadata_name = "dep-test1"
    d.podList.add(p)
    p.hasDeployment = True
    p.atNode = n_orig

    rs = ReplicaSet()
    rs.metadata_name = "rs-test1"
    rs.metadata_ownerReferences__name = "dep-test1"
    # typically, you can find the correct ReplicaSet by its ownerReferences
    # TODO: create a utility function to do that (see the sketch after Code Example #1)

    k.state_objects.extend([d, n, p, rs])

    prob = Balance_pods_and_drain_node(k.state_objects)
    s = k.scheduler
    g = k.globalvar
    prob.MoveRunningPodToAnotherNode(p, n_orig, n, s, g)

    assert len(prob.script)
Code Example #9
def debug(self):
    self.problem()
    # collect this object's methods that were marked as planner actions
    self_methods = [
        getattr(self, m) for m in dir(self) if callable(getattr(self, m))
        and hasattr(getattr(self, m), "_planned")
    ]
    # collect planner actions from model objects, scanning each class only
    # once (methods are taken from the first instance of the class seen)
    model_methods = []
    methods_scanned = set()
    for obj in self.objectList:
        if obj.__class__.__name__ not in methods_scanned:
            methods_scanned.add(obj.__class__.__name__)
            for m in dir(obj):
                if callable(getattr(obj, m)) and hasattr(
                        getattr(obj, m), "_planned"):
                    model_methods.append(getattr(obj, m))
    debug_plan(methods=self_methods + list(model_methods),
               space=list(self.__dict__.values()) + self.objectList,
               goal=lambda: (self.goal()),
               plan=[Pod().connect_pod_service_labels])
Code Example #10
def build_pending_pod_with_d(podName, cpuRequest, memRequest, toNode, d, ds, s):
    p = Pod()
    p.metadata_name = "pod"+str(podName)
    p.cpuRequest = cpuRequest
    p.memRequest = memRequest
    p.status = STATUS_POD["Pending"]
    p.hasDeployment = False
    p.hasService = False
    p.hasDaemonset = False
    if d is not None:
        d.podList.add(p)
        p.hasDeployment = True
    if ds is not None:
        ds.podList.add(p)
        p.hasDaemonset = True
        p.toNode = toNode
    if s is not None:
        p.hasService = True
        s.podList.add(p)
    return p
Code Example #11
def build_running_pod_with_d(podName, cpuRequest, memRequest, atNode, d, ds, s, pods):
    pod_running_1 = Pod()
    pod_running_1.metadata_name = "pod"+str(podName)
    pod_running_1.cpuRequest = cpuRequest
    pod_running_1.memRequest = memRequest
    pod_running_1.cpuLimit = 1
    pod_running_1.memLimit = 1
    pod_running_1.atNode = atNode
    pod_running_1.status = STATUS_POD["Running"]
    pod_running_1.hasDeployment = False
    pod_running_1.hasService = False
    pod_running_1.hasDaemonset = False
    atNode.currentFormalCpuConsumption += cpuRequest
    atNode.currentFormalMemConsumption += memRequest
    atNode.amountOfActivePods += 1
    pods.append(pod_running_1)
    if d is not None:
        d.podList.add(pod_running_1)
        d.amountOfActivePods += 1
        pod_running_1.hasDeployment = True
    if ds is not None:
        ds.podList.add(pod_running_1)
        ds.amountOfActivePods += 1
        pod_running_1.hasDaemonset = True
    if s is not None:
        pod_running_1.hasService = True
        s.podList.add(pod_running_1)
        s.amountOfActivePods += 1
        s.status = STATUS_SERV["Started"]
    return pod_running_1
Code Example #12
File: test_gentest.py  Project: KellyGriffin/kalc
def test_node_killer_pod_with_service():
#   value                         start   stop    step
    node_amount_range =       range(2,     5,     2)
    pod_amount_range =        range(16,    61,     1)
    per_node_capacity_range = range(20,    41,     10)

    search = True

    assert_brake = False

    csvfile = open("{0}_{1}.csv".format(inspect.stack()[1].function, sha[:7]), 'w')
    csvwriter = csv.writer(csvfile, delimiter=';')

    for node_capacity in per_node_capacity_range:
        for node_amount in node_amount_range:
            for pod_amount in pod_amount_range:
                if pod_amount > (node_amount * node_capacity): continue
                # Initialize scheduler, globalvar
                start = time.time()
                k = KubernetesCluster()
                scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
                # initial node state
                i = 0
                j = 0
                nodes = []
                pods_running = []
                high = PriorityClass()
                high.priority = 10
                high.metadata_name = "high"
                # low = PriorityClass()
                # low.priority = 0
                # low.metadata_name = "low"
                s = Service()
                s.metadata_name = "test-service"
                s.amountOfActivePods = 0
                s.status = STATUS_SERV["Started"]
                s.isSearched = True
                isSearched = True
                pod_id = 0
                for i in range(node_amount):
                    node_item = Node("node"+str(i))
                    node_item.cpuCapacity = node_capacity
                    node_item.memCapacity = node_capacity
                    node_item.isNull = False
                    node_item.status = STATUS_NODE["Active"]
                    
                    node_item.isSearched = isSearched
                    isSearched = False
                    nodes.append(node_item)
                node_counter = 0
                for j in range(pod_amount):
                    node_item = nodes[node_counter]
                    if node_item.currentFormalCpuConsumption == node_capacity:
                        break
                    pod_running = Pod()
                    pod_running.metadata_name = "pod_prio_0_{0}_{1}".format(i, j)
                    pod_running.cpuRequest = 1
                    pod_running.memRequest = 1
                    pod_running.atNode = node_item
                    pod_running.status = STATUS_POD["Running"]
                    pod_running.hasDeployment = False
                    pod_running.hasService = True
                    pod_running.hasDaemonset = False
                    pod_running.priorityClass = high
                    pods_running.append(pod_running)
                    # node_item.podList.add(pod_running)
                    node_item.currentFormalCpuConsumption += 1
                    node_item.currentFormalMemConsumption += 1
                    node_item.amountOfActivePods += 1
                    s.podList.add(pod_running)
                    s.amountOfActivePods += 1
                    node_counter += 1
                    if node_counter == len(nodes):
                        node_counter = 0

                k.state_objects.extend(nodes)
                k.state_objects.extend(pods_running)
                # k.state_objects.extend([low])
                k.state_objects.append(high)
                k.state_objects.append(s)
                k._build_state()
                
                print("(node_capacity * (node_amount - 1))(",(node_capacity * (node_amount - 1)), ")<(", pod_amount,")pod_amount")

                if (node_capacity * (node_amount - 1)) < pod_amount:
                    task_type = "no-outage"
                else:
                    task_type = "NodeOutageFinished"

    
                print("check break node_amount {0} with capacity {1} pod amount {2}".format( node_amount, node_capacity,pod_amount))
                print("-------------------")
                print_objects(k.state_objects)


                GenClass = type("{0}_{1}_{2}_{3}".format(inspect.stack()[1].function, node_amount, pod_amount, sha[:7]),(HypothesisysNode,),{})

                p = GenClass(k.state_objects)

                try:
                    p.run(timeout=1000, sessionName=f"gen_test_{node_capacity}_{node_amount}_{pod_amount}_L{LIN_COUNT}")
                except Exception as e:
                    print("run break exception is \n",e)
                    assert False
                # print_plan(p)
                end = time.time()
                print("-------------------")
                print("timer :", int(end - start))
                if p.plan is not None:
                    csvwriter.writerow([node_amount, node_capacity, pod_amount, int(end - start), "ok"])
                else:
                    csvwriter.writerow([node_amount, node_capacity, pod_amount, int(end - start), "empty_plan"])
                csvfile.flush()
                print("-------------------")
Code Example #13
def mark_antiaffinity_met_because_all_antiaffinity_pods_are_matched_and_those_that_cant_dont_suite_below_the_limit_for_node_amount(
        self, pod: Pod, globalVar: GlobalVar):
    assert pod.nodesThatHaveAllocatedPodsThatHaveAntiaffinityWithThisPod_length + pod.nodesThatCantAllocateThisPod_length == globalVar.amountOfNodes_limit
    assert pod.antiaffinity_set == True
    pod.antiaffinity_met = True