def test_spec_selector_labels():
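    """After loading the test cluster folder, some Service must carry app=redis-evict in its spec selector."""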
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k._build_state()
    for ds in filter(lambda x: isinstance(x, Service), k.state_objects):
        if labelFactory.get("app",
                            "redis-evict") in ds.spec_selector._get_value():
            return
    raise Exception("Could not find the expected label after load")
def test_load_folder_load_pod_labels():
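    """After loading the test cluster folder, some Pod must carry the app=redis-evict metadata label."""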
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k._build_state()
    for ds in filter(lambda x: isinstance(x, Pod), k.state_objects):
        if labelFactory.get("app",
                            "redis-evict") in ds.metadata_labels._get_value():
            return
    raise Exception("Could not find the expected label after load")
def test_service_load():
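    """The service redis-master-create must be loaded together with its app=redis-create metadata label."""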
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k._build_state()
    objects = filter(lambda x: isinstance(x, Service), k.state_objects)
    for p in objects:
        if p.metadata_name == "redis-master-create" and \
            labelFactory.get("app", "redis-create") in p.metadata_labels._get_value():
            return
    raise ValueError("Could not find loaded service")
    def goal(self):
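        """Goal: the app=redis-evict service is Interrupted and the scheduler is Clean."""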
        # for ob in self.objectList:
        #     print(str(ob))

        evict_service = next(filter(lambda x: isinstance(x, Service) and \
            labelFactory.get("app", "redis-evict") in x.spec_selector._get_value(),
            self.objectList))
        scheduler = next(filter(lambda x: isinstance(x, Scheduler), self.objectList))
        return evict_service.status == STATUS_SERV["Interrupted"] and \
                                    scheduler.status == STATUS_SCHED["Clean"]
def test_load_folder_create_labels():
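    """Creating the DaemonSet resource from TEST_DAEMONET must populate its k8s-app=fluentd-logging label."""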
    k = KubernetesCluster()
    # k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    for ds in filter(lambda x: isinstance(x, DaemonSet), k.state_objects):
        if labelFactory.get(
                "k8s-app",
                "fluentd-logging") in ds.metadata_labels._get_value():
            return
    raise Exception("Could not find the expected label after load")
def test_service_status():
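    """redis-master-evict loads as Pending with matching label and selector, and a redis-evict Pod is not yet bound to a service."""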
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k._build_state()
    objects = filter(lambda x: isinstance(x, Service), k.state_objects)
    service_found = False
    for p in objects:
        if p.metadata_name == "redis-master-evict" and \
            labelFactory.get("app", "redis-evict") in p.metadata_labels._get_value() and \
            labelFactory.get("app", "redis-evict") in p.spec_selector._get_value() and \
                p.status == STATUS_SERV["Pending"]:
            service_found = True
            break
    assert service_found

    objects = filter(lambda x: isinstance(x, Pod), k.state_objects)
    for p in objects:
        if p.targetService == Pod.TARGET_SERVICE_NULL and \
            labelFactory.get("app", "redis-evict") in p.metadata_labels._get_value():
            return

    raise ValueError("Could not find an unbound redis-evict pod")
def test_service_link_to_pods():
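    """In ALL_STATE, a redis-master-evict* Pod must have its targetService set to the started redis-master-evict service."""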
    objects = filter(lambda x: isinstance(x, Service), ALL_STATE)
    serv = None
    for p in objects:
        if p.metadata_name == "redis-master-evict" and \
            labelFactory.get("app", "redis-evict") in p.metadata_labels._get_value() and \
                p.status == STATUS_SERV["Started"]:
            serv = p
    assert serv is not None
    objects = filter(lambda x: isinstance(x, Pod), ALL_STATE)
    for p in objects:
        if str(p.metadata_name).startswith("redis-master-evict")\
             and p.targetService == serv:
            return
    raise ValueError("Could not find a pod linked to the loaded service")
def k8s_to_domain_object(obj):
    """Convert a raw value loaded from a Kubernetes manifest into a domain object."""
    try_int = False
    try:
        int(obj)
        try_int = True
    except (ValueError, TypeError):
        pass
    if isinstance(obj, int):
        return obj
    elif isinstance(obj, dict) and len(obj) == 1:
        # a single-entry mapping is treated as a label key/value pair
        k, v = list(obj.items())[0]
        return labelFactory.get(k, v)
    elif isinstance(obj, str) and obj[0] in string.digits + "-" and obj[-1] not in string.digits:
        # numeric-looking string that is not a plain integer (e.g. "10Gi"):
        # pass on, probably someone downstream will take care
        return obj
    elif isinstance(obj, str) and try_int:
        return int(obj)
    elif isinstance(obj, str) and obj[0] not in string.digits + "-":
        return obj
    else:
        return str(obj)
def test_service_active_pods():
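    """Running StartServiceGoal should bring redis-master-evict to Started with at least one active pod (an empty plan is accepted)."""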
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k._build_state()
    p = StartServiceGoal(k.state_objects)
    p.select_target_service()
    global ALL_STATE
    ALL_STATE = k.state_objects
    # p.debug()
    try:
        p.xrun()
    except EmptyPlanError:
        return
    objects = filter(lambda x: isinstance(x, Service), k.state_objects)
    pods_active = False
    for p in objects:
        if p.metadata_name == "redis-master-evict" and \
            labelFactory.get("app", "redis-evict") in p.metadata_labels._get_value() and \
                p.status == STATUS_SERV["Started"] and\
                    p.amountOfActivePods > 0:
            pods_active = True
            break
    assert pods_active
    def problem(self):
        # super().problem()

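        # Priority classes: preemptive high, non-preemptive high, low, and a default "Undefined".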
        self.priorityHigh = self.addObject(PriorityClass('HighPreemptive'))
        self.priorityHigh.priority = 5
        self.priorityHigh.preemtionPolicy = POLICY["PreemptLowerPriority"]

        self.priorityHighNoPreem = self.addObject(
            PriorityClass('HighNoPreemptive'))
        self.priorityHighNoPreem.priority = 5
        self.priorityHighNoPreem.preemtionPolicy = POLICY["Never"]

        self.priorityLow = self.addObject(PriorityClass('Low'))
        self.priorityLow.priority = 1
        self.priorityLow.preemtionPolicy = POLICY["Never"]

        self.priorityUndefined = self.addObject(PriorityClass('Undefined'))
        self.priorityUndefined.priority = 0
        self.priorityUndefined.preemtionPolicy = POLICY["Never"]

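        # Workload scaffolding: a DaemonSet, a null LoadBalancer and three Pending services with distinct selectors.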
        self.daemonset1 = self.addObject(DaemonSet('DS1'))

        self.nullLb = self.addObject(LoadBalancer('Null'))

        self.service1 = self.addObject(mservice.Service('service1'))
        self.service1.amountOfActivePods = 0
        self.service1.status = STATUS_SERV["Pending"]
        self.service1.spec_selector.add(labelFactory.get("test", "test"))
        self.service.append(self.service1)

        self.service2 = self.addObject(mservice.Service('service2'))
        self.service2.amountOfActivePods = 0
        self.service2.status = STATUS_SERV["Pending"]
        self.service2.spec_selector.add(labelFactory.get("test2", "test2"))
        self.service.append(self.service2)

        self.service3 = self.addObject(mservice.Service('service3'))
        self.service3.amountOfActivePods = 0
        self.service3.spec_selector.add(labelFactory.get("test3", "test3"))
        self.service3.status = STATUS_SERV["Pending"]

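        # Two active nodes with 3/3 CPU/mem capacity and formal consumption already at 2/2.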
        self.node = self.addObject(mnode.Node('node'))
        self.node.status = STATUS_NODE["Active"]  # TODO: make Node activation mechanism
        self.node.cpuCapacity = 3
        self.node.memCapacity = 3
        self.node.currentFormalCpuConsumption = 2
        self.node.currentFormalMemConsumption = 2
        self.node.currentRealMemConsumption = 0
        self.node.currentRealCpuConsumption = 0
        self.node.AmountOfPodsOverwhelmingMemLimits = 0
        self.node.append(self.node)

        self.node2 = self.addObject(mnode.Node('node2'))
        self.node2.status = STATUS_NODE["Active"]
        self.node2.cpuCapacity = 3
        self.node2.memCapacity = 3
        self.node2.memCapacityBarier = 3
        self.node2.currentFormalCpuConsumption = 2
        self.node2.currentFormalMemConsumption = 2
        self.node2.currentRealMemConsumption = 0
        self.node2.currentRealCpuConsumption = 0
        self.node2.AmountOfPodsOverwhelmingMemLimits = 0
        self.node.append(self.node2)

        self.node2.prevNode = self.node
        self.node.prevNode = self.node2

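        # Pods 1-4 are Running on the nodes; pods 5-7 are Pending high-priority pods, with 6 and 7 owned by the DaemonSet.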
        self.pod1 = self.addObject(mpod.Pod('pod1'))
        self.pod1.currentRealCpuConsumption = 0
        self.pod1.currentRealMemConsumption = 0
        self.pod1.status = STATUS_POD["Running"]
        self.pod1.memRequest = 2
        self.pod1.cpuRequest = 2
        self.pod1.podNotOverwhelmingLimits = True
        self.pod1.realInitialMemConsumption = 0
        self.pod1.realInitialCpuConsumption = 0
        self.pod1.memLimit = 1
        self.pod1.cpuLimit = 1
        self.pod1.atNode = self.node2
        self.pod1.toNode = mnode.Node.NODE_NULL
        self.pod1.memLimitsStatus = STATUS_LIM["Limit Met"]
        self.pod1.amountOfActiveRequests = 0
        # self.pod1.targetService = self.service1
        self.pod1.metadata_labels.add(labelFactory.get("test", "test"))
        self.pod1.priorityClass = self.priorityLow
        self.pod.append(self.pod1)

        self.pod2 = self.addObject(mpod.Pod('pod2'))
        self.pod2.currentRealCpuConsumption = 0
        self.pod2.currentRealMemConsumption = 0
        self.pod2.status = STATUS_POD["Running"]
        self.pod2.memRequest = 2
        self.pod2.cpuRequest = 2
        self.pod2.podNotOverwhelmingLimits = True
        self.pod2.realInitialMemConsumption = 0
        self.pod2.realInitialCpuConsumption = 0
        self.pod2.memLimit = 1
        self.pod2.cpuLimit = 1
        self.pod2.atNode = self.node2
        self.pod2.toNode = mnode.Node.NODE_NULL
        self.pod2.memLimitsStatus = STATUS_LIM["Limit Met"]
        # TODO: relations should give a helpful error message when '=' is used instead of add().
        self.pod2.amountOfActiveRequests = 0
        # self.pod2.targetService = self.service1
        self.pod2.metadata_labels.add(labelFactory.get("test", "test"))
        self.pod2.priorityClass = self.priorityLow
        self.pod.append(self.pod2)

        self.pod3 = self.addObject(mpod.Pod('pod3'))
        self.pod3.currentRealCpuConsumption = 0
        self.pod3.currentRealMemConsumption = 0
        self.pod3.status = STATUS_POD["Running"]
        self.pod3.memRequest = 1
        self.pod3.cpuRequest = 1
        self.pod3.podNotOverwhelmingLimits = True
        self.pod3.realInitialMemConsumption = 0
        self.pod3.realInitialCpuConsumption = 0
        self.pod3.memLimit = 2
        self.pod3.cpuLimit = 2
        self.pod3.atNode = self.node
        self.pod3.toNode = mnode.Node.NODE_NULL
        self.pod3.memLimitsStatus = STATUS_LIM["Limit Met"]
        self.pod3.amountOfActiveRequests = 0
        # self.pod3.targetService = self.service2
        self.pod3.metadata_labels.add(labelFactory.get("test2", "test2"))
        self.pod3.priorityClass = self.priorityLow
        self.pod.append(self.pod3)

        self.pod4 = self.addObject(mpod.Pod('pod4'))
        self.pod4.currentRealCpuConsumption = 0
        self.pod4.currentRealMemConsumption = 0
        self.pod4.status = STATUS_POD["Running"]
        self.pod4.memRequest = 1
        self.pod4.cpuRequest = 1
        self.pod4.podNotOverwhelmingLimits = True
        self.pod4.realInitialMemConsumption = 0
        self.pod4.realInitialCpuConsumption = 0
        self.pod4.memLimit = 1
        self.pod4.cpuLimit = 1
        self.pod4.atNode = self.node
        self.pod4.toNode = mnode.Node.NODE_NULL
        self.pod4.memLimitsStatus = STATUS_LIM["Limit Met"]
        self.pod4.amountOfActiveRequests = 0
        # self.pod4.targetService = self.service3
        self.pod4.metadata_labels.add(labelFactory.get("test3", "test3"))
        self.pod4.priorityClass = self.priorityLow
        self.pod.append(self.pod4)

        self.pod5 = self.addObject(mpod.Pod('pod5'))
        self.pod5.currentRealCpuConsumption = 0
        self.pod5.currentRealMemConsumption = 0
        self.pod5.status = STATUS_POD["Pending"]
        self.pod5.memRequest = 2
        self.pod5.cpuRequest = 2
        self.pod5.podNotOverwhelmingLimits = True
        self.pod5.realInitialMemConsumption = 0
        self.pod5.realInitialCpuConsumption = 0
        self.pod5.memLimit = 1
        self.pod5.cpuLimit = 1
        self.pod5.atNode = mnode.Node.NODE_NULL
        self.pod5.toNode = mnode.Node.NODE_NULL
        self.pod5.memLimitsStatus = STATUS_LIM["Limit Met"]
        self.pod5.amountOfActiveRequests = 0
        # self.pod5.targetService = self.service3
        self.pod5.metadata_labels.add(labelFactory.get("test3", "test3"))
        self.pod5.priorityClass = self.priorityHigh
        self.pod.append(self.pod5)

        self.pod6 = self.addObject(mpod.Pod('pod6'))
        self.pod6.currentRealCpuConsumption = 0
        self.pod6.currentRealMemConsumption = 0
        self.pod6.status = STATUS_POD["Pending"]
        self.pod6.memRequest = 2
        self.pod6.cpuRequest = 2
        self.pod6.podNotOverwhelmingLimits = True
        self.pod6.realInitialMemConsumption = 0
        self.pod6.realInitialCpuConsumption = 0
        self.pod6.memLimit = 1
        self.pod6.cpuLimit = 1
        self.pod6.atNode = mnode.Node.NODE_NULL
        self.pod6.toNode = self.node
        self.pod6.memLimitsStatus = STATUS_LIM["Limit Met"]
        self.pod6.amountOfActiveRequests = 0
        # self.pod6.targetService = self.service3
        self.pod6.metadata_labels.add(labelFactory.get("test3", "test3"))
        self.daemonset1.podList.add(self.pod6)
        self.pod6.priorityClass = self.priorityHigh
        self.pod.append(self.pod6)

        self.pod7 = self.addObject(mpod.Pod('pod7'))
        self.pod7.currentRealCpuConsumption = 0
        self.pod7.currentRealMemConsumption = 0
        self.pod7.status = STATUS_POD["Pending"]
        self.pod7.memRequest = 2
        self.pod7.cpuRequest = 2
        self.pod7.podNotOverwhelmingLimits = True
        self.pod7.realInitialMemConsumption = 0
        self.pod7.realInitialCpuConsumption = 0
        self.pod7.memLimit = 1
        self.pod7.cpuLimit = 1
        self.pod7.atNode = mnode.Node.NODE_NULL
        self.pod7.toNode = self.node2
        self.pod7.memLimitsStatus = STATUS_LIM["Limit Met"]
        self.pod7.amountOfActiveRequests = 0
        # self.pod7.targetService = self.service3
        self.pod7.metadata_labels.add(labelFactory.get("test3", "test3"))
        self.daemonset1.podList.add(self.pod7)
        self.pod7.priorityClass = self.priorityHigh
        self.pod.append(self.pod7)

        # WARN: can do "optimal skips" of RR chain

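        # Round-robin chain over the pods: pod1 -> pod2 -> ... -> pod7 -> pod1.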
        self.pod7.nextPod = self.pod1
        self.pod6.nextPod = self.pod7
        self.pod5.nextPod = self.pod6
        self.pod4.nextPod = self.pod5
        self.pod3.nextPod = self.pod4
        self.pod2.nextPod = self.pod3
        self.pod1.nextPod = self.pod2
        # mpod.Pod.POD_NULL.nextPod = self.pod1

        self.globalVar1 = self.addObject(mglobals.GlobalVar('globalVar1'))
        ##self.globalVar1.numberOfRejectedReq =0
        #self.globalVar1.lastPod = self.pod1
        #self.globalVar1.memCapacity = 6
        #self.globalVar1.cpuCapacity = 6
        #self.globalVar1.currentFormalCpuConsumption  = 4
        #self.globalVar1.currentFormalMemConsumption  = 4
        #self.globalVar1.queueLength =0
        #self.globalVar1.amountOfPods = 5

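        # Scheduler with the three pending pods queued.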
        self.scheduler = self.addObject(mscheduler.Scheduler('scheduler'))
        self.scheduler.podQueue.add(self.pod5)
        self.scheduler.podQueue.add(self.pod6)
        self.scheduler.podQueue.add(self.pod7)
        self.scheduler.status = STATUS_SCHED["Changed"]
        self.scheduler.queueLength = 3
def test_label_factory():
    l = labelFactory.get("test", "test")