def test_killpod():
    k = KubernetesCluster()
    k.load(open(NODE1).read())  # trim resource, run only one Node
    k.load(open(PODS).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())

    k.create_resource(open(DEPLOYMENT_NEW).read())

    k._build_state()
    p = OptimisticRun(
        k.state_objects
    )  # TODO: check me, I'd like to run the eviction test with KillPod execution
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_start_pods")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
    assert "StartPod" in p.plan.__str__(
    )  # test for transition from Pending to Running
    #get pods only in Running state to check atNode value
    runningPods = filter(
        lambda z: z.status != STATUS_POD["Running"],
        (filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)))
    nodes = list(filter(lambda x: isinstance(x, Node), k.state_objects))
    for pod in runningPods:
        assert pod.atNode in nodes  # check that each running pod is bound to a node
    killingPods = list(filter(
        lambda z: z.status == STATUS_POD["Killing"],
        filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)))
    assert len(killingPods) > 0  # check that at least one pod was killed
Example No. 2
def test_eviction_fromfiles_strictgoal():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    p = SingleGoalEvictionDetect(k.state_objects)
    p.select_target_service()
    p.run(timeout=6600, sessionName="test_eviction_fromfiles_strictgoal")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
    for i, a in enumerate(p.plan, 1):
        print(
            i, ":", a.__class__.__name__, "\n",
            yaml.dump(
                {
                    str(key): repr(v._get_value()) if v else f"NONE_VALUE:{v}"
                    for (key, v) in a.kwargs.items()
                },
                default_flow_style=False))
    assert "StartPod" in p.plan.__str__()
Example No. 3
def test_load_deployments():
    k = KubernetesCluster()
    k.load(open(TEST_PRIORITYCLASS).read())
    k.load(open(TEST_NODES).read())
    k.load(open(TEST_PODS).read())
    k.load(open(TEST_DEPLOYMENTS).read())
    k._build_state()
    assert k.state_objects
Example No. 4
def test_load_folder():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k._build_state()
    # check that no pods are orphan
    pods = list(filter(lambda x: isinstance(x, Pod), k.state_objects))
    assert pods
    for pod in pods:
        assert pod.atNode._property_value != Node.NODE_NULL
Example No. 5
def test_load_folder_load_pod_labels():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k._build_state()
    for pod in filter(lambda x: isinstance(x, Pod), k.state_objects):
        if labelFactory.get("app",
                            "redis-evict") in pod.metadata_labels._get_value():
            return
    raise Exception("Pod labels were not loaded")
Example No. 6
def test_spec_selector_labels():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k._build_state()
    for svc in filter(lambda x: isinstance(x, Service), k.state_objects):
        if labelFactory.get("app",
                            "redis-evict") in svc.spec_selector._get_value():
            return
    raise Exception("Service selector labels were not loaded")
Example No. 7
def update(data=None):
    "Fetch information from currently selected cluster"
    if isinstance(data, io.IOBase):
        data = data.read()
    k = KubernetesCluster()
    if not data:
        global md5_cluster
        result = subprocess.Popen(cluster_md5_sh,
                                  shell=True,
                                  stdout=subprocess.PIPE,
                                  executable='/bin/bash')
        md5_cluster = result.stdout.read().decode('ascii').split()[0]
        assert len(md5_cluster) == 32, \
            "md5_cluster has wrong length ({0}), expected 32".format(md5_cluster)

        result = subprocess.run(
            ['kubectl', 'get', 'all', '--all-namespaces', '-o=json'],
            stdout=subprocess.PIPE)
        if len(result.stdout) < 100:
            print(result.stdout)
            raise SystemError(
                "Error using kubectl. Make sure `kubectl get all` is working."
            )
        data = json.loads(result.stdout.decode("utf-8"))
        for item in data["items"]:
            k.load_item(item)

        result = subprocess.run(['kubectl', 'get', 'node', '-o=json'],
                                stdout=subprocess.PIPE)
        if len(result.stdout) < 100:
            raise SystemError(
                "Error using kubectl. Make sure `kubectl get node` is working."
            )
        data = json.loads(result.stdout.decode("utf-8"))
        for item in data["items"]:
            k.load_item(item)
        result = subprocess.run(['kubectl', 'get', 'pc', '-o=json'],
                                stdout=subprocess.PIPE)
        if len(result.stdout) < 100:
            raise SystemError(
                "Error using kubectl. Make sure `kubectl get pc` is working."
            )
        data = json.loads(result.stdout.decode("utf-8"))
        for item in data["items"]:
            k.load_item(item)
    else:
        for ys in kalc.misc.util.split_yamldumps(data):
            k.load(ys)

    k._build_state()
    global kalc_state_objects
    kalc_state_objects.clear()
    kalc_state_objects.extend(k.state_objects)
    global cluster
    cluster = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
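
A minimal usage sketch for update() (a sketch, assuming the module-level globals referenced above; the dump file name is hypothetical and the no-argument form needs working kubectl access):

def example_update_usage():
    # offline: feed a previously saved multi-document YAML dump (open files are accepted)
    with open("cluster-dump.yaml") as f:  # hypothetical path
        update(f)
    # online alternative: update() with no arguments shells out to kubectl
    # afterwards the module-level globals are populated
    assert kalc_state_objects
    assert cluster is not None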
Example No. 8
def test_convert_node_problem():
    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar = next(
        filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    # initial node state
    n = Node()
    n.cpuCapacity = 5
    n.memCapacity = 5
    # Create running pods
    pod_running_1 = build_running_pod(1, 2, 2, n)
    pod_running_2 = build_running_pod(2, 2, 2, n)

    ## Set consumption as expected
    n.currentFormalCpuConsumption = 4
    n.currentFormalMemConsumption = 4
    n.amountOfActivePods = 2

    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"

    # Service to detect eviction
    s = Service()
    s.metadata_name = "test-service"
    s.amountOfActivePods = 2
    s.status = STATUS_SERV["Started"]

    # our service has multiple pods, but we are detecting a pods-pending issue;
    # remove the service as we are detecting service outage via the bug above
    pod_running_1.targetService = s
    pod_running_2.targetService = s
    pod_running_1.hasService = True
    pod_running_2.hasService = True
    pod_running_1.priorityClass = pc
    pod_running_2.priorityClass = pc

    d = Deployment()
    d.spec_replicas = 2
    d.amountOfActivePods = 2
    pod_running_1.hasDeployment = True
    pod_running_2.hasDeployment = True
    d.podList.add(pod_running_1)
    d.podList.add(pod_running_2)

    k.state_objects.extend([n, pod_running_1, pod_running_2, s, d, pc])
    k2 = KubernetesCluster()
    for y in convert_space_to_yaml(k.state_objects, wrap_items=True):
        # print(y)
        k2.load(y)
    k2._build_state()


# TODO: test node outage exclusion
Example No. 9
def test_exclude_regexp_unit():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    mark_excluded(k.state_objects, "Service:front*", skip_check=False)
    for p in filter(lambda x: isinstance(x, Service), k.state_objects):
        if str(p.metadata_name) == "frontend":
            if p.searchable:
                raise ValueError("exclude doesn't work")
def test_service_load():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k._build_state()
    objects = filter(lambda x: isinstance(x, Service), k.state_objects)
    for p in objects:
        if p.metadata_name == "redis-master-create" and \
            labelFactory.get("app", "redis-create") in p.metadata_labels._get_value():
            return
    raise ValueError("Could not find service loded")
Example No. 11
def test_load_requests():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    objects = filter(lambda x: isinstance(x, DaemonSet), k.state_objects)
    for p in objects:
        if p.metadata_name == "fluentd-elasticsearch":
            return
    raise ValueError("Could not find service loded")
Example No. 12
def test_load_priorityclass_custom_high_priority():
    k = KubernetesCluster()
    k.load(open(TEST_PRIORITYCLASS).read())
    k._build_state()
    found = False
    for pc in filter(lambda x: isinstance(x, PriorityClass), k.state_objects):
        if pc.metadata_name == "high-priority":
            # print(pc.priority, pc.priority._get_value())
            assert pc.priority > 0
            found = True
    assert found
Example No. 13
def test_load_folder_create_labels():
    k = KubernetesCluster()
    # k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    for ds in filter(lambda x: isinstance(x, DaemonSet), k.state_objects):
        if labelFactory.get(
                "k8s-app",
                "fluentd-logging") in ds.metadata_labels._get_value():
            return
    raise Exception("Can not check labels load")
Example No. 14
def test_heapster_load():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k._build_state()
    heapsterpod = next(filter(lambda x: isinstance(x, Pod) and \
        "heapster" in str(x.metadata_name), k.state_objects))
    criticalpc = next(filter(lambda x: isinstance(x, PriorityClass) \
        and x.metadata_name == "system-cluster-critical", k.state_objects))
    # print(heapsterpod.priorityClass._get_value(), criticalpc._get_value())
    # both the underlying values and the property wrappers should compare equal
    assert heapsterpod.priorityClass._get_value() == criticalpc._get_value()
    assert heapsterpod.priorityClass == criticalpc
Example No. 15
def test_pod_target_attached():
    k = KubernetesCluster()
    k.load_dir(TEST_TARGET_DUMP)
    k.create_resource(open(TEST_TARGET_CREATE).read())
    k._build_state()
    deployments = filter(lambda x: isinstance(x, Deployment), k.state_objects)
    for deployment in deployments:
        if deployment.metadata_name._get_value() == "redis-master-create":
            for pod in util.objDeduplicatorByName(
                    deployment.podList._get_value()):
                assert pod.targetService._get_value() is not None

def test_priority_is_loaded():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k._build_state()
    priorityClasses = filter(lambda x: isinstance(x, PriorityClass),
                             k.state_objects)
    for p in priorityClasses:
        if p.metadata_name == "high-priority" and p.preemptionPolicy == POLICY["PreemptLowerPriority"]\
            and p.priority > 0:
            return
    raise ValueError("Could not find priority loded")
def test_queue_status():
    "test length and status of scheduler queue after load"
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    nodes = list(filter(lambda x: isinstance(x, Node), k.state_objects))
    # assert scheduler.queueLength == len(nodes)
    assert scheduler.podQueue._get_value()
    assert scheduler.status == STATUS_SCHED["Changed"]
def test_anyservice_interrupted_fromfiles():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    mark_excluded_service(k.state_objects)
    p = AnyServiceInterrupted(k.state_objects)
    print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_anyservice_interrupted_fromfiles")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
Example No. 19
def test_load_pods_new():
    k = KubernetesCluster()
    k.load(open(TEST_PRIORITYCLASS).read())
    k.load(open(TEST_PODS).read())
    k._build_state()
    # TODO: check if pod is fully loaded
    pod = k.state_objects[2 + 3]
    assert isinstance(pod, Pod)
    assert len(pod.metadata_labels._get_value()) > 0
    assert pod.status == STATUS_POD["Running"]

    assert k.state_objects
Example No. 20
def test_limits_for_pods_created():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    objects = filter(lambda x: isinstance(x, Pod), k.state_objects)
    for p in objects:
        if str(p.metadata_name).startswith("fluentd-elasticsearch") and \
            p.cpuRequest > -1 and \
            p.memRequest > -1 and \
            p.memLimit > -1:
            return
    raise ValueError("Could not find service loded")
Example No. 21
def test_load_limits():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DEPLOYMENT).read())
    k._build_state()
    objects = filter(lambda x: isinstance(x, Deployment), k.state_objects)
    for p in objects:
        if p.metadata_name == "redis-master" and \
           p.cpuRequest > -1 and \
           p.memRequest > -1 and \
           p.memLimit > -1:
            return
    raise ValueError("Could not find service loded")
Example No. 22
def test_load():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k._build_state()
    objects = filter(lambda x: isinstance(x, DaemonSet), k.state_objects)
    for p in objects:
        if p.metadata_name == "fluentd-elasticsearch":
            assert p.cpuRequest._get_value() == \
                util.cpuConvertToAbstractProblem("400m")
            assert p.memRequest._get_value() == \
                util.memConvertToAbstractProblem("400Mi")
            assert p.memLimit._get_value() == \
                util.memConvertToAbstractProblem("400Mi")
            return
    raise ValueError("Could not find service loded")
Example No. 23
def test_anydeployment_interrupted_fromfiles():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DEPLOYMENT).read())
    k._build_state()
    mark_excluded_service(k.state_objects)
    print("------Objects before solver processing------")
    print_objects(k.state_objects)
    p = NodeInterupted(k.state_objects)
    p.run(timeout=6600, sessionName="test_anydeployment_interrupted_fromfiles")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print("------Objects after solver processing------")
    print(Scenario(p.plan).asyaml())
    print_objects(k.state_objects)
Example No. 24
def test_load_load_create_exeption():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DEPLOYMENT).read())
    try:
        k._build_state()
    except AssertionError as e:
        print(str(e))
        assert str(e) == \
            "Error from server (AlreadyExists): deployments.apps \"redis-master\" already exists"
    objects = filter(lambda x: isinstance(x, Deployment), k.state_objects)
    for p in objects:
        if p.metadata_name == "redis-master":
            return
    raise ValueError("Could not find service loded")
Example No. 25
def test_load_create():
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.apply_resource(open(TEST_DAEMONSET_APPLY).read())
    k._build_state()
    objects = filter(lambda x: isinstance(x, DaemonSet), k.state_objects)
    for p in objects:
        if p.metadata_name == "fluentd-elasticsearch":
            assert len(util.objDeduplicatorByName(p.podList._get_value())) == 2
            assert p.cpuRequest._get_value() == \
                util.cpuConvertToAbstractProblem("10m")
            assert p.memRequest._get_value() == \
                util.memConvertToAbstractProblem("10Mi")
            assert p.memLimit._get_value() == \
                util.memConvertToAbstractProblem("10Mi")
            return
    raise ValueError("Could not find service loded")
Example No. 26
def test_cyclic_load_1():
    k, globalVar, n = prepare_test_single_node_dies_2pod_killed_service_outage()
    yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)
    k2 = KubernetesCluster()
    for y in yamlState: 
        # print(y)
        k2.load(y)
    k2._build_state()
    globalVar = k2.state_objects[1]
    # print("--- RUN 2 ---")
    
    yamlState2 = convert_space_to_yaml(k2.state_objects, wrap_items=True)
    # for y in yamlState2:
        # print(y)

    # strip all digits so autogenerated numeric suffixes don't affect the comparison
    ys1 = ''.join([i for i in repr(yamlState) if not i.isdigit()])
    ys2 = ''.join([i for i in repr(yamlState2) if not i.isdigit()])

    assert ys1 == ys2
Example No. 27
def test_OptimisticRun():
    k = KubernetesCluster()
    k.load(open(NODE1).read())
    k.load(open(NODE2).read())
    k.load(open(PODS).read())
    # k.load(open(PODS_PENDING).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())
    k.create_resource(open(DEPLOYMENT_NEW).read())
    k._build_state()
    p = OptimisticRun(
        k.state_objects)  # self.scheduler.status == STATUS_SCHED["Clean"]
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_OptimisticRun")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
Example No. 28
def run_dir_wo_cli(DUMP_local, CHANGE_local):
    k = KubernetesCluster()
    if DUMP_local is not None:
        for dump_item in DUMP_local:
            k.load_dir(dump_item)
    if CHANGE_local is not None:
        for change_item in CHANGE_local:
            k.create_resource(open(change_item).read())
    k._build_state()
    p = OptimisticRun(k.state_objects)
    print("#### run_wo_cli:")
    print("#### print_objects before run: #####")
    print_objects(k.state_objects)

    p.run(timeout=999000, sessionName="test_OptimisticRun")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)

    print("#### print_objects after run: ######")
    print_objects(k.state_objects)
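
A hypothetical invocation of run_dir_wo_cli (both paths are placeholders, not files from this repo):

def example_run_dir_wo_cli():
    # hypothetical inputs: a list of dump directories and a list of new resource manifests
    run_dir_wo_cli(["./cluster_dump"], ["./new-deployment.yaml"])
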
def test_pod_cant_start():
    k = KubernetesCluster()
    k.load(open(NODE1).read())  # trim resource, run only one Node
    k.load(open(PODS).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())

    k.create_resource(open(DEPLOYMENT_NEW_WO_PRIO).read())

    k._build_state()
    p = OptimisticRun(
        k.state_objects
    )  # TODO: check me, I'd like to run the eviction test with KillPod execution
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_pod_cant_start")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
Example No. 30
def test_cyclic_create():
    k, globalVar, n = prepare_test_single_node_dies_2pod_killed_service_outage()
    yamlStateBeforeCreate = convert_space_to_yaml(k.state_objects, wrap_items=True)

    pod_pending_1 = build_pending_pod(3, 2, 2, n)

    dnew = Deployment()
    dnew.amountOfActivePods = 0
    dnew.spec_replicas = 1
    dnew.podList.add(pod_pending_1) # important to add as we extract status, priority spec from pod

    snew = Service()
    snew.metadata_name = "test-service-new"
    snew.amountOfActivePods = 0
    pod_pending_1.targetService = snew

    create_objects = [dnew, snew]
    yamlCreate = convert_space_to_yaml(create_objects, wrap_items=False, load_logic_support=False)

    # snew.status = STATUS_SERV["Started"]
    k.state_objects.extend([pod_pending_1, dnew, snew])
    yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)

    k2 = KubernetesCluster()
    for y in yamlStateBeforeCreate: 
        # print(y)
        k2.load(y)
    for y in yamlCreate:
        k2.load(y, mode=KubernetesCluster.CREATE_MODE)
    k2._build_state()
    globalVar = k2.state_objects[1]
    # print("--- RUN 2 ---")
    
    yamlState2 = convert_space_to_yaml(k2.state_objects, wrap_items=True)
    # for y in yamlState2:
        # print(y)

    # names are ignored in the diff: created objects get fresh autogenerated names
    assert prepare_yamllist_for_diff(yamlState, ignore_names=True) == \
                    prepare_yamllist_for_diff(yamlState2, ignore_names=True)