Example no. 1
def update(data=None):
    "Fetch information from currently selected cluster"
    if isinstance(data, io.IOBase):
        data = data.read()
    k = KubernetesCluster()
    if not data:
        global md5_cluster
        result = subprocess.Popen(cluster_md5_sh,
                                  shell=True,
                                  stdout=subprocess.PIPE,
                                  executable='/bin/bash')
        md5_cluster = result.stdout.read().decode('ascii').split()[0]
        assert len(md5_cluster) == 32, \
            "md5_cluster checksum has wrong length ({0}), expected 32".format(md5_cluster)

        # Load all namespaced resources.
        result = subprocess.run(
            ['kubectl', 'get', 'all', '--all-namespaces', '-o=json'],
            stdout=subprocess.PIPE)
        if len(result.stdout) < 100:
            print(result.stdout)
            raise SystemError(
                "Error using kubectl. Make sure `kubectl get pods` is working."
            )
        data = json.loads(result.stdout.decode("utf-8"))
        for item in data["items"]:
            k.load_item(item)

        # Load node objects.
        result = subprocess.run(['kubectl', 'get', 'node', '-o=json'],
                                stdout=subprocess.PIPE)
        if len(result.stdout) < 100:
            raise SystemError(
                "Error using kubectl. Make sure `kubectl get pods` is working."
            )
        data = json.loads(result.stdout.decode("utf-8"))
        for item in data["items"]:
            k.load_item(item)
        # Load priority classes ('pc' is the kubectl short name for priorityclass).
        result = subprocess.run(['kubectl', 'get', 'pc', '-o=json'],
                                stdout=subprocess.PIPE)
        if len(result.stdout) < 100:
            raise SystemError(
                "Error using kubectl. Make sure `kubectl get pods` is working."
            )
        data = json.loads(result.stdout.decode("utf-8"))
        for item in data["items"]:
            k.load_item(item)
    else:
        for ys in kalc.misc.util.split_yamldumps(data):
            k.load(ys)

    k._build_state()
    global kalc_state_objects
    kalc_state_objects.clear()
    kalc_state_objects.extend(k.state_objects)
    global cluster
    cluster = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
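
A minimal usage sketch for update() (hedged: the dump file name is hypothetical; the function mutates the module-level globals kalc_state_objects, cluster, and md5_cluster used above):

    update()                              # query the live cluster through kubectl
    with open("cluster-dump.yaml") as f:  # hypothetical dump file
        update(f)                         # load serialized YAML state from a file object
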
Example no. 2
def test_convert_node_problem():
    # Initialize scheduler, globalvar
    k = KubernetesCluster()
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    globalVar = next(
        filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
    # initial node state
    n = Node()
    n.cpuCapacity = 5
    n.memCapacity = 5
    # Create running pods
    pod_running_1 = build_running_pod(1, 2, 2, n)
    pod_running_2 = build_running_pod(2, 2, 2, n)

    # Set consumption as expected
    n.currentFormalCpuConsumption = 4
    n.currentFormalMemConsumption = 4
    n.amountOfActivePods = 2

    pc = PriorityClass()
    pc.priority = 10
    pc.metadata_name = "high-prio-test"

    # Service to detect eviction
    s = Service()
    s.metadata_name = "test-service"
    s.amountOfActivePods = 2
    s.status = STATUS_SERV["Started"]

    # our service has multiple pods, but here we are detecting a pods-pending issue;
    # the service could be removed, since service outage is detected by the bug above
    pod_running_1.targetService = s
    pod_running_2.targetService = s
    pod_running_1.hasService = True
    pod_running_2.hasService = True
    pod_running_1.priorityClass = pc
    pod_running_2.priorityClass = pc

    d = Deployment()
    d.spec_replicas = 2
    d.amountOfActivePods = 2
    pod_running_1.hasDeployment = True
    pod_running_2.hasDeployment = True
    d.podList.add(pod_running_1)
    d.podList.add(pod_running_2)

    k.state_objects.extend([n, pod_running_1, pod_running_2, s, d, pc])
    k2 = KubernetesCluster()
    for y in convert_space_to_yaml(k.state_objects, wrap_items=True):
        # print(y)
        k2.load(y)
    k2._build_state()


# TODO: test node outage exclusion
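
A hedged follow-up for the round trip above, borrowing the digit-stripping comparison used by test_cyclic_load_1 below (a sketch, not part of the original test): after k2._build_state(), re-serialize both clusters and compare them with generated numeric ids ignored.

    ys1 = ''.join(c for c in repr(convert_space_to_yaml(k.state_objects, wrap_items=True)) if not c.isdigit())
    ys2 = ''.join(c for c in repr(convert_space_to_yaml(k2.state_objects, wrap_items=True)) if not c.isdigit())
    assert ys1 == ys2  # the two serializations should agree modulo numeric ids
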
Example no. 3
def test_load_priorityclass_custom_high_priority():
    k = KubernetesCluster()
    k.load(open(TEST_PRIORITYCLASS).read())
    k._build_state()
    found = False
    for pc in filter(lambda x: isinstance(x, PriorityClass), k.state_objects):
        if pc.metadata_name == "high-priority":
            # print(pc.priority, pc.priority._get_value())
            assert pc.priority > 0
            found = True
    assert found
Example no. 4
def test_load_pods_new():
    k = KubernetesCluster()
    k.load(open(TEST_PRIORITYCLASS).read())
    k.load(open(TEST_PODS).read())
    k._build_state()
    # TODO: check if pod is fully loaded
    pod = k.state_objects[2 + 3]
    assert isinstance(pod, Pod)
    assert len(pod.metadata_labels._get_value()) > 0
    assert pod.status == STATUS_POD["Running"]

    assert k.state_objects
Example no. 5
def test_load_deployments():
    k = KubernetesCluster()
    k.load(open(TEST_PRIORITYCLASS).read())
    k.load(open(TEST_NODES).read())
    k.load(open(TEST_PODS).read())
    k.load(open(TEST_DEPLOYMENTS).read())
    k._build_state()
    assert k.state_objects
Example no. 6
def test_cyclic_load_1():
    k, globalVar, n = prepare_test_single_node_dies_2pod_killed_service_outage()
    yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)
    k2 = KubernetesCluster()
    for y in yamlState: 
        # print(y)
        k2.load(y)
    k2._build_state()
    globalVar = k2.state_objects[1]
    # print("--- RUN 2 ---")
    
    yamlState2 = convert_space_to_yaml(k2.state_objects, wrap_items=True)
    # for y in yamlState2:
        # print(y)

    # Strip all digits so that auto-generated numeric ids do not affect the comparison.
    ys1 = ''.join([i for i in repr(yamlState) if not i.isdigit()])
    ys2 = ''.join([i for i in repr(yamlState2) if not i.isdigit()])

    assert ys1 == ys2
Example no. 7
def run_wo_cli(DUMP_local, CHANGE_local):
    k = KubernetesCluster()
    if DUMP_local is not None:
        for dump_item in DUMP_local:
            k.load(open(dump_item).read())
    if CHANGE_local is not None:
        for change_item in CHANGE_local:
            k.create_resource(open(change_item).read())
    k._build_state()
    p = OptimisticRun(k.state_objects)
    print("#### run_wo_cli:")
    print("#### print_objects before run: ######")
    print(print_objects(k.state_objects))

    p.run(timeout=999000, sessionName="test_OptimisticRun")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)

    print("#### print_objects after run: ######")
    print(print_objects(k.state_objects))
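
A hedged invocation sketch for run_wo_cli (the file names are illustrative, not from the original source):

    # Load one cluster dump, apply one resource change, then search for a plan.
    run_wo_cli(DUMP_local=["cluster-dump.yaml"], CHANGE_local=["new-deployment.yaml"])
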
Example no. 8
def test_cyclic_create():
    k, globalVar, n = prepare_test_single_node_dies_2pod_killed_service_outage()
    yamlStateBeforeCreate = convert_space_to_yaml(k.state_objects, wrap_items=True)

    pod_pending_1 = build_pending_pod(3,2,2,n)

    dnew = Deployment()
    dnew.amountOfActivePods = 0
    dnew.spec_replicas = 1
    dnew.podList.add(pod_pending_1)  # important to add, since we extract status and priority spec from the pod

    snew = Service()
    snew.metadata_name = "test-service-new"
    snew.amountOfActivePods = 0
    pod_pending_1.targetService = snew

    create_objects = [dnew, snew]
    yamlCreate = convert_space_to_yaml(create_objects, wrap_items=False, load_logic_support=False)

    # snew.status = STATUS_SERV["Started"]
    k.state_objects.extend([pod_pending_1, dnew, snew])
    yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)

    k2 = KubernetesCluster()
    for y in yamlStateBeforeCreate: 
        # print(y)
        k2.load(y)
    for y in yamlCreate:
        k2.load(y, mode=KubernetesCluster.CREATE_MODE)
    k2._build_state()
    globalVar = k2.state_objects[1]
    # print("--- RUN 2 ---")
    
    yamlState2 = convert_space_to_yaml(k2.state_objects, wrap_items=True)
    # for y in yamlState2:
        # print(y)

    assert prepare_yamllist_for_diff(yamlState, ignore_names=True) == \
                    prepare_yamllist_for_diff(yamlState2, ignore_names=True) 

    
Example no. 9
def test_single_node_dies_2pod_killed_service_outage_invload():
    k, globalVar, n = prepare_test_single_node_dies_2pod_killed_service_outage()
    yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)
    k2 = KubernetesCluster()
    for y in yamlState:
        # print(y)
        k2.load(y)
    k2._build_state()
    globalVar = k2.state_objects[1]

    class Task_check_services(Check_services):
        goal = lambda self: globalVar.is_node_disrupted == True \
                                and globalVar.is_service_disrupted == True

    p = Task_check_services(k2.state_objects)
    p.run(timeout=200)
    print_plan(p)
    assert "NodeOutageFinished" in "\n".join([repr(x) for x in p.plan])
    assert "Initiate_killing_of_Pod_because_of_node_outage" in "\n".join(
        [repr(x) for x in p.plan])
    assert "MarkServiceOutageEvent" in "\n".join([repr(x) for x in p.plan])
Example no. 10
def reload_cluster_from_yaml(k, create_objects):
    perform_yaml_test = True

    try:
        yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)
    except Exception as e:
        print("yaml conertion error",e)
        perform_yaml_test = False
    try:
        yamlCreate = convert_space_to_yaml(create_objects, wrap_items=False, load_logic_support=False)
    except Exception as e:
        print("yaml 2 conertion error",e)
        perform_yaml_test = False
    k2 = KubernetesCluster()
    if perform_yaml_test:
        for y in yamlState:
            k2.load(y)
    if perform_yaml_test:
        for y in yamlCreate:
            k2.load(y, mode=KubernetesCluster.CREATE_MODE)
    k2._build_state()
    return k2
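
A usage sketch for reload_cluster_from_yaml, reusing names from test_cyclic_create above (assumption: the same k and created objects are in scope):

    k2 = reload_cluster_from_yaml(k, [dnew, snew])  # round-trip the state through YAML
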
Example no. 11
def run_wo_cli_step1(DUMP_local, CHANGE_local):
    k = KubernetesCluster()
    if DUMP_local is not None:
        for dump_item in DUMP_local:
            k.load(open(dump_item).read())
    if CHANGE_local is not None:
        for change_item in CHANGE_local:
            k.create_resource(open(change_item).read())
    k._build_state()
    pod_running = next(filter(lambda x: isinstance(x, Pod) and x.status == STATUS_POD["Running"], k.state_objects))
    class NewGoal(OptimisticRun):
        goal = lambda self: pod_running.status == STATUS_POD["Killing"]
    p = NewGoal(k.state_objects)
    print("#### run_wo_cli:")
    print("#### print_objects before run: #### ")
    print(print_objects(k.state_objects))

    p.run(timeout=999000, sessionName="test_OptimisticRun")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)

    print("#### print_objects after run: ####")
    print(print_objects(k.state_objects))
Example no. 12
def test_OptimisticRun():
    k = KubernetesCluster()
    k.load(open(NODE1).read())
    k.load(open(NODE2).read())
    k.load(open(PODS).read())
    # k.load(open(PODS_PENDING).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())
    k.create_resource(open(DEPLOYMENT_NEW).read())
    k._build_state()
    p = OptimisticRun(k.state_objects)  # self.scheduler.status == STATUS_SCHED["Clean"]
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_OptimisticRun")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
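Example no. 13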
def test_pod_cant_start():
    k = KubernetesCluster()
    k.load(open(NODE1).read())  # trim resource, run only one Node
    k.load(open(PODS).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())

    k.create_resource(open(DEPLOYMENT_NEW_WO_PRIO).read())

    k._build_state()
    # TODO check me, I'd like to run an eviction test with KillPod execution
    p = OptimisticRun(k.state_objects)
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_pod_cant_start")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
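Example no. 14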
def test_start_pod():
    k = KubernetesCluster()
    k.load(open(NODE1).read())
    k.load(open(NODE2).read())
    k.load(open(PODS).read())
    # k.load(open(PODS_PENDING).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())
    k.create_resource(open(DEPLOYMENT_NEW).read())
    k._build_state()

    class PodStart(K8ServiceInterruptSearch):
        goal = lambda self: self.goalFoo()

        def goalFoo(self):
            for pod in filter(lambda x: isinstance(x, mpod.Pod),
                              k.state_objects):
                if pod.status != STATUS_POD["Running"]:
                    return False
            return True

    p = PodStart(k.state_objects)  # self.scheduler.status == STATUS_SCHED["Clean"]
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_start_pods")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
    assert "StartPod" in p.plan.__str__(
    )  # test for transition from Pending to Running
    pods = filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)
    nodes = filter(lambda x: isinstance(x, Node), k.state_objects)
    for pod in pods:
        assert pod.atNode in nodes._get_value()
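Example no. 15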
def test_start_pod_from_dump():
    k = KubernetesCluster()
    k.load(open(NODE1).read())
    k.load(open(NODE2).read())
    k.load(open(PODS_PENDING).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())
    k._build_state()
    p = QueueLoadCheck(k.state_objects)  # self.scheduler.status == STATUS_SCHED["Clean"]
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_start_pod_from_dump")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
    assert "StartPod" in p.plan.__str__(
    )  # test for transition from Pending to Running
    pods = filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)
    nodes = filter(lambda x: isinstance(x, Node), k.state_objects)
    for pod in pods:
        assert pod.atNode in nodes._get_value(
        )  # check each pod than each have atNode
Example no. 16
def test_load_nodes():
    k = KubernetesCluster()
    k.load(open(TEST_NODES).read())
    k._build_state()
    assert k.state_objects
Example no. 17
def test_load_pods():
    k = KubernetesCluster()
    k.load(open(TEST_PRIORITYCLASS).read())
    k.load(open(TEST_PODS).read())
    k._build_state()
    assert k.state_objects
Example no. 18
def test_load_deployment():
    k = KubernetesCluster()
    k.load(
        open("./tests/client-cases/criticalhopmanifest_prefixed.yaml").read())
    k._build_state()
    print_objects(k.state_objects)
def test_killpod():
    k = KubernetesCluster()
    k.load(open(NODE1).read())  # trim resource, run only one Node
    k.load(open(PODS).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())

    k.create_resource(open(DEPLOYMENT_NEW).read())

    k._build_state()
    # TODO check me, I'd like to run an eviction test with KillPod execution
    p = OptimisticRun(k.state_objects)
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_start_pods")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
    assert "StartPod" in p.plan.__str__(
    )  # test for transition from Pending to Running
    #get pods only in Running state to check atNode value
    runningPods = filter(
        lambda z: z.status != STATUS_POD["Running"],
        (filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)))
    nodes = filter(lambda x: isinstance(x, Node), k.state_objects)
    for pod in runningPods:
        assert pod.atNode in nodes._get_value(
        )  # check each pod than each have atNode
    killingPods = filter(
        lambda z: z.status != STATUS_POD["Killing"],
        (filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)))
    assert len(killingPods) > 0  # test that some pod Killed