Example #1
def test_eviction_fromfiles_strictgoal():
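    # Load a cluster dump plus a DaemonSet, then search for an eviction plan against the selected target service.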
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    p = SingleGoalEvictionDetect(k.state_objects)
    p.select_target_service()
    p.run(timeout=6600, sessionName="test_eviction_fromfiles_strictgoal")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
    for i, a in enumerate(p.plan, start=1):
        print(
            i, ":", a.__class__.__name__, "\n",
            yaml.dump(
                {
                    str(arg):
                    repr(v._get_value()) if v else f"NONE_VALUE:{v}"
                    for (arg, v) in a.kwargs.items()  # "arg" avoids shadowing the cluster variable k
                },
                default_flow_style=False))
    assert "StartPod" in str(p.plan)
def test_killpod():
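    # Load a single-node cluster, create a new deployment, and check that the plan starts some pods and kills others.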
    k = KubernetesCluster()
    k.load(open(NODE1).read())  # trim resource, run only one Node
    k.load(open(PODS).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())

    k.create_resource(open(DEPLOYMENT_NEW).read())

    k._build_state()
    p = OptimisticRun(
        k.state_objects
    )  # TODO check me, I'd like to run the eviction test with KillPod execution
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_killpod")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
    assert "StartPod" in p.plan.__str__(
    )  # test for transition from Pending to Running
    # get only the pods in Running state, to check their atNode value
    runningPods = list(
        filter(lambda z: z.status == STATUS_POD["Running"],
               filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)))
    nodes = list(filter(lambda x: isinstance(x, Node), k.state_objects))
    for pod in runningPods:
        # check that each running pod has been assigned a node
        assert pod.atNode._get_value() in nodes
    killingPods = list(
        filter(lambda z: z.status == STATUS_POD["Killing"],
               filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)))
    assert len(killingPods) > 0  # check that at least one pod was killed
Example #3
def test_exclude_regexp_unit():
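    # Exclude Services matching "front*" and verify the "frontend" Service is no longer searchable.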
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    mark_excluded(k.state_objects, "Service:front*", skip_check=False)
    for p in filter(lambda x: isinstance(x, Service), k.state_objects):
        if str(p.metadata_name) == "frontend":
            if p.searchable:
                raise ValueError("exclude doesn't work")
Example #4
def test_simple_load_create_scale():
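    # Scale two deployments to 5 replicas and verify every Deployment's podList has 5 pods.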
    k = KubernetesCluster()
    k.load_dir(TEST_DEPLOYMENT_DUMP)
    k.create_resource(open(TEST_DEPLOYMENT1).read())
    k.scale(5, "deployment/redis-master deployment/redis-master1")

    objects = filter(lambda x: isinstance(x, Deployment), k.state_objects)
    for p in objects:
        if len(p.podList) != 5:
            raise ValueError("Scale doesn't work")
def test_load_requests():
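    # Verify the fluentd-elasticsearch DaemonSet is present after building the cluster state.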
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    objects = filter(lambda x: isinstance(x, DaemonSet), k.state_objects)
    for p in objects:
        if p.metadata_name == "fluentd-elasticsearch":
            return
    raise ValueError("Could not find service loded")
def test_load_folder_create_labels():
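    # Verify the created DaemonSet carries the k8s-app=fluentd-logging label after the state build.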
    k = KubernetesCluster()
    # k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    for ds in filter(lambda x: isinstance(x, DaemonSet), k.state_objects):
        if labelFactory.get(
                "k8s-app",
                "fluentd-logging") in ds.metadata_labels._get_value():
            return
    raise Exception("Can not check labels load")
Example #7
def test_pod_target_attached():
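    # Verify every pod of the redis-master-create deployment is attached to a target service.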
    k = KubernetesCluster()
    k.load_dir(TEST_TARGET_DUMP)
    k.create_resource(open(TEST_TARGET_CREATE).read())
    k._build_state()
    deployments = filter(lambda x: isinstance(x, Deployment), k.state_objects)
    for deployment in deployments:
        if deployment.metadata_name._get_value() == "redis-master-create":
            for pod in util.objDeduplicatorByName(
                    deployment.podList._get_value()):
                assert pod.targetService._get_value() is not None
def test_anyservice_interrupted_fromfiles():
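    # Search for a plan that interrupts any non-excluded service in the loaded cluster.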
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    mark_excluded_service(k.state_objects)
    p = AnyServiceInterrupted(k.state_objects)
    print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_anyservice_interrupted_fromfiles")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
def test_queue_status():
    "test length and status of scheduler queue after load"
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    scheduler = next(
        filter(lambda x: isinstance(x, Scheduler), k.state_objects))
    nodes = list(filter(lambda x: isinstance(x, Node), k.state_objects))
    # assert scheduler.queueLength == len(nodes)
    assert scheduler.podQueue._get_value()
    assert scheduler.status == STATUS_SCHED["Changed"]
Example #10
def test_load_limits():
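    # Verify CPU/memory requests and the memory limit are loaded for the redis-master deployment.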
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DEPLOYMENT).read())
    k._build_state()
    objects = filter(lambda x: isinstance(x, Deployment), k.state_objects)
    for p in objects:
        if p.metadata_name == "redis-master" and \
           p.cpuRequest > -1 and \
           p.memRequest > -1 and \
           p.memLimit > -1:
            return
    raise ValueError("Could not find service loded")
def test_limits_for_pods_created():
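    # Verify the pods created for the fluentd-elasticsearch DaemonSet carry resource requests and limits.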
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DAEMONET).read())
    k._build_state()
    objects = filter(lambda x: isinstance(x, Pod), k.state_objects)
    for p in objects:
        if str(p.metadata_name).startswith("fluentd-elasticsearch") and \
            p.cpuRequest > -1 and \
            p.memRequest > -1 and \
            p.memLimit > -1:
            return
    raise ValueError("Could not find service loded")
Example #12
def test_anydeployment_interrupted_fromfiles():
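    # Search for a plan where a node interruption impacts the created deployment.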
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DEPLOYMENT).read())
    k._build_state()
    mark_excluded_service(k.state_objects)
    print("------Objects before solver processing------")
    print_objects(k.state_objects)
    p = NodeInterupted(k.state_objects)
    p.run(timeout=6600, sessionName="test_anydeployment_interrupted_fromfiles")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print("------Objects after solver processing------")
    print(Scenario(p.plan).asyaml())
    print_objects(k.state_objects)
Example #13
def test_load_load_create_exeption():
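    # Creating a deployment that already exists in the dump should fail with AlreadyExists at state build.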
    k = KubernetesCluster()
    k.load_dir(TEST_CLUSTER_FOLDER)
    k.create_resource(open(TEST_DEPLOYMENT).read())
    try:
        k._build_state()
    except AssertionError as e:
        print(str(e))
        assert str(e) == (
            'Error from server (AlreadyExists): '
            'deployments.apps "redis-master" already exists')
    objects = filter(lambda x: isinstance(x, Deployment), k.state_objects)
    for p in objects:
        if p.metadata_name == "redis-master":
            return
    raise ValueError("Could not find service loded")
Example #14
def test_OptimisticRun():
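    # Load a two-node cluster, create a new deployment, and search for a feasible scheduling plan.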
    k = KubernetesCluster()
    k.load(open(NODE1).read())
    k.load(open(NODE2).read())
    k.load(open(PODS).read())
    # k.load(open(PODS_PENDING).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())
    k.create_resource(open(DEPLOYMENT_NEW).read())
    k._build_state()
    p = OptimisticRun(
        k.state_objects)  # self.scheduler.status == STATUS_SCHED["Clean"]
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_OptimisticRun")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
def test_pod_cant_start():
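    # Same scenario as test_killpod, but the new deployment has no priority class set.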
    k = KubernetesCluster()
    k.load(open(NODE1).read())  # trim resource, run only one Node
    k.load(open(PODS).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())

    k.create_resource(open(DEPLOYMENT_NEW_WO_PRIO).read())

    k._build_state()
    p = OptimisticRun(
        k.state_objects
    )  # TODO check me, I'd like to run the eviction test with KillPod execution
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_pod_cant_start")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
Example #16
def run_dir_wo_cli(DUMP_local, CHANGE_local):
    # Load dump directories and resource changes, then search for a feasible plan without the CLI.
    k = KubernetesCluster()
    if DUMP_local is not None:
        for dump_item in DUMP_local:
            k.load_dir(dump_item)
    if CHANGE_local is not None:
        for change_item in CHANGE_local:
            k.create_resource(open(change_item).read())
    k._build_state()
    p = OptimisticRun(k.state_objects)
    print("#### run_dir_wo_cli:")
    print("#### print_objects before run: #####")
    print_objects(k.state_objects)

    p.run(timeout=999000, sessionName="run_dir_wo_cli")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)

    print("#### print_objects after run: ######")
    print_objects(k.state_objects)
Example #17
def run_wo_cli_step1(DUMP_local, CHANGE_local):
    # Like run_dir_wo_cli, but loads single dump files and targets a goal that kills a running pod.
    k = KubernetesCluster()
    if DUMP_local is not None:
        for dump_item in DUMP_local:
            k.load(open(dump_item).read())
    if CHANGE_local is not None:
        for change_item in CHANGE_local:
            k.create_resource(open(change_item).read())
    k._build_state()
    pod_running = next(
        filter(
            lambda x: isinstance(x, Pod) and x.status == STATUS_POD["Running"],
            k.state_objects))

    class NewGoal(OptimisticRun):
        goal = lambda self: pod_running.status == STATUS_POD["Killing"]

    p = NewGoal(k.state_objects)
    print("#### run_wo_cli_step1:")
    print("#### print_objects before run: ####")
    print_objects(k.state_objects)

    p.run(timeout=999000, sessionName="run_wo_cli_step1")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)

    print("#### print_objects after run: ####")
    print_objects(k.state_objects)
def test_start_pod():
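    # Define a custom goal that requires every pod to be Running, then search for a plan that reaches it.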
    k = KubernetesCluster()
    k.load(open(NODE1).read())
    k.load(open(NODE2).read())
    k.load(open(PODS).read())
    # k.load(open(PODS_PENDING).read())
    k.load(open(SERVICES).read())
    k.load(open(REPLICASETS).read())
    k.load(open(PRIORITYCLASSES).read())
    k.load(open(DEPLOYMENT).read())
    k.create_resource(open(DEPLOYMENT_NEW).read())
    k._build_state()

    class PodStart(K8ServiceInterruptSearch):
        goal = lambda self: self.goalFoo()

        def goalFoo(self):
            for pod in filter(lambda x: isinstance(x, mpod.Pod),
                              k.state_objects):
                if pod.status != STATUS_POD["Running"]:
                    return False
            return True

    p = PodStart(
        k.state_objects)  # self.scheduler.status == STATUS_SCHED["Clean"]
    # print_objects(k.state_objects)
    p.run(timeout=6600, sessionName="test_start_pods")
    if not p.plan:
        raise Exception("Could not solve %s" % p.__class__.__name__)
    print(Scenario(p.plan).asyaml())
    assert "StartPod" in p.plan.__str__(
    )  # test for transition from Pending to Running
    pods = filter(lambda x: isinstance(x, mpod.Pod), k.state_objects)
    nodes = list(filter(lambda x: isinstance(x, Node), k.state_objects))
    for pod in pods:
        # check that every pod is assigned to one of the loaded nodes
        assert pod.atNode._get_value() in nodes
Example #19
def test_load_twise_warning():
    k = KubernetesCluster()
    k.create_resource(open(TEST_DEPLOYMENT).read())
    k.create_resource(open(TEST_DEPLOYMENT1).read())