Example #1
def crash_test(string):
    print color.INFO("Opening persistent TCP connection for diagnostics")
    sock_mem.connect((HOST, PORT_MEM))
    get_mem_start()

    print color.HEADER("Initial crash test")
    burst_size = BURST_SIZE * 10

    ARP_burst(burst_size, 0)
    UDP_burst(burst_size, 0)
    ICMP_flood(burst_size, 0)
    httperf(burst_size, 0)
    time.sleep(BURST_INTERVAL)
    return get_mem()
Example #2
def fire_bursts(func, sub_test_name, lead_out=3):
    name_tag = "<" + sub_test_name + ">"
    print color.HEADER(test_name + " initiating " + sub_test_name)
    membase_start = func()
    mem_base = membase_start

    # Track heap behavior
    increases = 0
    decreases = 0
    constant = 0

    for i in range(0, BURST_COUNT):
        print color.INFO(name_tag), " Run ", i + 1
        memi = func()
        if memi > mem_base:
            memincrease = memi - mem_base
            increases += 1
        elif memi == mem_base:
            memincrease = 0
            constant += 1
        else:
            memincrease = 0
            decreases += 1

        # We want to know how much each burst increases memory relative to the last burst
        mem_base = memi

        if memincrease > acceptable_increase:
            print color.WARNING(
                name_tag), "Memory increased by ", memincrease, "b, ", float(
                    memincrease) / BURST_SIZE, "per packet \n"
        else:
            print color.OK(name_tag), "Memory increase ", memincrease, "b \n"

        # Memory can decrease, we don't care about that
        # if memincrease > 0:
        #  mem_base += memincrease
    print color.INFO(
        name_tag
    ), "Heap behavior: ", "+", increases, ", -", decreases, ", ==", constant
    print color.INFO(name_tag), "Done. Checking for liveliness"
    if memory_increase(lead_out, membase_start) > acceptable_increase:
        print color.FAIL(sub_test_name + " failed ")
        return False
    print color.PASS(sub_test_name + " succeeded ")
    return True
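
For reference, a minimal, self-contained sketch of the pattern fire_bursts() follows: run a burst function repeatedly, compare each memory reading against the previous one, and count how often the heap grows, shrinks, or stays flat. All names here (fake_burst, track_heap) are illustrative stand-ins and are not part of the test suite above.

import random

def fake_burst():
    # Stand-in for ARP_burst/UDP_burst etc.: returns a simulated memory reading
    return 1000000 + random.randint(-512, 2048)

def track_heap(burst_func, runs=5):
    increases = decreases = constant = 0
    base = burst_func()
    for _ in range(runs):
        current = burst_func()
        if current > base:
            increases += 1
        elif current < base:
            decreases += 1
        else:
            constant += 1
        base = current  # each run is compared against the previous one
    return increases, decreases, constant

print(track_heap(fake_burst))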
Example #3
def misc_working(misc_tests):
    global test_count
    test_count += len(misc_tests)
    if len(misc_tests) == 0:
        return 0

    if ("misc" in args.skip):
        print pretty.WARNING("Misc test skipped")
        return 0

    print pretty.HEADER("Building " + str(len(misc_tests)) + " misc")
    fail_count = 0

    for test in misc_tests:
        build = test.start().wait_status()
        fail_count += 1 if build else 0

    return fail_count
Example #4
def filter_tests(all_tests, arguments):
    """ Will figure out which tests are to be run

    Arguments:
        all_tests (list of Test obj): all processed test objects
        arguments (argument object): Contains arguments from argparse

    returns:
        list: All Test objects that are to be run
    """
    print pretty.HEADER("Filtering tests")

    # Strip trailing slashes from paths
    add_args = [ x.rstrip("/") for x in arguments.tests ]
    skip_args = [ x.rstrip("/") for x in arguments.skip ]

    print pretty.INFO("Tests to run"), ", ".join(add_args)

    # 1) Add tests to be run

    # If no tests specified all are run
    if not add_args:
        tests_added = [ x for x in all_tests if x.type_ in test_types ]
    else:
        tests_added = [ x for x in all_tests
                        if x.type_ in add_args
                        or x.category_ in add_args
                        or x.name_ in add_args ]

    # 2) Remove tests defined by the skip argument
    print pretty.INFO("Tests marked skip on command line"), ", ".join(skip_args)
    skipped_tests = [ x for x in tests_added
                  if x.type_ in skip_args
                  or x.category_ in skip_args
                  or x.name_ in skip_args
                  or x.skip_]

    # Print all the skipped tests
    print_skipped(skipped_tests)

    fin_tests = [ x for x in tests_added if x not in skipped_tests ]
    print pretty.INFO("Accepted tests"), ", ".join([x.name_ for x in fin_tests])

    return fin_tests
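
A hypothetical invocation of filter_tests(): the parsed attribute names (arguments.tests, arguments.skip) mirror what the function above reads, but the --tests/--skip flag names are assumptions about the surrounding test runner rather than taken from it.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--tests", nargs="*", default=[])
parser.add_argument("--skip", nargs="*", default=[])
args = parser.parse_args(["--tests", "net/", "--skip", "stress"])

# filter_tests(all_tests, args) would now keep every Test whose type_,
# category_ or name_ matches "net" and drop anything matching "stress".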
Example #5
def misc_working():
    global test_count
    if ("misc" in args.skip):
        print pretty.WARNING("Misc test skipped")
        return 0

    misc_dir = 'misc'
    dirs = os.walk(misc_dir).next()[1]
    dirs.sort()
    print pretty.HEADER("Building " + str(len(dirs)) + " misc")
    test_count += len(dirs)
    fail_count = 0
    for directory in dirs:
        misc = misc_dir + "/" + directory
        print "Building misc ", misc
        build = Test(misc, command = ['./test.sh'], name = directory).start().wait_status()
        run = 0  # TODO: Make a 'test' folder for each miscellaneous test, containing test.py, vm.json etc.
        fail_count += 1 if build or run else 0
    return fail_count
Example #6
def stress_test():
    """Perform stresstest"""
    global test_count
    test_count += 1
    if ("stress" in args.skip):
        print pretty.WARNING("Stress test skipped")
        return 0

    if (not validate_tests.validate_test("stress")):
        raise Exception("Stress test failed validation")

    print pretty.HEADER("Starting stress test")
    stress = Test("stress", clean = args.clean).start()

    if (stress and args.fail):
        print pretty.FAIL("Stress test failed")
        sys.exit(stress)

    return 1 if stress.wait_status() else 0
Example #7
def stress_test(stress_tests):
    """Perform stresstest"""
    global test_count
    test_count += len(stress_tests)
    if len(stress_tests) == 0:
        return 0

    if ("stress" in args.skip):
        print pretty.WARNING("Stress test skipped")
        return 0

    if (not validate_tests.validate_test("stress")):
        raise Exception("Stress test failed validation")

    print pretty.HEADER("Starting stress test")
    for test in stress_tests:
        test.start()

    fail_count = 0
    for test in stress_tests:
        fail_count += 1 if test.wait_status() else 0

    return fail_count
Example #8
def crash_test(string):
    print(color.INFO("Opening persistent TCP connection for diagnostics"))
    sock_mem.connect((HOST, PORT_MEM))
    mem_before = get_mem_start()
    if mem_before <= 0:
        print(color.FAIL("Initial memory reported as " + str(mem_before)))
        return False

    if not heap_verified:
        print(color.FAIL("Heap behavior was not verified as expected. "))
        return False

    print(color.HEADER("Initial crash test"))
    burst_size = BURST_SIZE * 10

    ARP_burst(burst_size, 0)
    UDP_burst(burst_size, 0)
    ICMP_flood(burst_size, 0)
    httperf(burst_size, 0)
    time.sleep(BURST_INTERVAL)
    mem_after = get_mem()
    print(color.INFO("Crash test complete. Memory in use: "), mem_after)
    return mem_after >= memuse_at_start
Example #9
            color.INFO(
                "There are {0} sockets in use, waiting for value to drop below {1}"
                .format(time_wait_proc, socket_limit)))
        time.sleep(7)


# Add custom event-handlers
vm.on_output("Heap functioning as expected", heap_ok)
vm.on_output("Ready to start", crash_test)
vm.on_output("Ready for ARP", ARP)
vm.on_output("Ready for UDP", UDP)
vm.on_output("Ready for ICMP", ICMP)
vm.on_output("Ready for TCP", TCP)
vm.on_output("Ready to end", check_vitals)

if len(sys.argv) > 1:
    thread_timeout = int(sys.argv[1])

if len(sys.argv) > 3:
    BURST_COUNT = int(sys.argv[2])
    BURST_SIZE = int(sys.argv[3])

print(color.HEADER(test_name + " initializing"))
print(color.INFO(name_tag), "configured for ", BURST_COUNT, "bursts of",
      BURST_SIZE, "packets each")

if len(sys.argv) > 4:
    vm.boot(timeout=thread_timeout, image_name=str(sys.argv[4]))
else:
    vm.cmake().boot(thread_timeout).clean()
Example #10
def filter_tests(all_tests, arguments):
    """ Will figure out which tests are to be run

    Arguments:
        all_tests (list of Test obj): all processed test objects
        arguments (argument object): Contains arguments from argparse

    returns:
        tuple: (All Test objects that are to be run, skipped_tests)
    """
    print pretty.HEADER("Filtering tests")

    # Strip trailing slashes from paths
    add_args = [x.rstrip("/") for x in arguments.tests]
    skip_args = [x.rstrip("/") for x in arguments.skip]

    print pretty.INFO("Tests to run"), ", ".join(add_args)

    # 1) Add tests to be run

    # If no tests specified all are run
    if not add_args:
        tests_added = [x for x in all_tests if x.type_ in test_types]
    else:
        tests_added = [
            x for x in all_tests if x.type_ in add_args
            or x.category_ in add_args or x.name_ in add_args
        ]

        # Deal with specific properties
        add_properties = list(
            set(add_args).intersection(all_tests[0].properties_.keys()))
        for test in all_tests:
            for argument in add_properties:
                if test.properties_[argument] and test not in tests_added:
                    tests_added.append(test)

    # 2) Remove tests defined by the skip argument
    print pretty.INFO("Tests marked skip on command line"), ", ".join(
        skip_args)
    skipped_tests = [
        x for x in tests_added if x.type_ in skip_args
        or x.category_ in skip_args or x.name_ in skip_args or x.skip_
    ]

    # Deal with specific properties
    skip_properties = list(
        set(skip_args).intersection(all_tests[0].properties_.keys()))
    for test in tests_added:
        for argument in skip_properties:
            if test.properties_[argument] and test not in skipped_tests:
                test.skip_ = True
                test.skip_reason_ = "Test marked skip on command line"
                skipped_tests.append(test)

    # Print all the skipped tests
    print_skipped(skipped_tests)

    fin_tests = [x for x in tests_added if x not in skipped_tests]
    print pretty.INFO("Accepted tests"), ", ".join(
        [x.name_ for x in fin_tests])

    return (fin_tests, skipped_tests)
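
The property-matching step can be illustrated in isolation; plain dicts stand in for a Test object's properties_, and the argument list is made up for the example.

add_args = ["net", "time_sensitive"]
properties = {"time_sensitive": True, "vga": False}

# Arguments that are also property names select tests with that property set
add_properties = list(set(add_args).intersection(properties.keys()))
print(add_properties)  # ['time_sensitive']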
Example #11
def integration_tests(tests):
    """ Function that runs the tests that are passed to it.
    Filters out any invalid tests before running

    Arguments:
        tests: List containing test objects to be run

    Returns:
        integer: Number of tests that failed
    """
    if len(tests) == 0:
        return 0

    time_sensitive_tests = [
        x for x in tests if x.properties_["time_sensitive"]
    ]
    tests = [x for x in tests if x not in time_sensitive_tests]

    # Print info before starting to run
    print pretty.HEADER("Starting " + str(len(tests)) + " integration test(s)")
    for test in tests:
        print pretty.INFO("Test"), "starting", test.name_

    if time_sensitive_tests:
        print pretty.HEADER("Then starting " + str(len(time_sensitive_tests)) +
                            " time sensitive integration test(s)")
        for test in time_sensitive_tests:
            print pretty.INFO("Test"), "starting", test.name_

    processes = []
    fail_count = 0
    global test_count
    test_count += len(tests) + len(time_sensitive_tests)

    # Find number of cpu cores
    if args.parallel:
        num_cpus = args.parallel
    else:
        num_cpus = multiprocessing.cpu_count()

    # Collect test results
    print pretty.HEADER(
        "Collecting integration test results, on {0} cpu(s)".format(num_cpus))

    # Run a maximum number of parallel tests, equal to the number of CPUs available
    while tests or processes:  # While there are tests or processes left
        try:
            processes.append(
                tests.pop(0).start())  # Remove test from list after start
        except IndexError:
            pass  # All tests done

        while (len(processes) == num_cpus) or not tests:
            # Wait here while the maximum of num_cpus processes is running,
            # or while there are no tests left to start
            for p in list(processes):  # Iterate over copy of list
                if p.proc_.poll() is not None:
                    fail_count += 1 if p.wait_status() else 0
                    processes.remove(p)

            time.sleep(1)
            if not processes and not tests:
                break

        # Exit early if any tests failed
        if fail_count and args.fail:
            print pretty.FAIL(str(fail_count) + " integration tests failed")
            sys.exit(fail_count)

    # Start running the time sensitive tests
    for test in time_sensitive_tests:
        process = test.start()
        fail_count += 1 if process.wait_status() else 0
        if fail_count and args.fail:
            print pretty.FAIL(str(fail_count) + " integration tests failed")
            sys.exit(fail_count)

    return fail_count
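
A stripped-down sketch of the scheduling loop above, keeping at most num_cpus child processes alive, polling them, and starting the next job as soon as a slot frees up. Only the pattern is taken from the function; the placeholder sleep workloads are invented for illustration.

import multiprocessing
import subprocess
import time

jobs = [["sleep", "1"] for _ in range(6)]   # placeholder workloads
num_cpus = multiprocessing.cpu_count()
running, failures = [], 0

while jobs or running:
    # Fill any free slots
    while jobs and len(running) < num_cpus:
        running.append(subprocess.Popen(jobs.pop(0)))
    # Reap finished processes; a non-zero exit code counts as a failure
    for proc in list(running):
        if proc.poll() is not None:
            failures += 1 if proc.returncode else 0
            running.remove(proc)
    time.sleep(0.2)

print("failed:", failures)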