Code example #1
import os

import cs_grading as cs  # grading helper library; the 'cs' alias is inferred from usage


def rotate(problem):
    output_dir = '../rotate_output/'
    source_dir = 'rotate_test/'
    target_dir = '../rotate_target/'

    cs.mkdir_empty(output_dir, logging_level=problem.logging_level)
    cs.mkdir_empty(target_dir, logging_level=problem.logging_level)

    # sort by name, then stably by length, so shorter names order first
    test_sorted = sorted(os.listdir(source_dir))
    test_sorted = sorted(test_sorted, key=len)
    test_sorted = [
        x.replace('.cpp', '') for x in test_sorted if '.cpp' in x
    ]

    norun, timeout, crashed, failed, passed = cs.run_gtest_in_dir(
        test_sorted,
        source_dir,
        output_dir,
        target_dir,
        compile_flags=problem.compile_flags,
        compile_file=problem.compile_file,
        use_valgrind=problem.use_valgrind,
        valgrind_file=problem.valgrind_file,
        timeout=problem.timeout,
        logging_level=problem.logging_level)

    cs.write_test_result(problem.result_file,
                         test_sorted,
                         norun,
                         timeout,
                         crashed,
                         failed,
                         passed,
                         logging_level=problem.logging_level)
    cs.write_formatted_result(problem.formatted_file,
                              test_sorted,
                              norun,
                              timeout,
                              crashed,
                              failed,
                              passed,
                              logging_level=problem.logging_level)

    if problem.remove_output:
        cs.mkdir_empty(output_dir, logging_level=problem.logging_level)
        cs.mkdir_empty(target_dir, logging_level=problem.logging_level)
        cs.remove_dir(output_dir)
        cs.remove_dir(target_dir)
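
These examples never show the "problem" object they receive; it is a configuration record whose attributes drive each run. Below is a minimal sketch of what it might look like, inferred only from the attributes rotate() reads. Every field name and default here is an assumption, not the project's real class.

from dataclasses import dataclass
import logging


@dataclass
class Problem:
    # each field mirrors an attribute the grader reads; defaults are guesses
    logging_level: int = logging.INFO
    compile_flags: str = ''                # extra flags for run_gtest_in_dir
    compile_file: str = 'rotate.complog'   # where compiler output is logged
    use_valgrind: bool = False
    valgrind_file: str = 'valgrind.log'
    timeout: int = 10                      # seconds per test
    result_file: str = 'rotate-result.txt'
    formatted_file: str = 'rotate-formatted.txt'
    remove_output: bool = True             # delete output dirs when done


rotate(Problem())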
Code example #2
File: cmake_problem.py  Project: jacksals/hw-jacksals
import glob
import os

import xmltodict

import cs_grading
import executable_tools
import logging_tools

# note: valgrind_results_re, valgrind_errors_re and
# subprocess_valgrind_failure_re are module-level regexes defined elsewhere in
# this project (a plausible reconstruction appears after this example)


def cmake_problem(problem):
    # set path to output files
    problem.compile_file = os.path.join('compile-logs',
                                        problem.name + '.complog')
    stdout_file_path = os.path.join('test-output',
                                    problem.name + '-test-stdout.txt')

    # Find the tests' output XML file, buried a couple of directories deep
    xml_path = glob.glob("Testing/*-*/Test.xml")
    if len(xml_path) == 0:
        logging_tools.log_error("Cannot find test XML output file!",
                                problem.logging_level)
        return

    elif len(xml_path) > 1:
        logging_tools.log_error(
            "Multiple candidates for test XML file: " + " ".join(xml_path),
            problem.logging_level)
        return

    logging_tools.log_info("Found XML output file: " + xml_path[0],
                           problem.logging_level)

    # open the stdout capture file only after the XML file is confirmed, so
    # the early returns above do not leak an open file handle
    stdout_file = open(stdout_file_path, 'w')

    # parse XML file
    with open(xml_path[0]) as test_xml:
        test_results = xmltodict.parse(test_xml.read())

    test_list = []
    didnt_run_tests = set()
    crashed_tests = set()
    timed_out_tests = set()

    failed_tests = {}
    passed_tests = {}

    # Valgrind exit codes, indexed by test
    valgrind_exit_codes = []

    # now, go through all tests
    # (xmltodict yields a dict here instead of a list when there is only a
    # single <Test> element; this grader assumes several tests per run)
    test_results_list_element = test_results['Site']['Testing']['Test']
    for test_results_element in test_results_list_element:
        if problem.name in test_results_element['Path']:
            test_name = test_results_element['Name']

            #print("\n>> Processing test: " + test_name)

            # write test results to output file
            stdout_file.write("""
------------------------------------------------------------------------------
OUTPUT OF TEST %s:
------------------------------------------------------------------------------
""" % test_name)
            stdout_file.write(
                test_results_element['Results']['Measurement']['Value'])

            # detect Valgrind failures
            # note: we want to assign separate deductions for Valgrind
            # failures and actual test case failures, so we can't use
            # Valgrind's --error-exitcode option, since that would make CTest
            # think all the tests had failed
            valgrind_error = False
            match_list = valgrind_results_re.findall(
                test_results_element['Results']['Measurement']['Value'])

            if not match_list:
                # the program may have died before it produced the Valgrind
                # summary, or this test doesn't run under Valgrind
                pass

            else:
                # make sure to grab the last match in case a student tries to defeat this by printing a fake valgrind summary
                definitely_lost = int(match_list[-1][0].replace(",", ""))
                indirectly_lost = int(match_list[-1][1].replace(",", ""))
                possibly_lost = int(match_list[-1][2].replace(",", ""))
                still_reachable = int(match_list[-1][3].replace(",", ""))
                suppressed = int(match_list[-1][4].replace(",", ""))

                # print("Valgrind Results: definitely_lost: %d, indirectly_lost: %d, possibly_lost: %d, still_reachable: %d, suppressed: %d" % (definitely_lost, indirectly_lost, possibly_lost, still_reachable, suppressed))

                if definitely_lost > 0 or indirectly_lost > 0 or possibly_lost > 0 or still_reachable > 0:
                    valgrind_error = True

            # now look for "X errors in X contexts"
            error_match_list = valgrind_errors_re.findall(
                test_results_element['Results']['Measurement']['Value'])

            if not error_match_list:
                # as above: no Valgrind summary to parse for this test
                pass

            else:
                # make sure to grab the last match in case a student tries to defeat this by printing a fake valgrind summary
                num_errors = int(error_match_list[-1].replace(",", ""))
                #print("%d valgrind errors" % num_errors)

                if num_errors > 0:
                    valgrind_error = True

            if subprocess_valgrind_failure_re.search(
                    test_results_element['Results']['Measurement']['Value']):
                # print("Valgrind errors found in subprocess execution!")
                valgrind_error = True

            # now, parse out the test status
            passed = test_results_element['@Status'] == 'passed'

            # true if the test was not run (as in, it failed to build)
            didnt_run = False

            # true if the test dies with a segfault/sigfpe/etc
            crashed = False

            # true if test timed out
            timed_out = False

            test_time = -1.0

            if not isinstance(
                    test_results_element['Results']['NamedMeasurement'], list):
                # if the test did not run because the executable was not
                # found, there might not be the usual set of NamedMeasurements
                didnt_run = True
            elif test_results_element['@Status'] == 'notrun':
                didnt_run = True
            else:

                # iterate through namedmeasurements
                for measurement_element in test_results_element['Results'][
                        'NamedMeasurement']:
                    if measurement_element['@name'] == 'Execution Time':
                        test_time = float(measurement_element['Value'])

                    if not passed and measurement_element[
                            '@name'] == 'Exit Code':

                        if measurement_element['Value'] == 'Timeout':
                            timed_out = True

                        elif measurement_element['Value'] != 'Failed':
                            # if exit code is something other than Failed it means that the test crashed
                            crashed = True

                    if not passed and measurement_element[
                            '@name'] == 'Exit Value':

                        if int(measurement_element['Value']) == 127:
                            # Valgrind returns exit value 127 ("command not
                            # found") when the executable could not be run,
                            # i.e. the test never actually started
                            didnt_run = True

            # print("crashed: %r, passed: %r" % (crashed, passed))

            # write test data to collections
            test_list.append(test_name)
            if crashed:
                crashed_tests.add(test_name)
            elif timed_out:
                timed_out_tests.add(test_name)
            elif didnt_run:
                didnt_run_tests.add(test_name)
            elif passed:
                passed_tests[test_name] = test_time
            else:
                failed_tests[test_name] = test_time

            # figure out what the Valgrind exit code should have been for this test
            if valgrind_error:
                valgrind_exit_codes.append(executable_tools.VALGRIND_ERROR)
            elif crashed:
                valgrind_exit_codes.append(executable_tools.EXE_ERROR)
            else:
                valgrind_exit_codes.append(executable_tools.VALGRIND_SUCCESS)

    stdout_file.close()

    # write test result files
    cs_grading.write_test_result(problem.result_file,
                                 test_list,
                                 didnt_run_tests,
                                 timed_out_tests,
                                 crashed_tests,
                                 failed_tests,
                                 passed_tests,
                                 logging_level=problem.logging_level)
    cs_grading.write_formatted_result(problem.formatted_file,
                                      test_list,
                                      valgrind_exit_codes,
                                      didnt_run_tests,
                                      timed_out_tests,
                                      crashed_tests,
                                      failed_tests,
                                      passed_tests,
                                      logging_level=problem.logging_level)
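
The three regular expressions used above are module-level globals that this page does not show. Based on the standard Valgrind LEAK SUMMARY and ERROR SUMMARY output that the capture groups consume, they plausibly look like the sketch below; the subprocess sentinel pattern in particular is a placeholder, not the project's real marker.

import re

# Five capture groups, in the order unpacked above: definitely lost,
# indirectly lost, possibly lost, still reachable, suppressed.
valgrind_results_re = re.compile(
    r'definitely lost: ([\d,]+) bytes.*?'
    r'indirectly lost: ([\d,]+) bytes.*?'
    r'possibly lost: ([\d,]+) bytes.*?'
    r'still reachable: ([\d,]+) bytes.*?'
    r'suppressed: ([\d,]+) bytes', re.DOTALL)

# One capture group: the N in "ERROR SUMMARY: N errors from M contexts".
valgrind_errors_re = re.compile(r'ERROR SUMMARY: ([\d,]+) errors? from')

# Sentinel a harness might print when a child process fails under Valgrind;
# this exact text is an assumed placeholder.
subprocess_valgrind_failure_re = re.compile(
    r'Valgrind errors found in subprocess')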
Code example #3
import os

import cs_grading as cs  # same inferred helper alias as in example #1


def hypercube(problem):
    test = 'permitted'
    start = 'hypercube_test/start.txt'
    input_dir = 'hypercube_test/'
    sol_dir = 'hypercube_sol/'
    output_dir = '../hypercube_output/'
    target = 'hypercube'
    makefile_dir = '../../'

    if not cs.exist_file(start):
        cs.log_error('Failed to open start node file', problem.logging_level)
        return

    cs.mkdir_empty(output_dir, logging_level=problem.logging_level)

    test_names = sorted(os.listdir(input_dir))
    test_names = sorted(test_names, key=len)
    test_names = [
        x.replace('.txt', '') for x in test_names if test in x
    ]

    with open(start) as start_file:
        start_nodes = [x.strip() for x in start_file.readlines()]

    if len(test_names) != len(start_nodes):
        cs.log_error('Found {} permitted files but {} start nodes'.format(
            len(test_names), len(start_nodes)),
                     logging_level=problem.logging_level)
        return

    input_files = [input_dir + x + '.txt' for x in test_names]
    output_files = [output_dir + x + '.txt' for x in test_names]
    solution_files = [sol_dir + x + '.txt' for x in test_names]

    # each test runs as: <target> <start_node> <input_file> > <output_file>
    test_args = []
    for i in range(len(test_names)):
        cs.clean_file(output_files[i])
        test_args.append(
            [start_nodes[i], input_files[i], '>', output_files[i]])

    cs.make_code(make_command=target,
                 makefile_dir=makefile_dir,
                 compile_file=problem.compile_file)
    cs.move_file(makefile_dir + target, './')
    target = cs.fix_executable_path(target)

    norun, timeout, finished = cs.run_tests_in_list(
        target,
        test_names,
        test_args,
        use_valgrind=problem.use_valgrind,
        valgrind_file=problem.valgrind_file,
        timeout=problem.timeout,
        logging_level=problem.logging_level)

    failed, passed = cs.check_test_output(
        test_names,
        solution_files,
        output_files,
        problem.result_file,
        ordered_compare=True,
        skip_white_space=True,
        detailed_results=problem.detailed_results,
        logging_level=problem.logging_level)

    failed, passed = cs.get_test_runtime(finished, failed, passed)
    crashed = set()

    cs.write_test_result(problem.result_file,
                         test_names,
                         norun,
                         timeout,
                         crashed,
                         failed,
                         passed,
                         final_log=True,
                         logging_level=problem.logging_level)
    cs.write_formatted_result(problem.formatted_file,
                              test_names,
                              norun,
                              timeout,
                              crashed,
                              failed,
                              passed,
                              logging_level=problem.logging_level)

    if problem.remove_output:
        cs.mkdir_empty(output_dir, logging_level=problem.logging_level)
        cs.remove_dir(output_dir)
        cs.remove_file(target)
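
The pairing between start.txt and the permitted-test inputs above is purely positional: after the two sorts, line i of start.txt supplies the start node for the i-th test. Here is a small standalone sanity check of that layout, reusing the directory names from the function; this helper is illustrative, not part of the project.

import os

input_dir = 'hypercube_test/'

# replicate the ordering hypercube() uses: lexicographic, then stable by length
tests = sorted(os.listdir(input_dir))
tests = sorted(tests, key=len)
tests = [t.replace('.txt', '') for t in tests if 'permitted' in t]

with open(os.path.join(input_dir, 'start.txt')) as start_file:
    start_nodes = [line.strip() for line in start_file]

assert len(tests) == len(start_nodes), 'need one start node per permitted test'
for name, node in zip(tests, start_nodes):
    print('{}: starts at node {}'.format(name, node))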