def grade_problem(self, general_rubric=None, problem_rubric=None):
    """Grade this problem's compile, test, and Valgrind results against its rubric.

    Args:
        general_rubric: optional rubric file applied first (shared defaults).
        problem_rubric: optional rubric file applied second (problem overrides).
    """
    # Results must exist before anything can be graded.
    if not self._results_generated():
        log.log_error(
            'Grading problem {} before results are generated'.format(self.name),
            self.logging_level)
        return
    # Layer the rubrics: general first, then the problem-specific one on top.
    for rubric_source in (general_rubric, problem_rubric):
        if rubric_source is not None:
            self.rubric = rb.load_rubric(rubric_source, self.rubric,
                                         logging_level=self.logging_level)
    # Sanity-check that the per-test point values add up to the problem total.
    total_points = sum(
        self.rubric.getfloat(rb.RUBRIC_SECT_TEST, entry, fallback=0)
        for entry in self.rubric[rb.RUBRIC_SECT_TEST])
    # Float-tolerant comparison against the assigned maximum.
    if abs(total_points - self.test_max) < .001:
        log.log_info("Sum of all points in rubric: %.02f" % total_points,
                     self.logging_level)
    else:
        log.log_error(
            "Total point values in rubric (%.02f) DO NOT MATCH score assigned to problem (%d)!"
            % (total_points, self.test_max),
            self.logging_level)
    self._grade_compile_result()
    self.test_count = self._grade_test_result()
    self._grade_valgrind_result()
def zip_dir(target, file_name, logging_level=log.LOGLEVEL_ERROR):
    """Zip the contents of directory *target* into the archive *file_name*.

    Args:
        target: directory whose contents are archived (shell glob '*').
        file_name: output zip path; made absolute before changing directory.
        logging_level: verbosity threshold passed to the logging helpers.
    """
    import shlex  # local import: only needed here, for safe shell quoting
    if not exist_dir(target):
        log.log_info('Directory {} does not exists'.format(target), logging_level)
    else:
        # Resolve before chdir so a relative archive path still lands where
        # the caller expects it.
        file_name = os.path.abspath(file_name)
        cwd = os.getcwd()
        os.chdir(target)
        try:
            # FIX: quote the archive path so spaces or shell metacharacters
            # in file_name cannot break (or inject into) the shell command.
            os.system('zip ' + shlex.quote(file_name) + ' *')
        finally:
            # FIX: always restore the working directory, even if zipping raises.
            os.chdir(cwd)
def mkdir(target, logging_level=log.LOGLEVEL_ERROR):
    """Create directory *target* if it does not exist.

    Returns:
        True when the directory exists afterwards, False when a regular
        file already occupies the path.
    """
    # Guard: an existing directory counts as success.
    if exist_dir(target):
        log.log_info('Directory {} already exists'.format(target), logging_level)
        return True
    # Guard: a regular file is squatting on the path; we cannot create here.
    if exist_file(target):
        log.log_error('Expected file {} to be directory'.format(target), logging_level)
        return False
    log.log_info('Creating directory {}'.format(target), logging_level)
    os.mkdir(target)
    return True
def cmake_problem(problem):
    """Parse CTest/Valgrind output for *problem* and write its grading files.

    Locates the CTest XML report (Testing/*-*/Test.xml), classifies each of
    the problem's tests (passed / failed / crashed / timed out / did not run),
    scans the captured output for Valgrind leak summaries and error counts,
    dumps raw test output to a per-problem text file, and finally writes the
    result and formatted-result grading files.

    Args:
        problem: problem object; reads .name/.logging_level and writes
            .compile_file, plus the result/formatted files it names.
    """
    # set path to output files
    problem.compile_file = os.path.join('compile-logs', problem.name + '.complog')
    stdout_file_path = os.path.join('test-output', problem.name + '-test-stdout.txt')
    stdout_file = open(stdout_file_path, 'w')

    # Find tests' output XML file, buried a couple of directories deep
    xml_path = glob.glob("Testing/*-*/Test.xml")
    if len(xml_path) == 0:
        logging_tools.log_error("Cannot find test XML output file!",
                                problem.logging_level)
        stdout_file.close()  # FIX: don't leak the handle on the error path
        return
    elif len(xml_path) > 1:
        logging_tools.log_error(
            "Multiple candidates for test XML file: " + " ".join(xml_path),
            problem.logging_level)
        stdout_file.close()  # FIX: don't leak the handle on the error path
        return
    logging_tools.log_info("Found XML output file: " + xml_path[0],
                           problem.logging_level)

    # parse XML file
    # FIX: context manager closes the handle even if parsing raises
    with open(xml_path[0]) as test_xml:
        test_results = xmltodict.parse(test_xml.read())

    test_list = []
    didnt_run_tests = set()
    crashed_tests = set()
    timed_out_tests = set()
    failed_tests = {}
    passed_tests = {}
    # Valgrind exit codes, indexed by test
    valgrind_exit_codes = []

    # now, go through all tests
    # NOTE(review): xmltodict yields a dict (not a list) when the report has
    # exactly one <Test> element; this loop assumes several tests -- confirm.
    test_results_list_element = test_results['Site']['Testing']['Test']
    for test_results_element in test_results_list_element:
        if problem.name in test_results_element['Path']:
            test_name = test_results_element['Name']

            # write test results to output file
            stdout_file.write("""
------------------------------------------------------------------------------
OUTPUT OF TEST %s:
------------------------------------------------------------------------------
""" % test_name)
            stdout_file.write(
                test_results_element['Results']['Measurement']['Value'])

            # detect Valgrind failures
            # note: we want to assign separate deductions for Valgrind failures
            # and actual test case failures. So, we can't use Valgrind's
            # --error-exitcode option, since that would make CTest think all
            # the tests had failed.
            valgrind_error = False
            match_list = valgrind_results_re.findall(
                test_results_element['Results']['Measurement']['Value'])
            if match_list is None or len(match_list) < 1:
                # program may have died before it got to even producing the
                # valgrind output, or it's a test that doesn't use Valgrind
                pass
            else:
                # make sure to grab the last match in case a student tries to
                # defeat this by printing a fake valgrind summary
                definitely_lost = int(match_list[-1][0].replace(",", ""))
                indirectly_lost = int(match_list[-1][1].replace(",", ""))
                possibly_lost = int(match_list[-1][2].replace(",", ""))
                still_reachable = int(match_list[-1][3].replace(",", ""))
                suppressed = int(match_list[-1][4].replace(",", ""))
                if (definitely_lost > 0 or indirectly_lost > 0
                        or possibly_lost > 0 or still_reachable > 0):
                    valgrind_error = True

            # now look for "X errors in X contexts"
            error_match_list = valgrind_errors_re.findall(
                test_results_element['Results']['Measurement']['Value'])
            if error_match_list is None or len(error_match_list) < 1:
                # program may have died before it got to even producing the
                # valgrind output, or it's a test that doesn't use Valgrind
                pass
            else:
                # make sure to grab the last match in case a student tries to
                # defeat this by printing a fake valgrind summary
                num_errors = int(error_match_list[-1].replace(",", ""))
                if num_errors > 0:
                    valgrind_error = True

            if subprocess_valgrind_failure_re.search(
                    test_results_element['Results']['Measurement']
                    ['Value']) is not None:
                valgrind_error = True

            # now, parse out the test status
            passed = test_results_element['@Status'] == 'passed'
            # true if the test was not run (as in, it failed to build)
            didnt_run = False
            # true if the test dies with a segfault/sigfpe/etc
            crashed = False
            # true if test timed out
            timed_out = False
            test_time = -1.0
            if type(test_results_element['Results']
                    ['NamedMeasurement']) != list:
                # if the test did not run because the executable was not found,
                # there might not be the usual set of NamedMeasurements
                didnt_run = True
            elif test_results_element["@Status"] == "notrun":
                didnt_run = True
            else:
                # iterate through namedmeasurements
                for measurement_element in test_results_element['Results'][
                        'NamedMeasurement']:
                    if measurement_element['@name'] == 'Execution Time':
                        test_time = float(measurement_element['Value'])
                    if not passed and measurement_element[
                            '@name'] == 'Exit Code':
                        if measurement_element['Value'] == 'Timeout':
                            timed_out = True
                        elif measurement_element['Value'] != 'Failed':
                            # if exit code is something other than Failed it
                            # means that the test crashed
                            crashed = True
                    if not passed and measurement_element[
                            '@name'] == 'Exit Value':
                        if int(measurement_element['Value']) == 127:
                            # Valgrind returns this to indicate that the test
                            # was not run
                            didnt_run = True

            # write test data to collections
            test_list.append(test_name)
            if crashed:
                crashed_tests.add(test_name)
            elif timed_out:
                timed_out_tests.add(test_name)
            elif didnt_run:
                didnt_run_tests.add(test_name)
            elif passed:
                passed_tests[test_name] = test_time
            else:
                failed_tests[test_name] = test_time

            # figure out what the Valgrind exit code should have been for this test
            if valgrind_error:
                valgrind_exit_codes.append(executable_tools.VALGRIND_ERROR)
            elif crashed:
                valgrind_exit_codes.append(executable_tools.EXE_ERROR)
            else:
                valgrind_exit_codes.append(executable_tools.VALGRIND_SUCCESS)

    stdout_file.close()

    # write test result files
    cs_grading.write_test_result(problem.result_file, test_list,
                                 didnt_run_tests, timed_out_tests,
                                 crashed_tests, failed_tests, passed_tests,
                                 logging_level=problem.logging_level)
    cs_grading.write_formatted_result(problem.formatted_file, test_list,
                                      valgrind_exit_codes, didnt_run_tests,
                                      timed_out_tests, crashed_tests,
                                      failed_tests, passed_tests,
                                      logging_level=problem.logging_level)
def run_executable(executable_path, **kwargs):
    """Run an executable, optionally under Valgrind, with shell-style redirections.

    Keyword Args:
        extra_arguments (list): command-line arguments; the tokens '<', '>',
            '>>', '2>', '2>>' followed by a file name are interpreted as
            stdin/stdout/stderr redirections and removed from the argv.
        use_valgrind (bool): wrap the command in the VALGRIND invocation.
        valgrind_file (str): file to receive the Valgrind log contents.
        timeout (float): seconds to wait before killing the process.
        logging_level: verbosity threshold for the logging helpers.
        logging_force_suppressed (bool): skip the "Running ..." info line.

    Returns:
        (killed, utime, retcode): kill status (EXE_SUCCESS/EXE_TIMEOUT/
        EXE_ERROR), child CPU user time consumed, and the process exit code.
    """
    # FIX: copy the argument list -- redirection parsing below deletes
    # elements, and the original mutated the caller's list in place.
    extra_arguments = list(kwargs.get('extra_arguments', []))
    use_valgrind = kwargs.get('use_valgrind', False)
    valgrind_file = kwargs.get('valgrind_file', None)
    timeout = kwargs.get('timeout', None)
    logging_level = kwargs.get('logging_level', log.LOGLEVEL_ERROR)
    logging_force_suppressed = kwargs.get('logging_force_suppressed', False)

    killed = EXE_ERROR
    utime = EXE_ERROR
    retcode = EXE_ERROR
    error = False
    args = []
    if use_valgrind:
        if valgrind_file is None:
            log.log_warning(
                'valgrind turned on but no valgrind log file speficied',
                logging_level)
        # Valgrind writes to a temp log; it is copied/cleaned at the end.
        temp_valgrind_log = 'temp_valgrind_log.txt'
        args.extend(VALGRIND)
        args.append('--log-file=' + temp_valgrind_log)

    redirected_stdin_file = None
    redirected_stdout_mode = None
    redirected_stdout_file = None
    redirected_stderr_mode = None
    redirected_stderr_file = None
    # Scan argv for shell-style redirection tokens, consuming each token and
    # its file-name operand.
    i = 0
    while i < len(extra_arguments):
        if extra_arguments[i] == '<':
            # redirect input
            if i + 1 >= len(extra_arguments):
                log.log_error('Found input redirection with no input file',
                              logging_level)
                error = True
                break
            redirected_stdin_file = extra_arguments[i + 1]
            del extra_arguments[i + 1]
            del extra_arguments[i]
        elif extra_arguments[i] == '>' or extra_arguments[i] == '>>':
            # redirect stdout ('>' truncates, '>>' appends)
            if extra_arguments[i] == '>':
                redirected_stdout_mode = 'w'
            else:
                redirected_stdout_mode = 'a'
            if i + 1 >= len(extra_arguments):
                log.log_error('Found output redirection with no output file',
                              logging_level)
                error = True
                break
            redirected_stdout_file = extra_arguments[i + 1]
            del extra_arguments[i + 1]
            del extra_arguments[i]
        elif extra_arguments[i] == '2>' or extra_arguments[i] == '2>>':
            # redirect stderr ('2>' truncates, '2>>' appends)
            if extra_arguments[i] == '2>':
                redirected_stderr_mode = 'w'
            else:
                redirected_stderr_mode = 'a'
            if i + 1 >= len(extra_arguments):
                log.log_error('Found error redirection with no output file',
                              logging_level)
                error = True
                break
            redirected_stderr_file = extra_arguments[i + 1]
            del extra_arguments[i + 1]
            del extra_arguments[i]
        else:
            i += 1

    if not error:
        extra_arguments = [executable_path] + extra_arguments
        if not logging_force_suppressed:
            log.log_info('Running ' + ' '.join(extra_arguments),
                         log.LOGLEVEL_INFO)
        args.extend(extra_arguments)
        if redirected_stdout_file is not None:
            redirected_stdout_file = open(redirected_stdout_file,
                                          redirected_stdout_mode)
        if redirected_stderr_file is not None:
            redirected_stderr_file = open(redirected_stderr_file,
                                          redirected_stderr_mode)
        if redirected_stdin_file is not None:
            redirected_stdin_file = open(redirected_stdin_file, 'r')
        try:
            # Child CPU time is measured as the delta of RUSAGE_CHILDREN
            # before and after the run.
            start_time = resource.getrusage(resource.RUSAGE_CHILDREN).ru_utime
            proc = subprocess.Popen(args,
                                    stdout=redirected_stdout_file,
                                    stderr=redirected_stderr_file,
                                    stdin=redirected_stdin_file)
            proc.communicate(timeout=timeout)
            killed = EXE_SUCCESS
            retcode = proc.returncode
        except subprocess.TimeoutExpired:
            log.log_warning('Executable {} timed out'.format(executable_path),
                            logging_level)
            killed = EXE_TIMEOUT
            proc.kill()
            # FIX: reap the killed child so it does not linger as a zombie
            # (recommended follow-up to kill() per the subprocess docs).
            proc.communicate()
        except OSError:
            log.log_warning('Executable {} not found'.format(executable_path),
                            logging_level)
            killed = EXE_ERROR
        end_time = resource.getrusage(resource.RUSAGE_CHILDREN).ru_utime
        utime = end_time - start_time
        if redirected_stdin_file is not None:
            redirected_stdin_file.close()
        if redirected_stdout_file is not None:
            redirected_stdout_file.close()
        if redirected_stderr_file is not None:
            redirected_stderr_file.close()

    if use_valgrind:
        if valgrind_file is not None:
            sysio.write_file_contents(valgrind_file, temp_valgrind_log,
                                      logging_level=logging_level)
            sysio.write_message(valgrind_file, '\n\n')
        sysio.clean_file(temp_valgrind_log)
    return killed, utime, retcode