def makefun(test_case, solution_file):
    """Build the zero-argument-style grading callback for one test case.

    The returned callable takes a ``grades`` object and either writes the
    solution file (solution-generation mode) or executes the test against
    the recorded solution (grading mode).

    NOTE(review): this relies on module-level names (``generate_solutions``,
    ``module_dict``, ``test_file``, ``print_test_case``, ``print_test``,
    ``test_parser``) being bound elsewhere before it is called — confirm
    they are injected at runtime.
    """
    if generate_solutions:
        # Solution-generation mode: the callback writes the solution file to disk.
        return lambda grades: test_case.write_solution(module_dict, solution_file)

    # Grading mode: parse the test and its recorded solution eagerly so the
    # returned callback closes over plain dictionaries.
    test_dict = test_parser.TestParser(test_file).parse()
    solution_dict = test_parser.TestParser(solution_file).parse()

    if not print_test_case:
        return lambda grades: test_case.execute(grades, module_dict, solution_dict)

    # print_test(...) returns None, so `or` falls through to executing the test.
    return lambda grades: print_test(test_dict, solution_dict) or test_case.execute(
        grades, module_dict, solution_dict)
def get_depends(test_parser, test_root, question):
    """Return ``question`` preceded by its transitive dependencies, in run order.

    Reads ``<test_root>/<question>/CONFIG`` via ``test_parser.TestParser`` and
    recursively expands its space-separated ``depends`` entry.

    Args:
        test_parser: module/object exposing ``TestParser(path).parse()``.
        test_root: directory containing one subdirectory per question.
        question: name of the question to expand.

    Returns:
        List of question names where every dependency appears before any
        question that needs it, and each question appears exactly once.
        (The previous version could list a question several times on
        diamond-shaped dependency graphs, grading it repeatedly.)

    NOTE(review): a cyclic ``depends`` chain still recurses without bound,
    as in the original — confirm configs are acyclic.
    """
    all_deps = [question]
    question_dict = test_parser.TestParser(
        os.path.join(test_root, question, 'CONFIG')).parse()
    if 'depends' in question_dict:
        for dep in question_dict['depends'].split():
            # Run dependencies first: prepend each dependency's own chain.
            all_deps = get_depends(test_parser, test_root, dep) + all_deps

    # De-duplicate while keeping the earliest occurrence of each name; the
    # earliest occurrence is always before any dependent, so ordering holds.
    seen = set()
    ordered = []
    for name in all_deps:
        if name not in seen:
            seen.add(name)
            ordered.append(name)
    return ordered
def get_test_subdirs(test_parser, test_root, question_to_grade):
    """Determine which question subdirectories to run, and in what order.

    Args:
        test_parser: module/object exposing ``TestParser(path).parse()``.
        test_root: directory containing a top-level CONFIG and one
            subdirectory per question.
        question_to_grade: a single question name, or None to run them all.

    Returns:
        List of question names. When a single question is requested, its
        dependency chain (via ``get_depends``) is included and announced.
        Otherwise the CONFIG ``order`` entry wins; failing that, the
        subdirectory names are returned alphabetically.
    """
    problem_dict = test_parser.TestParser(
        os.path.join(test_root, 'CONFIG')).parse()
    if question_to_grade is not None:  # was `!= None`; identity test is PEP 8 idiom
        # Grade one question, but pull in anything it depends on first.
        questions = get_depends(test_parser, test_root, question_to_grade)
        if len(questions) > 1:
            print('Note: due to dependencies, the following tests will be run: %s'
                  % ' '.join(questions))
        return questions
    if 'order' in problem_dict:
        return problem_dict['order'].split()
    # No explicit order configured: fall back to sorted directory listing.
    return sorted(os.listdir(test_root))
def run_test(test_name, module_dict, print_test_case=False, display=None):
    """Run one test in isolation, outside the normal question/grading flow.

    Args:
        test_name: path prefix of the test; ``<test_name>.test`` and
            ``<test_name>.solution`` must exist.
        module_dict: mapping of module name -> module object to inject into
            this module's namespace (student code plus support modules).
        print_test_case: when True, dump the parsed test and solution first.
        display: display object handed to the stub Question.

    NOTE(review): ``project_test_classes``, ``print_test``, ``grading`` and
    ``project_params`` are resolved from module scope — presumably injected
    via ``module_dict`` or imported at file top; confirm.
    """
    import test_parser
    import test_classes

    # Expose the injected modules as attributes of this module so test code
    # can resolve them by name.
    for name in module_dict:
        setattr(sys.modules[__name__], name, module_dict[name])

    parsed_test = test_parser.TestParser(test_name + ".test").parse()
    parsed_solution = test_parser.TestParser(test_name + ".solution").parse()
    parsed_test['test_out_file'] = os.path.join('%s.test_output' % test_name)

    # Wrap the test case in a throwaway zero-point question.
    stub_question = getattr(test_classes, 'Question')({'max_points': 0}, display)
    case_class = getattr(project_test_classes, parsed_test['class'])
    case = case_class(stub_question, parsed_test)

    if print_test_case:
        print_test(parsed_test, parsed_solution)

    # This is a fragile hack to create a stub grades object
    stub_grades = grading.Grades(project_params.PROJECT_NAME, [(None, 0)])
    case.execute(stub_grades, module_dict, parsed_solution)
def evaluate(generate_solutions, test_root, module_dict,
             exception_map=ERROR_HINT_MAP, edx_output=False, mute_output=False,
             print_test_case=False, question_to_grade=None, display=None):
    """Run (or generate solutions for) every selected question under test_root.

    For each question subdirectory, builds a Question object from its CONFIG,
    attaches one callback per ``*.test`` file, then drives everything through
    a ``grading.Grades`` object and returns the earned points.

    NOTE(review): ``exception_map`` is accepted but never read in this body —
    possibly consumed elsewhere or vestigial; confirm before removing.
    """
    # imports of testbench code. note that the test_classes import must follow
    # the import of student code due to dependencies
    import test_parser
    import test_classes

    # Inject the student/support modules as attributes of this module so the
    # dynamically-created per-question functions below can resolve them.
    for module in module_dict:
        setattr(sys.modules[__name__], module, module_dict[module])

    questions = []        # (question name, max points) pairs for Grades
    question_dicts = {}   # question name -> parsed CONFIG dict
    test_subdirs = get_test_subdirs(test_parser, test_root, question_to_grade)
    for q in test_subdirs:
        subdir_path = os.path.join(test_root, q)
        # Skip stray files and hidden entries (e.g. '.git', '.DS_Store').
        if not os.path.isdir(subdir_path) or q[0] == '.':
            continue

        # create a question object
        question_dict = test_parser.TestParser(
            os.path.join(subdir_path, 'CONFIG')).parse()
        question_class = getattr(test_classes, question_dict['class'])
        question = question_class(question_dict, display)
        question_dicts[q] = question_dict

        # load test cases into question: keep '<name>.test' files that do not
        # start with '#', '~' or '.', then strip the '.test' suffix.
        tests = filter(lambda t: re.match('[^#~.].*\.test\Z', t),
                       os.listdir(subdir_path))
        tests = map(lambda t: re.match('(.*)\.test\Z', t).group(1), tests)
        for t in sorted(tests):
            test_file = os.path.join(subdir_path, '%s.test' % t)
            solution_file = os.path.join(subdir_path, '%s.solution' % t)
            test_out_file = os.path.join(subdir_path, '%s.test_output' % t)
            test_dict = test_parser.TestParser(test_file).parse()
            # Tests can be switched off via `disabled: "true"` in the .test file.
            if test_dict.get("disabled", "false").lower() == "true":
                continue
            test_dict['test_out_file'] = test_out_file
            test_class = getattr(project_test_classes, test_dict['class'])
            test_case = test_class(question, test_dict)

            # Factory so each callback captures THIS iteration's test_case and
            # solution_file rather than the loop variables (late binding).
            def makefun(test_case, solution_file):
                if generate_solutions:
                    # write solution file to disk
                    return lambda grades: test_case.write_solution(
                        module_dict, solution_file)
                else:
                    # read in solution dictionary and pass as an argument
                    test_dict = test_parser.TestParser(test_file).parse()
                    solution_dict = test_parser.TestParser(
                        solution_file).parse()
                    if print_test_case:
                        # print_test returns None, so `or` chains into execute.
                        return lambda grades: print_test(
                            test_dict,
                            solution_dict) or test_case.execute(
                            grades, module_dict, solution_dict)
                    else:
                        return lambda grades: test_case.execute(
                            grades, module_dict, solution_dict)
            question.add_test_case(test_case, makefun(test_case, solution_file))

        # Note extra function is necessary for scoping reasons
        def makefun(question):
            return lambda grades: question.execute(grades)
        # Expose the question runner as a module attribute named after the
        # question; grades.grade() below looks it up by name.
        setattr(sys.modules[__name__], q, makefun(question))
        questions.append((q, question.get_max_points()))

    grades = grading.Grades(project_params.PROJECT_NAME, questions,
                            edx_output=edx_output, mute_output=mute_output)
    # Only register prerequisites when grading everything; a single requested
    # question already had its dependency chain expanded by get_test_subdirs.
    if question_to_grade == None:
        for q in question_dicts:
            for prereq in question_dicts[q].get('depends', '').split():
                grades.add_prereq(q, prereq)

    grades.grade(sys.modules[__name__], bonus_pic=project_params.BONUS_PIC)
    return grades.points
#!/usr/bin/python3
"""Self-test driver: run the lexer and parser smoke tests with separators."""
import test_lexer
import test_parser

# Visual rule printed between the two self-test runs.
_SEPARATOR = '============================================================================'

if __name__ == "__main__":
    # NOTE(review): TestParser is called here with no arguments, unlike the
    # path-taking TestParser used by the autograder — presumably a different
    # test_parser module; confirm.
    print(_SEPARATOR)
    test_lexer.TestLexer()
    print(_SEPARATOR)
    test_parser.TestParser()
    print(_SEPARATOR)