Example 1
def runTest(testName, moduleDict, printTestCase=False, display=None):
    import testParser
    import testClasses
    for module in moduleDict:
        setattr(sys.modules[__name__], module, moduleDict[module])

    testDict = testParser.TestParser(testName + ".test").parse()
    solutionDict = testParser.TestParser(testName + ".solution").parse()
    test_out_file = os.path.join('%s.test_output' % testName)
    testDict['test_out_file'] = test_out_file
    testClass = getattr(projectTestClasses, testDict['class'])

    questionClass = getattr(testClasses, 'Question')
    question = questionClass({'max_points': 0}, display)
    testCase = testClass(question, testDict)

    if printTestCase:
        printTest(testDict, solutionDict)

    # This is a fragile hack to create a stub grades object
    grades = grading.Grades(projectParams.PROJECT_NAME, [(None, 0)])
    testCase.execute(grades, moduleDict, solutionDict)
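A minimal driver sketch for this runTest variant. It assumes the function lives in the grader module together with the names it references (sys, os, grading, projectParams, projectTestClasses); the module and test names below are hypothetical and only illustrate the expected shape of moduleDict.

import importlib

# Hypothetical student modules; the real grader assembles this dict from the project configuration.
moduleDict = {}
for name in ['search', 'searchAgents']:
    moduleDict[name] = importlib.import_module(name)

# Run a single test case, printing the parsed test and solution dictionaries first.
runTest('test_cases/q1/graph_bfs', moduleDict, printTestCase=True, display=None)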
Example 2
def runTest(testName, moduleDict, printTestCase=False):
    import testParser
    import testClasses

    for module in moduleDict:
        setattr(sys.modules[__name__], module, moduleDict[module])

    # This is a hack; it will break if a test accesses the question object without checking for None
    question = None

    testDict = testParser.TestParser(testName + ".test").parse()
    solutionDict = testParser.TestParser(testName + ".solution").parse()
    test_out_file = os.path.join("%s.test_output" % testName)
    testDict["test_out_file"] = test_out_file
    testClass = getattr(projectTestClasses, testDict["class"])
    testCase = testClass(question, testDict)

    if printTestCase:
        printTest(testDict, solutionDict)

    # This is a fragile hack to create a stub grades object
    grades = grading.Grades(projectParams.PROJECT_NAME, [(None, 0)])
    testCase.execute(grades, moduleDict, solutionDict)
def evaluate(generateSolutions,
             testRoot,
             moduleDict,
             exceptionMap=ERROR_HINT_MAP,
             edxOutput=False,
             muteOutput=False,
             gsOutput=False,
             printTestCase=False,
             questionToGrade=None,
             display=None):
    # imports of testbench code.  note that the testClasses import must follow
    # the import of student code due to dependencies
    import testParser
    import testClasses
    for module in moduleDict:
        setattr(sys.modules[__name__], module, moduleDict[module])

    questions = []
    questionDicts = {}
    test_subdirs = getTestSubdirs(testParser, testRoot, questionToGrade)
    for q in test_subdirs:
        subdir_path = os.path.join(testRoot, q)
        if not os.path.isdir(subdir_path) or q[0] == '.':
            continue

        # create a question object
        questionDict = testParser.TestParser(
            os.path.join(subdir_path, 'CONFIG')).parse()
        questionClass = getattr(testClasses, questionDict['class'])
        question = questionClass(questionDict, display)
        questionDicts[q] = questionDict

        # load test cases into question
        tests = filter(lambda t: re.match(r'[^#~.].*\.test\Z', t),
                       os.listdir(subdir_path))
        tests = map(lambda t: re.match(r'(.*)\.test\Z', t).group(1), tests)
        for t in sorted(tests):
            test_file = os.path.join(subdir_path, '%s.test' % t)
            solution_file = os.path.join(subdir_path, '%s.solution' % t)
            test_out_file = os.path.join(subdir_path, '%s.test_output' % t)
            testDict = testParser.TestParser(test_file).parse()
            if testDict.get("disabled", "false").lower() == "true":
                continue
            testDict['test_out_file'] = test_out_file
            testClass = getattr(projectTestClasses, testDict['class'])
            testCase = testClass(question, testDict)

            def makefun(testCase, solution_file):
                if generateSolutions:
                    # write solution file to disk
                    return lambda grades: testCase.writeSolution(
                        moduleDict, solution_file)
                else:
                    # read in solution dictionary and pass as an argument
                    testDict = testParser.TestParser(test_file).parse()
                    solutionDict = testParser.TestParser(solution_file).parse()
                    if printTestCase:
                        return lambda grades: printTest(
                            testDict, solutionDict) or testCase.execute(
                                grades, moduleDict, solutionDict)
                    else:
                        return lambda grades: testCase.execute(
                            grades, moduleDict, solutionDict)

            question.addTestCase(testCase, makefun(testCase, solution_file))

        # Note extra function is necessary for scoping reasons
        def makefun(question):
            return lambda grades: question.execute(grades)

        setattr(sys.modules[__name__], q, makefun(question))
        questions.append((q, question.getMaxPoints()))

    grades = grading.Grades(projectParams.PROJECT_NAME,
                            questions,
                            gsOutput=gsOutput,
                            edxOutput=edxOutput,
                            muteOutput=muteOutput)
    if questionToGrade is None:
        for q in questionDicts:
            for prereq in questionDicts[q].get('depends', '').split():
                grades.addPrereq(q, prereq)

    grades.grade(sys.modules[__name__], bonusPic=projectParams.BONUS_PIC)
    return grades.points
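The makefun helpers above (and the "scoping reasons" comment) work around Python's late-binding closures: a lambda defined in a loop captures the loop variable itself, not its current value, so without the wrapper every test lambda would end up bound to the last testCase and solution_file. A standalone sketch of the pitfall and the fix:

# Late binding: all three lambdas share the same 'i', which is 2 once the loop finishes.
broken = [lambda: i for i in range(3)]
print([f() for f in broken])    # [2, 2, 2]

# Wrapping the lambda in a factory binds the value at the time makefun is called,
# which is the same trick the grader uses for testCase / solution_file.
def makefun(i):
    return lambda: i

fixed = [makefun(i) for i in range(3)]
print([f() for f in fixed])     # [0, 1, 2]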
Example 4
    #sys.exit(2)
    sys.argv[1] = '-'
  if sys.argv[1] != '-': sys.stdout = open(sys.argv[1], 'w')
  import grading
  
  import pacman, time, layout, textDisplay
  textDisplay.SLEEP_TIME = 0
  textDisplay.DRAW_EVERY = 1000

  # Our code
  import solution.searchAgents as searchAgentsSol
  import solution.search as searchSol

  # Their code
  import searchAgents, search

  questions = [
          ('q1', 2), 
          #('q2', 1), 
          #('q3', 2), 
          #('q4', 3), 
          #('q5', 2), 
          #('q6', 3), 
          #('q7', 5), 
          #('q8', 2), 
          #('extra', 0)
          ]
  grades = grading.Grades('Project 1: Search', questions)
  grades.grade(sys.modules[__name__])
  sys.stdout.close()
def evaluate(generate_solutions,
             test_root,
             module_dict,
             edx_output=False,
             mute_output=False,
             gs_output=False,
             print_test_case=False,
             question_to_grade=None,
             display=None,
             student_code=None,
             just_lint=False):
    """Evaluate student code."""
    # imports of testbench code.  note that the test_classes import must follow
    # the import of student code due to dependencies
    import test_parser
    import test_classes
    for module in module_dict:
        setattr(sys.modules[__name__], module, module_dict[module])

    questions = []
    question_dicts = {}
    test_subdirs = get_test_subdirs(test_parser, test_root, question_to_grade)
    for q in test_subdirs:
        subdir_path = os.path.join(test_root, q)
        if not os.path.isdir(subdir_path) or q[0] == '.':
            continue

        # create a question object
        question_dict = test_parser.TestParser(
            os.path.join(subdir_path, 'CONFIG')).parse()
        question_class = getattr(test_classes, question_dict['class'])
        question = question_class(question_dict, display)
        question_dicts[q] = question_dict

        # load test cases into question
        tests = [
            t for t in os.listdir(subdir_path)
            if re.match(r'[^#~.].*\.test\Z', t)
        ]
        tests = [re.match(r'(.*)\.test\Z', t).group(1) for t in tests]
        for t in sorted(tests):
            test_file = os.path.join(subdir_path, '%s.test' % t)
            solution_file = os.path.join(subdir_path, '%s.solution' % t)
            test_out_file = os.path.join(subdir_path, '%s.test_output' % t)
            test_dict = test_parser.TestParser(test_file).parse()
            if test_dict.get("disabled", "false").lower() == "true":
                continue
            test_dict['test_out_file'] = test_out_file
            test_class = getattr(project_test_classes, test_dict['class'])
            test_case = test_class(question, test_dict)

            def makefun(test_case, solution_file):
                if generate_solutions:
                    # write solution file to disk
                    return lambda grades: test_case.write_solution(
                        module_dict, solution_file)
                else:
                    # read in solution dictionary and pass as an argument
                    test_dict = test_parser.TestParser(test_file).parse()
                    solution_dict = test_parser.TestParser(
                        solution_file).parse()
                    if print_test_case:
                        return lambda grades: (print_test(
                            test_dict, solution_dict) or test_case.execute(
                                grades, module_dict, solution_dict))
                    else:
                        return lambda grades: test_case.execute(
                            grades, module_dict, solution_dict)

            question.add_test_case(test_case, makefun(test_case,
                                                      solution_file))

        # Note extra function is necessary for scoping reasons
        def makefun(question):
            return lambda grades: question.execute(grades)

        setattr(sys.modules[__name__], q, makefun(question))
        questions.append((q, question.max_points))

    grades = grading.Grades(project_params.PROJECT_NAME,
                            questions if not just_lint else [],
                            gs_output=gs_output,
                            edx_output=edx_output,
                            mute_output=mute_output,
                            student_code=student_code,
                            linting_value=project_params.LINTING_VALUE)
    if question_to_grade is None:
        for q in question_dicts:
            for prereq in question_dicts[q].get('depends', '').split():
                grades.add_prereq(q, prereq)

    grades.grade(sys.modules[__name__], bonus_pic=project_params.BONUS_PIC)
    return grades.points
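The test-discovery step in each of these evaluate variants filters a question directory down to files named *.test, skipping names that start with '#', '~', or '.' (editor backups and hidden files), then strips the extension to get the base test name. A standalone sketch of that filtering with hypothetical file names:

import re

# Hypothetical directory listing for one question subdirectory.
listing = ['graph_bfs.test', 'graph_bfs.solution', '#backup.test',
           'graph_dfs.test~', '.hidden.test', 'CONFIG']

# Same pattern as the grader: the name must not start with '#', '~', or '.'
# and must end in '.test'.
tests = [t for t in listing if re.match(r'[^#~.].*\.test\Z', t)]
tests = [re.match(r'(.*)\.test\Z', t).group(1) for t in tests]
print(sorted(tests))    # ['graph_bfs']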
Example 6
def evaluate(
    generateSolutions,
    testRoot,
    moduleDict,
    exceptionMap=ERROR_HINT_MAP,
    edxOutput=False,
    muteOutput=False,
    printTestCase=False,
    questionToGrade=None,
):
    # imports of testbench code.  note that the testClasses import must follow
    # the import of student code due to dependencies
    import testParser
    import testClasses

    for module in moduleDict:
        setattr(sys.modules[__name__], module, moduleDict[module])

    problemDict = testParser.TestParser(os.path.join(testRoot, "CONFIG")).parse()

    # iterate through and run tests
    if "order" in problemDict:
        test_subdirs = problemDict["order"].split()
    else:
        test_subdirs = sorted(os.listdir(testRoot))
    questions = []
    questionDicts = {}
    for q in test_subdirs:
        subdir_path = os.path.join(testRoot, q)
        if not os.path.isdir(subdir_path) or q[0] == ".":
            continue

        if questionToGrade is not None and q != questionToGrade:
            continue

        # create a question object
        questionDict = testParser.TestParser(
            os.path.join(subdir_path, "CONFIG")
        ).parse()
        questionClass = getattr(testClasses, questionDict["class"])
        question = questionClass(questionDict)
        questionDicts[q] = questionDict

        # load test cases into question
        tests = [t for t in os.listdir(subdir_path) if re.match(r"[^#~.].*\.test\Z", t)]
        tests = [re.match(r"(.*)\.test\Z", t).group(1) for t in tests]
        for t in sorted(tests):
            test_file = os.path.join(subdir_path, "%s.test" % t)
            solution_file = os.path.join(subdir_path, "%s.solution" % t)
            test_out_file = os.path.join(subdir_path, "%s.test_output" % t)
            testDict = testParser.TestParser(test_file).parse()
            if testDict.get("disabled", "false").lower() == "true":
                continue
            testDict["test_out_file"] = test_out_file
            testClass = getattr(projectTestClasses, testDict["class"])
            testCase = testClass(question, testDict)

            def makefun(testCase, solution_file):
                if generateSolutions:
                    # write solution file to disk
                    return lambda grades: testCase.writeSolution(
                        moduleDict, solution_file
                    )
                else:
                    # read in solution dictionary and pass as an argument
                    testDict = testParser.TestParser(test_file).parse()
                    solutionDict = testParser.TestParser(solution_file).parse()
                    if printTestCase:
                        return lambda grades: printTest(
                            testDict, solutionDict
                        ) or testCase.execute(grades, moduleDict, solutionDict)
                    else:
                        return lambda grades: testCase.execute(
                            grades, moduleDict, solutionDict
                        )

            question.addTestCase(testCase, makefun(testCase, solution_file))

        # Note extra function is necessary for scoping reasons
        def makefun(question):
            return lambda grades: question.execute(grades)

        setattr(sys.modules[__name__], q, makefun(question))
        questions.append((q, question.getMaxPoints()))

    grades = grading.Grades(
        projectParams.PROJECT_NAME,
        questions,
        edxOutput=edxOutput,
        muteOutput=muteOutput,
    )
    if questionToGrade is None:
        for q in questionDicts:
            for prereq in questionDicts[q].get("depends", "").split():
                grades.addPrereq(q, prereq)

    grades.grade(sys.modules[__name__])
    return grades.points
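When questionToGrade is None, these variants also read a whitespace-separated 'depends' field from each question's CONFIG and register the listed questions as prerequisites via grades.addPrereq. A small sketch of how that field is consumed; the CONFIG contents and question names are hypothetical:

# Parsed CONFIG for a hypothetical question 'q3':
questionDict = {'class': 'PassAllTestsQuestion',
                'max_points': '3',
                'depends': 'q1 q2'}

# Mirrors the prerequisite loop at the end of evaluate():
# each listed name becomes a prerequisite of 'q3'.
for prereq in questionDict.get('depends', '').split():
    print('q3 depends on', prereq)    # q1, then q2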
Example 7
                    # read in solution dictionary and pass as an argument
                    testDict = testParser.TestParser(test_file).parse()
                    solutionDict = testParser.TestParser(solution_file).parse()
                    if printTestCase:
                        return lambda grades: printTest(testDict, solutionDict) or testCase.execute(grades, moduleDict, solutionDict)
                    else:
                        return lambda grades: testCase.execute(grades, moduleDict, solutionDict)
            question.addTestCase(testCase, makefun(testCase, solution_file))

        # Note extra function is necessary for scoping reasons
        def makefun(question):
            return lambda grades: question.execute(grades)
        setattr(sys.modules[__name__], q, makefun(question))
        questions.append((q, question.getMaxPoints()))

    grades = grading.Grades(projectParams.PROJECT_NAME, questions, edxOutput=edxOutput, muteOutput=muteOutput)
    if questionToGrade is None:
        for q in questionDicts:
            for prereq in questionDicts[q].get('depends', '').split():
                grades.addPrereq(q, prereq)

    grades.grade(sys.modules[__name__], bonusPic = projectParams.BONUS_PIC)
    return grades.points



def getDisplay(graphicsByDefault, options=None):
    graphics = graphicsByDefault
    if options is not None and options.noGraphics:
        graphics = False
    if graphics:
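The snippet above is cut off inside getDisplay. In the standard Pacman autograder this helper usually chooses between a graphical and a text display; the completion below is only a plausible sketch under that assumption, since the rest of the function is not shown here.

def getDisplay(graphicsByDefault, options=None):
    graphics = graphicsByDefault
    if options is not None and options.noGraphics:
        graphics = False
    if graphics:
        try:
            import graphicsDisplay
            return graphicsDisplay.PacmanGraphics(1, frameTime=.05)
        except ImportError:
            # Fall back to text output if the graphics modules are unavailable.
            pass
    import textDisplay
    return textDisplay.NullGraphics()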
Example 8
def evaluate(generateSolutions,
             testRoot,
             moduleDict,
             exceptionMap=ERROR_HINT_MAP,
             edxOutput=False,
             muteOutput=False):
    # imports of testbench code.  note that the testClasses import must follow
    # the import of student code due to dependencies
    import testParser
    import testClasses
    for module in moduleDict:
        setattr(sys.modules[__name__], module, moduleDict[module])

    # iterate through and run tests
    test_subdirs = os.listdir(testRoot)
    questions = []
    for i in test_subdirs:
        subdir_path = os.path.join(testRoot, i)
        if not os.path.isdir(subdir_path) or i[0] == '.':
            continue

        # create a question object
        questionDict = testParser.TestParser(
            os.path.join(subdir_path, 'CONFIG')).parse()
        questionClass = getattr(testClasses, questionDict['class'])
        question = questionClass(questionDict)

        # load test cases into question
        tests = filter(lambda t: re.match(r'[^#~]*\.test\Z', t),
                       os.listdir(subdir_path))
        tests = map(lambda t: re.match(r'(.*)\.test\Z', t).group(1), tests)
        for t in tests:
            test_file = os.path.join(subdir_path, '%s.test' % t)
            solution_file = os.path.join(subdir_path, '%s.solution' % t)
            testDict = testParser.TestParser(test_file).parse()
            if testDict.get("disabled", "false").lower() == "true":
                continue
            testClass = getattr(projectTestClasses, testDict['class'])
            testCase = testClass(testDict)

            def makefun(testCase, solution_file):
                if generateSolutions:
                    # write solution file to disk
                    return lambda grades: testCase.writeSolution(
                        moduleDict, solution_file)
                else:
                    # read in solution dictionary and pass as an argument
                    solutionDict = testParser.TestParser(solution_file).parse()
                    return lambda grades: testCase.execute(
                        grades, moduleDict, solutionDict)

            question.addTestCase(testCase, makefun(testCase, solution_file))

        # Note extra function is necessary for scoping reasons
        def makefun(question):
            return lambda grades: question.execute(grades)

        setattr(sys.modules[__name__], i, makefun(question))
        questions.append((i, question.getMaxPoints()))

    grades = grading.Grades(projectParams.PROJECT_NAME,
                            questions,
                            edxOutput=edxOutput,
                            muteOutput=muteOutput)
    grades.grade(sys.modules[__name__])
    return grades.points
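All of these variants hang the loaded student modules and the per-question grading functions onto the grader's own module with setattr(sys.modules[__name__], ...), so grading.Grades.grade can later fetch them by name with getattr. A minimal sketch of that pattern with hypothetical names:

import sys

def make_grader(points):
    # Factory function, as in the examples above, so each closure keeps its own 'points'.
    return lambda grades: print('awarding', points, 'points')

# Attach a callable named 'q1' to the current module; it is now reachable by name.
setattr(sys.modules[__name__], 'q1', make_grader(3))
getattr(sys.modules[__name__], 'q1')(None)    # prints: awarding 3 points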