Example #1
    def grade_bracket(self, answers, student_answer, grade_entry):
        """
        Grade a bracket, adding to the grade_entry as appropriate.
        """
        # If the number is wrong (zero credit so far), the bracket doesn't need to be graded
        if grade_entry['grade_decimal'] == 0:
            return grade_entry

        # Find the bracket that awards the most credit (could be 0)
        best = None
        for bracket in answers:
            if student_answer == bracket['expect']:
                if best is None or bracket['grade_decimal'] > best['grade_decimal']:
                    best = bracket

        # If no matching bracket was found, zero out the score
        if best is None:
            grade_entry['grade_decimal'] = 0
            grade_entry['ok'] = False
            return grade_entry

        # Update the grade_decimal and ok entries, as well as the message
        grade_entry['grade_decimal'] *= best['grade_decimal']
        if best['msg']:
            if grade_entry['msg']:
                grade_entry['msg'] += '\n'
            grade_entry['msg'] += best['msg']

        # Fix the ok entry
        grade_entry['ok'] = AbstractGrader.grade_decimal_to_ok(grade_entry['grade_decimal'])

        return grade_entry
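
A minimal sketch of how grade_bracket might be exercised. The bracket structure (the 'expect', 'grade_decimal', and 'msg' keys) is inferred from the lookups in the method above, and the values are illustrative, not taken from the library's documentation:

# Hypothetical bracket list and grade entry, shaped after the key lookups
# in grade_bracket above.
answers = [
    {'expect': 'J', 'grade_decimal': 1, 'msg': ''},
    {'expect': 'N', 'grade_decimal': 0.5, 'msg': 'Wrong units.'},
]
grade_entry = {'grade_decimal': 1, 'ok': True, 'msg': ''}

# With student_answer == 'N', the loop selects the second bracket, scales
# the grade by 0.5, appends its message, and recomputes 'ok'; roughly:
# {'grade_decimal': 0.5, 'ok': 'partial', 'msg': 'Wrong units.'}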
Example #2
def consolidate_single_return(input_list, n_expect=None, partial_credit=True):
    r"""
    Consolidates a long-form customresponse return dictionary into a single dictionary.

    Arguments:
        input_list (list): a list of customresponse single-answer dictionaries
            each has keys 'ok', 'grade_decimal', 'msg'
        n_expect: The expected number of answers, defaults to len(input_list).
            Used in assigning partial credit.
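        partial_credit (bool): whether to award partial credit; if False,
            any consolidated grade below 1 is zeroed out (default True)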

    Usage
    =====
    >>> input_list = [
    ...     {'ok': True, 'msg': 'msg_0', 'grade_decimal':1},
    ...     {'ok': 'partial', 'msg': 'msg_1', 'grade_decimal':0.5},
    ...     {'ok': False, 'msg': 'msg_2', 'grade_decimal':0},
    ...     {'ok': 'partial', 'msg': 'msg_3', 'grade_decimal':0.1},
    ... ]
    >>> expect = {
    ...     'ok':'partial',
    ...     'grade_decimal': (1 + 0.5 + 0 + 0.1)/4,
    ...     'msg': 'msg_0\nmsg_1\nmsg_2\nmsg_3'
    ... }
    >>> result = consolidate_single_return(input_list)
    >>> expect == result
    True
    """
    if n_expect is None:
        n_expect = len(input_list)

    grade_decimals = [result['grade_decimal'] for result in input_list]
    grade_decimal = consolidate_grades(grade_decimals, n_expect)
    if not partial_credit and grade_decimal < 1:
        grade_decimal = 0
    ok_status = AbstractGrader.grade_decimal_to_ok(grade_decimal)

    messages = [result['msg'] for result in input_list]

    result = {
        'grade_decimal': grade_decimal,
        'ok': ok_status,
        'msg': '\n'.join([message for message in messages if message != ''])
    }

    return result
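
A quick sketch of the partial_credit flag in action, reusing input_list from the doctest above. The expected values follow directly from the code, not from the library's documentation:

# With partial_credit=False, the consolidated grade of 0.4 falls below 1
# and is zeroed out, so 'ok' becomes False.
result = consolidate_single_return(input_list, partial_credit=False)
assert result['grade_decimal'] == 0
assert result['ok'] is False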
Example #3
    def process_grade_list(self, grade_list, num_answers, msg, grade_decimal):
        """
        Convert a list of grades into a single grade for returning.
        """
        # Consolidate the separate results into a single result
        result = consolidate_single_return(
            grade_list,
            n_expect=num_answers,
            partial_credit=self.config['partial_credit'])

        # Check if all inputs were awarded credit
        if not isinstance(self.config['subgrader'], SingleListGrader):
            # Check to see if all items were awarded credit
            all_awarded = all(item['grade_decimal'] > 0 for item in grade_list)
        else:
            # Check to see if all_awarded was True for all of the child SingleListGraders
            all_awarded = all(item['all_awarded'] for item in grade_list)

        # Mark if all inputs were awarded in the result, so that any higher
        # level graders can use this information.
        result['all_awarded'] = all_awarded

        # Append the message if there is one (and it's deserved)
        if all_awarded and msg != '':
            if result['msg'] == '':
                result['msg'] = msg
            else:
                result['msg'] += '\n' + msg

        # Apply the overall grade_decimal for this answer
        result['grade_decimal'] *= grade_decimal
        result['ok'] = AbstractGrader.grade_decimal_to_ok(
            result['grade_decimal'])

        # Tack on the individual grades (may be used by subclasses)
        result['individual'] = grade_list

        return result
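
For orientation, a sketch of the dictionary shape process_grade_list returns. The field names are taken from the code above; the values are purely illustrative:

# Illustrative only: the shape of the returned result.
result = {
    'grade_decimal': 0.75,   # consolidated grade, scaled by the answer's grade_decimal
    'ok': 'partial',         # derived via AbstractGrader.grade_decimal_to_ok
    'msg': 'msg_0\nmsg_1',   # joined sub-messages (plus the answer msg if all_awarded)
    'all_awarded': False,    # True only if every item earned credit
    'individual': [],        # the raw per-item grade_list, for use by subclasses
}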
Example #4
def test_registered_defaults():
    # Test that each class has its own default_values attribute
    AbstractGrader.register_defaults({'test': False})
    assert StringGrader.default_values is None
    AbstractGrader.clear_registered_defaults()

    # Test that registered defaults are used in instantiation
    StringGrader.register_defaults({'case_sensitive': False})
    assert StringGrader.default_values == {'case_sensitive': False}
    grader = StringGrader()
    assert grader.config['case_sensitive'] is False

    # Test that registered defaults clear correctly
    StringGrader.clear_registered_defaults()
    assert StringGrader.default_values is None

    # Check that registered defaults propagate to subclasses
    AbstractGrader.register_defaults({'debug': True})
    grader = StringGrader()
    assert grader.config['debug']
    assert StringGrader.default_values is None
    AbstractGrader.clear_registered_defaults()

    # Check that registered defaults layer up through a subclass chain
    AbstractGrader.register_defaults({'debug': True})
    ItemGrader.register_defaults({'wrong_msg': 'haha!'})
    StringGrader.register_defaults({'case_sensitive': False})
    grader = StringGrader()
    assert grader.config['debug']
    assert grader.config['wrong_msg'] == 'haha!'
    assert not grader.config['case_sensitive']
    AbstractGrader.clear_registered_defaults()
    ItemGrader.clear_registered_defaults()
    StringGrader.clear_registered_defaults()

    # Check that registered defaults can be higher level than where they're defined
    StringGrader.register_defaults({'debug': True})
    assert AbstractGrader.default_values is None
    grader = StringGrader()
    assert grader.config['debug']
    StringGrader.clear_registered_defaults()

    # Check that registered defaults are logged in the debug log
    StringGrader.register_defaults({'debug': True})
    grader = StringGrader()
    result = grader('cat', 'cat')
    expect = """<pre>MITx Grading Library Version {}<br/>
Student Response:<br/>
cat<br/>
Using modified defaults: {{"debug": true}}<br/>
Expect value inferred to be "cat"</pre>""".format(__version__)
    assert result['msg'] == expect
    StringGrader.clear_registered_defaults()
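
Since registered defaults are class-level state, an assertion failure between register_defaults and clear_registered_defaults would leak the modified defaults into later tests. A defensive pattern (a sketch, not part of the library's test suite) is to guard each pair with try/finally:

# A sketch: guard class-level registered defaults with try/finally so a
# failing assertion cannot leak state into subsequent tests.
StringGrader.register_defaults({'case_sensitive': False})
try:
    grader = StringGrader()
    assert grader.config['case_sensitive'] is False
finally:
    StringGrader.clear_registered_defaults()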
Example #5
    def check_response(self, answer, student_input, **kwargs):
        """Check student_input against a given answer list"""
        # Unpack the given answer
        answers = answer['expect']  # The list of answers
        msg = answer['msg']
        grade_decimal = answer['grade_decimal']

        # Split the student response
        student_list = student_input.split(self.config['delimiter'])

        # Check for empty entries in the list
        if self.config['missing_error']:
            bad_items = [
                idx + 1 for (idx, item) in enumerate(student_list)
                if item.strip() == ''
            ]
            if bad_items:
                if len(bad_items) == 1:
                    msg = 'List error: Empty entry detected in position '
                else:
                    msg = 'List error: Empty entries detected in positions '
                msg += ', '.join(map(str, bad_items))
                raise MissingInput(msg)

        # Check for the wrong number of entries
        if self.config['length_error'] and len(answers) != len(student_list):
            msg = 'List length error: Expected {} terms in the list, but received {}. ' + \
                  'Separate items with character "{}"'
            raise MissingInput(
                msg.format(len(answers), len(student_list),
                           self.config['delimiter']))

        # We need to keep track of missing and extra answers.
        # Idea is:
        #    use _AutomaticFailure to pad the answer and student lists to equal length
        #    modify the check function to reject _AutomaticFailure entries
        pad_ans, pad_stud = get_padded_lists(answers, student_list)
        # Modify the check function to deal with the padding
        checker = padded_check(self.config['subgrader'].check)

        # Compute the results
        if self.config['ordered']:
            input_list = [checker(*pair) for pair in zip(pad_ans, pad_stud)]
        else:
            input_list = find_optimal_order(checker, pad_ans, pad_stud)

        # Consolidate the separate results into a single result
        result = consolidate_single_return(
            input_list,
            n_expect=len(answers),
            partial_credit=self.config['partial_credit'])

        # Check if all inputs were awarded credit
        if not isinstance(self.config['subgrader'], SingleListGrader):
            # Check to see if all items were awarded credit
            all_awarded = all(item['grade_decimal'] > 0 for item in input_list)
        else:
            # Check to see if all_awarded was True for all of the child SingleListGraders
            all_awarded = all(item['all_awarded'] for item in input_list)

        # Mark if all inputs were awarded in the result, so that any higher
        # level graders can use this information.
        result['all_awarded'] = all_awarded

        # Append the message if there is one (and it's deserved)
        if all_awarded and msg != '':
            if result['msg'] == '':
                result['msg'] = msg
            else:
                result['msg'] += '\n' + msg

        # Apply the overall grade_decimal for this answer
        result['grade_decimal'] *= grade_decimal
        result['ok'] = AbstractGrader.grade_decimal_to_ok(
            result['grade_decimal'])

        return result
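
The helpers get_padded_lists and padded_check are used above but not shown. A minimal sketch of the idea spelled out in the comments, with names and behavior as assumptions rather than the library's actual implementation:

class _AutomaticFailure(object):
    """Sentinel used to pad the shorter list (assumed shape)."""

def get_padded_lists(answers, student_list):
    """Pad both lists with _AutomaticFailure() to equal length."""
    target = max(len(answers), len(student_list))
    pad = lambda items: list(items) + [_AutomaticFailure()] * (target - len(items))
    return pad(answers), pad(student_list)

def padded_check(check):
    """Wrap a check function so padded entries earn zero credit."""
    def checker(answer, student):
        if isinstance(answer, _AutomaticFailure) or isinstance(student, _AutomaticFailure):
            return {'ok': False, 'grade_decimal': 0, 'msg': ''}
        return check(answer, student)
    return checker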