예제 #1
0
def vector_phase_comparer(comparer_params_eval, student_eval, utils):
    """
    Check that student input equals a given input (to within tolerance), up to
    an overall phase factor.

    comparer_params: [target_vector]

    Usage
    =====

    >>> from mitxgraders import MatrixGrader
    >>> grader = MatrixGrader(
    ...     answers={
    ...         'comparer_params': [
    ...             '[1, exp(-i*phi)]',
    ...         ],
    ...         'comparer': vector_phase_comparer
    ...     },
    ...     variables=['phi'],
    ... )

    >>> grader(None, '[1, exp(-i*phi)]')['ok']
    True
    >>> grader(None, '[exp(i*phi/2), exp(-i*phi/2)]')['ok']
    True
    >>> grader(None, '[i, exp(i*(pi/2 - phi))]')['ok']
    True

    >>> grader(None, '[1, exp(+i*phi)]')['ok']
    False
    >>> grader(None, '[2, 2*exp(-i*phi)]')['ok']
    False

    The comparer_params should be list with a single vector:
    >>> grader = MatrixGrader(
    ...     answers={
    ...         'comparer_params': [
    ...             '[1, 1, 0]',
    ...             '[0, 1, 1]'
    ...         ],
    ...         'comparer': vector_phase_comparer
    ...     },
    ... )
    >>> try:
    ...     grader(None, '[1, 2, 3]')               # doctest: +ELLIPSIS
    ... except StudentFacingError as error:
    ...     print(error)
    Problem Configuration Error: ...to a single vector.
    """
    # Validate that author comparer_params evaluate to a single vector.
    # NOTE: the parentheses are essential. Without them this parses as
    # `(not len(...) == 1) and is_vector(...)`, which fails to raise when
    # exactly one non-vector parameter is supplied (or when the first of
    # several parameters is not a vector), silently accepting a bad config.
    if not (len(comparer_params_eval) == 1 and is_vector(comparer_params_eval[0])):
        raise StudentFacingError('Problem Configuration Error: comparer_params '
            'should be a list of strings that evaluate to a single vector.')

    # We'll check that student input is in the span as target vector and that
    # it has the same magnitude

    in_span = vector_span_comparer(comparer_params_eval, student_eval, utils)

    expected_mag = np.linalg.norm(comparer_params_eval[0])
    student_mag = np.linalg.norm(student_eval)
    same_magnitude = utils.within_tolerance(expected_mag, student_mag)

    return in_span and same_magnitude
예제 #2
0
def vector_span_comparer(comparer_params_eval, student_eval, utils):
    """
    Check whether student's answer is nonzero and in the span of some given
    vectors.

    comparer_params: A list of vectors

    Usage
    =====

    Use a single vector as comparer_params to test whether student input is
    parallel to a particular vector:
    >>> from mitxgraders import MatrixGrader
    >>> grader = MatrixGrader(
    ...     answers={
    ...         'comparer_params': [
    ...             '[3, x, 1 + i]',
    ...         ],
    ...         'comparer': vector_span_comparer
    ...     },
    ...     variables=['x'],
    ... )
    >>> grader(None, '[3, x, 1 + i]')['ok']
    True
    >>> grader(None, '[9, 3*x, 3 + 3*i]')['ok']
    True
    >>> grader(None, '[9, 3*x, 3 - 3*i]')['ok']
    False

    Complex scale factors work, too:
    >>> grader(None, '(4 + 2*i)*[3, x, 1 + i]')['ok']
    True

    Student input should be nonzero:
    >>> result = grader(None, '[0, 0, 0]')
    >>> expected = {
    ...     'ok': False,
    ...     'grade_decimal': 0.0,
    ...     'msg': 'Input should be a nonzero vector.'
    ... }
    >>> result == expected
    True

    Input shape is validated:
    >>> try:
    ...     grader(None, '5')
    ... except InputTypeError as error:
    ...     print(error)
    Expected answer to be a vector, but input is a scalar

    Multiple vectors can be provided:
    >>> grader = MatrixGrader(
    ...     answers={
    ...         'comparer_params': [
    ...             '[1, 1, 0]',    # v0
    ...             '[0, 1, 2]'     # v1
    ...         ],
    ...         'comparer': vector_span_comparer
    ...     },
    ... )

    The vector 2*v0 + 3i*v1 = [2, 2+3i, 6i] is in the span of v0 and v1:
    >>> grader(None, '[2, 2 + 3*i, 6*i]')['ok']
    True

    The comparer_params should be list of equal-length vectors:
    >>> grader = MatrixGrader(
    ...     answers={
    ...         'comparer_params': [
    ...             '[1, 1, 0]',
    ...             '5'
    ...         ],
    ...         'comparer': vector_span_comparer
    ...     },
    ... )
    >>> try:
    ...     grader(None, '[1, 2, 3]')               # doctest: +ELLIPSIS
    ... except StudentFacingError as error:
    ...     print(error)
    Problem Configuration Error: ...to equal-length vectors
    """
    # The author-supplied parameters must all be vectors of one common length.
    if not are_same_length_vectors(comparer_params_eval):
        raise StudentFacingError('Problem Configuration Error: comparer_params '
            'should be a list of strings that evaluate to equal-length vectors')

    # The student's input must have the same shape as the given vectors.
    utils.validate_shape(student_eval, comparer_params_eval[0].shape)

    # A zero vector is trivially in every span; reject it explicitly.
    student_norm = np.linalg.norm(student_eval)
    if utils.within_tolerance(0, student_norm):
        return {
            'ok': False,
            'grade_decimal': 0,
            'msg': 'Input should be a nonzero vector.'
        }

    # Least-squares projection of the student vector onto the span of the
    # given vectors: if the residual is tiny relative to the student input,
    # the input lies (numerically) in the span.
    basis = np.array(comparer_params_eval).transpose()
    # rcond=-1 uses machine precision when testing singular values.
    # In numpy 1.14+, rcond=None gives this behavior. (we use 1.6)
    _, residuals, _, _ = np.linalg.lstsq(basis, student_eval, rcond=-1)
    residual_norm = np.sqrt(residuals)

    # Compare the residual against zero, using student_eval as the reference
    # scale when tolerance is specified as a percentage.
    return is_nearly_zero(residual_norm, utils.tolerance, reference=student_eval)
예제 #3
0
    def __call__(self, expect, student_input):
        """
        Used to ask the grading class to grade student_input.
        Used by edX as the check function (cfn).

        Arguments:
            expect: The value of edX customresponse expect attribute (ignored)
            student_input: The student's input passed by edX

        Returns:
            The result dictionary produced by self.check, with the debug log
            appended to its message when config["debug"] is True.

        Raises:
            MITxError subclasses (re-raised with <br/> line breaks) or a
            generic StudentFacingError when self.check fails.

        Notes:
            This function ignores the value of expect.

            This is because edX requires a two-parameter check function cfn
            with the signature above. In the graders module, we NEVER use
            the <customresponse /> tag's expect attribute for grading.

            (Our check functions require an answer dictionary, as described
            in the documentation.)

            But we do want to allow authors to use the edX <customresponse />
            expect attribute because its value is displayed to students as
            the "correct" answer.

            The answer that we pass to check is None, indicating that the
            grader should read the answer from its internal configuration.
        """
        # Initialize the debug log
        # The debug log always exists and is written to, so that it can be accessed
        # programmatically. It is only output with the grading when config["debug"] is True
        # When subgraders are used, they need to be given access to this debuglog.
        # Note that debug=True must be set on parents to obtain debug output from children
        # when nested graders (lists) are used.
        self.debuglog = []
        # Add the version to the debug log
        self.log("MITx Grading Library Version " + __version__)
        # Add the student inputs to the debug log
        if isinstance(student_input, list):
            self.log("Student Responses:\n" +
                     "\n".join(map(str, student_input)))
        else:
            self.log("Student Response:\n" + str(student_input))

        # Compute the result of the check
        try:
            result = self.check(None, student_input)
        except Exception as error:
            if self.config['debug']:
                raise
            elif isinstance(error, MITxError):
                # we want to re-raise the error with a modified message but the
                # same class type, hence calling __class__
                # Use str(error) rather than error.message: BaseException.message
                # was deprecated in Python 2.6 and removed in Python 3, where it
                # would raise AttributeError and mask the original error.
                raise error.__class__(str(error).replace('\n', '<br/>'))
            else:
                # Otherwise, give a generic error message
                if isinstance(student_input, list):
                    msg = "Invalid Input: Could not check inputs '{}'"
                    formatted = msg.format("', '".join(student_input))
                else:
                    msg = "Invalid Input: Could not check input '{}'"
                    formatted = msg.format(student_input)
                raise StudentFacingError(formatted)

        # Append the debug log to the result if requested
        if self.config['debug']:
            if "input_list" in result:
                # Multiple inputs
                if result.get('overall_message', ''):
                    result['overall_message'] += "\n\n" + self.log_output(
                    )  # pragma: no cover
                else:
                    result['overall_message'] = self.log_output()
            else:
                # Single input
                if result.get('msg', ''):
                    result['msg'] += "\n\n" + self.log_output()
                else:
                    result['msg'] = self.log_output()

        self.format_messages(result)
        return result
예제 #4
0
    def __call__(self, expect, student_input, **kwargs):
        """
        Used to ask the grading class to grade student_input.
        Used by edX as the check function (cfn).

        Arguments:
            expect: The value of edX customresponse expect attribute (often ignored)
            student_input: The student's input passed by edX
            **kwargs: Anything else that edX passes (using the "cfn_extra_args" XML tag)

        The only kwarg that can influence grading at all is 'attempt'.

        Notes:
            This function ignores the value of expect. The expect argument is
            provided because edX requires that a check function to have the
            signature above.

            Our graders usually read the author's expected answer from the
            grader configuration. This is because we generally use
            dictionaries to store the expected input along with correctness,
            grades, and feedback messages.

            Authors should still specify the <customresponse />
            expect attribute because its value is displayed to students as
            the "correct" answer.

            ItemGraders: If no answer is provided in the configuration, an
            ItemGrader will attempt to infer its answer from the expect
            parameter of a textline or CustomResponse tag. Note that this does
            not work when an ItemGrader is embedded inside a ListGrader. See
            ItemGrader.__call__ for the implementation.
        """
        student_input = self.ensure_text_inputs(student_input)

        # Set up the debug log for this grading pass
        self.create_debuglog(student_input)
        # Reset the flag so that a subsequent call builds a fresh log
        self.log_created = False

        # Run the actual grading, converting unexpected failures into
        # student-facing errors (unless debugging, when we want the traceback)
        try:
            result = self.check(None, student_input)
        except Exception as error:
            if self.config['debug']:
                raise
            if isinstance(error, MITxError):
                # we want to re-raise the error with a modified message but the
                # same class type, hence calling __class__
                raise error.__class__(
                    six.text_type(error).replace('\n', '<br/>'))
            # Anything else gets a generic message
            if isinstance(student_input, list):
                formatted = "Invalid Input: Could not check inputs '{}'".format(
                    "', '".join(student_input))
            else:
                formatted = "Invalid Input: Could not check input '{}'".format(
                    student_input)
            raise StudentFacingError(formatted)

        # Strip the result down to the keys edX understands; list graders
        # may use other keys to track information between nesting levels.
        wanted = ('ok', 'grade_decimal', 'msg')
        if 'input_list' in result:
            # Multiple inputs: clean each entry in place
            result['input_list'] = [
                {key: val for key, val in entry.items() if key in wanted}
                for entry in result['input_list']
            ]
        else:
            # Single input
            result = {key: val for key, val in result.items() if key in wanted}

        # Handle partial credit based on attempt number
        if self.config['attempt_based_credit']:
            self.apply_attempt_based_credit(result, kwargs.get('attempt'))

        # Attach the debug log to the outgoing message when requested
        if self.config['debug']:
            target = 'overall_message' if 'input_list' in result else 'msg'
            if result.get(target, ''):
                result[target] += "\n\n" + self.log_output()  # pragma: no cover
            else:
                result[target] = self.log_output()

        self.format_messages(result)
        return result
예제 #5
0
    def __call__(self, expect, student_input):
        """
        Used to ask the grading class to grade student_input.
        Used by edX as the check function (cfn).

        Arguments:
            expect: The value of edX customresponse expect attribute (ignored).
            student_input: The student's input passed by edX

        Returns:
            The result dictionary produced by self.check, with the debug log
            appended to its message when config["debug"] is True.

        Raises:
            MITxError subclasses (re-raised with <br/> line breaks) or a
            generic StudentFacingError when self.check fails.

        Notes:
            This function ignores the value of expect. The expect argument is
            provided because edX requires that a check function to have the
            signature above.

            Our graders usually read the author's expected answer from the
            grader configuration. This is because we generally use
            dictionaries to store the expected input along with correctness,
            grades, and feedback messages.

            Authors should still specify the <customresponse />
            expect attribute because its value is displayed to students as
            the "correct" answer.

            ItemGraders: If no answer is provided in the configuration, an
            ItemGrader will attempt to infer its answer from the expect
            parameter of a textline or CustomResponse tag. Note that this does
            not work when an ItemGrader is embedded inside a ListGrader. See
            ItemGrader.__call__ for the implementation.
        """
        # Initialize the debug log
        # The debug log always exists and is written to, so that it can be accessed
        # programmatically. It is only output with the grading when config["debug"] is True
        # When subgraders are used, they need to be given access to this debuglog.
        # Note that debug=True must be set on parents to obtain debug output from children
        # when nested graders (lists) are used.
        self.debuglog = []
        # Add the version to the debug log
        self.log("MITx Grading Library Version " + __version__)
        # Add the student inputs to the debug log
        if isinstance(student_input, list):
            self.log("Student Responses:\n" + "\n".join(map(str, student_input)))
        else:
            self.log("Student Response:\n" + str(student_input))

        # Compute the result of the check
        try:
            result = self.check(None, student_input)
        except Exception as error:
            if self.config['debug']:
                raise
            elif isinstance(error, MITxError):
                # we want to re-raise the error with a modified message but the
                # same class type, hence calling __class__
                # Use str(error) rather than error.message: BaseException.message
                # was deprecated in Python 2.6 and removed in Python 3, where it
                # would raise AttributeError and mask the original error.
                raise error.__class__(str(error).replace('\n', '<br/>'))
            else:
                # Otherwise, give a generic error message
                if isinstance(student_input, list):
                    msg = "Invalid Input: Could not check inputs '{}'"
                    formatted = msg.format("', '".join(student_input))
                else:
                    msg = "Invalid Input: Could not check input '{}'"
                    formatted = msg.format(student_input)
                raise StudentFacingError(formatted)

        # Append the debug log to the result if requested
        if self.config['debug']:
            if "input_list" in result:
                # Multiple inputs
                if result.get('overall_message', ''):
                    result['overall_message'] += "\n\n" + self.log_output()  # pragma: no cover
                else:
                    result['overall_message'] = self.log_output()
            else:
                # Single input
                if result.get('msg', ''):
                    result['msg'] += "\n\n" + self.log_output()
                else:
                    result['msg'] = self.log_output()

        self.format_messages(result)
        return result