Example #1
    def post_schema_ans_val(self, answer_tuple):
        """
        Used to validate the individual 'expect' lists in the 'answers' key.
        This must be done after the schema has finished validation, as we need access
        to the 'subgrader' configuration key to perform this validation.
        """
        # The structure of answer_tuple at this stage is:
        # tuple(dict('expect', 'grade_decimal', 'ok', 'msg'))
        # where 'expect' is a list that needs validation.

        # Check that all lists in the tuple have the same length
        for answer_list in answer_tuple:
            if len(answer_list['expect']) != len(answer_tuple[0]['expect']):
                raise ConfigError(
                    "All possible list answers must have the same length")

        # Check for empty entries anywhere in answers_tuple (which can be a nested mess!)
        # We do this before validating individual entries, as strings may be coerced into other
        # objects by schema validation (e.g., FormulaGrader coerces expect into a dict)
        if self.config['missing_error']:
            demand_no_empty(answer_tuple)

        # Validate each entry in 'expect' lists using the subgrader
        for answer_list in answer_tuple:
            expect = answer_list['expect']
            for index, answer in enumerate(expect):
                # Run the answers through the subgrader schema and the post-schema validation
                expect[index] = self.config['subgrader'].schema_answers(answer)
                expect[index] = self.config['subgrader'].post_schema_ans_val(
                    expect[index])
            if not expect:
                raise ConfigError("Cannot have an empty list of answers")

        return answer_tuple
Example #2
def validate_blacklist_whitelist_config(default_funcs, blacklist, whitelist):
    """Validates the whitelist/blacklist configuration.

    Arguments:
        default_funcs: an iterable whose elements are function names
            Examples: {'func1':..., 'func2':..., ...} or ['func1', 'func2']
        blacklist ([str]): a list of function names
        whitelist ([str]): a list of function names

    Notes: Voluptuous should already have type-checked blacklist and whitelist.
    Now check:
    1. whitelist/blacklist are not both used
    2. All whitelist/blacklist functions actually exist in default_funcs
    """
    if blacklist and whitelist:
        raise ConfigError("Cannot whitelist and blacklist at the same time")
    for func in blacklist:
        # No need to check user_functions too; if you don't want students to
        # use one of the user_functions, just don't add it in the first place.
        if func not in default_funcs:
            raise ConfigError(
                "Unknown function in blacklist: {func}".format(func=func))

    if whitelist == [None]:
        return

    for func in whitelist:
        if func not in default_funcs:
            raise ConfigError(
                "Unknown function in whitelist: {func}".format(func=func))
Example #3
    def validate_grouping(self):
        """Validate a grouping list"""
        # A single subgrader must itself be a ListGrader
        if not self.subgrader_list and not isinstance(self.config['subgraders'], ListGrader):
            msg = "A ListGrader with groupings must have a ListGrader subgrader " + \
                  "or a list of subgraders"
            raise ConfigError(msg)

        # Unordered, each group must have the same number of entries
        if not self.config['ordered']:
            group_len = len(self.grouping[0])
            for group in self.grouping:
                if len(group) != group_len:
                    raise ConfigError("Groups must all be the same length when unordered")

        # If using multiple subgraders, make sure we have the right number of subgraders
        if self.subgrader_list:
            if len(self.grouping) != len(self.config['subgraders']):
                raise ConfigError("Number of subgraders and number of groups are not equal")
            # Furthermore, lists (groups with more than one entry) must go to ListGraders
            for py_idx, group in enumerate(self.grouping):
                group_idx = py_idx + 1
                num_items = len(group)
                subgrader = self.config['subgraders'][py_idx]
                if num_items > 1 and not isinstance(subgrader, ListGrader):
                    msg = "Grouping index {} has {} items, but has a {} subgrader " + \
                          "instead of ListGrader"
                    raise ConfigError(msg.format(group_idx, num_items, type(subgrader).__name__))
Example #4
    def check(self, answers, student_input, **kwargs):
        """Checks student_input against answers, which may be provided"""
        # If no answers provided, use the internal configuration
        answers = self.config['answers'] if answers is None else answers

        # answers should now be a tuple of answers
        # Check that there is at least one answer to compare to
        if not isinstance(answers, tuple):  # pragma: no cover
            msg = "Expected answers to be a tuple of answers, instead received {}"
            raise ConfigError(msg.format(type(answers)))
        if not answers:
            raise ConfigError("Expected at least one answer in answers")

        # Pass our debuglog to the subgraders, so that any that have debug=True can use it
        if self.subgrader_list:
            for subgrader in self.config['subgraders']:
                subgrader.debuglog = self.debuglog
        else:
            self.config['subgraders'].debuglog = self.debuglog

        # Go and grade the responses
        if isinstance(student_input, list):
            # Compute the results for each answer
            results = [self.perform_check(answer_list, student_input) for answer_list in answers]
            return self.get_best_result(results)
        else:
            msg = "Expected student_input to be a list, but received {}"
            raise ConfigError(msg.format(type(student_input)))
Example #5
    def validate_input_positions(input_positions):
        """
        Ensure that the provided student input positions are valid.
        """
        used_positions_list = [
            input_positions[key] for key in input_positions
            if input_positions[key] is not None
        ]
        used_positions_set = set(used_positions_list)

        # Ensure no position is used twice
        if len(used_positions_list) > len(used_positions_set):
            raise ConfigError("Key input_positions has repeated indices.")

        # Ensure positions are sequential, starting at 1
        if used_positions_set != set(range(1, len(used_positions_set) + 1)):
            msg = "Key input_positions values must be consecutive positive integers starting at 1"
            raise ConfigError(msg)

        # Convert from 1-based to 0-based indexing
        return {
            key: input_positions[key] - 1 if input_positions[key] is not None else None
            for key in input_positions
        }
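For illustration, a hedged sketch of the conversion this returns (the key names are hypothetical, and the method is assumed to be callable as a staticmethod):

positions = {'lower': 1, 'upper': 3, 'middle': 2, 'unused': None}
validate_input_positions(positions)
# -> {'lower': 0, 'upper': 2, 'middle': 1, 'unused': None}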
Example #6
    def schema_answers(self, answers_tuple):
        """
        Defines the schema to validate an answer tuple against.

        This will transform the input to a tuple as necessary, and then attempt to
        validate the answers_tuple using the defined subgraders.

        Two forms for the answer tuple are acceptable:

        1. A list of answers
        2. A tuple of lists of answers
        """
        # Turn answers_tuple into a tuple if it isn't already
        if isinstance(answers_tuple, list):
            if len(answers_tuple) == 1:
                raise ConfigError('ListGrader does not work with a single answer')
            elif not answers_tuple:  # empty list
                # Nothing further to check here. This must be a nested grader, which will
                # be called upon to check answers again a bit later.
                return tuple()
            answers_tuple = (answers_tuple,)
        elif not isinstance(answers_tuple, tuple):  # pragma: no cover
            # Should not get here; voluptuous should catch this beforehand
            raise ConfigError("Answer list must be a list or a tuple of lists")

        # Check that all lists in the tuple have the same length
        for answer_list in answers_tuple:
            if len(answer_list) != len(answers_tuple[0]):
                raise ConfigError("All possible list answers must have the same length")

        # Check that the subgraders are commensurate with the answers
        if self.subgrader_list:
            # We have a list of subgraders
            subgraders = self.config['subgraders']

            # Ensure that multiple subgraders are valid
            if len(subgraders) != len(answers_tuple[0]):
                raise ConfigError('The number of subgraders and answers are different')
            if not self.config['ordered']:
                raise ConfigError('Cannot use unordered lists with multiple graders')

            # Validate answer_list using the subgraders
            for answer_list in answers_tuple:
                for index, answer in enumerate(answer_list):
                    answer_list[index] = subgraders[index].schema_answers(answer)
        else:
            # We have a single subgrader
            subgrader = self.config['subgraders']

            # Validate answer_list using the subgraders
            for answer_list in answers_tuple:
                for index, answer in enumerate(answer_list):
                    answer_list[index] = subgrader.schema_answers(answer)

        return answers_tuple
Example #7
    def validate_submission(self, answers, student_list):
        """
        Make sure that the student_list has the right number of entries.
        Compares against the grouping if one is configured, otherwise against the answers.
        """
        if self.config['grouping']:
            if len(self.config['grouping']) != len(student_list):
                msg = "Grouping indicates {} inputs are expected, but {} inputs exist."
                raise ConfigError(msg.format(len(self.config['grouping']), len(student_list)))
        else:
            if len(answers) != len(student_list):
                msg = "The number of answers ({}) and the number of inputs ({}) are different"
                raise ConfigError(msg.format(len(answers), len(student_list)))
Example #8
    def check(self, answers, student_input, **kwargs):
        """
        Compares student input to each answer in answers, using check_response.
        Computes the best outcome for the student.

        Arguments:
            answers: A tuple of answers to compare to, or None to use internal config
            student_input (str): The student's input passed by edX
            **kwargs: Anything else that has been passed in. For example, sibling
                graders when a grader is used as a subgrader in a ListGrader.
        """
        # If no answers provided, use the internal configuration
        answers = self.config['answers'] if answers is None else answers

        # answers should now be a tuple of answers
        # Check that there is at least one answer to compare to
        if not isinstance(answers, tuple):  # pragma: no cover
            msg = (
                "There is a problem with the author's problem configuration: "
                "Expected answers to be a tuple of answers, instead received {}"
            )
            raise ConfigError(msg.format(type(answers)))
        if not answers:
            msg = (
                "There is a problem with the author's problem configuration: "
                "Expected at least one answer in answers")
            raise ConfigError(msg)

        # Make sure the input is in the expected format
        if not isinstance(student_input, six.string_types):
            msg = "Expected string for student_input, received {}"
            raise ConfigError(msg.format(type(student_input)))

        # Compute the results for each answer
        results = [
            self.check_response(answer, student_input, **kwargs)
            for answer in answers
        ]

        # Now find the best result for the student
        best_score = max([r['grade_decimal'] for r in results])
        best_results = [r for r in results if r['grade_decimal'] == best_score]
        best_result_with_longest_msg = max(best_results,
                                           key=lambda r: len(r['msg']))

        # Add in wrong_msg if appropriate
        if best_result_with_longest_msg['msg'] == "" and best_score == 0:
            best_result_with_longest_msg['msg'] = self.config["wrong_msg"]

        return best_result_with_longest_msg
Example #9
    def __init__(self, config=None, **kwargs):
        """
        Configure the class as normal, then set complex for hermitian/antihermitian
        """
        super(SquareMatrices, self).__init__(config, **kwargs)
        if self.config['symmetry'] in ['hermitian', 'antihermitian']:
            self.config['complex'] = True

        # A couple of cases that are possible but we can't handle:
        if self.config['determinant'] == 0:
            if self.config['traceless']:
                raise ConfigError("Unable to generate zero determinant traceless matrices")
            if self.config['symmetry'] == 'antisymmetric':
                # Real antisymmetric matrices in odd dimension automatically have zero determinant
                if self.config['complex']:
                    raise ConfigError("Unable to generate complex zero determinant antisymmetric matrices")
                if self.config['dimension'] % 2 == 0:
                    raise ConfigError("Unable to generate real zero determinant antisymmetric matrices in even dimensions")
        # And a handful of cases that don't exist
        if self.config['determinant'] == 1:
            if self.config['dimension'] == 2 and self.config['traceless']:
                if self.config['symmetry'] == 'diagonal' and not self.config['complex']:
                    raise ConfigError("No real, traceless, unit-determinant, diagonal 2x2 matrix exists")
                elif self.config['symmetry'] == 'symmetric' and not self.config['complex']:
                    raise ConfigError("No real, traceless, unit-determinant, symmetric 2x2 matrix exists")
                elif self.config['symmetry'] == 'hermitian':
                    raise ConfigError("No traceless, unit-determinant, Hermitian 2x2 matrix exists")
            if self.config['dimension'] % 2 == 1:  # Odd dimension
                if self.config['symmetry'] == 'antisymmetric':
                    # Odd-dimensional antisymmetric matrices always have zero determinant
                    raise ConfigError("No unit-determinant antisymmetric matrix exists in odd dimensions")
                if self.config['symmetry'] == 'antihermitian':
                    # Eigenvalues are all imaginary, so determinant is imaginary
                    raise ConfigError("No unit-determinant antihermitian matrix exists in odd dimensions")
Example #10
        def random_function(*args):
            """Function that generates the random values"""
            # Check that the dimensions are correct
            if len(args) != input_dim:
                msg = "Expected {} arguments, but received {}".format(
                    input_dim, len(args))
                raise ConfigError(msg)

            # Turn the inputs into an array
            xvec = np.array(args)
            # Repeat it into the shape of A, B and C
            xarray = np.tile(xvec, (output_dim, num_terms, 1))
            # Compute the output matrix
            output = A * np.sin(B * xarray + C)
            # Sum over the j and k terms
            # We have an old version of numpy going here, so we can't use
            # fullsum = np.sum(output, axis=(1, 2))
            fullsum = np.sum(np.sum(output, axis=2), axis=1)

            # Scale and translate to fit within center and amplitude
            fullsum = fullsum * self.config["amplitude"] / self.config["num_terms"]
            fullsum += self.config["center"]

            # Return the result
            return MathArray(fullsum) if output_dim > 1 else fullsum[0]
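A shape-bookkeeping sketch of the computation above (the dimensions are hypothetical; A, B and C are assumed to share the shape (output_dim, num_terms, input_dim)):

import numpy as np

output_dim, num_terms, input_dim = 2, 3, 4
A = B = C = np.ones((output_dim, num_terms, input_dim))

xvec = np.arange(input_dim)
xarray = np.tile(xvec, (output_dim, num_terms, 1))  # replicate input to match A, B, C
assert xarray.shape == (output_dim, num_terms, input_dim)

output = A * np.sin(B * xarray + C)
fullsum = np.sum(np.sum(output, axis=2), axis=1)  # sum over the j and k terms
assert fullsum.shape == (output_dim,)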
Example #11
    def check_response(self, answer, student_input, **kwargs):
        """Check student_input against a given answer list"""
        # Parse the student response
        student_input = student_input.strip()
        if len(student_input) < 5:
            raise InvalidInput('Unable to read interval from input: "{}"'.format(student_input))
        s_opening = student_input[0]
        s_closing = student_input[-1]
        s_middle = student_input[1:-1]

        # Ensure that the opening and closing brackets are valid
        if s_opening not in self.config['opening_brackets']:
            raise InvalidInput("Invalid opening bracket: '{}'. Valid options are: '".format(s_opening)
                              + "', '".join(char for char in self.config['opening_brackets']) + "'.")
        if s_closing not in self.config['closing_brackets']:
            raise InvalidInput("Invalid closing bracket: '{}'. Valid options are: '".format(s_closing)
                              + "', '".join(char for char in self.config['closing_brackets']) + "'.")

        # Let SingleListGrader do the grading of the middle bit
        middle_answer = {
            'expect': answer['expect'][1:3],
            'ok': answer['ok'],
            'msg': answer['msg'],
            'grade_decimal': answer['grade_decimal']
        }
        result = super(IntervalGrader, self).check_response(middle_answer, s_middle, **kwargs)
        grade_list = result['individual']

        # Grade the opening bracket
        self.grade_bracket(answer['expect'][0], s_opening, grade_list[0])
        # Grade the closing bracket
        self.grade_bracket(answer['expect'][3], s_closing, grade_list[1])

        # Convert the grade list to a single return result
        return self.process_grade_list(grade_list, 2, answer['msg'], answer['grade_decimal'])
Example #12
    def create_grouping_map(grouping):
        """Creates an array mapping groups to input index

        Usage
        =====
        >>> grouping = [3, 1, 1, 2, 2, 1, 2]
        >>> expect = [
        ...     [1, 2, 5],
        ...     [3, 4, 6],
        ...     [0]
        ... ]
        >>> expect == ListGrader.create_grouping_map(grouping)
        True
        """
        # Validate the list of groups
        group_nums = set(grouping)
        if group_nums != set(range(1, max(group_nums) + 1)):
            msg = "Grouping should be a list of contiguous positive integers starting at 1."
            raise ConfigError(msg)

        # Create the grouping map
        group_map = [[] for group in group_nums]
        for index, group_num in enumerate(grouping):
            group_map[group_num - 1].append(index)

        return group_map
Example #13
    def schema_answers(self, answer_tuple):
        """
        Defines the schema to validate an answer tuple against.

        This will transform the input to a tuple as necessary, and then attempt to
        validate the answer_tuple using the defined subgraders.

        Two forms for the answer tuple are acceptable:

        1. A list of answers
        2. A tuple of lists of answers
        """
        # Rename from the ItemGrader argument
        answers_tuple = answer_tuple

        # Turn answers_tuple into a tuple if it isn't already
        if isinstance(answers_tuple, list):
            if not answers_tuple:  # empty list
                # Nothing further to check here. This must be a nested grader, which will
                # be called upon to check answers again a bit later.
                return tuple()
            answers_tuple = (answers_tuple,)
        elif not isinstance(answers_tuple, tuple):  # pragma: no cover
            # Should not get here; voluptuous should catch this beforehand
            raise ConfigError("Answer list must be a list or a tuple of lists")

        # Check that all lists in the tuple have the same length
        for answer_list in answers_tuple:
            if len(answer_list) != len(answers_tuple[0]):
                raise ConfigError("All possible list answers must have the same length")

        # If subgrader is a SingleListGrader, check that it uses a different delimiter
        # TODO This does not check past the first level of nesting.
        if isinstance(self.config['subgrader'], SingleListGrader):
            subgrader = self.config['subgrader']
            if self.config['delimiter'] == subgrader.config['delimiter']:
                raise ConfigError("Nested SingleListGraders must use different delimiters.")

        # Validate answer_list using the subgrader
        for answer_list in answers_tuple:
            for index, answer in enumerate(answer_list):
                answer_list[index] = self.config['subgrader'].schema_answers(answer)
            if not answer_list:
                raise ConfigError("Cannot have an empty list of answers")

        return answers_tuple
Example #14
    def validate_input_positions(input_positions):
        used_positions_list = [
            input_positions[key] for key in input_positions
            if input_positions[key] is not None
        ]
        used_positions_set = set(used_positions_list)
        if len(used_positions_list) > len(used_positions_set):
            raise ConfigError("Key input_positions has repeated indices.")
        if used_positions_set != set(range(1, len(used_positions_set) + 1)):
            msg = "Key input_positions values must be consecutive positive integers starting at 1"
            raise ConfigError(msg)

        # Convert from 1-based to 0-based indexing
        return {
            key: input_positions[key] - 1 if input_positions[key] is not None else None
            for key in input_positions
        }
Example #15
    def apply_attempt_based_credit(self, result, attempt_number):
        """
        Apply attempt-based credit maximums to grading.
        Mutates result directly.
        """
        if attempt_number is None:
            msg = (
                "Attempt number not passed to grader as keyword argument 'attempt'. "
                'The attribute <code>cfn_extra_args="attempt"</code> may need to be '
                "set in the <code>customresponse</code> tag.")
            raise ConfigError(msg)

        if attempt_number < 1:  # Just in case edX has issues
            attempt_number = 1
        self.log("Attempt number {}".format(attempt_number))

        # Compute the maximum credit
        credit = self.config['attempt_based_credit'](attempt_number)
        credit = float(credit)  # In case graders return integers 0 or 1
        credit = round(credit, 4)
        if credit == 1:
            # Don't do any modifications
            return
        self.log("Maximum credit is {}".format(credit))

        # Multiply all grades by credit, updating from 'ok'=True to 'partial' as needed
        changed_result = False
        if "input_list" in result:
            for results_dict in result['input_list']:
                if results_dict['grade_decimal'] > 0:
                    grade = results_dict['grade_decimal'] * credit
                    results_dict['grade_decimal'] = grade
                    results_dict['ok'] = self.grade_decimal_to_ok(grade)
                    changed_result = True
        else:
            if result['grade_decimal'] > 0:
                grade = result['grade_decimal'] * credit
                result['grade_decimal'] = grade
                result['ok'] = self.grade_decimal_to_ok(grade)
                changed_result = True

        # Append the message if credit was reduced
        if self.config['attempt_based_credit_msg'] and changed_result:
            credit_decimal = Decimal(credit * 100).quantize(Decimal('.1'))
            if credit_decimal == int(credit_decimal):
                # Used to get rid of .0 appearing in percentages
                credit_decimal = int(credit_decimal)
            msg = "Maximum credit for attempt #{} is {}%."
            if "input_list" in result:
                key = 'overall_message'
            else:
                key = 'msg'
            if result[key]:
                result[key] += '\n\n'
            result[key] += msg.format(attempt_number, credit_decimal)
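The 'attempt_based_credit' entry used above is a callable mapping the attempt number to a maximum credit. A hypothetical schedule for illustration (not one of the library's built-in options):

def credit_schedule(attempt_number):
    # Full credit on attempt 1, minus 20% per further attempt, floored at 40%
    return max(1.0 - 0.2 * (attempt_number - 1), 0.4)

[credit_schedule(n) for n in range(1, 6)]
# -> [1.0, 0.8, 0.6, 0.4, 0.4]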
Example #16
    def __init__(self, config=None, **kwargs):
        """Perform initialization"""
        super(DependentSampler, self).__init__(config, **kwargs)

        # Construct the 'depends' list (overwrites whatever was provided, as this does it better!)
        try:
            parsed = PARSER.parse(self.config['formula'])
            self.config['depends'] = list(parsed.variables_used)
        except CalcError:
            raise ConfigError("Formula error in dependent sampling formula: " +
                              self.config["formula"])
Example #17
    def compute_sample(self, sample_dict, functions, suffixes):
        """Compute the value of this sample"""
        try:
            result, _ = evaluator(formula=self.config['formula'],
                                  variables=sample_dict,
                                  functions=functions,
                                  suffixes=suffixes)
        except CalcError:
            raise ConfigError("Formula error in dependent sampling formula: " +
                              self.config["formula"])

        return result
Example #18
    def check(self, answers, student_input, **kwargs):
        """Checks student_input against answers, which may be provided"""
        # If no answers provided, use the internal configuration
        answers = self.config['answers'] if answers is None else answers

        # answers should now be a tuple of answers
        # Check that there is at least one answer to compare to
        if not isinstance(answers, tuple):  # pragma: no cover
            msg = "Expected answers to be a tuple of answers, instead received {}"
            raise ConfigError(msg.format(type(answers)))
        if not answers:
            raise ConfigError("Expected at least one answer in answers")

        # Pass our debuglog to the subgraders, so that any that have debug=True can use it
        if self.subgrader_list:
            for subgrader in self.config['subgraders']:
                subgrader.debuglog = self.debuglog
        else:
            self.config['subgraders'].debuglog = self.debuglog

        # Perform the check against each possible list of answers and select the best
        # result for the student
        results = [
            self.perform_check(answer_list, student_input)
            for answer_list in answers
        ]
        best_result = self.get_best_result(results)

        # If no partial credit is to be awarded, zero out all scores if not perfect
        if not self.config['partial_credit']:
            perfect = all(entry['ok'] is True
                          for entry in best_result['input_list'])
            if not perfect:
                for entry in best_result['input_list']:
                    entry['ok'] = False
                    entry['grade_decimal'] = 0

        return best_result
Example #19
def validate_no_collisions(config, keys):
    """
    Validates no collisions between iterable config fields specified by keys.

    Usage
    =====

    Duplicate entries raise a ConfigError:
    >>> keys = ['variables', 'user_constants', 'numbered_vars']
    >>> try:
    ...     validate_no_collisions({
    ...         'variables':['a', 'b', 'c', 'x', 'y'],
    ...         'user_constants':{'x': 5, 'y': 10},
    ...         'numbered_vars':['phi', 'psi']
    ...         }, keys)
    ... except ConfigError as error:
    ...     print(error)
    'user_constants' and 'variables' contain duplicate entries: ['x', 'y']

    >>> try:
    ...     validate_no_collisions({
    ...         'variables':['a', 'psi', 'phi', 'X', 'Y'],
    ...         'user_constants':{'x': 5, 'y': 10},
    ...         'numbered_vars':['phi', 'psi']
    ...         }, keys)
    ... except ConfigError as error:
    ...     print(error)
    'numbered_vars' and 'variables' contain duplicate entries: ['phi', 'psi']

    Without duplicates, returns True:
    >>> validate_no_collisions({
    ...     'variables':['a', 'b', 'c', 'F', 'G'],
    ...     'user_constants':{'x': 5, 'y': 10},
    ...     'numbered_vars':['phi', 'psi']
    ... }, keys)
    True
    """
    dict_of_sets = {k: set(config[k]) for k in keys}
    msg = "'{iter1}' and '{iter2}' contain duplicate entries: {duplicates}"

    for k1, k2 in itertools.combinations(dict_of_sets, r=2):
        k1, k2 = sorted([k1, k2])
        duplicates = dict_of_sets[k1].intersection(dict_of_sets[k2])
        if duplicates:
            sorted_dups = sorted(duplicates)
            raise ConfigError(
                msg.format(iter1=k1, iter2=k2, duplicates=sorted_dups))
    return True
Example #20
    def __init__(self, config=None, **kwargs):
        super(SpecifyDomain, self).__init__(config, **kwargs)

        shapes = self.config['input_shapes']

        # Check that min_length is compatible with the provided shapes
        if self.config['min_length'] is not None and len(shapes) != 1:
            raise ConfigError(
                "SpecifyDomain was called with a specified min_length, which "
                "requires input_shapes to specify only a single shape. "
                "However, {} shapes were provided.".format(len(shapes)))

        self.decorate = self.make_decorator(
            *shapes,
            display_name=self.config['display_name'],
            min_length=self.config['min_length'])
Example #21
    def __init__(self, config=None, **kwargs):
        """
        Validate the SingleListGrader's configuration.
        """
        # Step 1: Validate the configuration of this list using the usual routines
        super(SingleListGrader, self).__init__(config, **kwargs)

        # Step 2: Ensure that nested SingleListGraders all use different delimiters
        if isinstance(self.config['subgrader'], SingleListGrader):
            delimiters = [self.config['delimiter']]
            subgrader = self.config['subgrader']
            while isinstance(subgrader, SingleListGrader):
                if subgrader.config['delimiter'] in delimiters:
                    raise ConfigError(
                        "Nested SingleListGraders must use different delimiters."
                    )
                delimiters.append(subgrader.config['delimiter'])
                subgrader = subgrader.config['subgrader']
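A hedged configuration sketch of the rule this enforces (assuming the usual SingleListGrader and NumericalGrader constructors): nested list graders must split on different delimiters so that an input such as "1,2;3,4" parses unambiguously.

grader = SingleListGrader(
    answers=[['1', '2'], ['3', '4']],
    subgrader=SingleListGrader(subgrader=NumericalGrader(), delimiter=','),
    delimiter=';',
)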
Example #22
def demand_no_empty(obj):
    """
    Recursively search through all tuples, lists and dictionaries in obj,
    ensuring that all expect strings are non-empty.
    """
    if isinstance(obj, (list, tuple)):
        for item in obj:
            demand_no_empty(item)
    elif isinstance(obj, dict) and 'expect' in obj:
        demand_no_empty(obj['expect'])
    elif isinstance(obj, six.string_types):
        if obj.strip() == '':
            msg = (
                "There is a problem with the author's problem configuration: "
                "Empty entry detected in answer list. Students receive an error "
                "when supplying an empty entry. Set 'missing_error' to False in "
                "order to allow such entries.")
            raise ConfigError(msg)
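A small sketch of the recursion above: an empty string nested anywhere inside an 'expect' entry triggers the error.

answers = ({'expect': ['1', ['2', '']], 'msg': ''},)
try:
    demand_no_empty(answers)
except ConfigError as error:
    print(error)  # ... Empty entry detected in answer list ...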
Example #23
    def __call__(self, comparer_params_evals, student_evals, utils):
        student_evals_norm = np.linalg.norm(student_evals)

        # Validate student input shape...only needed for MatrixGrader
        if hasattr(utils, 'validate_shape'):
            # in numpy, scalars have empty tuples as their shapes
            expected_0 = comparer_params_evals[0][0]
            scalar_expected = isinstance(expected_0, Number)
            shape = tuple() if scalar_expected else expected_0.shape
            utils.validate_shape(student_evals[0], shape)

        # Raise an error if there are fewer than 3 samples
        if len(student_evals) < 3:
            msg = 'Cannot perform linear comparison with fewer than 3 samples'
            raise ConfigError(msg)

        is_comparing_zero = self.check_comparing_zero(comparer_params_evals,
                                                      student_evals,
                                                      utils.tolerance)
        filtered_modes = self.get_valid_modes(is_comparing_zero)

        # Get the result for each mode
        # flatten in case individual evals are arrays (as in MatrixGrader)
        student = np.array(student_evals).flatten()
        expected = np.array(comparer_params_evals).flatten()
        errors = [
            self.error_calculators[mode](student, expected)
            for mode in filtered_modes
        ]

        results = [
            {'grade_decimal': self.config[mode], 'msg': self.config[mode + '_msg']}
            if is_nearly_zero(error, utils.tolerance, reference=student_evals_norm)
            else {'grade_decimal': 0, 'msg': ''}
            for mode, error in zip(filtered_modes, errors)
        ]

        # Get the best result using max.
        # For a list of pairs, max compares by 1st index and uses 2nd to break ties
        key = lambda result: (result['grade_decimal'], result['msg'])
        return max(results, key=key)
Example #24
    def infer_from_expect(self, expect):
        """
        Infer answers from the expect parameter. Returns the resulting answers key.

        Overrides the SingleListGrader infer_from_expect function.

        For example, we want to turn '[a, b)' -> ['[', 'a', 'b', ')'].
        """
        expect = expect.strip()
        # Check that the answer has at least 5 characters (two brackets, two bounds, a delimiter)
        if len(expect) < 5:
            raise ConfigError('Unable to read interval from answer: "{}"'.format(expect))

        # Parse the middle bit using SingleListGrader
        middle = super(IntervalGrader, self).infer_from_expect(expect[1:-1])

        # Make a list: open_bracket, lower, upper, close_bracket
        answers = [expect[0]] + middle + [expect[-1]]
        return answers
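A hedged usage sketch (assuming a default-configured IntervalGrader, whose delimiter is a comma):

IntervalGrader().infer_from_expect('[a, b)')
# -> ['[', 'a', 'b', ')']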
Example #25
    def ensure_text_inputs(student_input, allow_lists=True, allow_single=True):
        """
        Ensures that student_input is a text string or a list of text strings,
        depending on arguments. Called by ItemGrader and ListGrader with
        appropriate arguments. Defaults are set to be friendly to user-defined
        grading classes.
        """
        # Try to perform validation
        try:
            if allow_lists and isinstance(student_input, list):
                return Schema([text_string])(student_input)
            elif allow_single and not isinstance(student_input, list):
                return Schema(text_string)(student_input)
        except MultipleInvalid as error:
            if allow_lists:
                pos = error.path[0] if error.path else None

        # The given student_input is invalid, so raise the appropriate error message
        if allow_lists and allow_single:
            msg = (
                "The student_input passed to a grader should be:\n"
                " - a text string for problems with a single input box\n"
                " - a list of text strings for problems with multiple input boxes\n"
                "Received student_input of {}").format(type(student_input))
        elif allow_lists and not isinstance(student_input, list):
            msg = ("Expected student_input to be a list of text strings, but "
                   "received {}").format(type(student_input))
        elif allow_lists:
            msg = ("Expected a list of text strings for student_input, but "
                   "item at position {pos} has {thetype}").format(
                       pos=pos, thetype=type(student_input[pos]))
        elif allow_single:
            msg = ("Expected string for student_input, received {}").format(
                type(student_input))
        else:
            raise ValueError(
                'At least one of (allow_lists, allow_single) must be True.')

        raise ConfigError(msg)
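A usage sketch under the default arguments (anything that is neither a text string nor a list of text strings raises ConfigError):

ensure_text_inputs('cat')            # -> 'cat'
ensure_text_inputs(['cat', 'dog'])   # -> ['cat', 'dog']
ensure_text_inputs(5)                # raises ConfigError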
Example #26
    def structure_and_validate_input(self, student_input):
        used_inputs = [
            key for key in self.true_input_positions
            if self.true_input_positions[key] is not None
        ]
        if len(used_inputs) != len(student_input):
            # This is a ConfigError because it should only be triggered if the
            # author included the wrong number of inputs in the <customresponse> problem.
            sorted_inputs = sorted(used_inputs,
                                   key=lambda x: self.true_input_positions[x])
            msg = ("Expected {expected} student inputs but found {found}. "
                   "Inputs should appear in order {order}.")
            raise ConfigError(
                msg.format(expected=len(used_inputs),
                           found=len(student_input),
                           order=sorted_inputs))

        structured_input = transform_list_to_dict(student_input,
                                                  self.config['answers'],
                                                  self.true_input_positions)

        return structured_input
Example #27
def warn_if_override(config, key, defaults):
    """
    Raise an error if config[key] overlaps with defaults unless config['suppress_warnings'] is True.

    Notes:
        - config[key] and defaults must both be iterable.

    Usage
    =====

    >>> config = {'vars': ['a', 'b', 'cat', 'psi', 'pi']}
    >>> defaults = {'cat': 1, 'pi': 2}
    >>> try:
    ...     warn_if_override(config, 'vars', defaults) # doctest: +ELLIPSIS
    ... except ConfigError as error:
    ...     print(error)
    Warning: 'vars' contains entries 'cat', 'pi' ...

    >>> config = {'vars': ['a', 'b', 'cat', 'psi', 'pi'], 'suppress_warnings': True}
    >>> warn_if_override(
    ... config,
    ... 'vars',
    ... {'cat': 1, 'pi': 2}
    ... ) == config
    True

    """
    duplicates = set(defaults).intersection(set(config[key]))
    if duplicates and not config.get('suppress_warnings', False):
        text_dups = ', '.join(
            sorted(("'{}'".format(dup) for dup in duplicates)))
        msg = (
            "Warning: '{key}' contains entries {duplicates} which will override default "
            "values. If you intend to override defaults, you may suppress "
            "this warning by adding 'suppress_warnings=True' to the grader configuration."
        )
        raise ConfigError(msg.format(key=key, duplicates=text_dups))
    return config
Example #28
def warn_if_override(config, key, defaults):
    """
    Raise an error if config[key] overlaps with defaults unless config['suppress_warnings'] is True.

    Notes:
        - config[key] and defaults must both be iterable.

    Usage
    =====

    >>> config = {'vars': ['a', 'b', 'cat', 'psi', 'pi']}
    >>> warn_if_override(
    ... config,
    ... 'vars',
    ... {'cat': 1, 'pi': 2}
    ... ) # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ConfigError: Warning: 'vars' contains entries '['cat', 'pi']' ...

    >>> config = {'vars': ['a', 'b', 'cat', 'psi', 'pi'], 'suppress_warnings': True}
    >>> warn_if_override(
    ... config,
    ... 'vars',
    ... {'cat': 1, 'pi': 2}
    ... ) == config
    True

    """
    duplicates = set(defaults).intersection(set(config[key]))
    if duplicates and not config.get('suppress_warnings', False):
        sorted_dups = sorted(duplicates)
        msg = (
            "Warning: '{key}' contains entries '{duplicates}' which will override default "
            "values. If you intend to override defaults, you may suppress "
            "this warning by adding 'suppress_warnings=True' to the grader configuration."
        )
        raise ConfigError(msg.format(key=key, duplicates=sorted_dups))
    return config
Example #29
    def post_schema_ans_val(self, answer_tuple):
        """
        Used to validate the individual 'expect' lists in the 'answers' key.
        This must be done after the schema has finished validation, as we need access
        to the 'subgrader' configuration key to perform this validation.
        """
        # The structure of answer_tuple at this stage is:
        # tuple(dict('expect', 'grade_decimal', 'ok', 'msg'))
        # where 'expect' is a list that needs validation.

        # If 'expect' is a string, use infer_from_expect to convert it to a list.
        for entry in answer_tuple:
            if isinstance(entry['expect'], six.string_types):
                entry['expect'] = self.infer_from_expect(entry['expect'])

        # Assert that all answers have length 4
        for answer_list in answer_tuple:
            if len(answer_list['expect']) != 4:
                raise ConfigError("Answer list must have 4 entries: opening bracket, lower bound, "
                                  "upper bound, closing bracket.")

        # Make sure that no entries are empty
        demand_no_empty(answer_tuple)

        # Validate the first and last entries (the brackets)
        # We use a StringGrader to run appropriate schema coercion
        grader = StringGrader()
        for answer_list in answer_tuple:
            expect = answer_list['expect']
            for index, answer in zip((0, 3), (expect[0], expect[3])):
                # Run the answers through the generic schema and post-schema validation
                expect[index] = grader.schema_answers(answer)
                expect[index] = grader.post_schema_ans_val(expect[index])

        # Validate the second and third entries (lower and upper limits)
        grader = self.config['subgrader']
        for answer_list in answer_tuple:
            expect = answer_list['expect']
            for index, answer in zip((1, 2), expect[1:3]):
                # Run the answers through the subgrader schema and the post-schema validation
                expect[index] = grader.schema_answers(answer)
                expect[index] = grader.post_schema_ans_val(expect[index])

        # Assert that the first and last entries are single characters that
        # exist in the opening_brackets and closing_brackets configuration options
        for answer_list in answer_tuple:
            # Opening brackets
            for entry in answer_list['expect'][0]:
                if len(entry['expect']) != 1:
                    raise ConfigError("Opening bracket must be a single character.")
                if entry['expect'] not in self.config['opening_brackets']:
                    raise ConfigError("Invalid opening bracket. The opening_brackets configuration allows for '"
                                      + "', '".join(char for char in self.config['opening_brackets'])
                                      + "' as opening brackets.")

            # Closing brackets
            for entry in answer_list['expect'][3]:
                if len(entry['expect']) != 1:
                    raise ConfigError("Closing bracket must be a single character.")
                if entry['expect'] not in self.config['closing_brackets']:
                    raise ConfigError("Invalid closing bracket. The closing_brackets configuration allows for '"
                                      + "', '".join(char for char in self.config['closing_brackets'])
                                      + "' as closing brackets.")

        return answer_tuple
Example #30
    def check_response(self, answer, student_input, **kwargs):
        """
        Grades a student response against a given answer

        Arguments:
            answer (dict): Dictionary describing the expected answer,
                           its point value, and any associated message
            student_input (str): The student's input passed by edX
        """
        expect = self.clean_input(answer['expect'])
        student = self.clean_input(student_input)

        # Figure out if we are accepting any input
        accept_any = self.config['accept_any'] or self.config['accept_nonempty']
        min_length = self.config['min_length']
        if self.config['accept_nonempty'] and min_length == 0:
            min_length = 1

        # Apply the validation pattern
        pattern = self.config['validation_pattern']
        if pattern is not None:
            # Make sure that the pattern matches the entire input
            testpattern = pattern
            if not pattern.endswith("$"):
                testpattern += "$"

            if not accept_any:
                # Make sure that expect matches the pattern
                # If it doesn't, a student can never get this right
                if re.match(testpattern, expect) is None:
                    msg = "The provided answer '{}' does not match the validation pattern '{}'"
                    raise ConfigError(msg.format(answer['expect'], pattern))

            # Check to see if the student input matches the validation pattern
            if re.match(testpattern, student) is None:
                return self.construct_message(
                    self.config['invalid_msg'],
                    self.config['explain_validation'])

        # Perform the comparison
        if not accept_any:
            # Check for a match to expect
            if student != expect:
                return {'ok': False, 'grade_decimal': 0, 'msg': ''}
        else:
            # Check for the minimum length
            msg = None
            chars = len(student)
            if chars < min_length:
                msg = ('Your response is too short ({chars}/{min} characters)'
                       ).format(chars=chars, min=min_length)

            # Check for minimum word count (more important than character count)
            words = len(student.split())
            if words < self.config['min_words']:
                msg = (
                    'Your response is too short ({words}/{min} words)').format(
                        words=words, min=self.config['min_words'])

            # Give student feedback
            if msg:
                return self.construct_message(msg,
                                              self.config['explain_minimums'])

        # If we got here, everything is correct
        return {
            'ok': answer['ok'],
            'grade_decimal': answer['grade_decimal'],
            'msg': answer['msg']
        }
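The '$' appended to testpattern above matters because re.match only anchors a pattern at the start of the string; a standalone sketch:

import re

pattern = r'[a-z]+'
assert re.match(pattern, 'abc123')                # matches the 'abc' prefix
assert re.match(pattern + '$', 'abc123') is None  # full match now required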