Code Example #1
File: formulagrader.py    Project: bbadzioch/309_edX
 def schema_config(self):
     """Define the configuration options for FormulaGrader"""
     # Construct the default ItemGrader schema
     schema = super(FormulaGrader, self).schema_config
     # Append options
     forbidden_default = "Invalid Input: This particular answer is forbidden"
     return schema.extend({
         Required('user_functions', default={}): schema_user_functions,
         Required('user_constants', default={}): validate_user_constants(
             Number, MathArray),
         # Blacklist/Whitelist have additional validation that can't happen here, because
         # their validation is correlated with each other
         Required('blacklist', default=[]): [str],
         Required('whitelist', default=[]): Any(
             All([None], Length(min=1, max=1)),
             [str]
         ),
         Required('forbidden_strings', default=[]): [str],
         Required('forbidden_message', default=forbidden_default): str,
         Required('required_functions', default=[]): [str],
         Required('tolerance', default='0.01%'): Any(PercentageString, NonNegative(Number)),
         Required('metric_suffixes', default=False): bool,
         Required('samples', default=5): Positive(int),
         Required('variables', default=[]): All([str], all_unique),
         Required('numbered_vars', default=[]): All([str], all_unique),
         Required('sample_from', default={}): dict,
         Required('failable_evals', default=0): NonNegative(int),
         Required('max_array_dim', default=0): NonNegative(int)
     })
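The pattern above is plain voluptuous: `schema_config` returns a `Schema`, `Schema.extend` merges new keys into the parent grader's schema, and `Required(..., default=...)` fills in any option the caller omits. A minimal, self-contained sketch of that behavior (standalone voluptuous with hypothetical keys, not the project's actual classes):

 from voluptuous import Schema, Required, Any

 # Stand-in for the base ItemGrader schema (hypothetical key)
 base = Schema({Required('debug', default=False): bool})

 # Extend it the way FormulaGrader.schema_config does
 extended = base.extend({
     Required('samples', default=5): int,
     Required('tolerance', default='0.01%'): Any(str, float),
 })

 config = extended({'samples': 10})
 print(config)  # {'debug': False, 'samples': 10, 'tolerance': '0.01%'} (key order may vary)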
Code Example #2
 def schema_config(self):
     """Define the configuration options for StringGrader"""
     # Construct the default ItemGrader schema
     schema = super(StringGrader, self).schema_config
     # Append options
     return schema.extend({
         Required('case_sensitive', default=True): bool,
         Required('strip', default=True): bool,
         Required('strip_all', default=False): bool,
         Required('clean_spaces', default=True): bool,
         Required('accept_any', default=False): bool,
         Required('accept_nonempty', default=False): bool,
         Required('min_length', default=0): NonNegative(int),
         Required('min_words', default=0): NonNegative(int),
         Required('explain_minimums', default='err'): Any('err', 'msg', None),
         Required('validation_pattern', default=None): Any(text_string, None),
         Required('explain_validation', default='err'): Any('err', 'msg', None),
         Required('invalid_msg',
                  default='Your input is not in the expected format'): text_string
     })
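Note how `explain_minimums` and `explain_validation` use `Any('err', 'msg', None)` as an enumerated choice: voluptuous matches scalar schemas by equality, so any value outside those three literals is rejected. A standalone illustration of that behavior:

 from voluptuous import Schema, Required, Any, MultipleInvalid

 schema = Schema({Required('explain_minimums', default='err'): Any('err', 'msg', None)})

 print(schema({}))                          # {'explain_minimums': 'err'}
 print(schema({'explain_minimums': None}))  # {'explain_minimums': None}
 try:
     schema({'explain_minimums': 'warn'})   # not one of the allowed literals
 except MultipleInvalid as exc:
     print('rejected:', exc)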
Code Example #3
 def schema_config(self):
     schema = super(MatrixGrader, self).schema_config
     return schema.extend({
         Required('identity_dim', default=None): Nullable(NonNegative(int)),
         Required('max_array_dim', default=1): Nullable(NonNegative(int)),
         Required('negative_powers', default=True): bool,
         Required('shape_errors', default=True): bool,
         Required('suppress_matrix_messages', default=False): bool,
         Required('answer_shape_mismatch',
                  default={
                      'is_raised': True,
                      'msg_detail': 'type'
                  }): {
             Required('is_raised', default=True): bool,
             Required('msg_detail', default='type'): Any(None, 'type', 'shape')
         },
         Optional('entry_partial_credit'): Any(All(Number, Range(0, 1)), 'proportional'),
         Optional('entry_partial_msg'): text_string,
         Required('allow_inf', default=False): False,  # Ensure that this is turned off
     })
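`Nullable` is a helper from the project's validator module rather than a voluptuous built-in. Judging from how it is used here, it simply permits `None` alongside the wrapped validator; a plausible reimplementation (an assumption, not the project's actual source) would be:

 from voluptuous import Any

 def Nullable(validator):
     # Accept None or anything the wrapped validator accepts (assumed behavior)
     return Any(None, validator)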
Code Example #4
 def schema_config(self):
     """Define the configuration options for IntegralGrader"""
     # Construct the default AbstractGrader schema
     schema = super(IntegralGrader, self).schema_config
     default_input_positions = {
         'lower': 1,
         'upper': 2,
         'integrand': 3,
         'integration_variable': 4
     }
     # Append options
     return schema.extend({
         Required('answers'): {
             Required('lower'): str,
             Required('upper'): str,
             Required('integrand'): str,
             Required('integration_variable'): str
         },
         Required('input_positions', default=default_input_positions): {
             Required('lower', default=None): Any(None, Positive(int)),
             Required('upper', default=None): Any(None, Positive(int)),
             Required('integrand', default=None): Any(None, Positive(int)),
             Required('integration_variable', default=None): Any(None, Positive(int)),
         },
         Required('integrator_options', default={'full_output': 1}): {
             Required('full_output', default=1): 1,
             Extra: object
         },
         Required('complex_integrand', default=False): bool,
         # Most of the below are copied from FormulaGrader
         Required('user_functions', default={}): {
             Extra: Any(is_callable, [is_callable], FunctionSamplingSet)
         },
         Required('user_constants', default={}): {Extra: Number},
         Required('blacklist', default=[]): [str],
         Required('whitelist', default=[]): [Any(str, None)],
         Required('tolerance', default='0.01%'): Any(PercentageString, NonNegative(Number)),
         Required('case_sensitive', default=True): bool,
         Required('samples', default=1): Positive(int),  # default changed to 1
         Required('variables', default=[]): [str],
         Required('sample_from', default={}): dict,
         Required('failable_evals', default=0): NonNegative(int)
     })
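The `input_positions` entry shows how defaults interact with nested dict schemas: if the key is omitted entirely, `default_input_positions` is inserted wholesale; if a partial dict is supplied, each missing inner field falls back to its own default of `None`. A standalone sketch with simplified validators:

 from voluptuous import Schema, Required, Any

 defaults = {'lower': 1, 'upper': 2}
 schema = Schema({
     Required('input_positions', default=defaults): {
         Required('lower', default=None): Any(None, int),
         Required('upper', default=None): Any(None, int),
     }
 })

 print(schema({}))
 # {'input_positions': {'lower': 1, 'upper': 2}}
 print(schema({'input_positions': {'lower': 1}}))
 # {'input_positions': {'lower': 1, 'upper': None}}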
Code Example #5
 def schema_config(self):
     """Define the configuration options for IntegralGrader"""
     # Construct the default AbstractGrader schema
     schema = super(IntegralGrader, self).schema_config
     default_input_positions = {
         'lower': 1,
         'upper': 2,
         'integrand': 3,
         'integration_variable': 4
     }
     # Append options
     return schema.extend({
         Required('answers'): {
             Required('lower'): str,
             Required('upper'): str,
             Required('integrand'): str,
             Required('integration_variable'): str
         },
         Required('input_positions', default=default_input_positions): {
             Required('lower', default=None): Any(None, Positive(int)),
             Required('upper', default=None): Any(None, Positive(int)),
             Required('integrand', default=None): Any(None, Positive(int)),
             Required('integration_variable', default=None): Any(None, Positive(int)),
         },
         Required('integrator_options', default={'full_output': 1}): {
             Required('full_output', default=1): 1,
             Extra: object
         },
         Required('complex_integrand', default=False): bool,
         # Most of the below are copied from FormulaGrader
         Required('user_functions', default={}): schema_user_functions,
         Required('user_constants', default={}): validate_user_constants(Number),
         # Blacklist/Whitelist have additional validation that can't happen here, because
         # their validation is correlated with each other
         Required('blacklist', default=[]): [str],
         Required('whitelist', default=[]): Any(All([None], Length(min=1, max=1)), [str]),
         Required('tolerance', default='0.01%'): Any(PercentageString, NonNegative(Number)),
         Required('samples', default=1): Positive(int),  # default changed to 1
         Required('variables', default=[]): All([str], all_unique),
         Required('sample_from', default={}): dict,
         Required('failable_evals', default=0): NonNegative(int)
     })
Code Example #6
File: matrixgrader.py    Project: bbadzioch/309_edX
 def schema_config(self):
     schema = super(MatrixGrader, self).schema_config
     return schema.extend({
         Required('identity_dim', default=None): NonNegative(int),
         Required('max_array_dim', default=1): NonNegative(int),
         Required('negative_powers', default=True): bool,
         Required('shape_errors', default=True): bool,
         Required('suppress_matrix_messages', default=False): bool,
         Required('answer_shape_mismatch', default={
             'is_raised': True,
             'msg_detail': 'type'
         }): {
             Required('is_raised', default=True): bool,
             Required('msg_detail', default='type'): Any(None, 'type', 'shape')
         }
     })
Code Example #7
 def schema_config(self):
     """Define the configuration options for FormulaGrader"""
     # Construct the default ItemGrader schema
     schema = super(FormulaGrader, self).schema_config
     # Apply the default math schema
     schema = schema.extend(self.math_config_options)
     # Append FormulaGrader-specific options
     return schema.extend({
         Required('allow_inf', default=False): bool,
         Required('max_array_dim', default=0): NonNegative(int)  # Do not use this; use MatrixGrader instead
     })
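This later revision replaces the long per-grader option list with a shared `math_config_options` dict (defined on `MathMixin`; see code example #11 below), so each grader only declares what differs from the common math options. The layering itself is ordinary voluptuous; a sketch of the idea with stand-in keys:

 from voluptuous import Schema, Required

 shared_options = {Required('samples', default=5): int}   # stand-in for math_config_options

 base = Schema({Required('debug', default=False): bool})  # stand-in for ItemGrader's schema
 schema = base.extend(shared_options)                     # apply the shared math options
 schema = schema.extend({Required('allow_inf', default=False): bool})  # grader-specific

 print(schema({}))  # {'debug': False, 'samples': 5, 'allow_inf': False} (key order may vary)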
Code Example #8
 def schema_config(self):
     """Define the configuration options for NumericalGrader"""
     # Construct the default FormulaGrader schema
     schema = super(NumericalGrader, self).schema_config
     # Modify the default FormulaGrader options
     return schema.extend({
         Required('user_functions', default={}): {Extra: is_callable},
         Required('tolerance', default='5%'): Any(PercentageString, NonNegative(Number)),
         Required('samples', default=1): 1,
         Required('variables', default=[]): [],
         Required('sample_from', default={}): {},
         Required('failable_evals', default=0): 0
     })
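NumericalGrader pins several options to literal values rather than types: `Required('samples', default=1): 1` accepts only the value `1`, and the `[]`, `{}`, and `0` schemas likewise pin `variables`, `sample_from`, and `failable_evals` to their empty or zero values, effectively disabling those FormulaGrader features. A standalone check of the literal-pinning behavior:

 from voluptuous import Schema, Required, MultipleInvalid

 # 'samples' is pinned to the literal value 1; nothing else validates
 schema = Schema({Required('samples', default=1): 1})

 print(schema({}))              # {'samples': 1}
 print(schema({'samples': 1}))  # {'samples': 1}
 try:
     schema({'samples': 3})     # literal mismatch is rejected
 except MultipleInvalid as exc:
     print('rejected:', exc)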
Code Example #9
 def schema_config(self):
     """Define the configuration options for FormulaGrader"""
     # Construct the default ItemGrader schema
     schema = super(FormulaGrader, self).schema_config
     # Append options
     forbidden_default = "Invalid Input: This particular answer is forbidden"
     return schema.extend({
         Required('user_functions', default={}):
             {Extra: Any(is_callable, [is_callable], FunctionSamplingSet)},
         Required('user_constants', default={}): {Extra: Number},
         Required('blacklist', default=[]): [str],
         Required('whitelist', default=[]): [Any(str, None)],
         Required('forbidden_strings', default=[]): [str],
         Required('forbidden_message', default=forbidden_default): str,
         Required('required_functions', default=[]): [str],
         Required('tolerance', default='0.01%'): Any(PercentageString, NonNegative(Number)),
         Required('case_sensitive', default=True): bool,
         Required('metric_suffixes', default=False): bool,
         Required('samples', default=5): Positive(int),
         Required('variables', default=[]): [str],
         Required('sample_from', default={}): dict,
         Required('failable_evals', default=0): NonNegative(int)
     })
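Here `user_functions` and `user_constants` accept arbitrary keys by using voluptuous's `Extra` marker as the dict key: any key name is allowed, but each value must satisfy the given validator. A standalone sketch, including a guess at what the project's `is_callable` validator looks like (assumed, not its actual source):

 from voluptuous import Schema, Required, Extra, Invalid, MultipleInvalid

 def is_callable(value):
     # Pass callables through, reject everything else (assumed behavior)
     if not callable(value):
         raise Invalid('expected a callable')
     return value

 schema = Schema({Required('user_functions', default={}): {Extra: is_callable}})

 print(schema({'user_functions': {'f': abs}}))  # any key name, callable value: accepted
 try:
     schema({'user_functions': {'f': 42}})      # 42 is not callable
 except MultipleInvalid as exc:
     print('rejected:', exc)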
Code Example #10
 def schema_config(self):
     """Define the configuration options for SumGrader"""
     # Construct the default AbstractGrader schema
     schema = super(SumGrader, self).schema_config
     # Apply the default math schema
     schema = schema.extend(self.math_config_options)
     # Append SumGrader-specific options
     default_input_positions = {
         'lower': 1,
         'upper': 2,
         'summand': 3,
         'summation_variable': 4
     }
     return schema.extend({
         Required('answers'): {
             Required('lower'): text_string,
             Required('upper'): text_string,
             Required('summand'): text_string,
             Required('summation_variable'): text_string
         },
         Required('input_positions', default=default_input_positions): {
             Required('lower', default=None): Any(None, Positive(int)),
             Required('upper', default=None): Any(None, Positive(int)),
             Required('summand', default=None): Any(None, Positive(int)),
             Required('summation_variable', default=None): Any(None, Positive(int)),
         },
         Required('infty_val', default=1e3): Positive(Number),
         Required('infty_val_fact', default=80): Positive(Number),
         Required('even_odd', default=0): Any(0, 1, 2),
         Required('samples', default=2): Positive(int),  # default changed to 2
         Required('tolerance', default=1e-12): Any(PercentageString, NonNegative(Number)),  # default changed to 1e-12
     })
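`Positive`, `NonNegative`, and `PercentageString` are project helpers rather than voluptuous built-ins. Their use throughout these schemas suggests simple definitions for the numeric ones; a plausible sketch built from voluptuous primitives (an assumption, not the project's source):

 from voluptuous import All, Range

 def Positive(type_):
     # Value of the given type that is strictly greater than zero (assumed behavior)
     return All(type_, Range(min=0, min_included=False))

 def NonNegative(type_):
     # Value of the given type that is greater than or equal to zero (assumed behavior)
     return All(type_, Range(min=0))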
Code Example #11
class MathMixin(object):
    """This is a mixin class that provides generic math handling capabilities"""
    # Set up a bunch of defaults
    default_variables = DEFAULT_VARIABLES.copy()
    default_functions = DEFAULT_FUNCTIONS.copy()
    default_suffixes = DEFAULT_SUFFIXES.copy()

    # Set up some debugging templates
    debug_appendix_eval_header_template = (
        "\n"
        "==============================================================\n"
        "{grader} Debug Info\n"
        "==============================================================\n"
        "Functions available during evaluation and allowed in answer:\n"
        "{functions_allowed}\n"
        "Functions available during evaluation and disallowed in answer:\n"
        "{functions_disallowed}\n")
    debug_appendix_comparison_template = (
        "\n"
        "==========================================\n"
        "Comparison Data for All {samples_total} Samples\n"
        "==========================================\n"
        "Comparer Function: {comparer}\n"
        "Comparison Results:\n"
        "{comparer_results}\n"
        "")

    # Set up the comparison utilities
    Utils = namedtuple('Utils', ['tolerance', 'within_tolerance'])

    def get_comparer_utils(self):
        """Get the utils for comparer function."""
        def _within_tolerance(x, y):
            return within_tolerance(x, y, self.config['tolerance'])

        return self.Utils(tolerance=self.config['tolerance'],
                          within_tolerance=_within_tolerance)

    # Set up a bunch of configuration options
    math_config_options = {
        Required('user_functions', default={}): schema_user_functions,
        Required('user_constants', default={}): validate_user_constants(Number, MathArray),
        # Blacklist/Whitelist have additional validation that can't happen here, because
        # their validation is correlated with each other
        Required('blacklist', default=[]): [text_string],
        Required('whitelist', default=[]): Any(All([None], Length(min=1, max=1)), [text_string]),
        Required('tolerance', default='0.01%'): Any(PercentageString, NonNegative(Number)),
        Required('samples', default=5): Positive(int),
        Required('variables', default=[]): All([text_string], all_unique),
        Required('numbered_vars', default=[]): All([text_string], all_unique),
        Required('sample_from', default={}): dict,
        Required('failable_evals', default=0): NonNegative(int),
        Required('forbidden_strings', default=[]): [text_string],
        Required('forbidden_message',
                 default="Invalid Input: This particular answer is forbidden"): text_string,
        Required('metric_suffixes', default=False): bool,
        Required('required_functions', default=[]): [text_string],
        Required('instructor_vars', default=[]): [text_string],
    }

    def validate_math_config(self):
        """Performs generic math configuration validation"""
        validate_blacklist_whitelist_config(self.default_functions,
                                            self.config['blacklist'],
                                            self.config['whitelist'])

        warn_if_override(self.config, 'variables', self.default_variables)
        warn_if_override(self.config, 'numbered_vars', self.default_variables)
        warn_if_override(self.config, 'user_constants', self.default_variables)
        warn_if_override(self.config, 'user_functions', self.default_functions)

        validate_no_collisions(self.config,
                               keys=['variables', 'user_constants'])

        self.permitted_functions = get_permitted_functions(
            self.default_functions, self.config['whitelist'],
            self.config['blacklist'], self.config['user_functions'])

        # Set up the various lists we use
        self.functions, self.random_funcs = construct_functions(
            self.default_functions, self.config["user_functions"])
        self.constants = construct_constants(self.default_variables,
                                             self.config["user_constants"])
        self.suffixes = construct_suffixes(self.default_suffixes,
                                           self.config["metric_suffixes"])

        # Construct the schema for sample_from
        # First, accept all VariableSamplingSets
        # Then, accept any list that RealInterval can interpret
        # Finally, single numbers or tuples of numbers will be handled by DiscreteSet
        schema_sample_from = Schema({
            Required(varname, default=RealInterval()):
            Any(VariableSamplingSet, All(list, Coerce(RealInterval)),
                Coerce(DiscreteSet))
            for varname in (self.config['variables'] +
                            self.config['numbered_vars'])
        })
        self.config['sample_from'] = schema_sample_from(
            self.config['sample_from'])
        # Note that voluptuous ensures that there are no orphaned entries in sample_from

    def check_math_response(self, answer, student_input, **kwargs):
        """Check the student response against a given answer"""
        result, used_funcs = self.raw_check(answer, student_input, **kwargs)

        if result['ok'] is True or result['ok'] == 'partial':
            self.post_eval_validation(student_input, used_funcs)
        return result

    def post_eval_validation(self, expr, used_funcs):
        """Runs several post-evaluation validator functions"""
        validate_forbidden_strings_not_used(expr,
                                            self.config['forbidden_strings'],
                                            self.config['forbidden_message'])

        validate_required_functions_used(used_funcs,
                                         self.config['required_functions'])

        validate_only_permitted_functions_used(used_funcs,
                                               self.permitted_functions)

    @staticmethod
    def get_used_vars(expressions):
        """
        Get the variables used in expressions

        Arguments:
            expressions: an iterable collection of expressions

        Returns:
            vars_used ({str}): set of variables used
        """
        is_empty = lambda x: x is None or x.strip() == ''
        expressions = [expr for expr in expressions if not is_empty(expr)]
        # Pre-parse all expressions (these all get cached)
        parsed_expressions = [parse(expr) for expr in expressions]
        # Create a list of all variables used in the expressions
        vars_used = set().union(
            *[p.variables_used for p in parsed_expressions])
        return vars_used

    def gen_var_and_func_samples(self, *args):
        """
        Generate a list of variable/function sampling dictionaries from the supplied arguments.
        Arguments may be strings, lists of strings, or dictionaries with string values.
        Does not flag any bad variables.
        """
        # Make a list of all expressions to check for variables
        expressions = []
        for entry in args:
            if isinstance(entry, six.text_type):
                expressions.append(entry)
            elif isinstance(entry, list):
                expressions += entry
            elif isinstance(entry, dict):
                expressions += [v for k, v in entry.items()]

        # Generate the variable list
        variables, sample_from_dict = self.generate_variable_list(expressions)

        # Generate the samples
        var_samples = gen_symbols_samples(variables, self.config['samples'],
                                          sample_from_dict, self.functions,
                                          self.suffixes, self.constants)

        func_samples = gen_symbols_samples(list(self.random_funcs.keys()),
                                           self.config['samples'],
                                           self.random_funcs, self.functions,
                                           self.suffixes, {})

        return var_samples, func_samples

    def generate_variable_list(self, expressions):
        """
        Generates the list of variables required to perform a comparison and the
        corresponding sampling dictionary, taking into account any numbered variables.
        Bad variables are not flagged here.

        Returns variable_list, sample_from_dict
        """
        vars_used = self.get_used_vars(expressions)

        # Seed the variables list with all allowed variables
        variable_list = list(self.config['variables'])
        # Make a copy of the sample_from dictionary, so we can add numbered variables to it
        sample_from_dict = self.config['sample_from'].copy()

        # Find all unassigned variables
        bad_vars = set(var for var in vars_used if var not in variable_list)

        # Check to see if any unassigned variables are numbered_vars
        regexp = numbered_vars_regexp(self.config['numbered_vars'])
        for var in bad_vars:
            match = regexp.match(var)  # Returns None if no match
            if match:
                # This variable is a numbered_variable
                # Go and add it to variable_list with the appropriate sampler
                (full_string, head) = match.groups()
                variable_list.append(full_string)
                sample_from_dict[full_string] = sample_from_dict[head]

        return variable_list, sample_from_dict

    def log_comparison_info(self, comparer, comparer_results):
        """Add sample comparison information to debug log"""
        msg = self.debug_appendix_comparison_template.format(
            samples_total=self.config['samples'],
            comparer=re.sub(r"0x[0-9a-fA-F]+", "0x...",
                            six.text_type(comparer)),
            comparer_results=pprint.pformat(comparer_results))
        msg = msg.replace("<", "&lt;").replace(">", "&gt;")
        self.log(msg)

    @staticmethod
    def consolidate_results(results, answer, failable_evals):
        """
        Consolidate comparer result(s) into just one result.

        Arguments:
            results: a list of results dicts
            answer (dict): correctness data for the expected answer, or None for all correct
            failable_evals: int
        """
        # Handle an empty answer
        if answer is None:
            answer = {'ok': True, 'grade_decimal': 1, 'msg': ''}

        # answer can contain extra keys, so prune them
        pruned_answer = {
            key: answer[key]
            for key in ['ok', 'grade_decimal', 'msg']
        }

        # Check each result for correctness
        num_failures = 0
        for result in results:
            if result['ok'] != True:
                num_failures += 1
                if len(results) == 1 or num_failures > failable_evals:
                    return result

        # This response appears to agree with the expected answer
        return pruned_answer

    def compare_evaluations(self, compare_params_evals, student_evals,
                            comparer, utils):
        """
        Compare the student evaluations to the expected results.
        """
        results = []
        if isinstance(comparer, CorrelatedComparer):
            result = comparer(compare_params_evals, student_evals, utils)
            results.append(ItemGrader.standardize_cfn_return(result))
        else:
            for compare_params_eval, student_eval in zip(
                    compare_params_evals, student_evals):
                result = comparer(compare_params_eval, student_eval, utils)
                results.append(ItemGrader.standardize_cfn_return(result))

        # TODO: Take out this if statement - should always work.
        # However, presently doesn't, because subgraders don't have access to the master debuglog.
        if self.config['debug']:
            self.log_comparison_info(comparer, results)

        return results

    def log_eval_info(self, index, varlist, funclist, **kwargs):
        """Add sample information to debug log"""

        if index == 0:
            header = self.debug_appendix_eval_header_template.format(
                grader=self.__class__.__name__,
                # The regexp replaces memory locations, e.g., 0x10eb1e848 -> 0x...
                functions_allowed=pprint.pformat({
                    f: funclist[f]
                    for f in funclist if f in self.permitted_functions
                }),
                functions_disallowed=pprint.pformat({
                    f: funclist[f]
                    for f in funclist if f not in self.permitted_functions
                }),
            )
            header = re.sub(r"0x[0-9a-fA-F]+", "0x...", header)
            header = header.replace('RandomFunction.gen_sample.<locals>.', '')
            header = header.replace("<", "&lt;").replace(">", "&gt;")
            self.log(header)
        msg = self.debug_appendix_eval_template.format(
            sample_num=index + 1,  # to account for 0 index
            samples_total=self.config['samples'],
            variables=pprint.pformat(varlist),
            **kwargs)
        msg = msg.replace("<", "&lt;").replace(">", "&gt;")
        self.log(msg)
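To make `consolidate_results` concrete: with `failable_evals=1`, a single failed sample among several is forgiven and the (pruned) expected-answer data is returned, while with `failable_evals=0` the first failing result is returned immediately. A small usage illustration with made-up result dicts (assumes the class above and its imports are available):

 results = [
     {'ok': True, 'grade_decimal': 1, 'msg': ''},
     {'ok': False, 'grade_decimal': 0, 'msg': ''},
     {'ok': True, 'grade_decimal': 1, 'msg': ''},
 ]
 answer = {'ok': True, 'grade_decimal': 1, 'msg': 'Good job!', 'extra_key': 'pruned away'}

 # One failure is tolerated; the pruned answer is returned
 print(MathMixin.consolidate_results(results, answer, failable_evals=1))
 # {'ok': True, 'grade_decimal': 1, 'msg': 'Good job!'}

 # With no failable evals, the first failing result is returned instead
 print(MathMixin.consolidate_results(results, answer, failable_evals=0))
 # {'ok': False, 'grade_decimal': 0, 'msg': ''}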