Example #1
    def __init__(self, config=None, **kwargs):
        super(IntegralGrader, self).__init__(config, **kwargs)
        self.true_input_positions = self.validate_input_positions(
            self.config['input_positions'])

        # The below are copied from FormulaGrader.__init__

        # Set up the various lists we use
        self.functions, self.random_funcs = construct_functions(
            self.config["whitelist"], self.config["blacklist"],
            self.config["user_functions"])
        self.constants = construct_constants(self.config["user_constants"])
        # TODO I would like to move this into construct_constants at some point,
        # perhaps giving construct_constants an optional argument specifying additional defaults
        if 'infty' not in self.constants:
            self.constants['infty'] = float('inf')

        # Construct the schema for sample_from
        # First, accept all VariableSamplingSets
        # Then, accept any list that RealInterval can interpret
        # Finally, single numbers or tuples of numbers will be handled by DiscreteSet
        schema_sample_from = Schema({
            Required(varname, default=RealInterval()):
            Any(VariableSamplingSet, All(list,
                                         lambda pair: RealInterval(pair)),
                lambda tup: DiscreteSet(tup))
            for varname in self.config['variables']
        })
        self.config['sample_from'] = schema_sample_from(
            self.config['sample_from'])
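The comment block above describes how the sample_from schema coerces each value. Below is a minimal sketch of the three accepted forms, assuming RealInterval and DiscreteSet are importable from the top-level mitxgraders package (illustrative, not part of the snippet):

from mitxgraders import RealInterval, DiscreteSet

sample_from = {
    'x': RealInterval([0, 1]),   # a VariableSamplingSet passes through unchanged
    'y': [2, 4],                 # a list is interpreted as RealInterval([2, 4])
    'z': (1, 2, 3),              # a tuple falls through to DiscreteSet((1, 2, 3))
}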
Example #2
class IdentityMatrixMultiples(SquareMatrixSamplingSet):
    """
    Class representing a collection of multiples of the identity matrix
    of a given dimension.

    Config:
    =======
        Same as MatrixSamplingSet, but:
            - sampler: A scalar sampling set for the multiplicative constant
                (default RealInterval([1, 5]))

    Note that the 'complex' and 'norm' properties are ignored.

    Usage:
    ======

    By default, we generate 2x2 matrices:
    >>> matrices = IdentityMatrixMultiples()
    >>> matrices.gen_sample().shape
    (2, 2)

    We can generate NxN matrices by specifying the dimension:
    >>> matrices = IdentityMatrixMultiples(dimension=4)
    >>> matrices.gen_sample().shape
    (4, 4)

    The scalar multiple can be generated in a number of ways:
    >>> from mitxgraders import ComplexSector
    >>> matrices = IdentityMatrixMultiples(sampler=[1,3])
    >>> sect = ComplexSector(modulus=[0,1], argument=[-np.pi,np.pi])
    >>> matrices = IdentityMatrixMultiples(sampler=sect)

    The resulting samples are simply a scalar times the identity matrix:
    >>> matrices = IdentityMatrixMultiples()
    >>> m = matrices.gen_sample()
    >>> np.array_equal(m, m[0, 0] * np.eye(2))
    True

    """
    # Sampling set for the multiplicative constant
    # Accept anything that FormulaGrader would accept for a sampling set, restricted to
    # scalar sampling sets. Hence, ScalarSamplingSets and ranges are allowed.
    # Note: Does not support DependentSampler or DiscreteSet, as they are not guaranteed
    # to return a scalar value.
    schema_config = SquareMatrixSamplingSet.schema_config.extend({
        Required('sampler', default=RealInterval()): Any(ScalarSamplingSet,
                                                         All(list, Coerce(RealInterval)))
    })

    def generate_sample(self):
        """
        Generates an identity matrix of specified dimension multiplied by a random scalar
        """
        # Sample the multiplicative constant
        scaling = self.config['sampler'].gen_sample()
        # Create the numpy matrix
        array = scaling * np.eye(self.config['dimension'])
        # Return the result
        return array
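As a brief follow-up to the sampler schema above, a sketch of the two accepted forms (RealInterval assumed importable from mitxgraders; values are illustrative):

matrices = IdentityMatrixMultiples(dimension=3, sampler=[1, 3])                 # list, coerced to RealInterval([1, 3])
matrices = IdentityMatrixMultiples(dimension=3, sampler=RealInterval([2, 4]))   # any ScalarSamplingSet
# As the comment notes, DependentSampler and DiscreteSet are not accepted for 'sampler'.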
Example #3
    def validate_math_config(self):
        """Performs generic math configuration validation"""
        validate_blacklist_whitelist_config(self.default_functions,
                                            self.config['blacklist'],
                                            self.config['whitelist'])

        # Make a copy of self.default_variables, so we don't change the base version
        self.default_variables = self.default_variables.copy()

        # Remove any deleted user constants from self.default_variables
        remove_keys = [
            key for key in self.config['user_constants']
            if self.config['user_constants'][key] is None
        ]
        for entry in remove_keys:
            if entry in self.default_variables:
                del self.default_variables[entry]
            del self.config['user_constants'][entry]

        warn_if_override(self.config, 'variables', self.default_variables)
        warn_if_override(self.config, 'numbered_vars', self.default_variables)
        warn_if_override(self.config, 'user_constants', self.default_variables)
        warn_if_override(self.config, 'user_functions', self.default_functions)

        validate_no_collisions(self.config,
                               keys=['variables', 'user_constants'])

        self.permitted_functions = get_permitted_functions(
            self.default_functions, self.config['whitelist'],
            self.config['blacklist'], self.config['user_functions'])

        # Set up the various lists we use
        self.functions, self.random_funcs = construct_functions(
            self.default_functions, self.config["user_functions"])
        self.constants = construct_constants(self.default_variables,
                                             self.config["user_constants"])
        self.suffixes = construct_suffixes(self.default_suffixes,
                                           self.config["metric_suffixes"])

        # Construct the schema for sample_from
        # First, accept all VariableSamplingSets
        # Then, accept any list that RealInterval can interpret
        # Finally, single numbers or tuples of numbers will be handled by DiscreteSet
        schema_sample_from = Schema({
            Required(varname, default=RealInterval()):
            Any(VariableSamplingSet, All(list, Coerce(RealInterval)),
                Coerce(DiscreteSet))
            for varname in (self.config['variables'] +
                            self.config['numbered_vars'])
        })
        self.config['sample_from'] = schema_sample_from(
            self.config['sample_from'])
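A minimal sketch of the constant-removal step above (illustrative values; assumes a FormulaGrader version that includes this step and has 'e' among its default constants):

grader = FormulaGrader(
    answers='2*x',
    variables=['x'],
    user_constants={'e': None},  # a None entry deletes the default constant 'e'
)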
Example #4
    def __init__(self, config=None, **kwargs):
        """
        Validate the FormulaGrader's configuration.
        First, we allow the ItemGrader initializer to construct the function list.
        We then construct the lists of functions, suffixes and constants.
        Finally, we refine the sample_from entry.
        """
        super(FormulaGrader, self).__init__(config, **kwargs)

        # finish validating
        validate_blacklist_whitelist_config(self.default_functions,
                                            self.config['blacklist'],
                                            self.config['whitelist'])
        validate_no_collisions(self.config,
                               keys=['variables', 'user_constants'])
        warn_if_override(self.config, 'variables', self.default_variables)
        warn_if_override(self.config, 'numbered_vars', self.default_variables)
        warn_if_override(self.config, 'user_constants', self.default_variables)
        warn_if_override(self.config, 'user_functions', self.default_functions)

        self.permitted_functions = get_permitted_functions(
            self.default_functions, self.config['whitelist'],
            self.config['blacklist'], self.config['user_functions'])

        # store the comparer utils
        self.comparer_utils = self.get_comparer_utils()

        # Set up the various lists we use
        self.functions, self.random_funcs = construct_functions(
            self.default_functions, self.config["user_functions"])
        self.constants = construct_constants(self.default_variables,
                                             self.config["user_constants"])
        self.suffixes = construct_suffixes(self.default_suffixes,
                                           self.config["metric_suffixes"])

        # Construct the schema for sample_from
        # First, accept all VariableSamplingSets
        # Then, accept any list that RealInterval can interpret
        # Finally, single numbers or tuples of numbers will be handled by DiscreteSet
        schema_sample_from = Schema({
            Required(varname, default=RealInterval()):
            Any(VariableSamplingSet, All(list, Coerce(RealInterval)),
                Coerce(DiscreteSet))
            for varname in (self.config['variables'] +
                            self.config['numbered_vars'])
        })
        self.config['sample_from'] = schema_sample_from(
            self.config['sample_from'])
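A hedged usage sketch of the sample_from refinement at the end of this __init__; note that the schema covers both variables and numbered_vars (values and the a_{1} syntax follow the library's documented conventions, but are illustrative here):

grader = FormulaGrader(
    answers='m*c^2 + a_{1}',
    variables=['m', 'c'],
    numbered_vars=['a'],         # numbered variables also receive a sample_from entry
    sample_from={
        'm': [1, 5],             # list -> RealInterval([1, 5])
        'c': (1, 2, 3),          # tuple -> DiscreteSet((1, 2, 3))
    },                           # 'a' is omitted, so it defaults to RealInterval()
)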
Example #5
    def __init__(self, config=None, **kwargs):
        super(IntegralGrader, self).__init__(config, **kwargs)
        self.true_input_positions = self.validate_input_positions(
            self.config['input_positions'])

        # The below are copied from FormulaGrader.__init__
        validate_blacklist_whitelist_config(self.default_functions,
                                            self.config['blacklist'],
                                            self.config['whitelist'])

        validate_no_collisions(self.config,
                               keys=['variables', 'user_constants'])
        warn_if_override(self.config, 'variables', self.default_variables)
        warn_if_override(self.config, 'user_constants', self.default_variables)
        warn_if_override(self.config, 'user_functions', self.default_functions)

        self.permitted_functions = get_permitted_functions(
            self.default_functions, self.config['whitelist'],
            self.config['blacklist'], self.config['user_functions'])

        self.functions, self.random_funcs = construct_functions(
            self.default_functions, self.config["user_functions"])
        self.constants = construct_constants(self.default_variables,
                                             self.config["user_constants"])

        # Construct the schema for sample_from
        # First, accept all VariableSamplingSets
        # Then, accept any list that RealInterval can interpret
        # Finally, single numbers or tuples of numbers will be handled by DiscreteSet
        schema_sample_from = Schema({
            Required(varname, default=RealInterval()):
            Any(VariableSamplingSet, All(list, Coerce(RealInterval)),
                Coerce(DiscreteSet))
            for varname in self.config['variables']
        })
        self.config['sample_from'] = schema_sample_from(
            self.config['sample_from'])
Example #6
    def __init__(self, config=None, **kwargs):
        """
        Configure the class as normal, then set up norm as a RealInterval
        """
        super(ArraySamplingSet, self).__init__(config, **kwargs)
        self.norm = RealInterval(self.config['norm'])
Example #7
class ArraySamplingSet(VariableSamplingSet):
    """
    Represents a set from which random array variable samples are taken.

    The norm used is the standard Euclidean (Frobenius) norm: the square root of the sum of
    the squares of all entries in the array.

    This is the most low-level array sampling set we have, and is subclassed for various
    specific purposes. While we cannot make this class abstract, we strongly discourage
    its use.

    Config:
    =======
        - shape (int|(int)|[int]): Dimensions of the array, specified as a list or tuple of
            the dimensions in each index as (n_1, n_2, ...). Can also use an integer
            to select a vector of that length. (required; no default)
        - norm ([start, stop]): Range for the overall norm of the array. Can be a
            list [start, stop] or a dictionary {'start':start, 'stop':stop}.
            (default [1, 5])
        - complex (bool): Whether or not the matrix is complex (default False)
    """

    schema_config = Schema({
        Required('shape'): is_shape_specification(min_dim=1),
        Required('norm', default=[1, 5]): NumberRange(),
        Required('complex', default=False): bool
    })

    def __init__(self, config=None, **kwargs):
        """
        Configure the class as normal, then set up norm as a RealInterval
        """
        super(ArraySamplingSet, self).__init__(config, **kwargs)
        self.norm = RealInterval(self.config['norm'])

    def gen_sample(self):
        """
        Generates an array sample and returns it as a MathArray.

        This calls generate_sample, which is the routine that should be subclassed if
        needed, rather than this one.
        """
        array = self.generate_sample()
        return MathArray(array)

    def generate_sample(self):
        """
        Generates a random array of shape and norm determined by config. After
        generation, the apply_symmetry and normalize functions are applied to the result.
        These functions may be shadowed by a subclass.

        If apply_symmetry or normalize raises the Retry exception, a new sample is
        generated, and the procedure starts anew.

        Returns a numpy array.
        """
        # Loop until a good sample is found
        loops = 0
        while loops < 100:
            loops += 1

            # Construct an array with entries in [-0.5, 0.5)
            array = np.random.random_sample(self.config['shape']) - 0.5
            # Make the array complex if needed
            if self.config['complex']:
                imarray = np.random.random_sample(self.config['shape']) - 0.5
                array = array + 1j*imarray

            try:
                # Apply any symmetries to the array
                array = self.apply_symmetry(array)

                # Normalize the result
                array = self.normalize(array)

                # Return the result
                return array
            except Retry:
                continue

        raise ValueError('Unable to construct sample for {}'
                         .format(type(self).__name__))  # pragma: no cover

    def apply_symmetry(self, array):
        """
        Applies the required symmetries to the array.

        This method exists to be shadowed by subclasses.
        """
        return array

    def normalize(self, array):
        """
        Normalizes the array to a norm sampled from the desired range.

        This method can be shadowed by subclasses.
        """
        actual_norm = np.linalg.norm(array)
        desired_norm = self.norm.gen_sample()
        return array * desired_norm / actual_norm
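A minimal usage sketch grounded in the Config block above (the docstring discourages direct use of this class; ArraySamplingSet and np, i.e. numpy, are assumed to be in scope):

sampler = ArraySamplingSet(shape=(2, 3), norm=[1, 5], complex=True)   # norm={'start': 1, 'stop': 5} is equivalent
sample = sampler.gen_sample()              # a 2x3 complex MathArray
1 <= np.linalg.norm(sample) <= 5           # True (up to floating-point rounding)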
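And a hypothetical subclass sketch, not part of the library, illustrating the apply_symmetry hook described in generate_sample's docstring:

class SymmetricArraySamplingSet(ArraySamplingSet):
    """Hypothetical example: samples real symmetric matrices of square shape."""

    def apply_symmetry(self, array):
        # Symmetrize the random draw; generate_sample() then normalizes the result.
        # A subclass could instead raise Retry here to reject a draw and resample.
        return (array + array.T) / 2

symmetric = SymmetricArraySamplingSet(shape=(3, 3))
mat = symmetric.gen_sample()
np.array_equal(mat, mat.T)                 # True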