Example #1
def _metavariance(p, exponent):
    """
    _metavariance(p, exponent) -> alpha
    
    Returns alpha := E[ product_i (X_i - mu_i)**exponent ], where
    i ranges over the dimension of the keys of p.
    """

    states, probabilities = domain.from_mapping(p)
    mu = expectation((states, probabilities))
    diffs = (states - numpy.asarray(mu)[:, numpy.newaxis])
    if exponent != 1:
        diffs **= exponent
    product = numpy.multiply.reduce(diffs, axis=0)
    alpha = expectation((product, probabilities))
    assert alpha.shape == (1, )
    return alpha[0]
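
This helper leans on domain.from_mapping and expectation, both defined
elsewhere in the source (expectation appears in Example #2 below). Judging
from the indexing above, from_mapping evidently returns a (dim, n) states
array whose columns are the state tuples, plus a length-n probabilities
array. The sketch below uses a hypothetical stand-in with that shape
convention (an assumption, not the library's implementation) to check that,
in one dimension, _metavariance(p, 2) reduces to the ordinary variance
E[(X - mu)**2]:

import numpy

def from_mapping(p):
    # hypothetical stand-in for domain.from_mapping: column i of
    # states is the i-th state tuple, probabilities[i] its weight
    items = sorted(p.items())
    states = numpy.array([s for s, _ in items]).T
    probabilities = numpy.array([v for _, v in items])
    return states, probabilities

p = {(0, ): 0.25, (1, ): 0.5, (2, ): 0.25}
states, probs = from_mapping(p)
mu = (states * probs).sum(axis=1)                          # E[X] = [1.]
var = (((states - mu[:, None]) ** 2) * probs).sum(axis=1)
print(var)                                                 # [0.5]
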
Example #2
def expectation(p):
    """
    expectation(p) -> mu
    
    Returns the expected value mu, treating p as a distribution
    p : states -> probabilities. p may be given either as a mapping or
    directly as a (states, probabilities) pair of arrays.
    """

    if type(p) is tuple:
        assert len(p) == 2
        states, probabilities = p
    else:
        states, probabilities = domain.from_mapping(p)
    weighted_states = states * probabilities[numpy.newaxis, :]
    mu = numpy.add.reduce(weighted_states, axis=1)

    return mu
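
Note the tuple branch: because it skips domain.from_mapping entirely,
expectation can be exercised on a pre-built (states, probabilities) pair
without the rest of the package. A small usage sketch (toy values, assuming
the function above is in scope):

import numpy

states = numpy.array([[0, 1, 2],
                      [0, 1, 4]])            # columns are states, rows coords
probabilities = numpy.array([0.25, 0.5, 0.25])
mu = expectation((states, probabilities))
print(mu)                                    # [1.  1.5]
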
Example #3
def compress(p, epsilon):
    """
    compress(p, epsilon) -> compressed epsilon-approximation of p
    
    Returns an approximation of the mapping p, treating p as a distribution
    p : states -> probabilities. The returned approximation is *compressed*,
    in the sense that it has the smallest support among all approximations
    whose error from p is within epsilon (L1 norm).
    """

    if not (0.0 <= epsilon <= 1.0):
        raise ValueError('epsilon must be within range: 0.0 <= epsilon <= 1.0')

    p_compressed = {}

    if len(p) > 0:
        # create array representation of distribution
        states, probabilities = domain.from_mapping(p)

        # order entries with respect to increasing probability
        order = numpy.argsort(probabilities)
        states = states.transpose()[order]
        probabilities = probabilities[order]

        # discard as many of the least probable states as possible,
        # while keeping the total discarded probability below epsilon
        cumulative_probability = numpy.add.accumulate(probabilities)
        approximation = (cumulative_probability >= epsilon)
        states = states[approximation]
        probabilities = probabilities[approximation]

        assert len(states) == len(probabilities)

        # convert approximation back to a sparse dictionary format
        for state, probability in zip(states, probabilities):
            p_compressed[tuple(state)] = probability

    return p_compressed
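
The truncation step is easiest to see in isolation: after sorting, the
cumulative sum identifies the longest low-probability prefix whose total
mass stays below epsilon, and the boolean mask keeps everything after it.
A toy trace (values invented for the example):

import numpy

probabilities = numpy.array([0.02, 0.03, 0.15, 0.35, 0.45])  # already sorted
epsilon = 0.1
keep = numpy.add.accumulate(probabilities) >= epsilon
print(keep)                          # [False False  True  True  True]
print(probabilities[~keep].sum())    # ~0.05 -- discarded mass < epsilon
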
Example #4
def pack_distribution(self, p_sparse, p_dense=None):
    """
    convenience routine to translate a distribution from a dictionary to
    a dense array, using this state enumeration
    """

    if p_dense is None:
        p_dense = numpy.zeros((self.size, ), dtype=float)

    # guard against case where p_sparse is empty
    if len(p_sparse) == 0:
        return p_dense

    p_states, p_values = domain.from_mapping(p_sparse)

    # now sort the states, keeping them synchronised with the
    # ordering of the values
    order = numpy.lexsort(p_states)
    p_states = p_states[:, order]
    p_values = p_values[order]
    p_indices = self.indices(p_states)
    p_dense[p_indices] = p_values
    return p_dense
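
The only subtle step above is the lexical sort: numpy.lexsort takes the rows
of the (dim, n) states array as sort keys, last row as the primary key, so
the columns come out in lexical order -- presumably the same order the
enumeration's indices method expects. The mechanics in isolation:

import numpy

p_states = numpy.array([[1, 0, 1, 0],
                        [0, 1, 1, 0]])   # columns are states
order = numpy.lexsort(p_states)
print(order)                             # [3 0 1 2]
print(p_states[:, order])
# [[0 1 0 1]
#  [0 0 1 1]]
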
Example #5
def map_distribution(f, p, g=None):
    """
    map_distribution(f, p [, g]) -> mapping
    
    Returns a copy of the mapping p, with each key replaced by its
    image under f. Any duplicate image keys are merged, with the value of
    the merged key equal to the sum of the values.
    
    It is expected that f returns tuples or scalars, and behaves in a
    reasonable way when given vector state array arguments.
    
    If g is supplied, it is used instead of addition to reduce the values of
    duplicate image keys. If given, g must have a reduce method of the form
        g.reduce(probabilities) -> reduced_probability
        
    for example, setting g to a numpy ufunc would be fine.
    """

    # all this nonsense actually does something fairly straightforward
    # see 'map_distribution_simple' for a reference implementation that
    # avoids numpy operations

    num_items = len(p)

    if num_items == 0:
        return {}

    if g is None:
        g = numpy.add

    s, v = domain.from_mapping(p)
    fs = numpy.asarray(f(s))

    # handle the case where f returns scalars rather than tuples;
    # this might be a touch flaky
    if len(fs.shape) != 2:
        fs = fs * numpy.ones((1, s.shape[1]))

    # sort image states using lexical ordering on coords, then
    # apply same ordering to values

    order = numpy.lexsort(fs)
    sfs = fs[:, order]
    sv = v[order]

    # figure out the indices of the first instance of each state
    not_equal_adj = numpy.logical_or.reduce(sfs[:, :-1] != sfs[:, 1:])
    not_equal_adj = numpy.concatenate(([True], not_equal_adj))

    # extract the unique image states under f
    usfs = sfs[:, not_equal_adj]

    # convert back from array representation to an iterator of state tuples
    unique_image_states = domain.to_iter(usfs)

    # determine start and end indices of each equivalence class of
    # values in the sorted values array, where values are equivalent if
    # they are associated with states that agree under the transform f
    class_begin = numpy.nonzero(not_equal_adj)[0]
    class_end = numpy.concatenate((class_begin[1:], [num_items]))

    # construct the resulting mapped probability distribution
    # each image state s maps to the values in its equivalence class,
    # reduced by g
    p_mapped = {}
    for s, i, j in zip(unique_image_states, class_begin, class_end):
        p_mapped[s] = g.reduce(sv[i:j])

    return p_mapped
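
The run-detection trick deserves a standalone trace: after the lexsort,
equal image states occupy adjacent columns, so a single vectorised
comparison marks the first column of every run, and the run boundaries
become slice indices into the sorted values. With invented data:

import numpy

sfs = numpy.array([[0, 0, 1, 1, 1],
                   [2, 2, 2, 3, 3]])     # lexsorted image states, as columns
not_equal_adj = numpy.logical_or.reduce(sfs[:, :-1] != sfs[:, 1:])
not_equal_adj = numpy.concatenate(([True], not_equal_adj))
class_begin = numpy.nonzero(not_equal_adj)[0]
class_end = numpy.concatenate((class_begin[1:], [sfs.shape[1]]))
print(class_begin)   # [0 2 3] -- runs start at columns 0, 2 and 3
print(class_end)     # [2 3 5]
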