Example #1
def unpack_distribution(self, p_dense, p_sparse=None):
    """
    Convenience routine to translate a distribution from a dense array
    to a dictionary, using this state enumeration.
    """
    p_indices = numpy.arange(numpy.size(p_dense))
    # convert from an array of coordinate vectors to an iterator of state tuples
    p_states = domain.to_iter(self.states(p_indices))
    if p_sparse is None:
        p_sparse = statistics.Distribution()
    # copy across only the non-zero entries
    for index, state in zip(p_indices, p_states):
        value = p_dense[index]
        if value != 0.0:
            p_sparse[state] = value
    return p_sparse
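The routine above relies on the enclosing state enumeration object and the package's domain and statistics modules. As a minimal self-contained sketch of the same dense-to-sparse idea, here is a plain-dict version with a trivial identity enumeration standing in for self.states (the function name and the identity enumeration are illustrative assumptions, not part of the original API):

import numpy

def dense_to_sparse(p_dense):
    # identity enumeration: state i is just the flat index (i, )
    p_sparse = {}
    for index, value in enumerate(numpy.ravel(p_dense)):
        # keep only the non-zero entries, as above
        if value != 0.0:
            p_sparse[(index, )] = float(value)
    return p_sparse

print(dense_to_sparse(numpy.array([0.5, 0.0, 0.25, 0.25])))
# {(0,): 0.5, (2,): 0.25, (3,): 0.25}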
Example #2
    def to_dense(self, shape, origin=None):
        """
        Returns a dense array version of the distribution, for the given
        array shape and origin.
        """

        if origin is None:
            origin = (0, ) * len(shape)

        # restrict to states inside the rectangular window that also
        # carry probability under this distribution
        states = set(domain.to_iter(domain.from_rect(shape, origin=origin)))
        states &= set(self.keys())
        p_dense = numpy.zeros(shape, dtype=float)
        origin = numpy.asarray(origin)
        for state in states:
            probability = self[state]
            # shift the state into array coordinates relative to the origin,
            # so that the state at the origin maps to index zero
            shifted_state = tuple(numpy.asarray(state) - origin)
            p_dense[shifted_state] += probability
        return p_dense
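A standalone version of the same logic, with a plain dict in place of the Distribution and domain machinery, may make the origin handling easier to follow (dense_from_dict is an illustrative name; the bounds check stands in for the from_rect window intersection):

import numpy

def dense_from_dict(p_sparse, shape, origin=None):
    if origin is None:
        origin = (0, ) * len(shape)
    origin = numpy.asarray(origin)
    p_dense = numpy.zeros(shape, dtype=float)
    for state, probability in p_sparse.items():
        # shift the state into array coordinates, as in to_dense above
        index = tuple(numpy.asarray(state) - origin)
        # keep only states that fall inside the array window
        if all(0 <= i < n for i, n in zip(index, shape)):
            p_dense[index] += probability
    return p_dense

# states (5, 0) and (6, 2) land at array indices (0, 0) and (1, 2)
print(dense_from_dict({(5, 0): 0.5, (6, 2): 0.5}, (2, 3), origin=(5, 0)))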
Example #3
def map_distribution(f, p, g=None):
    """
    map_distribution(f, p [, g]) -> mapping
    
    Returns a copy of the mapping p, with each key replaced by its
    image under f. Any duplicate image keys are merged, with the value of
    the merged key equal to the sum of the values.
    
    f is expected to return tuples or scalars, and to behave reasonably
    when given vectorised state array arguments.
    
    If g is supplied, it is used instead of addition to reduce the values of
    duplicate image keys. If given, g must have a reduce method of the form
        g.reduce(probabilities) -> reduced_probability
        
    For example, setting g to a numpy ufunc would be fine.
    """

    # all this nonsense actually does something fairly straightforward:
    # see 'map_distribution_simple' for a reference implementation that
    # avoids numpy operations

    num_items = len(p)

    if num_items == 0:
        return {}

    if g is None:
        g = numpy.add

    s, v = domain.from_mapping(p)
    fs = numpy.asarray(f(s))

    # handle the case where f returns scalars rather than tuples, by
    # broadcasting the result into a single-row coordinate array
    # (this might be a touch flaky)
    if len(fs.shape) != 2:
        fs = fs * numpy.ones((1, s.shape[1]))

    # sort image states using lexical ordering on coords, then
    # apply same ordering to values

    order = numpy.lexsort(fs)
    sfs = fs[:, order]
    sv = v[order]

    # boolean mask marking the first column of each run of equal
    # adjacent states in the sorted image state array
    not_equal_adj = numpy.logical_or.reduce(sfs[:, :-1] != sfs[:, 1:])
    not_equal_adj = numpy.concatenate(([True], not_equal_adj))

    # extract the unique image states under f
    usfs = sfs[:, not_equal_adj]

    # convert back from array representation to an iterator of state tuples
    unique_image_states = domain.to_iter(usfs)

    # determine start and end indices of each equivalence class of
    # values in the sorted values array, where values are equivalent if
    # they are associated with states that agree under the transform f
    class_begin = numpy.nonzero(not_equal_adj)[0]
    class_end = numpy.concatenate((class_begin[1:], [num_items]))

    # construct the resulting mapped probability distribution
    # each image state s maps to the values in its equivalence class,
    # reduced by g
    p_mapped = {}
    for s, i, j in zip(unique_image_states, class_begin, class_end):
        p_mapped[s] = g.reduce(sv[i:j])

    return p_mapped
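The comments above point to 'map_distribution_simple' as a plain-Python reference implementation. It is not included in this listing, but a version consistent with the documented behaviour might look like the following sketch (reconstructed from the docstring, not taken from the original source):

import numpy

def map_distribution_simple(f, p, g=None):
    # group the values of p into equivalence classes of keys that share
    # the same image under f, then reduce each class with g
    if g is None:
        g = numpy.add
    classes = {}
    for state, value in p.items():
        classes.setdefault(f(state), []).append(value)
    return dict(
        (image, g.reduce(numpy.asarray(values)))
        for image, values in classes.items()
    )

# example: marginalise a 2-D distribution onto its first coordinate;
# states (0, 0) and (0, 1) merge, so (0, ) -> 0.5 and (1, ) -> 0.5
p = {(0, 0): 0.25, (0, 1): 0.25, (1, 0): 0.5}
print(map_distribution_simple(lambda state: (state[0], ), p))

Note that this version applies f to one state tuple at a time, whereas map_distribution above calls f once on the whole vectorised state array, which is where its numpy bookkeeping pays off.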