Example 1
def binomial(random_state,
             size=None,
             n=1,
             p=0.5,
             ndim=None,
             dtype='int64',
             prob=None):
    """
    Sample n times with probability of success prob for each trial,
    return the number of successes.

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.

    If size is None, the output shape will be determined by the shapes
    of n and prob.
    """
    if prob is not None:
        p = prob
        print("DEPRECATION WARNING: the parameter prob of binomial has been"
              " renamed to p, to match numpy.", file=sys.stderr)
    n = tensor.as_tensor_variable(n)
    p = tensor.as_tensor_variable(p)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, n, p)
    if n.dtype == 'int64':
        # This works around a numpy bug on 32-bit machines.
        # Remove it once the following works on a 32-bit machine:
        #   numpy.random.binomial(
        #       n=numpy.asarray([2, 3, 4], dtype='int64'),
        #       p=numpy.asarray([.1, .2, .3], dtype='float64'))
        n = tensor.cast(n, 'int32')
    op = RandomFunction(
        'binomial',
        tensor.TensorType(dtype=dtype, broadcastable=(False, ) * ndim))
    return op(random_state, size, n, p)
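
A minimal usage sketch (an addition, not part of the original listing): in Theano this op is normally reached through RandomStreams rather than called directly; the seed and shapes below are arbitrary.

import theano
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=1234)
# five draws, each the number of successes in n=10 trials with p=0.3
samples = srng.binomial(size=(5,), n=10, p=0.3)
f = theano.function([], samples)
print(f())  # e.g. [3 2 4 3 1]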
Example 2
def make_node(self, a, val):
    a = tensor.as_tensor_variable(a)
    val = tensor.as_tensor_variable(val)
    if a.ndim < 2:
        raise TypeError('%s: first parameter must have at least'
                        ' two dimensions' % self.__class__.__name__)
    elif val.ndim != 0:
        raise TypeError('%s: second parameter must be a scalar'
                        % self.__class__.__name__)
    # Upcast val to the common dtype; if that dtype differs from a's,
    # val cannot be represented in a's dtype, so reject it.
    val = tensor.cast(val, dtype=scalar.upcast(a.dtype, val.dtype))
    if val.dtype != a.dtype:
        raise TypeError('%s: type of second parameter must be compatible'
                        ' with first\'s' % self.__class__.__name__)
    return gof.Apply(self, [a, val], [a.type()])
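
The dtype check above hinges on scalar.upcast, which returns the common dtype of its arguments. A small sketch of that behaviour (an addition, assuming a Theano installation):

from theano import scalar

# int32 filled with an int8 scalar: the common dtype is int32, so the
# cast in make_node succeeds and no error is raised
print(scalar.upcast('int32', 'int8'))     # 'int32'
# int32 filled with a float64 scalar: the common dtype is float64,
# which differs from int32, so make_node raises TypeError
print(scalar.upcast('int32', 'float64'))  # 'float64'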
Example 3
def infer_shape(self, node, in_shapes):
    temp = node.inputs[0]
    # Clamp a negative input to zero so the inferred dimension is valid.
    M = tensor.switch(tensor.lt(temp, 0),
                      tensor.cast(0, temp.dtype), temp)
    return [[M]]
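
The switch/lt pair above is an elementwise clamp to zero. A NumPy sketch of the same semantics (an illustration, not from the original):

import numpy

for m in (-3, 4):
    # tensor.switch(tensor.lt(m, 0), 0, m) computes this symbolically
    print(numpy.where(m < 0, 0, m))  # prints 0, then 4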
Example 4
def _infer_ndim_bcast(ndim, shape, *args):
    """
    Infer the number of dimensions from the shape or the other arguments.

    :rtype: (int, variable, tuple) triple, where the variable is an integer
    vector, and the tuple contains Booleans.
    :returns: the first element returned is the inferred number of dimensions.
    The second element is the shape inferred (combining symbolic and constant
    informations from shape and args).
    The third element is a broadcasting pattern corresponding to that shape.
    """

    # Find the minimum value of ndim required by the *args
    if args:
        args_ndim = max(arg.ndim for arg in args)
    else:
        args_ndim = 0

    # there is a convention that -1 means the corresponding shape of a
    # potentially-broadcasted symbolic arg
    if (isinstance(shape, (tuple, list))
            and numpy.all(numpy.asarray(shape) >= 0)):
        bcast = [(s == 1) for s in shape]
        v_shape = tensor.TensorConstant(type=tensor.lvector,
                                        data=theano._asarray(shape,
                                                             dtype='int64'))
        shape_ndim = len(shape)
        if ndim is None:
            ndim = shape_ndim
        else:
            if shape_ndim != ndim:
                raise ValueError(
                    'ndim should be equal to len(shape), but\n'
                    'ndim = %s, len(shape) = %s, shape = %s' %
                    (ndim, shape_ndim, shape))
    elif isinstance(shape, (tuple, list)):
        # there is a convention that -1 means the corresponding shape of a
        # potentially-broadcasted symbolic arg
        #
        # This case combines together symbolic and non-symbolic shape
        # information
        if ndim is None:
            ndim = args_ndim
        else:
            ndim = max(args_ndim, ndim)
        # Take the maximum of the requested ndim and what shape requires,
        # so a user-supplied ndim is not silently discarded.
        ndim = max(ndim, len(shape))
        shape = [-1] * (ndim - len(shape)) + list(shape)
        bcast = []
        pre_v_shape = []
        for i, s in enumerate(shape):
            if hasattr(s, 'type'):  # s is symbolic
                bcast.append(False)  # todo - introspect further
                pre_v_shape.append(s)
            else:
                if s >= 0:
                    pre_v_shape.append(tensor.as_tensor_variable(s))
                    bcast.append((s == 1))
                elif s == -1:
                    n_a_i = 0
                    for a in args:
                        # ndim: _   _   _   _   _   _
                        # ashp:         s0  s1  s2  s3
                        #           i
                        if i >= ndim - a.ndim:
                            n_a_i += 1
                            a_i = i + a.ndim - ndim
                            if not a.broadcastable[a_i]:
                                pre_v_shape.append(a.shape[a_i])
                                bcast.append(False)
                                break
                    else:
                        if n_a_i == 0:
                            raise ValueError(
                                'Auto-shape of -1 must overlap '
                                'with the shape of one of the broadcastable '
                                'inputs')
                        else:
                            pre_v_shape.append(tensor.as_tensor_variable(1))
                            bcast.append(True)
                else:
                    raise ValueError('negative shape', s)
        # post-condition: shape may still contain both symbolic and
        # non-symbolic things
        v_shape = tensor.stack(*pre_v_shape)

    elif shape is None:
        # The number of drawn samples will be determined automatically,
        # but we need to know ndim
        if not args:
            raise TypeError(('_infer_ndim_bcast cannot infer shape without'
                             ' either shape or args'))
        # On Python 3, reduce must be imported from functools.
        template = reduce(lambda a, b: a + b, args)
        v_shape = template.shape
        bcast = template.broadcastable
        ndim = template.ndim
    else:
        v_shape = tensor.as_tensor_variable(shape)
        if ndim is None:
            ndim = tensor.get_vector_length(v_shape)
        bcast = [False] * ndim

    if (not (v_shape.dtype.startswith('int')
             or v_shape.dtype.startswith('uint'))):
        raise TypeError('shape must be an integer vector or list',
                        v_shape.dtype)

    if args_ndim > ndim:
        raise ValueError(
            'ndim should be at least as big as required by args value',
            (ndim, args_ndim), args)

    assert ndim == len(bcast)
    return ndim, tensor.cast(v_shape, 'int32'), tuple(bcast)
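
A hedged sketch of calling this helper directly (the import path assumes the helper lives in theano.tensor.raw_random, where it historically sat; the shapes are arbitrary):

import theano.tensor as tensor
from theano.tensor.raw_random import _infer_ndim_bcast

x = tensor.matrix('x')  # symbolic argument, ndim=2
# shape=None: ndim and the broadcast pattern come from the args themselves
ndim, v_shape, bcast = _infer_ndim_bcast(None, None, x)
print(ndim, bcast)      # 2 (False, False)

# explicit constant shape: a dimension of 1 is marked broadcastable
ndim, v_shape, bcast = _infer_ndim_bcast(None, (3, 1))
print(ndim, bcast)      # 2 (False, True)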