Example #1
 def make_node(self, x, gz):
     x = tensor.as_tensor_variable(x)
     if x.type.ndim != 4:
         raise TypeError('Expected a 4D tensor, but got a %dD tensor' %
                         x.type.ndim)
     return gof.Apply(self, [x, gz], [x.type()])
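Most of the make_node snippets collected here follow the same recipe: convert inputs with as_tensor_variable, validate them, and return an Apply node that declares the output types. As a point of reference, here is a minimal, self-contained Op sketch in that style. It is illustrative only (not taken from the project above) and assumes a recent-enough Theano:

import numpy
import theano
from theano import gof, tensor

class DoubleOp(gof.Op):
    __props__ = ()

    def make_node(self, x):
        # convert/validate inputs and declare the output type
        x = tensor.as_tensor_variable(x)
        return gof.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        # the numeric work happens here at run time
        (x,) = inputs
        output_storage[0][0] = 2 * x

x = tensor.vector('x')
f = theano.function([x], DoubleOp()(x))
print(f(numpy.asarray([1.0, 2.0], dtype=theano.config.floatX)))  # [2. 4.]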
Example #2
 def make_node(self, x, x2, x3, x4, x5):
     # check that the theano version has support for __props__.
     # This next line looks like it has a typo,
     # but it's actually a way to detect the theano version
     # is sufficiently recent to support the use of __props__.
     assert hasattr(self, '_props'), "Your version of theano is too old to support __props__."
     x = tensor.as_tensor_variable(x)
     x2 = tensor.as_tensor_variable(x2)
     x3 = tensor.as_tensor_variable(x3)
     x4 = tensor.as_tensor_variable(x4)
     x5 = tensor.as_tensor_variable(x5)
     
     if prm.att_doc:
         if prm.compute_emb:
             td = tensor.itensor4().type()
         else:
             td = tensor.ftensor4().type()
         tm = tensor.ftensor3().type()
     else:
         if prm.compute_emb:
             td = tensor.itensor3().type()
         else:
             td = tensor.ftensor3().type()
         tm = tensor.fmatrix().type()
     return theano.Apply(self, [x, x2, x3, x4, x5],
                         [td, tm, tensor.fmatrix().type(), tensor.ivector().type()])
Example #3
    def _traverse(node):
        if node is None:
            return None
        else:
            op     = node.op
            inputs = node.inputs
            if not hasattr(op, 'R_op'):
                raise Exception((' R_op was not implemented for %s'
                                      ' operation. Email the mailing list'
                                      ' for help') % op.__class__.__name__)
            # Compute the evaluation points corresponding to each of the
            # inputs of the node
            local_eval_points = []
            for inp in inputs:
                if inp in wrt:
                    local_eval_points.append(eval_points[wrt.index(inp)])
                elif inp.owner is None:
                    local_eval_points.append(zeros_like(inp))
                elif inp.owner in seen_nodes:
                    local_eval_points.append(
                        seen_nodes[inp.owner][inp.owner.outputs.index(inp)])
                else:
                    # We actually need to compute the R_op for this node
                    _traverse(inp.owner)
                    local_eval_points.append(
                        seen_nodes[inp.owner][inp.owner.outputs.index(inp)])
            for x,y in zip(inputs, local_eval_points):
                if y is not None:
                    assert (as_tensor_variable(x).type == as_tensor_variable(y).type)

            seen_nodes[node] = op.R_op(node.inputs, local_eval_points)
            return None
Example #4
def test_downsample():
    shps = [
        (1, 1, 1, 12),
        (1, 1, 2, 2),
        (1, 1, 1, 1),
        (1, 1, 4, 4),
        (1, 1, 10, 11),
        (1, 2, 2, 2),
        (3, 5, 4, 4),
        (25, 1, 7, 7),
        (1, 1, 12, 12),
        (1, 1, 2, 14),
        (1, 1, 12, 14),
        (1, 1, 14, 14),
        (1, 1, 16, 16),
        (1, 1, 18, 18),
        (1, 1, 24, 24),
        (1, 6, 24, 24),
        (10, 1, 24, 24),
        (10, 6, 24, 24),
        (30, 6, 12, 12),
        (30, 2, 24, 24),
        (30, 6, 24, 24),
        (10, 10, 10, 11),
        (1, 1, 10, 1025),
        (1, 1, 10, 1023),
        (1, 1, 1025, 10),
        (1, 1, 1023, 10),
    ]

    numpy.random.RandomState(unittest_tools.fetch_seed()).shuffle(shps)

    for shp in shps:
        for ds in (2, 2), (3, 2), (1, 1):
            if ds[0] > shp[2]:
                continue
            if ds[1] > shp[3]:
                continue
            # GpuDownsampleFactorMax doesn't like having more than 512 columns
            # in the output tensor.
            if float(shp[3]) / ds[1] > 512:
                continue
            for ignore_border in (True, False):
                print "test_downsample", shp, ds, ignore_border
                ds_op = DownsampleFactorMax(ds, ignore_border=ignore_border)

                a = tcn.shared_constructor(my_rand(*shp), "a")
                f = pfunc([], ds_op(tensor.as_tensor_variable(a)), mode=mode_with_gpu)
                f2 = pfunc([], ds_op(tensor.as_tensor_variable(a)), mode=mode_without_gpu)
                assert any([isinstance(node.op, tcn.blas.GpuDownsampleFactorMax) for node in f.maker.env.toposort()])
                assert any([isinstance(node.op, DownsampleFactorMax) for node in f2.maker.env.toposort()])
                assert numpy.allclose(f(), f2())

                g = pfunc([], tensor.grad(ds_op(tensor.as_tensor_variable(a)).sum(), a), mode=mode_with_gpu)
                g2 = pfunc([], tensor.grad(ds_op(tensor.as_tensor_variable(a)).sum(), a), mode=mode_without_gpu)
                assert any(
                    [isinstance(node.op, tcn.blas.GpuDownsampleFactorMaxGrad) for node in g.maker.env.toposort()]
                )
                assert any([isinstance(node.op, DownsampleFactorMaxGrad) for node in g2.maker.env.toposort()])
                assert numpy.allclose(g(), g2())
Example #5
    def __init__(self, distribution, lower, upper, transform="infer", *args, **kwargs):
        dtype = kwargs.get("dtype", theano.config.floatX)

        if lower is not None:
            lower = tt.as_tensor_variable(lower).astype(dtype)
        if upper is not None:
            upper = tt.as_tensor_variable(upper).astype(dtype)

        if transform == "infer":
            if lower is None and upper is None:
                transform = None
                default = None
            elif lower is not None and upper is not None:
                transform = transforms.interval(lower, upper)
                default = 0.5 * (lower + upper)
            elif upper is not None:
                transform = transforms.upperbound(upper)
                default = upper - 1
            else:
                transform = transforms.lowerbound(lower)
                default = lower + 1
        else:
            default = None

        super().__init__(
            distribution, lower, upper, default, *args, transform=transform, **kwargs
        )
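For orientation, a hedged sketch of how this bounded-distribution machinery is usually reached from user code (PyMC3 Bound API; names and values illustrative):

import pymc3 as pm

with pm.Model():
    # Bound wraps Normal in a bounded distribution; transform="infer"
    # then picks the lower-bound transform automatically.
    BoundedNormal = pm.Bound(pm.Normal, lower=0.0)
    x = BoundedNormal('x', mu=1.0, sigma=3.0)   # use sd= on older releases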
Example #6
def binomial(random_state, size=None, n=1, p=0.5, ndim=None,
             dtype='int64', prob=None):
    """
    Sample n times with probability of success p for each trial,
    return the number of successes.

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.

    If size is None, the output shape will be determined by the shapes
    of n and p.

    """
    if prob is not None:
        p = prob
        print("DEPRECATION WARNING: the parameter prob to the binomal fct have been renamed to p to have the same name as numpy.", file=sys.stderr)
    n = tensor.as_tensor_variable(n)
    p = tensor.as_tensor_variable(p)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, n, p)
    if n.dtype == 'int64':
        try:
            numpy.random.binomial(n=numpy.asarray([2, 3, 4], dtype='int64'), p=numpy.asarray([.1, .2, .3], dtype='float64'))
        except TypeError:
            # THIS WORKS AROUND A NUMPY BUG on 32bit machine
            n = tensor.cast(n, 'int32')
    op = RandomFunction('binomial',
                        tensor.TensorType(dtype=dtype,
                                          broadcastable=(False,) * ndim))
    return op(random_state, size, n, p)
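A hedged usage sketch: in practice the random_state plumbing above is handled through RandomStreams, e.g. (seed and values illustrative):

import theano
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=1234)
samples = srng.binomial(size=(3,), n=10, p=0.3)   # int64 vector of counts
f = theano.function([], samples)
print(f())   # e.g. [3 2 4]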
Example #7
 def make_node(self, a, n, axis):
     a = tensor.as_tensor_variable(a)
     if a.ndim < 1:
         raise TypeError('%s: input must be an array, not a scalar' %
                         self.__class__.__name__)
     if axis is None:
         axis = a.ndim - 1
         axis = tensor.as_tensor_variable(axis)
     else:
         axis = tensor.as_tensor_variable(axis)
         if (not axis.dtype.startswith('int')) and \
            (not axis.dtype.startswith('uint')):
             raise TypeError('%s: index of the transformed axis must be'
                             ' of type integer' % self.__class__.__name__)
         elif axis.ndim != 0 or (isinstance(axis, tensor.TensorConstant) and
                                 (axis.data < 0 or axis.data > a.ndim - 1)):
             raise TypeError('%s: index of the transformed axis must be'
                             ' a scalar not smaller than 0 and smaller than'
                             ' dimension of array' % self.__class__.__name__)
     if n is None:
         n = a.shape[axis]
         n = tensor.as_tensor_variable(n)
     else:
         n = tensor.as_tensor_variable(n)
         if (not n.dtype.startswith('int')) and \
            (not n.dtype.startswith('uint')):
             raise TypeError('%s: length of the transformed axis must be'
                             ' of type integer' % self.__class__.__name__)
         elif n.ndim != 0 or (isinstance(n, tensor.TensorConstant) and
                              n.data < 1):
             raise TypeError('%s: length of the transformed axis must be a'
                             ' strictly positive scalar'
                             % self.__class__.__name__)
     return gof.Apply(self, [a, n, axis], [tensor.TensorType('complex128',
                      a.type.broadcastable)()])
Example #8
def neibs2images(neibs, neib_shape, original_shape, mode='valid'):
    """
    Inverse of images2neibs.

    neibs : matrix like the one obtained by images2neibs
    neib_shape : neib_shape that was used in images2neibs
    original_shape : original shape of the 4d tensor given to images2neibs

    Return a 4d tensor of shape `original_shape`.
    """
    neibs = T.as_tensor_variable(neibs)
    neib_shape = T.as_tensor_variable(neib_shape)
    original_shape = T.as_tensor_variable(original_shape)

    new_neib_shape = T.stack(original_shape[-1] // neib_shape[1],
                             neib_shape[1])
    output_2d = images2neibs(neibs.dimshuffle('x', 'x', 0, 1),
                             new_neib_shape, mode=mode)

    if mode == 'ignore_borders':
        valid_shape = list(original_shape)
        valid_shape[2] = (valid_shape[2] // neib_shape[0]) * neib_shape[0]
        valid_shape[3] = (valid_shape[3] // neib_shape[1]) * neib_shape[1]
        output_4d = output_2d.reshape(valid_shape)
        #padding the borders with zeros
        for d in [2, 3]:
            pad_shape = list(output_4d.shape)
            pad_shape[d] = original_shape[d] - valid_shape[d]
            output_4d = T.concatenate([output_4d, T.zeros(pad_shape)], axis=d)
    else:
        output_4d = output_2d.reshape(original_shape)

    return output_4d
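A hedged round-trip sketch (shapes illustrative, assuming the images2neibs/neibs2images pair from theano.tensor.nnet.neighbours): applying neibs2images to the output of images2neibs in 'valid' mode should reconstruct the original 4D tensor:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet.neighbours import images2neibs, neibs2images

images = T.tensor4('images')
neibs = images2neibs(images, neib_shape=(2, 2))
recon = neibs2images(neibs, (2, 2), images.shape)
f = theano.function([images], recon)

x = np.arange(16, dtype=theano.config.floatX).reshape(1, 1, 4, 4)
assert np.allclose(f(x), x)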
Example #9
    def make_node(self, activations, labels, input_lengths):
        t_activations = T.as_tensor_variable(activations)
        # Ensure activations array is C-contiguous
        t_activations = cpu_contiguous(t_activations)

        t_labels = T.as_tensor_variable(labels)
        t_input_lengths = T.as_tensor_variable(input_lengths)

        if t_activations.type.dtype != 'float32':
            raise TypeError('activations must use the float32 type!')

        if t_activations.ndim != 3:
            raise ValueError('activations must have 3 dimensions.')

        if t_labels.type.dtype != 'int32':
            raise TypeError('labels must use the int32 type!')

        if t_labels.ndim != 2:
            raise ValueError('labels must have 2 dimensions.')

        if t_input_lengths.type.dtype != 'int32':
            raise TypeError('input_lengths must use the int32 type!')

        if t_input_lengths.ndim != 1:
            raise ValueError('input_lengths must have 1 dimension.')

        costs = T.fvector(name="ctc_cost")
        outputs = [costs]
        if self.compute_grad:
            gradients = T.ftensor3(name="ctc_grad")
            outputs += [gradients]

        return gof.Apply(self, inputs=[t_activations, t_labels, t_input_lengths],
                         outputs=outputs)
Example #10
 def make_node(self, n, p, shape):
     n = tensor.as_tensor_variable(n)
     p = tensor.as_tensor_variable(p)
     shape = tensor.as_tensor_variable(shape)
     return gof.Apply(self, [n, p, shape],
                      [SparseType(dtype=self.dtype,
                                  format=self.format).make_variable()])
Example #11
def quantized_lognormal_sampler(
    rstream, mu=0.0, sigma=1.0, step=1, draw_shape=None, ndim=None, dtype=theano.config.floatX
):
    """
    Sample from a quantized log-normal distribution centered on avg with
    the specified standard deviation (std).

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.

    If size is None, the output shape will be determined by the shapes
    of avg and std.

    If dtype is not specified, it will be inferred from the dtype of
    avg and std, but will be at least as precise as floatX.
    """

    mu = tensor.as_tensor_variable(mu)
    sigma = tensor.as_tensor_variable(sigma)
    step = tensor.as_tensor_variable(step)

    if dtype is None:
        dtype = tensor.scal.upcast(theano.config.floatX, mu.dtype, sigma.dtype, step.dtype)
    rstate = rstream.new_shared_rstate()
    ndim, draw_shape, bcast = tensor.raw_random._infer_ndim_bcast(ndim, draw_shape, mu, sigma)
    op = QuantizedLognormal(otype=tensor.TensorType(dtype=dtype, broadcastable=bcast))
    new_rstate, out = op(rstate, draw_shape, mu, sigma, step)
    rstream.add_default_update(out, rstate, new_rstate)
    return out
Example #12
    def make_node(self, x, new_length, insert_at):
        """
        .. todo::

            WRITEME
        """
        x_ = tensor.as_tensor_variable(x)
        new_length_ = tensor.as_tensor_variable(new_length)
        insert_at_ = tensor.as_tensor_variable(insert_at)
        assert x_.ndim == self.ndim, (
            "%s instance expected x.ndim = %d, got %d" %
            (self.__class__.__name__, self.ndim, x.ndim)
        )
        assert new_length_.ndim == 0, "new_length must be a scalar"
        assert insert_at_.ndim == 1, "insert_at must be vector"
        assert (new_length_.dtype.startswith('int') or
                new_length_.dtype.startswith('uint')), (
                    "new_length must be integer type"
                )
        assert (insert_at_.dtype.startswith('int') or
                insert_at_.dtype.startswith('uint')), (
                    "insert_at must be integer type"
                )
        return theano.Apply(self,
          inputs=[x_, new_length_, insert_at_],
          outputs=[x_.type()])
Example #13
    def test_neibs_bad_shape_wrap_centered(self):
        shape = (2, 3, 10, 10)

        for dtype in self.dtypes:
            images = shared(numpy.arange(
                numpy.prod(shape), dtype=dtype
                ).reshape(shape))

            for neib_shape in [(3, 2), (2, 3)]:
                neib_shape = T.as_tensor_variable(neib_shape)

                f = function([], images2neibs(images, neib_shape,
                                              mode="wrap_centered"),
                             mode=self.mode)
                self.assertRaises(TypeError, f)

            for shape in [(2, 3, 2, 3), (2, 3, 3, 2)]:
                images = shared(numpy.arange(numpy.prod(shape)).reshape(shape))
                neib_shape = T.as_tensor_variable((3, 3))
                f = function([], images2neibs(images, neib_shape,
                                              mode="wrap_centered"),
                             mode=self.mode)
                self.assertRaises(TypeError, f)

            # Test a valid shape
            shape = (2, 3, 3, 3)
            images = shared(numpy.arange(numpy.prod(shape)).reshape(shape))
            neib_shape = T.as_tensor_variable((3, 3))

            f = function([],
                         images2neibs(images, neib_shape, mode="wrap_centered"),
                         mode=self.mode)
            f()
Example #14
    def make_node(self, softmaxes, y_idxes, y_lengths, y_startidxes, g_costs, **kwargs):
        softmaxes = T.as_tensor_variable(softmaxes)
        y_idxes = T.as_tensor_variable(y_idxes)
        y_lengths = T.as_tensor_variable(y_lengths)
        y_startidxes = T.as_tensor_variable(y_startidxes)
        g_costs = T.as_tensor_variable(g_costs)

        if (softmaxes.type.ndim != 3 or
            softmaxes.type.dtype not in T.float_dtypes):
            raise ValueError('softmaxes must be a 3-d tensor of floats', softmaxes.type)

        if (y_idxes.type.ndim != 2 or
            y_idxes.type.dtype not in T.discrete_dtypes):
            raise ValueError('y_idxes must be 2-d tensor of integers', y_idxes.type)

        if (y_lengths.type.ndim != 1 or
            y_lengths.type.dtype not in T.discrete_dtypes):
            raise ValueError('y_lengths must be 1-d tensor of integers', y_lengths.type)

        if (y_startidxes.type.ndim != 1 or
            y_startidxes.type.dtype not in T.discrete_dtypes):
            raise ValueError('y_startidxes must be 1-d tensor of integers', y_startidxes.type)

        if (g_costs.type.ndim != 1 or
            g_costs.type.dtype not in T.float_dtypes):
            raise ValueError('g_costs must be 1-d tensor of floats', g_costs.type)

        return Apply(self, [softmaxes, y_idxes, y_lengths, y_startidxes, g_costs],
                     [T.Tensor(dtype=softmaxes.dtype, broadcastable=softmaxes.type.broadcastable)()])
Example #15
 def make_node(self,x,y):
     if x.type.ndim != y.type.ndim:
         raise TypeError()
     # TODO: consider restricting the dtype?
     x = tensor.as_tensor_variable(x)
     y = tensor.as_tensor_variable(y)
     return gof.Apply(self, [x,y], [])
Example #16
def kron(a, b):
    """ Kronecker product

    Same as scipy.linalg.kron(a, b).

    :note: numpy.kron(a, b) != scipy.linalg.kron(a, b)!
        They don't have the same shape and order when
        a.ndim != b.ndim != 2.

    :param a: array_like
    :param b: array_like
    :return: array_like with a.ndim + b.ndim - 2 dimensions.

    """
    a = tensor.as_tensor_variable(a)
    b = tensor.as_tensor_variable(b)
    if (a.ndim + b.ndim <= 2):
        raise TypeError('kron: inputs dimensions must sum to 3 or more. '
                        'You passed %d and %d.' % (a.ndim, b.ndim))
    o = tensor.outer(a, b)
    o = o.reshape(tensor.concatenate((a.shape, b.shape)),
                  a.ndim + b.ndim)
    shf = o.dimshuffle(0, 2, 1, * range(3, o.ndim))
    if shf.ndim == 3:
        shf = o.dimshuffle(1, 0, 2)
        o = shf.flatten()
    else:
        o = shf.reshape((o.shape[0] * o.shape[2],
                         o.shape[1] * o.shape[3]) +
                        tuple([o.shape[i] for i in range(4, o.ndim)]))
    return o
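A quick hedged check of the helper above (the same kron also ships in theano.tensor.slinalg): for 2-D inputs the result should match numpy.kron, since numpy and scipy only disagree when an argument is not 2-D:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.slinalg import kron   # same helper as defined above

a = T.matrix('a')
b = T.matrix('b')
f = theano.function([a, b], kron(a, b))

x = np.arange(4., dtype=theano.config.floatX).reshape(2, 2)
y = np.ones((3, 3), dtype=theano.config.floatX)
assert np.allclose(f(x, y), np.kron(x, y))   # result has shape (6, 6)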
Example #17
 def __init__(self, w, mu, *args, **kwargs):
     _, sd = get_tau_sd(tau=kwargs.pop('tau', None),
                        sd=kwargs.pop('sd', None))
     self.mu = mu = tt.as_tensor_variable(mu)
     self.sd = sd = tt.as_tensor_variable(sd)
     super(NormalMixture, self).__init__(w, Normal.dist(mu, sd=sd),
                                         *args, **kwargs)
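For orientation, a hedged sketch of declaring such a mixture from user code (PyMC3-style API; values illustrative):

import numpy as np
import pymc3 as pm

with pm.Model():
    w = pm.Dirichlet('w', a=np.ones(2))
    # two Normal components at -1 and +1 with a shared unit scale
    mix = pm.NormalMixture('mix', w=w, mu=np.array([-1.0, 1.0]), sd=1.0)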
Example #18
def quantized_lognormal_mixture_sampler(rstream, weights, mus, sigmas, step, draw_shape=None, ndim=None, dtype=None):
    rstate = rstream.new_shared_rstate()
    # shape prep
    if draw_shape is None:
        raise NotImplementedError()
    elif draw_shape is tensor.as_tensor_variable(draw_shape):
        shape = draw_shape
        if ndim is None:
            ndim = tensor.get_vector_length(shape)
    elif tuple(draw_shape) == ():
        ndim = 0
        shape = tensor.as_tensor_variable(numpy.asarray([], dtype="int"))
    else:
        shape = tensor.stack(*draw_shape)
        if ndim is None:
            ndim = len(draw_shape)
        assert tensor.get_vector_length(shape) == ndim

    # XXX: be smarter about inferring broadcastable
    op = QuantizedLognormalMixture(
        tensor.TensorType(broadcastable=(False,) * ndim, dtype=theano.config.floatX if dtype is None else dtype)
    )
    rs, out = op(rstate, shape, weights, mus, sigmas, step)
    rstream.add_default_update(out, rstate, rs)
    return out
Example #19
    def __init__(self, name, data, distribution, model):
        """
        Parameters
        ----------

        type : theano type (optional)
        owner : theano owner (optional)

        name : str
        distribution : Distribution
        model : Model
        """
        self.name = name
        data = getattr(data, 'values', data) #handle pandas
        args = as_iterargs(data)

        if len(args) > 1:
            params = getargspec(distribution.logp).args
            args = [t.as_tensor_variable(d, name=name + "_" + param)
                    for d,param in zip(args,params) ]
        else:
            args = [t.as_tensor_variable(args[0], name=name)]

        self.logp_elemwiset = distribution.logp(*args)
        self.model = model
        self.distribution = distribution
Example #20
 def __init__(self, psi, mu, alpha, *args, **kwargs):
     super(ZeroInflatedNegativeBinomial, self).__init__(*args, **kwargs)
     self.mu = mu = tt.as_tensor_variable(mu)
     self.alpha = alpha = tt.as_tensor_variable(alpha)
     self.psi = psi = tt.as_tensor_variable(psi)
     self.nb = NegativeBinomial.dist(mu, alpha)
     self.mode = self.nb.mode
Example #21
    def __init__(self, rho, sigma=None, tau=None,
                 constant=False, init=Flat.dist(),
                 sd=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if sd is not None:
            sigma = sd

        tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
        self.sigma = self.sd = tt.as_tensor_variable(sigma)
        self.tau = tt.as_tensor_variable(tau)

        self.mean = tt.as_tensor_variable(0.)

        if isinstance(rho, list):
            p = len(rho)
        else:
            try:
                shape_ = rho.shape.tag.test_value
            except AttributeError:
                shape_ = rho.shape

            if hasattr(shape_, "size") and shape_.size == 0:
                p = 1
            else:
                p = shape_[0]

        if constant:
            self.p = p - 1
        else:
            self.p = p

        self.constant = constant
        self.rho = rho = tt.as_tensor_variable(rho)
        self.init = init
Example #22
    def __init__(self, q, beta, *args, **kwargs):
        super(DiscreteWeibull, self).__init__(*args, defaults=['median'], **kwargs)

        self.q = q = tt.as_tensor_variable(q)
        self.beta = beta = tt.as_tensor_variable(beta)

        self.median = self._ppf(0.5)
Example #23
 def make_node(self, x, scale, shift):
     if x.type.ndim != 4:
         raise TypeError()
     x = tensor.as_tensor_variable(x)
     scale = tensor.as_tensor_variable(scale)
     shift = tensor.as_tensor_variable(shift)
     return gof.Apply(self, [x, scale, shift], [x.type()])
Example #24
    def _traverse(node):
        """ TODO: writeme """
        if node is None:
            return None
        else:
            op = node.op
            inputs = node.inputs

            # Compute the evaluation points corresponding to each of the
            # inputs of the node
            local_eval_points = []
            for inp in inputs:
                if inp in wrt:
                    local_eval_points.append(eval_points[wrt.index(inp)])
                elif inp.owner is None:
                    local_eval_points.append(inp.zeros_like())
                elif inp.owner in seen_nodes:

                    local_eval_points.append(seen_nodes[inp.owner][inp.owner.outputs.index(inp)])

                else:
                    # We actually need to compute the R_op for this node

                    _traverse(inp.owner)
                    local_eval_points.append(seen_nodes[inp.owner][inp.owner.outputs.index(inp)])
            for x, y in zip(inputs, local_eval_points):
                if y is not None:
                    assert as_tensor_variable(x).type == as_tensor_variable(y).type

            seen_nodes[node] = op.R_op(node.inputs, local_eval_points)
            return None
Example #25
    def make_node(self, kern, topgrad, shape=None):
        kern = as_tensor_variable(kern)
        topgrad = as_tensor_variable(topgrad)
        kern, topgrad = self.as_common_dtype(kern, topgrad)
        if kern.type.ndim != 5:
            raise TypeError('kern must be 5D tensor')
        if topgrad.type.ndim != 5:
            raise TypeError('topgrad must be 5D tensor')
        if shape is None:
            if self.subsample != (1, 1, 1):
                raise ValueError('shape must be given if subsample != (1, 1, 1)')
            height_width_depth = []
        else:
            height_width_depth = [as_tensor_variable(shape[0]).astype('int64'),
                                  as_tensor_variable(shape[1]).astype('int64'),
                                  as_tensor_variable(shape[2]).astype('int64')]

        if self.num_groups > 1:
            broadcastable = [topgrad.type.broadcastable[0], False,
                             False, False, False]
        else:
            broadcastable = [topgrad.type.broadcastable[0], kern.type.broadcastable[1],
                             False, False, False]
        dtype = kern.type.dtype
        return Apply(self, [kern, topgrad] + height_width_depth,
                     [TensorType(dtype, broadcastable)()])
Example #26
 def make_node(self, A, b):
     A = as_tensor_variable(A)
     b = as_tensor_variable(b)
     otype = tensor.tensor(
             broadcastable=b.broadcastable,
             dtype = (A*b).dtype)
     return Apply(self, [A,b], [otype])
Example #27
def choice(random_state, size=None, a=2, replace=True, p=None, ndim=None,
           dtype='int64'):
    """
    Choose values from `a` with or without replacement. `a` can be a 1-D array
    or a positive scalar. If `a` is a scalar, the samples are drawn from the
    range 0,...,a-1.

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.

    If size is None, a scalar will be returned.

    """
    # numpy.random.choice is only available for numpy versions >= 1.7
    major, minor, _ = numpy.version.short_version.split('.')
    if (int(major), int(minor)) < (1, 7):
        raise ImportError('choice requires NumPy version >= 1.7 '
                          '(%s)' % numpy.__version__)
    a = tensor.as_tensor_variable(a)
    if isinstance(replace, bool):
        replace = tensor.constant(replace, dtype='int8')
    else:
        replace = tensor.as_tensor_variable(replace)
    # encode p=None as an empty vector
    p = tensor.as_tensor_variable(p or [])
    ndim, size, bcast = _infer_ndim_bcast(ndim, size)
    op = RandomFunction(choice_helper, tensor.TensorType(dtype=dtype,
                                                         broadcastable=bcast))
    return op(random_state, size, a, replace, p)
Example #28
    def __init__(self, distribution, lower, upper,
                 transform='infer', *args, **kwargs):
        dtype = kwargs.get('dtype', theano.config.floatX)

        if lower is not None:
            lower = tt.as_tensor_variable(lower).astype(dtype)
        if upper is not None:
            upper = tt.as_tensor_variable(upper).astype(dtype)

        if transform == 'infer':
            if lower is None and upper is None:
                transform = None
                default = None
            elif lower is not None and upper is not None:
                transform = transforms.interval(lower, upper)
                default = 0.5 * (lower + upper)
            elif upper is not None:
                transform = transforms.upperbound(upper)
                default = upper - 1
            else:
                transform = transforms.lowerbound(lower)
                default = lower + 1
        else:
            default = None

        super(_ContinuousBounded, self).__init__(
            distribution=distribution, lower=lower, upper=upper,
            transform=transform, default=default, *args, **kwargs)
Example #29
    def __init__(self, q, beta, *args, **kwargs):
        super().__init__(*args, defaults=('median',), **kwargs)

        self.q = q = tt.as_tensor_variable(q)
        self.beta = beta = tt.as_tensor_variable(beta)

        self.median = self._ppf(0.5)
Example #30
 def __init__(self, psi, n, p, *args, **kwargs):
     super(ZeroInflatedBinomial, self).__init__(*args, **kwargs)
     self.n = n = tt.as_tensor_variable(n)
     self.p = p = tt.as_tensor_variable(p)
     self.psi = psi = tt.as_tensor_variable(psi)
     self.bin = Binomial.dist(n, p)
     self.mode = self.bin.mode
Example #31
 def make_node(self, x):
     assert imported_scipy, (
         "Scipy not available. Scipy is needed for the Cholesky op")
     x = as_tensor_variable(x)
     return Apply(self, [x], [x.type()])
Example #32
 def __init__(self, alpha, beta, n, *args, **kwargs):
     super(BetaBinomial, self).__init__(*args, **kwargs)
     self.alpha = alpha = tt.as_tensor_variable(alpha)
     self.beta = beta = tt.as_tensor_variable(beta)
     self.n = n = tt.as_tensor_variable(n)
     self.mode = tt.cast(tround(alpha / (alpha + beta)), 'int8')
Example #33
        def test_specify_shape_partial(self):
            dtype = self.dtype
            if dtype is None:
                dtype = theano.config.floatX

            rng = numpy.random.RandomState(utt.fetch_seed())
            x1_1 = numpy.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
            x1_1 = self.cast_value(x1_1)
            x1_2 = numpy.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)
            x1_2 = self.cast_value(x1_2)
            x2 = numpy.asarray(rng.uniform(1, 2, [5, 2]), dtype=dtype)
            x2 = self.cast_value(x2)

            # Test that we can replace with values of the same shape
            x1_shared = self.shared_constructor(x1_1)
            x1_specify_shape = tensor.specify_shape(
                x1_shared,
                (tensor.as_tensor_variable(x1_1.shape[0]), x1_shared.shape[1]))
            x1_shared.set_value(x1_2)
            assert numpy.allclose(
                self.ref_fct(x1_shared.get_value(borrow=True)),
                self.ref_fct(x1_2))
            shape_op_fct = theano.function([], x1_shared.shape)
            topo = shape_op_fct.maker.fgraph.toposort()
            shape_op_fct()
            if theano.config.mode != 'FAST_COMPILE':
                assert len(topo) == 3
                assert isinstance(topo[0].op, tensor.opt.Shape_i)
                assert isinstance(topo[1].op, tensor.opt.Shape_i)
                assert isinstance(topo[2].op, tensor.opt.MakeVector)

            # Test that we forward the input
            specify_shape_fct = theano.function([], x1_specify_shape)
            specify_shape_fct()
            # theano.printing.debugprint(specify_shape_fct)
            assert numpy.all(
                self.ref_fct(specify_shape_fct()) == self.ref_fct(x1_2))
            topo_specify = specify_shape_fct.maker.fgraph.toposort()
            if theano.config.mode != 'FAST_COMPILE':
                assert len(topo_specify) == 4

            # Test that we put the shape info into the graph
            shape_constant_fct = theano.function([], x1_specify_shape.shape)
            # theano.printing.debugprint(shape_constant_fct)
            assert numpy.all(shape_constant_fct() == shape_op_fct())
            topo_cst = shape_constant_fct.maker.fgraph.toposort()
            if theano.config.mode != 'FAST_COMPILE':
                assert len(topo_cst) == 2

            # Test that we can replace with values of a different shape,
            # but that will raise an error in some cases, though not all
            x1_shared.set_value(x2)
            self.assertRaises(AssertionError, specify_shape_fct)

            # No assertion will be raised as the Op is removed from the graph
            if theano.config.mode not in [
                    'FAST_COMPILE', 'DebugMode', 'DEBUG_MODE'
            ]:
                shape_constant_fct()
            else:
                self.assertRaises(AssertionError, shape_constant_fct)
Example #34
 def make_node(self, x):
     x = as_tensor_variable(x)
     o = theano.tensor.scalar(dtype=x.dtype)
     return Apply(self, [x], [o])
Example #35
 def make_node(self, _x):
     x = as_tensor_variable(_x)
     if x.type.ndim != 1:
         raise TypeError('AllocDiag only works on vectors', _x)
     return Apply(self, [x], [tensor.matrix(dtype=x.type.dtype)])
Example #36
 def make_node(self, _x):
     x = as_tensor_variable(_x)
     if x.type.ndim != 2:
         raise TypeError('ExtractDiag only works on matrices', _x)
     return Apply(self, [x], [tensor.vector(dtype=x.type.dtype)])
Example #37
 def make_node(self, x):
     x = as_tensor_variable(x)
     return Apply(self, [x], [x.type()])
Example #38
 def make_node(self, x):
     x_ = tensor.as_tensor_variable(x).astype(theano.config.floatX)
     return theano.Apply(self, inputs=[x_], outputs=[x_.type()])
Example #39
 def make_node(self, x, i0, i1):
     _i0 = tensor.as_tensor_variable(i0)
     _i1 = tensor.as_tensor_variable(i1)
     return Apply(self, [x, _i0, _i1], [x.type()])
Example #40
 def make_node(self, s_rstate, alpha):
     alpha = tensor.as_tensor_variable(alpha)
     return theano.gof.Apply(
         self, [s_rstate, alpha],
         [s_rstate.type(), self.otype()])
Example #41
    def choice(self,
               size=1,
               a=None,
               replace=True,
               p=None,
               ndim=None,
               dtype='int64',
               nstreams=None,
               **kwargs):
        """
        Sample `size` times from a multinomial distribution defined by
        probabilities `p`, and return the indices of the sampled elements.
        Sampled values are between 0 and `p.shape[1]-1`.
        Only sampling without replacement is implemented for now.

        Parameters
        ----------
        size: integer or integer tensor (default 1)
            The number of samples. It should be between 1 and `p.shape[1]-1`.
        a: int or None (default None)
            For now, a should be None. This function will sample
            values between 0 and `p.shape[1]-1`. Once a != None is
            supported, a scalar `a` will mean that samples are drawn from
            the range 0,...,a-1. The argument is kept to match the
            interface of RandomStreams.
        replace: bool (default True)
            Whether the sample is with or without replacement.
            Only replace=False is implemented for now.
        p: 2d numpy array or theano tensor
            the probabilities of the distribution, corresponding to values
            0 to `p.shape[1]-1`.

        Example : p = [[.98, .01, .01], [.01, .49, .50]] and size=1 will
        probably result in [[0],[2]]. When setting size=2, this
        will probably result in [[0,1],[2,1]].

        Notes
        -----
        -`ndim` is only there to keep the same signature as other
        functions such as uniform, binomial, normal, etc.

        -Does not do any value checking on pvals, i.e. there is no
        check that the elements are non-negative, less than 1, or
        sum to 1. Passing pvals = [[-2., 2.]] will result in
        sampling [[0, 0]].

        -Only replace=False is implemented for now.

        """
        if replace:
            raise NotImplementedError(
                "MRG_RandomStreams.choice only works without replacement "
                "for now.")

        if a is not None:
            raise TypeError("For now, a has to be None in "
                            "MRG_RandomStreams.choice. Sampled values are "
                            "between 0 and p.shape[1]-1")

        if p is None:
            raise TypeError("For now, p has to be specified in "
                            "MRG_RandomStreams.choice.")
        p = as_tensor_variable(p)
        p = undefined_grad(p)

        if ndim is not None:
            raise ValueError("ndim argument to "
                             "MRG_RandomStreams.choice "
                             "is not used.")

        if p.ndim != 2:
            raise NotImplementedError(
                "MRG_RandomStreams.choice is only implemented for p.ndim = 2")

        shape = p[:, 0].shape * size
        unis = self.uniform(size=shape, ndim=1, nstreams=nstreams, **kwargs)
        op = multinomial.ChoiceFromUniform(odtype=dtype)
        return op(p, unis, as_tensor_variable(size))
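A hedged usage sketch for the method above (MRG_RandomStreams from theano.sandbox.rng_mrg; the probabilities follow the docstring's example):

import numpy as np
import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=7)
p = np.asarray([[0.98, 0.01, 0.01],
                [0.01, 0.49, 0.50]], dtype=theano.config.floatX)
idx = srng.choice(size=1, p=p, replace=False)   # one index per row of p
f = theano.function([], idx)
print(f())   # most likely [[0], [2]]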
Example #42
 def make_node(self, s_rstate, p, draw_shape):
     p = tensor.as_tensor_variable(p)
     draw_shape = tensor.as_tensor_variable(draw_shape)
     return theano.gof.Apply(
         self, [s_rstate, p, draw_shape],
         [s_rstate.type(), self.otype()])
Example #43
 def new(cls, rstate, ndim, dtype, size):
     v_size = as_tensor_variable(size)
     if ndim is None:
         ndim = get_vector_length(v_size)
     op = cls(TensorType(dtype, (False, ) * ndim))
     return op(rstate, v_size)
Example #44
 def make_node(self, x):
     assert (imported_scipy
             ), "Scipy not available. Scipy is needed for the Cholesky op"
     x = as_tensor_variable(x)
     assert x.ndim == 2
     return Apply(self, [x], [x.type()])
Example #45
def safe_new(x, tag='', dtype=None):
    """
    Internal function that constructs a new variable from x with the same
    type, but with a different name (old name + tag). This function is used
    by gradient, or the R-op to construct new variables for the inputs of
    the inner graph such that there is no interference between the original
    graph and the newly constructed graph.

    """
    if hasattr(x, 'name') and x.name is not None:
        nw_name = x.name + tag
    else:
        nw_name = None

    if isinstance(x, theano.Constant):
        if dtype and x.dtype != dtype:
            casted_x = x.astype(dtype)
            nwx = x.__class__(casted_x.type, x.data, x.name)
            nwx.tag = copy(x.tag)
            return nwx
        else:
            return x.clone()
    # Note, as_tensor_variable will convert the Scalar into a
    # TensorScalar that will require a ScalarFromTensor op,
    # making the pushout optimization fail
    elif isinstance(x, scalar.ScalarVariable):
        if dtype:
            nw_x = scalar.get_scalar_type(dtype=dtype)()
        else:
            nw_x = x.type()
        nw_x.name = nw_name
        if theano.config.compute_test_value != 'off':
            # Copy test value, cast it if necessary
            try:
                x_test_value = gof.op.get_test_value(x)
            except AttributeError:
                # There is no test value
                pass
            else:
                # This clause is executed if no exception was raised
                nw_x.tag.test_value = nw_x.type.filter(x_test_value)
        return nw_x
    else:
        try:
            x = tensor.as_tensor_variable(x)
        except TypeError:
            # This could happen for example for random states, and I really
            # want to avoid the convoluted logic that checks for cuda
            # ndarrays
            pass

    # Cast x if needed. If x has a test value, this will also cast it.
    if dtype and x.dtype != dtype:
        x = x.astype(dtype)

    nw_x = x.type()
    nw_x.name = nw_name
    # Preserve test values so that the 'compute_test_value' option can be used.
    # The test value is deep-copied to ensure there can be no interactions
    # between test values, due to inplace operations for instance. This may
    # not be the most efficient memory-wise, though.
    if theano.config.compute_test_value != 'off':
        try:
            nw_x.tag.test_value = copy.deepcopy(gof.op.get_test_value(x))
        except AttributeError:
            # This means `x` has no test value.
            pass

    return nw_x
Example #46
    def uniform(self,
                size,
                low=0.0,
                high=1.0,
                ndim=None,
                dtype=None,
                nstreams=None,
                **kwargs):
        # TODO : need description for parameter 'size', 'ndim', 'nstreams'
        """
        Sample a tensor of the given size whose elements are drawn from a
        uniform distribution between low and high.

        If the size argument is ambiguous on the number of dimensions,
        ndim may be a plain integer to supplement the missing information.

        Parameters
        ----------
        low
            Lower bound of the interval on which values are sampled.
            If the ``dtype`` arg is provided, ``low`` will be cast into
            dtype. This bound is excluded.
        high
            Higher bound of the interval on which values are sampled.
            If the ``dtype`` arg is provided, ``high`` will be cast into
            dtype. This bound is excluded.
        size
          Can be a list of integers or a Theano variable (e.g. the shape
          of another Theano variable).
        dtype
            The output data type. If dtype is not specified, it will be
            inferred from the dtype of low and high, but will be at
            least as precise as floatX.

        """
        low = as_tensor_variable(low)
        high = as_tensor_variable(high)

        if dtype is None:
            dtype = scal.upcast(config.floatX, low.dtype, high.dtype)

        low = cast(low, dtype=dtype)
        high = cast(high, dtype=dtype)

        low = undefined_grad(low)
        high = undefined_grad(high)

        if isinstance(size, tuple):
            msg = "size must be a tuple of int or a Theano variable"
            assert all([
                isinstance(i, (np.integer, integer_types, Variable))
                for i in size
            ]), msg
            if any([
                    isinstance(i, (np.integer, integer_types)) and i <= 0
                    for i in size
            ]):
                raise ValueError(
                    "The specified size contains a dimension with value <= 0",
                    size)

        else:
            if not (isinstance(size, Variable) and size.ndim == 1):
                raise TypeError("size must be a tuple of int or a Theano "
                                "Variable with 1 dimension, got " + str(size) +
                                " of type " + str(type(size)))
        orig_nstreams = nstreams
        if nstreams is None:
            nstreams = self.n_streams(size)
        rstates = self.get_substream_rstates(nstreams, dtype)

        d = {}
        if 'target' in kwargs:
            d = dict(target=kwargs.pop('target'))
        if len(kwargs) > 0:
            raise TypeError("uniform() got unexpected keyword arguments %s" %
                            (str(kwargs.keys())))
        node_rstate = shared(rstates, **d)
        u = self.pretty_return(node_rstate,
                               *mrg_uniform.new(node_rstate, ndim, dtype,
                                                size),
                               size=size,
                               nstreams=orig_nstreams)
        # Add a reference to distinguish from other shared variables
        node_rstate.tag.is_rng = True
        r = u * (high - low) + low

        if u.type.broadcastable != r.type.broadcastable:
            raise NotImplementedError(
                'Increase the size to match the broadcasting pattern of '
                '`low` and `high` arguments')

        assert r.dtype == dtype
        return r
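A hedged usage sketch for the uniform sampler above (seed and shape illustrative):

import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=2357)
u = srng.uniform(size=(2, 3), low=-1.0, high=1.0)
f = theano.function([], u)
print(f())   # a 2x3 array of draws from (-1, 1)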
Example #47
 def _comp_modes(self):
     try:
         return tt.as_tensor_variable(self.comp_dists.mode)
     except AttributeError:
         return tt.stack([comp_dist.mode for comp_dist in self.comp_dists],
                         axis=1)
Example #48
    def normal(self,
               size,
               avg=0.0,
               std=1.0,
               ndim=None,
               dtype=None,
               nstreams=None,
               truncate=False,
               **kwargs):
        """
        Sample a tensor of values from a normal distribution.

        Parameters
        ----------
        size : int_vector_like
            Array dimensions for the output tensor.
        avg : float_like, optional
            The mean value for the truncated normal to sample from (defaults to 0.0).
        std : float_like, optional
            The standard deviation for the truncated normal to sample from (defaults to 1.0).
        truncate : bool, optional
            Truncates the normal distribution at 2 standard deviations if True (defaults to False).
            When this flag is set, the standard deviation of the result will be less than the one specified.
        ndim : int, optional
            The number of dimensions for the output tensor (defaults to None).
            This argument is necessary if the size argument is ambiguous on the number of dimensions.
        dtype : str, optional
            The data-type for the output tensor. If not specified,
            the dtype is inferred from avg and std, but it is at least as precise as floatX.
        kwargs
            Other keyword arguments for random number generation (see uniform).

        Returns
        -------
        samples : TensorVariable
            A Theano tensor of samples randomly drawn from a normal distribution.

        """
        size = _check_size(size)
        avg = undefined_grad(as_tensor_variable(avg))
        std = undefined_grad(as_tensor_variable(std))

        if dtype is None:
            dtype = scal.upcast(config.floatX, avg.dtype, std.dtype)

        avg = tensor.cast(avg, dtype=dtype)
        std = tensor.cast(std, dtype=dtype)

        # generate an even number of uniform samples
        # Do manual constant folding to lower optimizer work.
        if isinstance(size, theano.Constant):
            n_odd_samples = size.prod(dtype='int64')
        else:
            n_odd_samples = tensor.prod(size, dtype='int64')
        n_even_samples = n_odd_samples + n_odd_samples % 2
        uniform = self.uniform((n_even_samples, ),
                               low=0.,
                               high=1.,
                               ndim=1,
                               dtype=dtype,
                               nstreams=nstreams,
                               **kwargs)

        # box-muller transform
        u1 = uniform[:n_even_samples // 2]
        u2 = uniform[n_even_samples // 2:]
        r = tensor.sqrt(-2.0 * tensor.log(u1))
        theta = np.array(2.0 * np.pi, dtype=dtype) * u2
        cos_theta, sin_theta = tensor.cos(theta), tensor.sin(theta)
        z0 = r * cos_theta
        z1 = r * sin_theta

        if truncate:
            # use valid samples
            to_fix0 = (z0 < -2.) | (z0 > 2.)
            to_fix1 = (z1 < -2.) | (z1 > 2.)
            z0_valid = z0[tensor.nonzero(~to_fix0)]
            z1_valid = z1[tensor.nonzero(~to_fix1)]

            # re-sample invalid samples
            to_fix0 = tensor.nonzero(to_fix0)[0]
            to_fix1 = tensor.nonzero(to_fix1)[0]
            n_fix_samples = to_fix0.size + to_fix1.size
            lower = tensor.constant(1. / np.e**2, dtype=dtype)
            u_fix = self.uniform((n_fix_samples, ),
                                 low=lower,
                                 high=1.,
                                 ndim=1,
                                 dtype=dtype,
                                 nstreams=nstreams,
                                 **kwargs)
            r_fix = tensor.sqrt(-2. * tensor.log(u_fix))
            z0_fixed = r_fix[:to_fix0.size] * cos_theta[to_fix0]
            z1_fixed = r_fix[to_fix0.size:] * sin_theta[to_fix1]

            # pack everything together to a useful result
            norm_samples = tensor.join(0, z0_valid, z0_fixed, z1_valid,
                                       z1_fixed)
        else:
            norm_samples = tensor.join(0, z0, z1)
        if isinstance(n_odd_samples, theano.Variable):
            samples = norm_samples[:n_odd_samples]
        elif n_odd_samples % 2 == 1:
            samples = norm_samples[:-1]
        else:
            samples = norm_samples
        samples = tensor.reshape(samples, newshape=size, ndim=ndim)
        samples *= std
        samples += avg

        return samples
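A hedged usage sketch for the normal sampler above (seed and values illustrative):

import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=97)
z = srng.normal(size=(4,), avg=0.0, std=2.0)
f = theano.function([], z)
print(f())   # four draws from N(0, 2**2)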
Example #49
def multinomial(random_state,
                size=None,
                n=1,
                pvals=[0.5, 0.5],
                ndim=None,
                dtype='int64'):
    """
    Sample from one or more multinomial distributions defined by
    one-dimensional slices in pvals.

    Parameters
    ----------
    pvals
        A tensor of shape "nmulti+(L,)" describing each multinomial
        distribution.  This tensor must have the property that
        numpy.allclose(pvals.sum(axis=-1), 1) is true.
    size
        A vector of shape information for the output; this can also
        specify the "nmulti" part of pvals' shape.  A -1 in the k'th position
        from the right means to borrow the k'th position from the
        right in nmulti. (See examples below.)
        Default ``None`` means size=nmulti.
    n
        The number of experiments to simulate for each
        multinomial. This can be a scalar, or tensor, it will be
        broadcasted to have shape "nmulti".
    dtype
        The dtype of the return value (which will represent counts)

    Returns
    -------
    tensor
        Tensor of len(size)+1 dimensions, and shape[-1]==L, with
        the specified ``dtype``, with the experiment counts. See
        examples to understand the shape of the return value, which is
        derived from both size and pvals.shape. In return value rval,
        "numpy.allclose(rval.sum(axis=-1), n)" will be true.

    Extended Summary
    ----------------
    For example, to simulate n experiments from each multinomial in a batch of
    size B:

        size=None, pvals.shape=(B,L) --> rval.shape=[B,L]

        rval[i,j] is the count of possibility j in the i'th distribution (row)
        in pvals.

    Using size:

        size=(1,-1), pvals.shape=(A,B,L)
        --> rval.shape=[1,B,L], and requires that A==1.

        rval[k,i,j] is the count of possibility j in the distribution specified
        by pvals[k,i].

    Using size for broadcasting of pvals:

        size=(10, 1, -1), pvals.shape=(A, B, L)
        --> rval.shape=[10,1,B,L], and requires that A==1.

        rval[l,k,i,j] is the count of possibility j in the
        distribution specified by pvals[k,i], in the l'th of 10
        draws.

    """
    n = tensor.as_tensor_variable(n)
    pvals = tensor.as_tensor_variable(pvals)
    # until ellipsis is implemented (argh)
    tmp = pvals.T[0].T
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, n, tmp)
    bcast = bcast + (pvals.type.broadcastable[-1], )
    op = RandomFunction(multinomial_helper,
                        tensor.TensorType(dtype=dtype, broadcastable=bcast),
                        ndim_added=1)
    return op(random_state, size, n, pvals)
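A hedged usage sketch for the multinomial documented above, going through RandomStreams so random_state is handled automatically (values illustrative; each row of pvals must sum to 1):

import numpy as np
import theano
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=42)
pvals = np.asarray([[0.2, 0.3, 0.5]], dtype=theano.config.floatX)
counts = srng.multinomial(n=10, pvals=pvals)   # shape (1, 3)
f = theano.function([], counts)
print(f())   # e.g. [[2 3 5]]; each row sums to n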
Example #50
 def __init__(self, condition):
     self.condition = tensor.as_tensor_variable(condition)
     assert self.condition.ndim == 0
Example #51
    y0 = T.fmatrix('y0')
    latents = T.fmatrix('latent0')

    tmp_out0 = model.layers[0].fprop(x0)
    tmp_out11, idx_below, latents = model.layers[1].tree_fprop(tmp_out0,idx_below,latents)
    tmp_out12, idx_below, latents = model.layers[2].tree_fprop(tmp_out11,idx_below,latents)
    tmp_out13, idx_below, latents = model.layers[3].tree_fprop(tmp_out12,idx_below,latents)


    tmp_out20 = model.layers[4].myfprop(tmp_out13)
    tmp_out21 = model.layers[5].myfprop(tmp_out20)
    tmp_out_y = model.layers[6].myfprop(tmp_out21)


    cost_list = model.layers[6].cost(y0, tmp_out_y,idx_below)
    cost_vector = T.as_tensor_variable(cost_list)
    fn = theano.function([x0,y0,latents], [tmp_out_y,cost_vector,idx_below])

    for j in xrange(0, model.batch_size*nbatch, model.batch_size):
        print j
        tmp_in = data_tmp.get_topological_view(data_tmp.X[j:j+model.batch_size, :])
        tmp_y = data_tmp.y[j:j+model.batch_size, :]
        tmp_latent = data_tmp.latent[j:j+model.batch_size, :]

        tmp_out,tmp_cost_list, tmp_idx = fn(tmp_in,tmp_y,tmp_latent)
        print tmp_latent
        print tmp_idx

        if j==0:
            cost_sum_vect = tmp_cost_list
            tmp=tmp_out
Example #52
 def make_node(self, x):
     x = as_tensor_variable(x)
     assert x.ndim == 2
     w = theano.tensor.vector(dtype=x.dtype)
     v = theano.tensor.matrix(dtype=x.dtype)
     return Apply(self, [x], [w, v])
Example #53
 def make_node(self, n, p, shape):
     n = tensor.as_tensor_variable(n)
     p = tensor.as_tensor_variable(p)
     shape = tensor.as_tensor_variable(shape)
     return gof.Apply(self, [n, p, shape],
                      [SparseType(dtype=self.dtype, format=self.format)()])
Example #54
def _infer_ndim_bcast(ndim, shape, *args):
    """
    Infer the number of dimensions from the shape or the other arguments.

    Returns
    -------
    (int, variable, tuple) triple, where the variable is an integer vector,
    and the tuple contains Booleans
        The first element returned is the inferred number of dimensions.
        The second element is the shape inferred (combining symbolic and
        constant information from shape and args).
        The third element is a broadcasting pattern corresponding to that shape.

    """

    # Find the minimum value of ndim required by the *args
    if args:
        args_ndim = max(arg.ndim for arg in args)
    else:
        args_ndim = 0

    if isinstance(shape, (tuple, list)):
        # there is a convention that -1 means the corresponding shape of a
        # potentially-broadcasted symbolic arg
        #
        # This case combines together symbolic and non-symbolic shape
        # information
        shape_ndim = len(shape)
        if ndim is None:
            ndim = shape_ndim
        else:
            if shape_ndim != ndim:
                raise ValueError(
                    'ndim should be equal to len(shape), but\n',
                    'ndim = %s, len(shape) = %s, shape = %s' %
                    (ndim, shape_ndim, shape))

        bcast = []
        pre_v_shape = []
        for i, s in enumerate(shape):
            if hasattr(s, 'type'):  # s is symbolic
                bcast.append(False)  # todo - introspect further
                pre_v_shape.append(s)
            else:
                if s >= 0:
                    pre_v_shape.append(tensor.as_tensor_variable(s))
                    bcast.append((s == 1))
                elif s == -1:
                    n_a_i = 0
                    for a in args:
                        # ndim: _   _   _   _   _   _
                        # ashp:         s0  s1  s2  s3
                        #           i
                        if i >= ndim - a.ndim:
                            n_a_i += 1
                            a_i = i + a.ndim - ndim
                            if not a.broadcastable[a_i]:
                                pre_v_shape.append(a.shape[a_i])
                                bcast.append(False)
                                break
                    else:
                        if n_a_i == 0:
                            raise ValueError(
                                ('Auto-shape of -1 must overlap '
                                 'with the shape of one of the broadcastable '
                                 'inputs'))
                        else:
                            pre_v_shape.append(tensor.as_tensor_variable(1))
                            bcast.append(True)
                else:
                    raise ValueError('negative shape', s)
        # post-condition: shape may still contain both symbolic and
        # non-symbolic things
        if len(pre_v_shape) == 0:
            v_shape = tensor.constant([], dtype='int64')
        else:
            v_shape = tensor.stack(pre_v_shape)

    elif shape is None:
        # The number of drawn samples will be determined automatically,
        # but we need to know ndim
        if not args:
            raise TypeError(('_infer_ndim_bcast cannot infer shape without'
                             ' either shape or args'))
        template = reduce(lambda a, b: a + b, args)
        v_shape = template.shape
        bcast = template.broadcastable
        ndim = template.ndim
    else:
        v_shape = tensor.as_tensor_variable(shape)
        if v_shape.ndim != 1:
            raise TypeError(
                "shape must be a vector or list of scalar, got '%s'" % v_shape)

        if ndim is None:
            ndim = tensor.get_vector_length(v_shape)
        bcast = [False] * ndim

    if v_shape.ndim != 1:
        raise TypeError("shape must be a vector or list of scalar, got '%s'" %
                        v_shape)

    if (not (v_shape.dtype.startswith('int')
             or v_shape.dtype.startswith('uint'))):
        raise TypeError('shape must be an integer vector or list',
                        v_shape.dtype)

    if args_ndim > ndim:
        raise ValueError(
            'ndim should be at least as big as required by args value',
            (ndim, args_ndim), args)

    assert ndim == len(bcast)
    return ndim, tensor.cast(v_shape, 'int64'), tuple(bcast)
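A hedged illustration of the -1 convention described above, calling the private helper directly (normally the sampling functions call it for you; the names below are illustrative):

import theano.tensor as T
from theano.tensor.raw_random import _infer_ndim_bcast

v = T.vector('v')                            # symbolic, length unknown
ndim, shape, bcast = _infer_ndim_bcast(None, (5, -1), v)
print(ndim, bcast)                           # 2 (False, False); shape stays symbolic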
Example #55
 def make_node(self, a, b):
     a = as_tensor_variable(a)
     b = as_tensor_variable(b)
     out_dtype = theano.scalar.upcast(a.dtype, b.dtype)
     x = theano.tensor.matrix(dtype=out_dtype)
     return Apply(self, [a, b], [x])
Example #56
    def make_node(self, n, p):
        n = tensor.as_tensor_variable(n)
        p = as_sparse_variable(p)
        assert p.format in ["csr", "csc"]

        return gof.Apply(self, [n, p], [p.type()])
Example #57
 def make_node(self, x):
     x = as_tensor_variable(x)
     assert x.ndim == 2
     return Apply(self, [x], [x.type()])
Example #58
def as_gpuarray_variable(x):
    if hasattr(x, '_as_GpuArrayVariable'):
        return x._as_GpuArrayVariable()
    # TODO we need to have the cuda -> gpu path taken care of.
    tensor_x = tensor.as_tensor_variable(x)
    return gpu_from_host(tensor_x)
Example #59
 def make_node(self, x):
     x = as_tensor_variable(x)
     assert x.ndim == 2, "The input of qr function should be a matrix."
     q = theano.tensor.matrix(dtype=x.dtype)
     return Apply(self, [x], [q])
Example #60
 def make_node(self, a):
     a = as_tensor_variable(a)
     out = a.type()
     return Apply(self, [a], [out])