Example #1
    def make_node(self, inp, ws, stride=None, pad=None):
        ctx_name = infer_context_name(inp)
        inp = as_gpuarray_variable(inp, ctx_name)
        nd = self.ndim
        assert (inp.ndim == nd + 2)
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        elif isinstance(pad, (tuple, list)):
            if max(pad) != 0 and not self.ignore_border:
                raise ValueError('Padding works only with ignore_border=True')
            if isinstance(ws, (tuple, list)):
                if any(pad[i] >= ws[i] for i in range(nd)):
                    raise ValueError('Padding must be smaller than the window shape')

        ws = as_tensor_variable(ws)
        stride = as_tensor_variable(stride)
        pad = as_tensor_variable(pad)
        assert ws.ndim == stride.ndim and ws.ndim == pad.ndim
        assert ws.ndim == 1
        if ws.dtype not in theano.tensor.int_dtypes:
            raise TypeError('Window shape parameters must be ints.')
        if stride.dtype not in theano.tensor.int_dtypes:
            raise TypeError('Stride parameters must be ints.')
        if pad.dtype not in theano.tensor.int_dtypes:
            raise TypeError('Padding parameters must be ints.')

        ws = theano.tensor.cast(ws, 'int64')
        stride = theano.tensor.cast(stride, 'int64')
        pad = theano.tensor.cast(pad, 'int64')

        return Apply(self, [inp, ws, stride, pad], [inp.type()])
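
A hedged usage sketch, not taken from the example above: the same ws/stride/pad semantics are exposed on the CPU side through pool_2d, which shows what the validated parameters mean in practice (import path assumes Theano >= 0.9).

    import numpy as np
    import theano
    import theano.tensor as T
    from theano.tensor.signal.pool import pool_2d

    x = T.ftensor4('x')  # inp.ndim == nd + 2 with nd == 2: (batch, channel, row, col)
    y = pool_2d(x, ws=(2, 2), ignore_border=True, stride=(2, 2), pad=(0, 0))
    f = theano.function([x], y)
    print(f(np.arange(16, dtype='float32').reshape(1, 1, 4, 4)))
    # [[[[ 5.  7.]
    #    [13. 15.]]]]
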
Example #2
    def make_node(self, inp, out, out_grad, ws, stride=None, pad=None):
        ctx_name = infer_context_name(inp, out, out_grad)
        nd = self.ndim
        inp = as_gpuarray_variable(inp, ctx_name)
        assert (inp.ndim == nd + 2)
        out = as_gpuarray_variable(out, ctx_name)
        assert (out.ndim == nd + 2)
        out_grad = as_gpuarray_variable(out_grad, ctx_name)
        assert (out_grad.ndim == nd + 2)

        assert (out_grad.ndim == inp.ndim)
        assert (inp.ndim == out.ndim)

        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        ws = as_tensor_variable(ws)
        stride = as_tensor_variable(stride)
        pad = as_tensor_variable(pad)
        assert ws.ndim == stride.ndim and ws.ndim == pad.ndim
        assert ws.ndim == 1
        if ws.dtype not in theano.tensor.int_dtypes:
            raise TypeError('Window shape parameters must be ints.')
        if stride.dtype not in theano.tensor.int_dtypes:
            raise TypeError('Stride parameters must be ints.')
        if pad.dtype not in theano.tensor.int_dtypes:
            raise TypeError('Padding parameters must be ints.')

        ws = theano.tensor.cast(ws, 'int64')
        stride = theano.tensor.cast(stride, 'int64')
        pad = theano.tensor.cast(pad, 'int64')

        return Apply(self, [inp, out, out_grad, ws, stride, pad], [inp.type()])
Example #3
    def make_node(self, inp, out_grad, ws, stride=None, pad=None):
        ctx_name = infer_context_name(inp, out_grad)
        nd = self.ndim
        inp = as_gpuarray_variable(inp, ctx_name)
        assert (inp.ndim == nd + 2)
        out_grad = as_gpuarray_variable(out_grad, ctx_name)
        assert (out_grad.ndim == nd + 2)

        assert (out_grad.ndim == inp.ndim)

        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        elif isinstance(pad, (tuple, list)):
            if max(pad) != 0 and self.mode != 'average_exc_pad':
                raise ValueError('Padding must be zero unless mode is average_exc_pad')
        ws = as_tensor_variable(ws)
        stride = as_tensor_variable(stride)
        pad = as_tensor_variable(pad)
        assert ws.ndim == stride.ndim and ws.ndim == pad.ndim
        assert ws.ndim == 1
        if ws.dtype not in theano.tensor.int_dtypes:
            raise TypeError('Window shape parameters must be ints.')
        if stride.dtype not in theano.tensor.int_dtypes:
            raise TypeError('Stride parameters must be ints.')
        if pad.dtype not in theano.tensor.int_dtypes:
            raise TypeError('Padding parameters must be ints.')

        ws = theano.tensor.cast(ws, 'int64')
        stride = theano.tensor.cast(stride, 'int64')
        pad = theano.tensor.cast(pad, 'int64')

        return Apply(self, [inp, out_grad, ws, stride, pad], [inp.type()])
Example #4
    def make_node(self, V, W, b, d):
        """
        Parameters
        ----------
        V
            Visible unit, input(batch,row,column,time,in channel)
        W
            Weights, filter(out channel,row,column,time,in channel)
        b
            bias, shape == (W.shape[0],)
        d
            strides when moving the filter over the input(dx,dy,dt)

        """

        V_ = T.as_tensor_variable(V)
        W_ = T.as_tensor_variable(W)
        b_ = T.as_tensor_variable(b)
        d_ = T.as_tensor_variable(d)

        bcast = (V_.broadcastable[0], False, False, False, W_.broadcastable[0])

        node = theano.Apply(self, inputs=[V_, W_, b_, d_],
                            outputs=[T.TensorType(V_.dtype, bcast)()])

        return node
Example #5
    def make_node(self, V, d, WShape, dCdH):
        V_ = T.as_tensor_variable(V)
        d_ = T.as_tensor_variable(d)
        WShape_ = T.as_tensor_variable(WShape)
        dCdH_ = T.as_tensor_variable(dCdH)

        return theano.Apply(
            self,
            inputs=[V_, d_, WShape_, dCdH_],
            outputs=[T.TensorType(V_.dtype,
                                  (False, False, False, False, False))()])
Example #6
 def make_node(self, C, alpha, A, B, beta):
      ctx_name = infer_context_name(C, A, B)
      alpha = as_tensor_variable(alpha)
      beta = as_tensor_variable(beta)
      A = as_gpuarray_variable(A, ctx_name)
      B = as_gpuarray_variable(B, ctx_name)
      C = as_gpuarray_variable(C, ctx_name)
     assert A.dtype == B.dtype == C.dtype
     return Apply(self, [C, alpha, A, B, beta], [C.type()])
Example #7
    def make_node(self, a, *shape):
        a = basic.as_tensor_variable(a)
        shape = basic.as_tensor_variable(shape, ndim=1)

        shape, bcast = basic.alloc_validate_shape(shape)

        out = type(a.type)(dtype=a.type.dtype, broadcastable=bcast)()

        return theano.Apply(self, [a] + shape, [out])
Example #8
    def make_node(self, V, d, WShape, dCdH):
        V_ = T.as_tensor_variable(V)
        d_ = T.as_tensor_variable(d)
        WShape_ = T.as_tensor_variable(WShape)
        dCdH_ = T.as_tensor_variable(dCdH)

        return theano.Apply(
            self,
            inputs=[V_, d_, WShape_, dCdH_],
            outputs=[
                T.TensorType(V_.dtype, (False, False, False, False, False))()
            ])
Example #9
 def make_node(self, dy, sm):
     dy = tensor.as_tensor_variable(dy)
     sm = tensor.as_tensor_variable(sm)
     if dy.type.ndim not in (1, 2) \
             or dy.type.dtype not in tensor.float_dtypes:
         raise ValueError('dy must be 1-d or 2-d tensor of floats. Got ',
                          dy.type)
     if dy.ndim == 1:
         dy = tensor.shape_padleft(dy, n_ones=1)
     if sm.ndim == 1:
         sm = tensor.shape_padleft(sm, n_ones=1)
     return Apply(self, [dy, sm], [sm.type()])
Example #10
 def make_node(self, a, val):
     a = basic.as_tensor_variable(a)
     val = basic.as_tensor_variable(val)
     if a.ndim < 2:
         raise TypeError("%s: first parameter must have at least"
                         " two dimensions" % self.__class__.__name__)
     elif val.ndim != 0:
         raise TypeError("%s: second parameter must be a scalar" %
                         self.__class__.__name__)
     val = basic.cast(val, dtype=upcast(a.dtype, val.dtype))
     if val.dtype != a.dtype:
         raise TypeError("%s: type of second parameter must be the same as"
                         " the first's" % self.__class__.__name__)
     return Apply(self, [a, val], [a.type()])
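
The dtype rule above, spelled out: `val` is upcast together with `a.dtype`, and the result must still be `a.dtype`, so `val` may not be wider than `a`. A quick sketch using theano.scalar.upcast (assumed to be the same upcast helper the snippet imports):

    from theano.scalar import upcast

    print(upcast('float32', 'int8'))     # 'float32' -> such a val is accepted
    print(upcast('float32', 'float64'))  # 'float64' -> such a val raises TypeError
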
Example #11
 def make_node(self, C, alpha, A, B, beta):
     ctx_name = infer_context_name(C, A, B)
     A = as_gpuarray_variable(A, ctx_name)
     B = as_gpuarray_variable(B, ctx_name)
     C = as_gpuarray_variable(C, ctx_name)
     alpha = as_tensor_variable(alpha)
     beta = as_tensor_variable(beta)
     assert alpha.ndim == 0
     assert beta.ndim == 0
     assert A.ndim == 2
     assert B.ndim == 2
     assert C.ndim == 2
     assert A.dtype == B.dtype == C.dtype
     return Apply(self, [C, alpha, A, B, beta], [C.type()])
Example #12
 def make_node(self, y, alpha, A, x, beta):
     ctx_name = infer_context_name(y, A, x)
     A = as_gpuarray_variable(A, ctx_name)
     x = as_gpuarray_variable(x, ctx_name)
     y = as_gpuarray_variable(y, ctx_name)
     alpha = as_tensor_variable(alpha)
     beta = as_tensor_variable(beta)
     assert alpha.ndim == 0
     assert beta.ndim == 0
     assert A.ndim == 2
     assert x.ndim == 1
     assert y.ndim == 1
     assert A.dtype == x.dtype == y.dtype
     return Apply(self, [y, alpha, A, x, beta], [y.type()])
Example #13
 def make_node(self, y, alpha, A, x, beta):
     ctx_name = infer_context_name(y, A, x)
     A = as_gpuarray_variable(A, ctx_name)
     x = as_gpuarray_variable(x, ctx_name)
     y = as_gpuarray_variable(y, ctx_name)
     alpha = as_tensor_variable(alpha).astype('float64')
     beta = as_tensor_variable(beta).astype('float64')
     assert alpha.ndim == 0
     assert beta.ndim == 0
     assert A.ndim == 2
     assert x.ndim == 1
     assert y.ndim == 1
     assert A.dtype == x.dtype == y.dtype
     return Apply(self, [y, alpha, A, x, beta], [y.type()])
Example #14
 def make_node(self, C, alpha, A, B, beta):
     ctx_name = infer_context_name(C, A, B)
     A = as_gpuarray_variable(A, ctx_name)
     B = as_gpuarray_variable(B, ctx_name)
     C = as_gpuarray_variable(C, ctx_name)
     alpha = as_tensor_variable(alpha).astype('float64')
     beta = as_tensor_variable(beta).astype('float64')
     assert alpha.ndim == 0
     assert beta.ndim == 0
     assert A.ndim == 3
     assert B.ndim == 3
     assert C.ndim == 3
     assert A.dtype == B.dtype == C.dtype
     return Apply(self, [C, alpha, A, B, beta], [C.type()])
Example #15
    def make_node(self, indices, dims):
        indices = basic.as_tensor_variable(indices)
        dims = basic.as_tensor_variable(dims)

        if indices.dtype not in basic.int_dtypes:
            raise TypeError("'%s' object cannot be interpreted as an index" % str(indices.dtype))
        if dims.dtype not in basic.int_dtypes:
            raise TypeError("'%s' object cannot be interpreted as an index" % str(dims.dtype))
        if dims.ndim != 1:
            raise TypeError("dims must be a 1D array")

        return gof.Apply(
            self, [indices, dims],
            [basic.TensorType(dtype='int64', broadcastable=(False,) * indices.ndim)()
             for i in xrange(self.ndim)])
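
This op mirrors numpy's unravel_index, producing one int64 output per entry of `dims`. A sketch of the expected semantics in plain numpy:

    import numpy as np

    # flat indices 7 and 11 in a (3, 4) array -> per-dimension coordinates
    print(np.unravel_index([7, 11], (3, 4)))  # (array([1, 2]), array([3, 3]))
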
Example #16
    def make_node(self, *inp):
        multi_index = [basic.as_tensor_variable(i) for i in inp[:-1]]
        dims = basic.as_tensor_variable(inp[-1])

        for i in multi_index:
            if i.dtype not in basic.int_dtypes:
                raise TypeError("'%s' object cannot be interpreted as an index" % str(i.dtype))
        if dims.dtype not in basic.int_dtypes:
            raise TypeError("'%s' object cannot be interpreted as an index" % str(dims.dtype))
        if dims.ndim != 1:
            raise TypeError("dims must be a 1D array")

        return gof.Apply(
            self, multi_index + [dims],
            [basic.TensorType(dtype='int64', broadcastable=(False,) * multi_index[0].ndim)()])
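
The inverse direction, again sketched in plain numpy: ravel_multi_index folds per-dimension coordinates back into the flat int64 indices that the single output above holds.

    import numpy as np

    print(np.ravel_multi_index(([1, 2], [3, 3]), (3, 4)))  # [ 7 11]
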
Example #17
    def make_node(self, *inp):
        multi_index = [basic.as_tensor_variable(i) for i in inp[:-1]]
        dims = basic.as_tensor_variable(inp[-1])

        for i in multi_index:
            if i.dtype not in basic.int_dtypes:
                raise TypeError("'%s' object cannot be interpreted as an index" % str(i.dtype))
        if dims.dtype not in basic.int_dtypes:
            raise TypeError("'%s' object cannot be interpreted as an index" % str(dims.dtype))
        if dims.ndim != 1:
            raise TypeError("dims must be a 1D array")

        return gof.Apply(
            self, multi_index + [dims],
            [basic.TensorType(dtype='int64', broadcastable=(False,) * multi_index[0].ndim)()])
Example #18
    def make_node(self, indices, dims):
        indices = basic.as_tensor_variable(indices)
        dims = basic.as_tensor_variable(dims)

        if indices.dtype not in basic.int_dtypes:
            raise TypeError("'%s' object cannot be interpreted as an index" % str(indices.dtype))
        if dims.dtype not in basic.int_dtypes:
            raise TypeError("'%s' object cannot be interpreted as an index" % str(dims.dtype))
        if dims.ndim != 1:
            raise TypeError("dims must be a 1D array")

        return gof.Apply(
            self, [indices, dims],
            [basic.TensorType(dtype='int64', broadcastable=(False,) * indices.ndim)()
             for i in xrange(self.ndim)])
Example #19
def norm(x, ord):
    x = as_tensor_variable(x)
    ndim = x.ndim
    if ndim == 0:
        raise ValueError("'axis' entry is out of bounds.")
    elif ndim == 1:
        if ord is None:
            return tensor.sum(x**2)**0.5
        elif ord == "inf":
            return tensor.max(abs(x))
        elif ord == "-inf":
            return tensor.min(abs(x))
        elif ord == 0:
            return x[x.nonzero()].shape[0]
        else:
            try:
                z = tensor.sum(abs(x**ord))**(1.0 / ord)
            except TypeError:
                raise ValueError("Invalid norm order for vectors.")
            return z
    elif ndim == 2:
        if ord is None or ord == "fro":
            return tensor.sum(abs(x**2))**(0.5)
        elif ord == "inf":
            return tensor.max(tensor.sum(abs(x), 1))
        elif ord == "-inf":
            return tensor.min(tensor.sum(abs(x), 1))
        elif ord == 1:
            return tensor.max(tensor.sum(abs(x), 0))
        elif ord == -1:
            return tensor.min(tensor.sum(abs(x), 0))
        else:
            raise ValueError("Invalid norm order for matrices.")
    elif ndim > 2:
        raise NotImplementedError("We don't support norm with ndim > 2")
Example #20
 def make_node(self, x):
     x = basic.as_tensor_variable(x)
     self_axis = self.axis
     if self_axis is None:
         broadcastable = [False]
     else:
         if self_axis < 0:
             self_axis += len(x.broadcastable)
         if self_axis < 0 or self_axis >= len(x.broadcastable):
             raise RuntimeError(
                 "Unique axis `{}` is outside of input ndim = "
                 "{}.".format(self.axis, len(x.broadcastable)))
         broadcastable = [
             b if axis != self_axis else False
             for axis, b in enumerate(x.broadcastable)
         ]
     outputs = [
         basic.TensorType(broadcastable=broadcastable, dtype=x.dtype)()
     ]
     typ = basic.TensorType(broadcastable=[False], dtype="int64")
     if self.return_index:
         outputs.append(typ())
     if self.return_inverse:
         outputs.append(typ())
     if self.return_counts:
         outputs.append(typ())
     return theano.Apply(self, [x], outputs)
Example #21
 def make_node(self, x):
     x = basic.as_tensor_variable(x)
     self_axis = self.axis
     if self_axis is None:
         broadcastable = [False]
     else:
         if self_axis < 0:
             self_axis += len(x.broadcastable)
         if self_axis < 0 or self_axis >= len(x.broadcastable):
             raise RuntimeError(
                 "Unique axis `{}` is outside of input ndim = "
                 "{}.".format(self.axis, len(x.broadcastable))
                 )
         broadcastable = [b if axis != self_axis else False
                          for axis, b in enumerate(x.broadcastable)]
     outputs = [basic.TensorType(broadcastable=broadcastable,
                                 dtype=x.dtype)()]
     typ = basic.TensorType(broadcastable=[False], dtype='int64')
     if self.return_index:
         outputs.append(typ())
     if self.return_inverse:
         outputs.append(typ())
     if self.return_counts:
         outputs.append(typ())
     return theano.Apply(self, [x], outputs)
Example #22
def broadcast_like(value, template, fgraph, dtype=None):
    """
    Return a Variable with the same shape and dtype as the template,
    filled by broadcasting value through it. `value` will be cast as
    necessary.

    """
    value = T.as_tensor_variable(value)
    if value.type == template.type:
        return value
    if template not in fgraph.variables:
        raise NotImplementedError('broadcast_like currently requires the '
                                  'template Variable to be in the fgraph already')
    if hasattr(fgraph, 'shape_feature'):
        new_shape = fgraph.shape_feature.shape_of[template]
    else:
        new_shape = template.shape
    if dtype is None:
        dtype = template.dtype
    rval = T.alloc(T.cast(value, dtype), *new_shape)
    # the template may have 1s in its shape without being broadcastable
    if rval.broadcastable != template.broadcastable:
        rval = T.unbroadcast(rval, *[i for i in xrange(rval.ndim)
                                     if rval.broadcastable[i] and
                                     not template.broadcastable[i]])
    assert rval.type.dtype == dtype

    if rval.type.broadcastable != template.broadcastable:
        raise AssertionError("rval.type.broadcastable is " +
                             str(rval.type.broadcastable) +
                             " but template.broadcastable is" +
                             str(template.broadcastable))

    return rval
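
broadcast_like needs a FunctionGraph, so it is awkward to run standalone; its core is the T.alloc call, sketched here on its own with hypothetical values (an illustration, not the library entry point):

    import numpy as np
    import theano
    import theano.tensor as T

    template = T.dmatrix('template')
    # fill a casted value through the template's symbolic shape
    filled = T.alloc(T.cast(0.5, template.dtype),
                     template.shape[0], template.shape[1])
    f = theano.function([template], filled)
    print(f(np.zeros((2, 3))))  # a (2, 3) array of 0.5
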
Example #23
    def make_node(self, rv, val):
        """Make an `Observed` random variable.

        Parameters
        ----------
        rv: RandomVariable
            The distribution from which `val` is assumed to be a sample value.
        val: Variable
            The observed value.
        """
        val = as_tensor_variable(val)

        if rv is not None:
            if not hasattr(rv, "type") or rv.type.convert_variable(val) is None:
                raise TypeError(
                    (
                        "`rv` and `val` do not have compatible types:"
                        f" rv={rv}, val={val}"
                    )
                )
        else:
            rv = NoneConst.clone()

        inputs = [rv, val]

        return Apply(self, inputs, [val.type()])
Example #24
 def make_node(self, x):
     x = tensor.as_tensor_variable(x)
     if x.type.ndim not in (1, 2) \
             or x.type.dtype not in tensor.float_dtypes:
         raise ValueError('x must be 1-d or 2-d tensor of floats. Got ', x.type)
     if x.ndim == 1:
         x = tensor.shape_padleft(x, n_ones=1)
     return Apply(self, [x], [x.type()])
Example #25
    def make_node(self, x):
        x = basic.as_tensor_variable(x)
        out_type = x.type()

        if self.axis is None:
            out_type = theano.tensor.vector(dtype=x.dtype)  # Flatten

        return theano.Apply(self, [x], [out_type])
Example #26
    def make_node(self, x):
        x = basic.as_tensor_variable(x)
        out_type = x.type()

        if self.axis is None:
            out_type = theano.tensor.vector(dtype=x.dtype)  # Flatten

        return theano.Apply(self, [x], [out_type])
Example #27
    def make_node(self, V, W, b, d):
        """
            :param V: Visible unit, input(batch,row,column,time,in channel)
            :param W: Weights, filter(out channel,row,column,time,in channel)
            :param b: bias, shape == (W.shape[0],)
            :param d: strides when moving the filter over the input(dx,dy,dt)
        """

        V_ = T.as_tensor_variable(V)
        W_ = T.as_tensor_variable(W)
        b_ = T.as_tensor_variable(b)
        d_ = T.as_tensor_variable(d)

        node = theano.Apply(self, inputs=[V_, W_, b_, d_],
                            outputs=[T.TensorType(V_.dtype,
                                                  (V_.broadcastable[0],
                                                   False, False, False,
                                                   W_.broadcastable[0]))()])

        return node
Example #28
    def make_node(self, img, kern):
        img = as_tensor_variable(img)
        kern = as_tensor_variable(kern)
        img, kern = self.as_common_dtype(img, kern)
        if img.type.ndim != 5:
            raise TypeError("img must be 5D tensor")
        if kern.type.ndim != 5:
            raise TypeError("kern must be 5D tensor")

        broadcastable = [
            img.type.broadcastable[0],
            kern.type.broadcastable[0],
            False,
            False,
            False,
        ]
        dtype = img.type.dtype
        return Apply(self, [img, kern], [TensorType(dtype, broadcastable)()])
Example #29
 def make_node(self, x):
     x = tensor.as_tensor_variable(x)
     if x.type.ndim not in (1, 2) \
             or x.type.dtype not in tensor.float_dtypes:
         raise ValueError('x must be 1-d or 2-d tensor of floats. Got ',
                          x.type)
     if x.ndim == 1:
         x = tensor.shape_padleft(x, n_ones=1)
     return Apply(self, [x], [x.type()])
Example #30
    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim == 2, "The input of qr function should be a matrix."
        q = theano.tensor.matrix(dtype=x.dtype)
        if self.mode != "raw":
            r = theano.tensor.matrix(dtype=x.dtype)
        else:
            r = theano.tensor.vector(dtype=x.dtype)

        return Apply(self, [x], [q, r])
Example #31
    def make_node(self, x):
        x = basic.as_tensor_variable(x)
        out_type = x.type()

        if self.axis is None:
            out_type = theano.tensor.vector(dtype=x.dtype)  # Flatten
        elif self.axis >= x.ndim or self.axis < -x.ndim:
            raise ValueError('axis(={0}) out of bounds'.format(self.axis))

        return theano.Apply(self, [x], [out_type])
Example #32
 def make_node(self, x):
     x = as_tensor_variable(x)
     assert x.ndim == 2, "The input of svd function should be a matrix."
     s = theano.tensor.vector(dtype=x.dtype)
     if self.compute_uv:
         u = theano.tensor.matrix(dtype=x.dtype)
         vt = theano.tensor.matrix(dtype=x.dtype)
         return Apply(self, [x], [u, s, vt])
     else:
         return Apply(self, [x], [s])
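
A hedged usage sketch, assuming this is the op behind theano.tensor.nlinalg.svd: with compute_uv the node has three outputs, otherwise only the singular values.

    import numpy as np
    import theano
    import theano.tensor as T
    from theano.tensor.nlinalg import svd

    X = T.dmatrix('X')
    u, s, vt = svd(X, full_matrices=0)  # compute_uv defaults to True
    f = theano.function([X], [u, s, vt])
    a = np.random.randn(4, 3)
    U, S, Vt = f(a)
    print(np.allclose(U.dot(np.diag(S)).dot(Vt), a))  # True
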
Example #33
 def make_node(self, M):
     M = basic.as_tensor_variable(M)
     if M.ndim != 0:
         raise TypeError(
             f"{self.__class__.__name__} only works on scalar input")
     elif M.dtype not in theano.tensor.integer_dtypes:
         # dtype is a theano attribute here
         raise TypeError(
             f"{self.__class__.__name__} only works on integer input")
     return Apply(self, [M], [basic.dvector()])
Example #34
    def make_node(self, points, dim):
        assert (points.ndim == 3)
        points = as_tensor_variable(points.astype("float32"))

        dim = get_scalar_constant_value(dim)
        if "int" not in str(dim.dtype):
            raise ValueError("dim must be an integer.")

        dim = constant(dim, dtype="int32", name="dim")

        entries_type = TensorType("int32", broadcastable=(False, ))
        keys_type = TensorType("int16", broadcastable=(False, False))
        neib_ent_type = TensorType("int32",
                                   broadcastable=(False, False, False))
        bary_type = TensorType("float32",
                               broadcastable=points.type.broadcastable)

        valid_entries_type = TensorType("int32", broadcastable=(False, ))
        n_valid_type = TensorType("int32", broadcastable=(False, ))

        out_vars = [
            entries_type(name="hash_entries"),
            keys_type(name="hash_keys"),
            neib_ent_type(name="neighbor_entries"),
            bary_type(name="barycentric_coords"),
            valid_entries_type(name="valid_entries"),
            n_valid_type(name="n_valid")
        ]

        # Two sets of entries can't be meaningfully compared without also
        # having the corresponding keys. Since we can only define per-output
        # comparisons, we have to hope that any time someone compares two
        # tables for equality, they will check all outputs.
        out_vars[0].tag.values_eq_approx = lambda e1, e2: True
        out_vars[2].tag.values_eq_approx = lambda e1, e2: True

        # The number of valid entries between two equivalent tables may be
        # different since it includes duplicates.
        out_vars[5].tag.values_eq_approx = lambda n1, n2: True

        def keys_comparison(k1, k2):
            k1 = [tuple(k) for k in np.asarray(k1)]
            k2 = [tuple(k) for k in np.asarray(k2)]
            return set(k1) == set(k2)

        out_vars[1].tag.values_eq_approx = keys_comparison

        def valid_entries_comparison(e1, e2):
            e1 = np.asarray(e1)
            e2 = np.asarray(e2)
            return len(np.unique(e1)) == len(np.unique(e2))

        out_vars[4].tag.values_eq_approx = valid_entries_comparison

        return Apply(self, [points, dim], out_vars)
Example #35
    def make_node(self, x, weights):
        warnings.warn((
            "BinCountOp is deprecated; use bincount instead."),
            stacklevel=3)

        x = basic.as_tensor_variable(x)

        if x.dtype not in BinCountOp.compatible_type:
            raise TypeError("Inputs dtype must be an integer.")

        # Some dtypes are not supported by numpy's implementation of bincount.
        # Until another one is available, we should fail at graph construction
        # time, not wait for execution.
        int_bitwidth = theano.gof.python_int_bitwidth()
        if int_bitwidth == 64:
            numpy_unsupported_dtypes = ('uint64',)
        if int_bitwidth == 32:
            numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
        intp_bitwidth = theano.gof.local_bitwidth()
        if intp_bitwidth == 32:
            out_type = basic.ivector()
        elif intp_bitwidth == 64:
            out_type = basic.lvector()

        if x.dtype in numpy_unsupported_dtypes:
            raise TypeError(
                ("Input dtypes %s are not supported by numpy.bincount, "
                 % numpy_unsupported_dtypes), x.dtype)

        if x.ndim != 1:
            raise TypeError("Inputs must be of dimension 1.")

        if weights is None:
            weights = theano.gof.Constant(theano.gof.Generic(), None)
        else:
            weights = basic.as_tensor_variable(weights)
            out_type = basic.dvector()
            if weights.ndim != 1:
                raise TypeError("Weights cannot have a number of"
                                "dimension different of 1.")

        return theano.Apply(self, [x, weights], [out_type])
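
The bincount behaviour the checks above guard, sketched in plain numpy: without weights the output is an integer count vector; with weights it becomes float64, which is why out_type switches to dvector.

    import numpy as np

    print(np.bincount([0, 1, 1, 3]))                          # [1 2 0 1]
    print(np.bincount([0, 1, 1, 3], weights=[.5, 1, 1, 2]))   # [0.5 2.  0.  2. ]
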
Example #36
 def make_node(self, x):
     x = basic.as_tensor_variable(x)
     outputs = [basic.TensorType(broadcastable=[False], dtype=x.dtype)()]
     typ = basic.TensorType(broadcastable=[False], dtype='int64')
     if self.return_index:
         outputs.append(typ())
     if self.return_inverse:
         outputs.append(typ())
     if self.return_counts:
         outputs.append(typ())
     return theano.Apply(self, [x], outputs)
Example #37
    def __call__(self, a, size=None, replace=True, p=None, **kwargs):

        a = as_tensor_variable(a, ndim=1)

        if p is None:
            p = theano.tensor.type_other.NoneConst.clone()

        if isinstance(replace, bool):
            replace = theano.tensor.constant(np.array(replace))

        return super().__call__(a, p, replace, size=size, dtype=a.dtype, **kwargs)
Example #38
 def make_node(self, _x):
     warnings.warn(
         "DeprecationWarning: theano.tensor.nlinalg.AllocDiag"
         "is deprecated, please use theano.tensor.AllocDiag"
         "instead.",
         category=DeprecationWarning,
     )
     x = as_tensor_variable(_x)
     if x.type.ndim != 1:
         raise TypeError("AllocDiag only works on vectors", _x)
     return Apply(self, [x], [theano.tensor.matrix(dtype=x.type.dtype)])
Example #39
    def make_node(self, x, repeats):
        x = basic.as_tensor_variable(x)
        repeats = basic.as_tensor_variable(repeats)

        if repeats.dtype not in tensor.integer_dtypes:
            raise TypeError("repeats.dtype must be an integer.")

        # Some dtypes are not supported by numpy's implementation of repeat.
        # Until another one is available, we should fail at graph construction
        # time, not wait for execution.
        ptr_bitwidth = theano.configdefaults.local_bitwidth()
        if ptr_bitwidth == 64:
            numpy_unsupported_dtypes = ("uint64",)
        if ptr_bitwidth == 32:
            numpy_unsupported_dtypes = ("uint32", "int64", "uint64")

        if repeats.dtype in numpy_unsupported_dtypes:
            raise TypeError(
                (
                    "dtypes %s are not supported by numpy.repeat "
                    "for the 'repeats' parameter, " % str(numpy_unsupported_dtypes)
                ),
                repeats.dtype,
            )

        if self.axis is None:
            broadcastable = [False]
        else:
            try:
                const_reps = basic.get_scalar_constant_value(repeats)
            except basic.NotScalarConstantError:
                const_reps = None
            if const_reps == 1:
                broadcastable = x.broadcastable
            else:
                broadcastable = list(x.broadcastable)
                broadcastable[self.axis] = False

        out_type = theano.tensor.TensorType(x.dtype, broadcastable)

        return theano.Apply(self, [x, repeats], [out_type()])
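
The broadcastable inference above follows numpy.repeat semantics: only when `repeats` is known to be the constant 1 can the repeated axis stay broadcastable. The underlying behaviour in plain numpy:

    import numpy as np

    print(np.repeat([1, 2, 3], 2))                      # [1 1 2 2 3 3]
    print(np.repeat([[1, 2], [3, 4]], [1, 2], axis=0))  # [[1 2] [3 4] [3 4]]
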
Example #40
    def make_node(self, img, topgrad, shape=None):
        img = as_tensor_variable(img)
        topgrad = as_tensor_variable(topgrad)
        img, topgrad = self.as_common_dtype(img, topgrad)
        if img.type.ndim != 4:
            raise TypeError("img must be 4D tensor")
        if topgrad.type.ndim != 4:
            raise TypeError("topgrad must be 4D tensor")
        if shape is None:
            if self.subsample != (1, 1) or self.border_mode == "half":
                raise ValueError(
                    "shape must be given if subsample != (1, 1)"
                    ' or border_mode == "half"'
                )
            height_width = []
        else:
            height_width = [
                as_tensor_variable(shape[0]).astype("int64"),
                as_tensor_variable(shape[1]).astype("int64"),
            ]

        if self.unshared is True:
            broadcastable = [
                topgrad.type.broadcastable[1],
                False,
                False,
                img.type.broadcastable[1],
                False,
                False,
            ]
        else:
            broadcastable = [
                topgrad.type.broadcastable[1],
                img.type.broadcastable[1],
                False,
                False,
            ]
        dtype = img.type.dtype
        return Apply(
            self, [img, topgrad] + height_width, [TensorType(dtype, broadcastable)()]
        )
Example #41
    def make_node(self, x, repeats):
        x = basic.as_tensor_variable(x)
        repeats = basic.as_tensor_variable(repeats)

        if repeats.dtype not in tensor.integer_dtypes:
            raise TypeError("repeats.dtype must be an integer.")

        # Some dtypes are not supported by numpy's implementation of repeat.
        # Until another one is available, we should fail at graph construction
        # time, not wait for execution.
        ptr_bitwidth = theano.configdefaults.local_bitwidth()
        if ptr_bitwidth == 64:
            numpy_unsupported_dtypes = ("uint64",)
        if ptr_bitwidth == 32:
            numpy_unsupported_dtypes = ("uint32", "int64", "uint64")

        if repeats.dtype in numpy_unsupported_dtypes:
            raise TypeError(
                (
                    "dtypes %s are not supported by numpy.repeat "
                    "for the 'repeats' parameter, " % str(numpy_unsupported_dtypes)
                ),
                repeats.dtype,
            )

        if self.axis is None:
            broadcastable = [False]
        else:
            try:
                const_reps = basic.get_scalar_constant_value(repeats)
            except basic.NotScalarConstantError:
                const_reps = None
            if const_reps == 1:
                broadcastable = x.broadcastable
            else:
                broadcastable = list(x.broadcastable)
                broadcastable[self.axis] = False

        out_type = theano.tensor.TensorType(x.dtype, broadcastable)

        return theano.Apply(self, [x, repeats], [out_type()])
Example #42
 def make_node(self, A, alpha, x, y):
     ctx_name = infer_context_name(A, x, y)
     A = as_gpuarray_variable(A, ctx_name)
     x = as_gpuarray_variable(x, ctx_name)
     y = as_gpuarray_variable(y, ctx_name)
     alpha = as_tensor_variable(alpha).astype('float64')
     assert alpha.ndim == 0
     assert A.ndim == 2
     assert x.ndim == 1
     assert y.ndim == 1
     assert A.dtype == x.dtype == y.dtype
     return Apply(self, [A, alpha, x, y], [A.type()])
Example #43
 def make_node(self, x):
     x = as_tensor_variable(x)
     assert x.ndim == 2
     # Numpy's linalg.eigh may return either double or single
      # precision eigenvalues depending on the installed version of
     # LAPACK.  Rather than trying to reproduce the (rather
     # involved) logic, we just probe linalg.eigh with a trivial
     # input.
     w_dtype = self._numop([[np.dtype(x.dtype).type()]])[0].dtype.name
     w = theano.tensor.vector(dtype=w_dtype)
     v = theano.tensor.matrix(dtype=x.dtype)
     return Apply(self, [x], [w, v])
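
The probe from the comment, spelled out standalone (self._numop is assumed to be numpy.linalg.eigh here): build a trivial 1x1 matrix of the input dtype and read off the eigenvalue dtype that the installed LAPACK actually returns.

    import numpy as np

    w_dtype = np.linalg.eigh([[np.dtype('float32').type()]])[0].dtype.name
    print(w_dtype)  # 'float32' or 'float64', depending on the installed LAPACK
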
Example #44
 def make_node(self, A, alpha, x, y):
     ctx_name = infer_context_name(A, x, y)
     A = as_gpuarray_variable(A, ctx_name)
     x = as_gpuarray_variable(x, ctx_name)
     y = as_gpuarray_variable(y, ctx_name)
     alpha = as_tensor_variable(alpha)
     assert alpha.ndim == 0
     assert A.ndim == 2
     assert x.ndim == 1
     assert y.ndim == 1
     assert A.dtype == x.dtype == y.dtype
     return Apply(self, [A, alpha, x, y], [A.type()])
Example #45
    def make_node(
        self, x, scale, bias, estimated_mean, estimated_variance, epsilon=1e-4
    ):
        x = as_tensor_variable(x)
        scale = as_tensor_variable(scale)
        bias = as_tensor_variable(bias)
        estimated_mean = as_tensor_variable(estimated_mean)
        estimated_variance = as_tensor_variable(estimated_variance)
        epsilon = as_tensor_variable(epsilon)
        # Upcast to common dtype on the non-scalar
        # Keep as is dtype of scalar (epsilon)
        x, scale, bias, estimated_mean, estimated_variance = as_common_dtype(
            x, scale, bias, estimated_mean, estimated_variance
        )
        assert (
            x.ndim
            == scale.ndim
            == bias.ndim
            == estimated_mean.ndim
            == estimated_variance.ndim
        )

        return Apply(
            self,
            [x, scale, bias, estimated_mean, estimated_variance, epsilon],
            [x.type()],
        )
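
A hedged usage sketch: this make_node backs inference-mode batch normalization, whose public wrapper in Theano is batch_normalization_test (signature assumed from Theano 1.0). All five non-scalar inputs share one ndim, matching the assert above.

    import theano
    import theano.tensor as T
    from theano.tensor.nnet.bn import batch_normalization_test

    x = T.ftensor4('x')
    gamma, beta = T.ftensor4('gamma'), T.ftensor4('beta')
    mean, var = T.ftensor4('mean'), T.ftensor4('var')
    out = batch_normalization_test(x, gamma, beta, mean, var,
                                   axes='per-activation', epsilon=1e-4)
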
Example #46
    def make_node(self, W, b, d, H, RShape=None):
        """
        Parameters
        ----------
        W
            Weights, filter
        b
            Bias, shape == (W.shape[0],).
        d
            Strides when moving the filter over the input.
        H
            The output of Conv3D.

        """
        W_ = T.as_tensor_variable(W)
        b_ = T.as_tensor_variable(b)
        d_ = T.as_tensor_variable(d)
        H_ = T.as_tensor_variable(H)
        if RShape:
            RShape_ = T.as_tensor_variable(RShape)
        else:
            RShape_ = T.as_tensor_variable([-1, -1, -1])

        return theano.Apply(self,
                            inputs=[W_, b_, d_, H_, RShape_],
                            outputs=[T.TensorType(H_.dtype,
                                     (False, False, False, False, False))()])
Example #47
    def make_node(self, W, b, d, H, RShape=None):
        """
        Parameters
        ----------
        W
            Weights, filter
        b
            Bias, shape == (W.shape[0],).
        d
            Strides when moving the filter over the input.
        H
            The output of Conv3D.

        """
        W_ = T.as_tensor_variable(W)
        b_ = T.as_tensor_variable(b)
        d_ = T.as_tensor_variable(d)
        H_ = T.as_tensor_variable(H)
        if RShape:
            RShape_ = T.as_tensor_variable(RShape)
        else:
            RShape_ = T.as_tensor_variable([-1, -1, -1])

        return theano.Apply(
            self,
            inputs=[W_, b_, d_, H_, RShape_],
            outputs=[
                T.TensorType(H_.dtype, (False, False, False, False, False))()
            ])
Example #48
    def multinomial(self, size=None, n=1, pvals=None, ndim=None, dtype='int64', nstreams=None):
        if pvals is None:
            raise TypeError('You have to specify pvals')
        pvals = as_tensor_variable(pvals)
        if size is not None:
            if any([isinstance(i, int) and i <= 0 for i in size]):
                raise ValueError('The specified size contains a dimension with value <= 0', size)

        if n == 1 and pvals.ndim == 1:
            if ndim is not None:
                raise ValueError('Provided an ndim argument to ' +
                        'MRG_RandomStreams2.multinomial, which does not use ' +
                        'the ndim argument.')
            unis = self.uniform(size=size, ndim=2, nstreams=nstreams)
            op = MultinomialFromUniform2(dtype)
            return op(pvals, unis)
        else:
            raise NotImplementedError('MRG_RandomStreams2.multinomial is only '
                                      'implemented with n == 1 and pvals.ndim == 1')
Example #49
    def multinomial(self, size=None, n=1, pvals=None, ndim=None, dtype='int32',
                    nstreams=None):
        """
        Sample `n` (currently `n` needs to be 1) times from a multinomial
        distribution defined by probabilities pvals.

        Example : pvals = [[.98, .01, .01], [.01, .98, .01]] will
        probably result in [[1,0,0],[0,1,0]].

        .. note::
            - `size` and `ndim` are only there to keep the same signature as
              the other samplers (uniform, binomial, normal, etc.).
              TODO: adapt multinomial to take them into account.

            - Does not do any value checking on pvals, i.e. there is no
              check that the elements are non-negative, less than 1, or
              sum to 1. Passing pvals = [[-2., 2.]] will result in
              sampling [[0, 0]].
        """
        if pvals is None:
            raise TypeError('You have to specify pvals')
        pvals = as_tensor_variable(pvals)
        if size is not None:
            if any([isinstance(i, int) and i <= 0 for i in size]):
                raise ValueError(
                    'The specified size contains a dimension with value <= 0',
                    size)

        if n == 1 and pvals.ndim == 1:
            if ndim is not None:
                raise ValueError('Provided an ndim argument to ' +
                        'MRG_RandomStreams2.multinomial, which does not use ' +
                        'the ndim argument.')
            unis = self.uniform(size=size, ndim=2, nstreams=nstreams)
            op = MultinomialFromUniform2(dtype)
            return op(pvals, unis)
        else:
            raise NotImplementedError('MRG_RandomStreams2.multinomial is only '
                                      'implemented with n == 1 and pvals.ndim == 1')
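
A hedged usage sketch of the documented behaviour, using the classic MRG_RandomStreams class (the MRG_RandomStreams2 variant shown above may not be importable in every Theano version):

    import numpy as np
    import theano
    import theano.tensor as T
    from theano.sandbox.rng_mrg import MRG_RandomStreams

    rng = MRG_RandomStreams(seed=1234)
    pvals = T.constant(np.array([[.98, .01, .01], [.01, .98, .01]]))
    draw = rng.multinomial(pvals=pvals)  # n == 1, one draw per row of pvals
    f = theano.function([], draw)
    print(f())  # most likely [[1 0 0] [0 1 0]]
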
Example #50
 def make_node(self, x):
     x = basic.as_tensor_variable(x)
     return theano.Apply(self, [x], [x.type()])
Example #51
 def make_node(self, dy, sm, **kwargs):
     dy = tensor.as_tensor_variable(dy)
     sm = tensor.as_tensor_variable(sm)
     return Apply(self, [dy, sm], [sm.type.make_variable()])