Example #1
 def make_node(self, *inputs):
     assert len(inputs) == self.nin
     inputs = list(map(as_variable, inputs))
     for input in inputs:
         if input.type is not tdouble:
             raise Exception("Error 1")
     outputs = [double(self.name + "_R")]
     return Apply(self, inputs, outputs)
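A note on the list(map(...)) call above: in Python 3, map returns a one-shot iterator, so validating the inputs in a loop would exhaust it before Apply ever sees them. A minimal demonstration:

    it = map(str, [1, 2, 3])
    print(list(it))  # ['1', '2', '3']
    print(list(it))  # [] -- the iterator is already exhausted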
Example #2
 def make_node(self, *inputs):
     assert len(inputs) == self.nin
     inputs = list(map(as_variable, inputs))
     for input in inputs:
         if not isinstance(input.type, MyType):
             raise Exception("Error 1")
     outputs = [MyType(self.name + "_R")()]
     return Apply(self, inputs, outputs)
Example #3
 def make_node(self, *inputs):
     inputs = list(map(as_variable, inputs))
     for input in inputs:
         if not isinstance(input.type, MyType):
             print(input, input.type, type(input), type(input.type))
             raise Exception("Error 1")
     outputs = [MyVariable(sum([input.type.thingy for input in inputs]))]
     return Apply(self, inputs, outputs)
Example #4
 def make_node(self, x, index, toInsert):
     assert isinstance(x.type, TypedListType)
     assert x.ttype == toInsert.type
     if not isinstance(index, Variable):
         index = tt.constant(index, ndim=0, dtype="int64")
     else:
         assert index.dtype == "int64"
         assert isinstance(index, tt.TensorVariable) and index.ndim == 0
     return Apply(self, [x, index, toInsert], [x.type()])
Example #5
 def make_node(self, x, shape):
     if not isinstance(x, Variable):
         x = theano.tensor.as_tensor_variable(x)
     shape = theano.tensor.as_tensor_variable(shape)
     assert shape.ndim == 1
     assert shape.dtype in theano.tensor.integer_dtypes
     if isinstance(shape, theano.tensor.TensorConstant):
         assert shape.data.size == x.ndim
     return Apply(self, [x, shape], [x.type()])
Example #6
 def make_node(self, M):
     M = basic.as_tensor_variable(M)
     if M.ndim != 0:
         raise TypeError(
             f"{self.__class__.__name__} only works on scalar input")
     elif M.dtype not in theano.tensor.integer_dtypes:
         # dtype here is the Theano variable's dtype attribute
         raise TypeError(
             f"{self.__class__.__name__} only works on integer input")
     return Apply(self, [M], [basic.dvector()])
Example #7
    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim == 2, "The input of qr function should be a matrix."
        q = theano.tensor.matrix(dtype=x.dtype)
        if self.mode != "raw":
            r = theano.tensor.matrix(dtype=x.dtype)
        else:
            r = theano.tensor.vector(dtype=x.dtype)

        return Apply(self, [x], [q, r])
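For reference, a sketch of the shapes involved, assuming behavior analogous to np.linalg.qr:

    import numpy as np

    a = np.random.rand(4, 3)
    q, r = np.linalg.qr(a)   # default "reduced" mode
    print(q.shape, r.shape)  # (4, 3) (3, 3)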
Example #8
    def make_node(self, points, dim):
        assert points.ndim == 3
        points = as_tensor_variable(points.astype("float32"))

        dim = get_scalar_constant_value(dim)
        if "int" not in str(dim.dtype):
            raise ValueError("dim must be an integer.")

        dim = constant(dim, dtype="int32", name="dim")

        entries_type = TensorType("int32", broadcastable=(False, ))
        keys_type = TensorType("int16", broadcastable=(False, False))
        neib_ent_type = TensorType("int32",
                                   broadcastable=(False, False, False))
        bary_type = TensorType("float32",
                               broadcastable=points.type.broadcastable)

        valid_entries_type = TensorType("int32", broadcastable=(False, ))
        n_valid_type = TensorType("int32", broadcastable=(False, ))

        out_vars = [
            entries_type(name="hash_entries"),
            keys_type(name="hash_keys"),
            neib_ent_type(name="neighbor_entries"),
            bary_type(name="barycentric_coords"),
            valid_entries_type(name="valid_entries"),
            n_valid_type(name="n_valid")
        ]

        # Two sets of entries can't be meaningfully compared without also
        # having the corresponding keys. Since we can only define per-output
        # comparisons, we have to hope that any time someone compares two
        # tables for equality, they will check all outputs.
        out_vars[0].tag.values_eq_approx = lambda e1, e2: True
        out_vars[2].tag.values_eq_approx = lambda e1, e2: True

        # The number of valid entries between two equivalent tables may be
        # different since it includes duplicates.
        out_vars[5].tag.values_eq_approx = lambda n1, n2: True

        def keys_comparison(k1, k2):
            k1 = [tuple(k) for k in np.asarray(k1)]
            k2 = [tuple(k) for k in np.asarray(k2)]
            return set(k1) == set(k2)

        out_vars[1].tag.values_eq_approx = keys_comparison

        def valid_entries_comparison(e1, e2):
            e1 = np.asarray(e1)
            e2 = np.asarray(e2)
            return len(np.unique(e1)) == len(np.unique(e2))

        out_vars[4].tag.values_eq_approx = valid_entries_comparison

        return Apply(self, [points, dim], out_vars)
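The keys_comparison helper above treats two key tables as equal when they hold the same set of rows, regardless of order; the core of that check in isolation:

    import numpy as np

    k1 = np.array([[0, 1], [2, 3]])
    k2 = np.array([[2, 3], [0, 1]])  # same rows, different order
    print({tuple(k) for k in k1} == {tuple(k) for k in k2})  # True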
Example #9
 def make_node(self, x, y):
     x = theano.tensor.as_tensor_variable(x)
     y = theano.tensor.as_tensor_variable(y)
     outdim = x.ndim
     out_dtype = theano.scalar.upcast(x.dtype, y.dtype)
     output1 = theano.tensor.TensorType(
         dtype=out_dtype, broadcastable=[False] * outdim)()
     output2 = theano.tensor.TensorType(
         dtype=out_dtype, broadcastable=[False] * outdim)()
     return Apply(self, inputs=[x, y], outputs=[output1, output2])
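The output dtype above comes from upcasting the two input dtypes. NumPy's promotion rules illustrate the idea (shown with np.result_type so the snippet is self-contained; theano.scalar.upcast follows similar rules):

    import numpy as np

    print(np.result_type("float32", "float64"))  # float64
    print(np.result_type("int32", "float32"))    # float64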
Example #10
 def make_node(self, x, w, v, gw, gv):
     x, w, v, gw, gv = map(as_tensor_variable, (x, w, v, gw, gv))
     assert x.ndim == 2
     assert w.ndim == 1
     assert v.ndim == 2
     assert gw.ndim == 1
     assert gv.ndim == 2
     out_dtype = theano.scalar.upcast(x.dtype, w.dtype, v.dtype, gw.dtype,
                                      gv.dtype)
     out = theano.tensor.matrix(dtype=out_dtype)
     return Apply(self, [x, w, v, gw, gv], [out])
Example #11
 def make_node(self, _x):
     warnings.warn(
         "theano.tensor.nlinalg.AllocDiag is deprecated; "
         "please use theano.tensor.AllocDiag instead.",
         category=DeprecationWarning,
     )
     x = as_tensor_variable(_x)
     if x.type.ndim != 1:
         raise TypeError("AllocDiag only works on vectors", _x)
     return Apply(self, [x], [theano.tensor.matrix(dtype=x.type.dtype)])
Example #12
 def make_node(self, slc, stop=None, step=None):
     # make_node must accept the node's own inputs so that graph
     # optimizations can rebuild an equivalent Apply elsewhere.
     if isinstance(slc, slice):
         assert stop is None
         assert step is None
         inp = [slc.start, slc.stop, slc.step]
     else:
         inp = [slc, stop, step]
     return Apply(self, list(map(as_int_none_variable, inp)), [slicetype()])
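The slice branch simply unpacks the three components (each possibly None) so that every input becomes an ordinary graph input:

    s = slice(2, None, 3)
    print([s.start, s.stop, s.step])  # [2, None, 3]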
Example #13
 def make_node(self, x):
     x = as_tensor_variable(x)
     assert x.ndim == 2
     # Numpy's linalg.eigh may return either double or single
     # precision eigenvalues depending on the installed version of
     # LAPACK.  Rather than trying to reproduce the (rather
     # involved) logic, we just probe linalg.eigh with a trivial
     # input.
     w_dtype = self._numop([[np.dtype(x.dtype).type()]])[0].dtype.name
     w = theano.tensor.vector(dtype=w_dtype)
     v = theano.tensor.matrix(dtype=x.dtype)
     return Apply(self, [x], [w, v])
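The same dtype probe, outside of any Op (a minimal sketch; _numop is assumed to be np.linalg.eigh here):

    import numpy as np

    # Ask the installed LAPACK what it returns for a trivial float32 input.
    w_dtype = np.linalg.eigh([[np.dtype("float32").type()]])[0].dtype.name
    print(w_dtype)  # "float32" or "float64", depending on LAPACK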
Example #14
    def make_node(self, a):
        assert isinstance(a, (tuple, list))
        a2 = []
        for elem in a:
            if not isinstance(elem, Variable):
                elem = tt.as_tensor_variable(elem)
            a2.append(elem)
        if not all(a2[0].type == elem.type for elem in a2):
            raise TypeError(
                "MakeList need all input variable to be of the same type.")
        tl = TypedListType(a2[0].type)()

        return Apply(self, a2, [tl])
Example #15
 def make_node(self, pvals, unis, n=1):
     pvals = tt.as_tensor_variable(pvals)
     unis = tt.as_tensor_variable(unis)
     if pvals.ndim != 2:
         raise NotImplementedError("pvals ndim should be 2", pvals.ndim)
     if unis.ndim != 1:
         raise NotImplementedError("unis ndim should be 1", unis.ndim)
     if self.odtype == "auto":
         odtype = pvals.dtype
     else:
         odtype = self.odtype
     out = tt.tensor(dtype=odtype, broadcastable=pvals.type.broadcastable)
     return Apply(self, [pvals, unis, as_scalar(n)], [out])
Example #16
 def make_node(self, a, val):
     a = basic.as_tensor_variable(a)
     val = basic.as_tensor_variable(val)
     if a.ndim < 2:
         raise TypeError("%s: first parameter must have at least"
                         " two dimensions" % self.__class__.__name__)
     elif val.ndim != 0:
         raise TypeError(
             f"{self.__class__.__name__}: second parameter must be a scalar"
         )
     val = basic.cast(val, dtype=upcast(a.dtype, val.dtype))
     if val.dtype != a.dtype:
         raise TypeError("%s: type of second parameter must be the same as"
                         " the first's" % self.__class__.__name__)
     return Apply(self, [a, val], [a.type()])
Example #17
    def make_node(self, a, s=None):
        a = tt.as_tensor_variable(a)
        if a.ndim < 2:
            raise TypeError(
                f"{self.__class__.__name__}: input must have dimension >= 2,"
                " with first dimension batches")

        if s is None:
            s = a.shape[1:]
            s = tt.as_tensor_variable(s)
        else:
            s = tt.as_tensor_variable(s)
            if s.dtype not in tt.integer_dtypes:
                raise TypeError("%s: length of the transformed axis must be"
                                " of type integer" % self.__class__.__name__)
        return Apply(self, [a, s], [self.output_type(a)()])
Example #18
    def make_node(self, ten4, neib_shape, neib_step=None):
        """
        Parameters
        ----------
        ten4 : a list of lists of images
            ten4 is of shape (list 1 dim, list 2 dim, row, col).
        neib_shape
            (r,c) where r is the height of the neighborhood in rows and c is
            the width of the neighborhood in columns.
        neib_step
            (dr,dc) where dr is the number of rows to skip between patches
            and dc is the number of columns to skip. When None, this is the
            same as neib_shape (patches are disjoint).

        Returns
        -------
        matrix
            A 2D matrix, written using the following pattern::

                idx = 0
                for i in range(list 1 dim)
                    for j in range(list 2 dim)
                        for k in <image column coordinates>
                            for l in <image row coordinates>
                                output[idx,:]
                                     = flattened version of ten4[i,j,l:l+r,k:k+c]
                                idx += 1

            .. note:: The op isn't necessarily implemented internally with these
                for loops, they're just the easiest way to describe the output
                pattern.

        """
        ten4 = tt.as_tensor_variable(ten4)
        neib_shape = tt.as_tensor_variable(neib_shape)
        if neib_step is None:
            neib_step = neib_shape
        else:
            neib_step = tt.as_tensor_variable(neib_step)

        assert ten4.ndim == 4
        assert neib_shape.ndim == 1
        assert neib_step.ndim == 1

        return Apply(self, [ten4, neib_shape, neib_step],
                     [tt.matrix(dtype=ten4.type.dtype)])
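A plain NumPy rendering of the docstring's output pattern (neibs_ref is a hypothetical reference helper, not the op's actual implementation):

    import numpy as np

    def neibs_ref(ten4, neib_shape, neib_step=None):
        r, c = neib_shape
        dr, dc = neib_step if neib_step is not None else neib_shape
        rows = []
        for i in range(ten4.shape[0]):
            for j in range(ten4.shape[1]):
                for k in range(0, ten4.shape[3] - c + 1, dc):      # column coords
                    for l in range(0, ten4.shape[2] - r + 1, dr):  # row coords
                        rows.append(ten4[i, j, l:l + r, k:k + c].ravel())
        return np.array(rows)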
Example #19
    def make_node(self, a, s=None):
        a = tt.as_tensor_variable(a)
        if a.ndim < 3:
            raise TypeError(
                f"{self.__class__.__name__}: input must have dimension >= 3,"
                " with first dimension batches and last real/imag parts")

        if s is None:
            s = a.shape[1:-1]
            s = tt.set_subtensor(s[-1], (s[-1] - 1) * 2)
            s = tt.as_tensor_variable(s)
        else:
            s = tt.as_tensor_variable(s)
            if s.dtype not in tt.integer_dtypes:
                raise TypeError("%s: length of the transformed axis must be"
                                " of type integer" % self.__class__.__name__)
        return Apply(self, [a, s], [self.output_type(a)()])
Example #20
    def make_node(self, x, dy, scale, x_mean, x_invstd, epsilon=1e-4):
        x = as_tensor_variable(x)
        dy = as_tensor_variable(dy)
        scale = as_tensor_variable(scale)
        x_mean = as_tensor_variable(x_mean)
        x_invstd = as_tensor_variable(x_invstd)
        epsilon = as_tensor_variable(epsilon)

        # Upcast the non-scalar inputs to a common dtype, but keep
        # the scalar epsilon's dtype as is.
        x, dy, scale, x_mean, x_invstd = as_common_dtype(x, dy, scale, x_mean, x_invstd)
        assert x.ndim == dy.ndim == scale.ndim == x_mean.ndim == x_invstd.ndim
        return Apply(
            self,
            [x, dy, scale, x_mean, x_invstd, epsilon],
            [x.type(), scale.type(), scale.type()],
        )
Example #21
    def make_node(self, o, x, y, xIdx, yIdx, alpha=None):
        """
        Compute the dot product of the specified pieces of vectors
        and matrices.

        The parameters below are documented by their expected shapes
        relative to each other.

        Parameters
        ----------
        o : xBlocks, yBlocks, xSize, ySize
        x : batch, xWin, xSize
        y : batch, yWin, ySize
        xIdx : batch, iWin
            indexes of the x blocks
        yIdx : batch, oWin
            indexes of the y blocks

        Returns
        -------
        (xBlocks, yBlocks, xSize, ySize)
            outer(x[i], y[j]) + o[i, j]

        Notes
        -----
        - `batch` is the number of examples in a minibatch (batch size).
        - `xBlocks` is the total number of blocks in x.
        - `xSize` is the size of each of these x blocks.
        - `xWin` is the number of blocks that will be used as x. Which blocks
          will be used is specified in `xIdx`.
        - `yBlocks` is the number of possible y blocks.
        - `ySize` is the size of each of these y blocks.
        - `yWin` is the number of y blocks that will actually be computed.
          Which blocks will be computed is specified in `yIdx`.

        """
        one = theano.tensor.constant(np.asarray(1.0, dtype="float32"))
        o = theano.tensor.as_tensor_variable(o)
        x = theano.tensor.as_tensor_variable(x)
        y = theano.tensor.as_tensor_variable(y)

        if alpha is None:
            alpha = one

        return Apply(self, [o, x, y, xIdx, yIdx, alpha], [o.type()])
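A NumPy reference for the docstring's formula, outer(x[i], y[j]) + o[i, j], accumulated over the indexed blocks (sparse_block_outer_ref is hypothetical, for illustration only):

    import numpy as np

    def sparse_block_outer_ref(o, x, y, xIdx, yIdx, alpha=1.0):
        out = o.copy()
        for b in range(x.shape[0]):               # batch
            for i, xb in enumerate(xIdx[b]):      # xWin blocks of x
                for j, yb in enumerate(yIdx[b]):  # yWin blocks of y
                    out[xb, yb] += alpha * np.outer(x[b, i], y[b, j])
        return out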
Example #22
    def make_node(self, img, kern):
        img = as_tensor_variable(img)
        kern = as_tensor_variable(kern)
        img, kern = self.as_common_dtype(img, kern)
        if img.type.ndim != 5:
            raise TypeError("img must be 5D tensor")
        if kern.type.ndim != 5:
            raise TypeError("kern must be 5D tensor")

        broadcastable = [
            img.type.broadcastable[0],
            kern.type.broadcastable[0],
            False,
            False,
            False,
        ]
        dtype = img.type.dtype
        return Apply(self, [img, kern], [TensorType(dtype, broadcastable)()])
Example #23
 def make_node(self, X):
     context_name = infer_context_name(X)
     # We keep the original broadcastable flags for dimensions on which
     # we do not perform the max / argmax.
     all_axes = set(self.axis)
     broadcastable = [
         b for i, b in enumerate(X.type.broadcastable) if i not in all_axes
     ]
     inputs = [as_gpuarray_variable(X, context_name)]
     outputs = [
         GpuArrayType(X.type.dtype,
                      broadcastable,
                      context_name=context_name)(),
         GpuArrayType(self.argmax_dtype,
                      broadcastable,
                      context_name=context_name)(),
     ]
     return Apply(self, inputs, outputs)
Example #24
 def make_node(
     self,
     x,
     scale,
     bias,
     epsilon=1e-4,
     running_average_factor=0.1,
     running_mean=None,
     running_var=None,
 ):
     x = as_tensor_variable(x)
     scale = as_tensor_variable(scale)
     bias = as_tensor_variable(bias)
     epsilon = as_tensor_variable(epsilon)
     running_average_factor = as_tensor_variable(running_average_factor)
     if running_mean is not None:
         running_mean = as_tensor_variable(running_mean)
     if running_var is not None:
         running_var = as_tensor_variable(running_var)
     assert x.ndim == scale.ndim == bias.ndim
     assert (running_mean is None and running_var is None) or (
         running_mean is not None and running_var is not None
     )
     assert running_mean is None or running_mean.ndim == x.ndim
     assert running_var is None or running_var.ndim == x.ndim
      # Upcast the non-scalar inputs to a common dtype, but keep the
      # scalar dtypes (epsilon and running_average_factor) as is.
      if running_mean is not None:
         x, scale, bias, running_mean, running_var = as_common_dtype(
             x, scale, bias, running_mean, running_var
         )
     else:
         x, scale, bias = as_common_dtype(x, scale, bias)
     inputs = [x, scale, bias, epsilon, running_average_factor]
     output_types = [x.type(), scale.type(), scale.type()]
     if running_mean is not None and running_var is not None:
         inputs.append(running_mean)
         inputs.append(running_var)
         output_types.append(scale.type())
         output_types.append(scale.type())
     return Apply(self, inputs, output_types)
Example #25
    def make_node(self, img, topgrad, shape=None):
        img = as_tensor_variable(img)
        topgrad = as_tensor_variable(topgrad)
        img, topgrad = self.as_common_dtype(img, topgrad)
        if img.type.ndim != 4:
            raise TypeError("img must be 4D tensor")
        if topgrad.type.ndim != 4:
            raise TypeError("topgrad must be 4D tensor")
        if shape is None:
            if self.subsample != (1, 1) or self.border_mode == "half":
                raise ValueError(
                    "shape must be given if subsample != (1, 1)"
                    ' or border_mode == "half"'
                )
            height_width = []
        else:
            height_width = [
                as_tensor_variable(shape[0]).astype("int64"),
                as_tensor_variable(shape[1]).astype("int64"),
            ]

        if self.unshared:
            broadcastable = [
                topgrad.type.broadcastable[1],
                False,
                False,
                img.type.broadcastable[1],
                False,
                False,
            ]
        else:
            broadcastable = [
                topgrad.type.broadcastable[1],
                img.type.broadcastable[1],
                False,
                False,
            ]
        dtype = img.type.dtype
        return Apply(
            self, [img, topgrad] + height_width, [TensorType(dtype, broadcastable)()]
        )
Example #26
    def make_node(self, indices, dims):
        indices = basic.as_tensor_variable(indices)
        dims = basic.as_tensor_variable(dims)

        if indices.dtype not in basic.int_dtypes:
            raise TypeError(
                f"'{indices.dtype}' object cannot be interpreted as an index")
        if dims.dtype not in basic.int_dtypes:
            raise TypeError(
                f"'{dims.dtype}' object cannot be interpreted as an index")
        if dims.ndim != 1:
            raise TypeError("dims must be a 1D array")

        return Apply(
            self,
            [indices, dims],
            [
                basic.TensorType(dtype="int64",
                                 broadcastable=(False, ) * indices.ndim)()
                for i in range(basic.get_vector_length(dims))
            ],
        )
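The result matches np.unravel_index: one int64 vector per entry of dims (assuming the op implements the NumPy semantics its validation suggests):

    import numpy as np

    print(np.unravel_index([22, 41, 37], (7, 6)))
    # (array([3, 6, 6]), array([4, 5, 1]))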
Example #27
    def make_node(self, kern, topgrad, shape=None):
        kern = as_tensor_variable(kern)
        topgrad = as_tensor_variable(topgrad)
        kern, topgrad = self.as_common_dtype(kern, topgrad)
        if kern.type.ndim != 5:
            raise TypeError("kern must be 5D tensor")
        if topgrad.type.ndim != 5:
            raise TypeError("topgrad must be 5D tensor")
        if shape is None:
            if self.subsample != (1, 1, 1):
                raise ValueError(
                    "shape must be given if subsample != (1, 1, 1)")
            height_width_depth = []
        else:
            height_width_depth = [
                as_tensor_variable(shape[0]).astype("int64"),
                as_tensor_variable(shape[1]).astype("int64"),
                as_tensor_variable(shape[2]).astype("int64"),
            ]

        if self.num_groups > 1:
            broadcastable = [
                topgrad.type.broadcastable[0], False, False, False, False
            ]
        else:
            broadcastable = [
                topgrad.type.broadcastable[0],
                kern.type.broadcastable[1],
                False,
                False,
                False,
            ]
        dtype = kern.type.dtype
        return Apply(
            self,
            [kern, topgrad] + height_width_depth,
            [TensorType(dtype, broadcastable)()],
        )
Example #28
 def make_node(self, a, n, axis):
     a = tensor.as_tensor_variable(a)
     if a.ndim < 1:
         raise TypeError(
             f"{self.__class__.__name__}: input must be an array, not a scalar"
         )
     if axis is None:
         axis = a.ndim - 1
         axis = tensor.as_tensor_variable(axis)
     else:
         axis = tensor.as_tensor_variable(axis)
         if axis.dtype not in tensor.integer_dtypes:
             raise TypeError("%s: index of the transformed axis must be"
                             " of type integer" % self.__class__.__name__)
         elif axis.ndim != 0 or (isinstance(axis, tensor.TensorConstant) and
                                 (axis.data < 0 or axis.data > a.ndim - 1)):
              raise TypeError(
                  f"{self.__class__.__name__}: index of the transformed axis"
                  " must be a scalar no smaller than 0 and smaller than the"
                  " number of dimensions of the array")
     if n is None:
         n = a.shape[axis]
         n = tensor.as_tensor_variable(n)
     else:
         n = tensor.as_tensor_variable(n)
         if n.dtype not in tensor.integer_dtypes:
             raise TypeError("%s: length of the transformed axis must be"
                             " of type integer" % self.__class__.__name__)
         elif n.ndim != 0 or (isinstance(n, tensor.TensorConstant)
                              and n.data < 1):
             raise TypeError("%s: length of the transformed axis must be a"
                             " strictly positive scalar" %
                             self.__class__.__name__)
     return Apply(
         self,
         [a, n, axis],
         [tensor.TensorType("complex128", a.type.broadcastable)()],
     )
Example #29
    def make_node(self, *inp):
        multi_index = [basic.as_tensor_variable(i) for i in inp[:-1]]
        dims = basic.as_tensor_variable(inp[-1])

        for i in multi_index:
            if i.dtype not in basic.int_dtypes:
                raise TypeError(
                    f"'{i.dtype}' object cannot be interpreted as an index")
        if dims.dtype not in basic.int_dtypes:
            raise TypeError(
                f"'{dims.dtype}' object cannot be interpreted as an index")
        if dims.ndim != 1:
            raise TypeError("dims must be a 1D array")

        return Apply(
            self,
            multi_index + [dims],
            [
                basic.TensorType(
                    dtype="int64",
                    broadcastable=(False, ) * multi_index[0].ndim)()
            ],
        )
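For comparison, the single int64 output matches np.ravel_multi_index, the inverse of the previous example:

    import numpy as np

    print(np.ravel_multi_index((np.array([3, 6, 6]), np.array([4, 5, 1])), (7, 6)))
    # [22 41 37]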
Example #30
    def make_node(self, rv, val):
        """Make an `Observed` random variable.

        Parameters
        ----------
        rv: RandomVariable
            The distribution from which `val` is assumed to be a sample value.
        val: Variable
            The observed value.
        """
        val = as_tensor_variable(val)

        if rv is not None:
            if not hasattr(rv, "type") or rv.type.convert_variable(val) is None:
                raise TypeError(
                    "`rv` and `val` do not have compatible types:"
                    f" rv={rv}, val={val}")
        else:
            rv = NoneConst.clone()

        inputs = [rv, val]

        return Apply(self, inputs, [val.type()])