Example #1
    def make_node(
        self, x, scale, bias, estimated_mean, estimated_variance, epsilon=1e-4
    ):
        x = as_tensor_variable(x)
        scale = as_tensor_variable(scale)
        bias = as_tensor_variable(bias)
        estimated_mean = as_tensor_variable(estimated_mean)
        estimated_variance = as_tensor_variable(estimated_variance)
        epsilon = as_tensor_variable(epsilon)
        # Upcast the non-scalar inputs to a common dtype;
        # keep the scalar epsilon's dtype as-is.
        x, scale, bias, estimated_mean, estimated_variance = as_common_dtype(
            x, scale, bias, estimated_mean, estimated_variance
        )
        assert (
            x.ndim
            == scale.ndim
            == bias.ndim
            == estimated_mean.ndim
            == estimated_variance.ndim
        )

        return Apply(
            self,
            [x, scale, bias, estimated_mean, estimated_variance, epsilon],
            [x.type()],
        )
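Every snippet on this page implements the same contract: make_node converts its raw arguments to Variables, validates them, and returns an Apply node holding freshly allocated output Variables. A minimal sketch of that contract, assuming Theano is installed (DoubleOp is a hypothetical Op written for illustration, not a library class):

import theano.tensor as tt
from theano.gof import Apply, Op

class DoubleOp(Op):
    """Hypothetical Op that doubles its input, for illustration only."""

    __props__ = ()

    def make_node(self, x):
        # Accept anything convertible to a tensor, not just Variables.
        x = tt.as_tensor_variable(x)
        # Allocate a fresh output Variable of the same type as the input.
        return Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        output_storage[0][0] = x * 2

y = DoubleOp()(tt.vector("v"))  # Op.__call__ delegates to make_node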
Example #2
 def make_node(self, x):
     if self.axis.keys() and (x.ndim <= max(self.axis.keys())):
         raise ValueError("Trying to rebroadcast non-existent dimension")
     t = x.type.clone(broadcastable=[
         self.axis.get(i, b) for i, b in enumerate(x.type.broadcastable)
     ])
     return Apply(self, [x], [t()])
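For reference, a small sketch (hypothetical 2-D input) of what the type.clone(broadcastable=...) call above produces:

import theano.tensor as tt

x = tt.matrix("x")                             # broadcastable == (False, False)
t = x.type.clone(broadcastable=(True, False))  # mark dim 0 as broadcastable
print(t.broadcastable)                         # (True, False)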
Example #3
    def make_node(self, img, topgrad, shape=None):
        img = as_tensor_variable(img)
        topgrad = as_tensor_variable(topgrad)
        img, topgrad = self.as_common_dtype(img, topgrad)
        if img.type.ndim != 5:
            raise TypeError("img must be 5D tensor")
        if topgrad.type.ndim != 5:
            raise TypeError("topgrad must be 5D tensor")
        if shape is None:
            if self.subsample != (1, 1, 1) or self.border_mode == "half":
                raise ValueError(
                    "shape must be given if subsample != (1, 1, 1)"
                    ' or border_mode == "half"')
            height_width_depth = []
        else:
            height_width_depth = [
                as_tensor_variable(shape[0]).astype("int64"),
                as_tensor_variable(shape[1]).astype("int64"),
                as_tensor_variable(shape[2]).astype("int64"),
            ]

        broadcastable = [
            topgrad.type.broadcastable[1],
            img.type.broadcastable[1],
            False,
            False,
            False,
        ]
        dtype = img.type.dtype
        return Apply(
            self,
            [img, topgrad] + height_width_depth,
            [TensorType(dtype, broadcastable)()],
        )
Example #4
    def make_node(self, activations, labels, input_lengths):
        t_activations = tt.as_tensor_variable(activations)
        # Ensure activations array is C-contiguous
        t_activations = cpu_contiguous(t_activations)

        t_labels = tt.as_tensor_variable(labels)
        t_input_lengths = tt.as_tensor_variable(input_lengths)

        if t_activations.type.dtype != "float32":
            raise TypeError("activations must use the float32 type!")

        if t_activations.ndim != 3:
            raise ValueError("activations must have 3 dimensions.")

        if t_labels.type.dtype != "int32":
            raise TypeError("labels must use the int32 type!")

        if t_labels.ndim != 2:
            raise ValueError("labels must have 2 dimensions.")

        if t_input_lengths.type.dtype != "int32":
            raise TypeError("input_lengths must use the int32 type!")

        if t_input_lengths.ndim != 1:
            raise ValueError("input_lengths must have 1 dimension.")

        costs = tt.fvector(name="ctc_cost")
        outputs = [costs]
        if self.compute_grad:
            gradients = tt.ftensor3(name="ctc_grad")
            outputs += [gradients]

        return Apply(self,
                     inputs=[t_activations, t_labels, t_input_lengths],
                     outputs=outputs)
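For context, symbolic inputs that would pass the checks above can be built with Theano's dtype-prefixed constructors (names and sizes here are hypothetical):

import theano.tensor as tt

activations = tt.ftensor3("activations")     # float32, 3 dimensions
labels = tt.imatrix("labels")                # int32, 2 dimensions
input_lengths = tt.ivector("input_lengths")  # int32, 1 dimension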
Example #5
    def make_node(self, ref, values, ref_dim, val_dim, *_hash):
        assert (values.ndim == 3)
        ref = as_tensor_variable(ref.astype("float32"))
        values = as_tensor_variable(values.astype("float32"))

        ref_dim = get_scalar_constant_value(ref_dim)
        val_dim = get_scalar_constant_value(val_dim)
        if "int" not in str(ref_dim.dtype) or "int" not in str(val_dim.dtype):
            raise ValueError("ref_dim and val_dim must be integers.")

        scaled_ref = ref * float(np.sqrt(2 / 3) * (ref_dim + 1))

        if len(_hash) == 0:
            hash_struct = PermutohedralHashTable()(scaled_ref, ref_dim)
        else:
            assert (len(_hash) == 6)
            hash_struct = [as_tensor_variable(v) for v in _hash]

        # Should we not do this?
        bcast = [False for _ in range(3)]
        if val_dim == 1:
            bcast[0] = True

        out_type = values.type.clone(broadcastable=bcast)

        ref_dim = constant(ref_dim, dtype="int32", name="ref_dim")
        val_dim = constant(val_dim, dtype="int32", name="val_dim")

        inputs = [ref, values, ref_dim, val_dim] + hash_struct
        return Apply(self, inputs, [out_type()])
Example #6
 def make_node(self, x, index):
     assert isinstance(x.type, TypedListType)
     if not isinstance(index, Variable):
         if isinstance(index, slice):
             index = Constant(SliceType(), index)
             return Apply(self, [x, index], [x.type()])
         else:
             index = tt.constant(index, ndim=0, dtype="int64")
             return Apply(self, [x, index], [x.ttype()])
     if isinstance(index.type, SliceType):
         return Apply(self, [x, index], [x.type()])
     elif isinstance(index, tt.TensorVariable) and index.ndim == 0:
         assert index.dtype == "int64"
         return Apply(self, [x, index], [x.ttype()])
     else:
         raise TypeError("Expected scalar or slice as index.")
Example #7
    def make_node(self, rv, val):
        """Make an `Observed` random variable.

        Parameters
        ----------
        rv: RandomVariable
            The distribution from which `val` is assumed to be a sample value.
        val: Variable
            The observed value.
        """
        val = as_tensor_variable(val)

        if rv is not None:
            if not hasattr(rv, "type") or rv.type.convert_variable(val) is None:
                raise TypeError(
                    (
                        "`rv` and `val` do not have compatible types:"
                        f" rv={rv}, val={val}"
                    )
                )
        else:
            rv = NoneConst.clone()

        inputs = [rv, val]

        return Apply(self, inputs, [val.type()])
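A small sketch of the compatibility test used above: Type.convert_variable returns a converted Variable when the argument fits the type and None otherwise (the variables here are illustrative stand-ins):

import theano.tensor as tt

rv = tt.vector("rv")    # stand-in for a RandomVariable output
val = tt.vector("val")
bad = tt.matrix("bad")

assert rv.type.convert_variable(val) is not None  # same ndim/dtype: compatible
assert rv.type.convert_variable(bad) is None      # ndim mismatch: rejected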
Example #8
    def make_node(self, kern, topgrad, shape=None):
        kern = as_tensor_variable(kern)
        topgrad = as_tensor_variable(topgrad)
        kern, topgrad = self.as_common_dtype(kern, topgrad)
        if self.unshared is True:
            if kern.type.ndim != 6:
                raise TypeError("kern must be 6D tensor")
        else:
            if kern.type.ndim != 4:
                raise TypeError("kern must be 4D tensor")
        if topgrad.type.ndim != 4:
            raise TypeError("topgrad must be 4D tensor")
        if shape is None:
            if self.subsample != (1, 1):
                raise ValueError("shape must be given if subsample != (1, 1)")
            height_width = []
        else:
            height_width = [
                as_tensor_variable(shape[0]).astype("int64"),
                as_tensor_variable(shape[1]).astype("int64"),
            ]

        if self.num_groups > 1:
            broadcastable = [topgrad.type.broadcastable[0], False, False, False]
        else:
            broadcastable = [
                topgrad.type.broadcastable[0],
                kern.type.broadcastable[-3],
                False,
                False,
            ]
        dtype = kern.type.dtype
        return Apply(
            self, [kern, topgrad] + height_width, [TensorType(dtype, broadcastable)()]
        )
Example #9
 def make_node(self, *inputs):
     inputs = list(map(as_variable, inputs))
     for input in inputs:
         if not isinstance(input.type, MyType):
             raise Exception("Error 1")
     outputs = [MyType(sum([input.type.thingy for input in inputs]))()]
     return Apply(self, inputs, outputs)
Example #10
 def make_node(self, *inputs):
     inputs = list(map(as_variable, inputs))
     for input in inputs:
         if not isinstance(input.type, MyType):
             print(input, input.type, type(input), type(input.type))
             raise Exception("Error 1")
     outputs = [MyVariable(sum([input.type.thingy for input in inputs]))]
     return Apply(self, inputs, outputs)
Example #11
 def make_node(self, x, y):
     x = theano.tensor.as_tensor_variable(x)
     y = theano.tensor.as_tensor_variable(y)
     outdim = x.ndim
     output = theano.tensor.TensorType(dtype=theano.scalar.upcast(
         x.dtype, y.dtype),
                                       broadcastable=[False] * outdim)()
     return Apply(self, inputs=[x, y], outputs=[output])
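The theano.scalar.upcast call above follows NumPy's type-promotion rules; a few illustrative checks:

import theano

assert theano.scalar.upcast("int32", "float32") == "float64"
assert theano.scalar.upcast("float32", "float64") == "float64"
assert theano.scalar.upcast("int8", "int16") == "int16"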
Example #12
 def make_node(self, *inputs):
     assert len(inputs) == self.nin
     inputs = list(map(as_variable, inputs))
     for input in inputs:
         if not isinstance(input.type, MyType):
             raise Exception("Error 1")
     outputs = [MyType(self.name + "_R")()]
     return Apply(self, inputs, outputs)
Example #13
 def make_node(self, *inputs):
     assert len(inputs) == self.nin
     inputs = list(map(as_variable, inputs))
     for input in inputs:
         if input.type is not tdouble:
             raise Exception("Error 1")
     outputs = [double(self.name + "_R")]
     return Apply(self, inputs, outputs)
Example #14
 def make_node(self, x, shape):
     if not isinstance(x, Variable):
         x = theano.tensor.as_tensor_variable(x)
     shape = theano.tensor.as_tensor_variable(shape)
     assert shape.ndim == 1
     assert shape.dtype in theano.tensor.integer_dtypes
     if isinstance(shape, theano.tensor.TensorConstant):
         assert shape.data.size == x.ndim
     return Apply(self, [x, shape], [x.type()])
Example #15
 def make_node(self, x, index, toInsert):
     assert isinstance(x.type, TypedListType)
     assert x.ttype == toInsert.type
     if not isinstance(index, Variable):
         index = tt.constant(index, ndim=0, dtype="int64")
     else:
         assert index.dtype == "int64"
         assert isinstance(index, tt.TensorVariable) and index.ndim == 0
     return Apply(self, [x, index, toInsert], [x.type()])
Example #16
    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim == 2, "The input of qr function should be a matrix."
        q = theano.tensor.matrix(dtype=x.dtype)
        if self.mode != "raw":
            r = theano.tensor.matrix(dtype=x.dtype)
        else:
            r = theano.tensor.vector(dtype=x.dtype)

        return Apply(self, [x], [q, r])
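The mode-dependent output rank mirrors numpy.linalg.qr, where every mode except "raw" yields a 2-D r while "raw" returns the 1-D tau vector of Householder reflector coefficients. An illustrative probe:

import numpy as np

a = np.eye(3)
q, r = np.linalg.qr(a, mode="reduced")  # r is 2-D
h, tau = np.linalg.qr(a, mode="raw")    # tau is 1-D
print(r.ndim, tau.ndim)                 # 2 1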
Example #17
 def make_node(self, M):
     M = basic.as_tensor_variable(M)
     if M.ndim != 0:
         raise TypeError(
             f"{self.__class__.__name__} only works on scalar input")
     elif M.dtype not in theano.tensor.integer_dtypes:
         # dtype is a theano attribute here
         raise TypeError(
             f"{self.__class__.__name__} only works on integer input")
     return Apply(self, [M], [basic.dvector()])
Example #18
    def make_node(self, points, dim):
        assert (points.ndim == 3)
        points = as_tensor_variable(points.astype("float32"))

        dim = get_scalar_constant_value(dim)
        if "int" not in str(dim.dtype):
            raise ValueError("dim must be an integer.")

        dim = constant(dim, dtype="int32", name="dim")

        entries_type = TensorType("int32", broadcastable=(False, ))
        keys_type = TensorType("int16", broadcastable=(False, False))
        neib_ent_type = TensorType("int32",
                                   broadcastable=(False, False, False))
        bary_type = TensorType("float32",
                               broadcastable=points.type.broadcastable)

        valid_entries_type = TensorType("int32", broadcastable=(False, ))
        n_valid_type = TensorType("int32", broadcastable=(False, ))

        out_vars = [
            entries_type(name="hash_entries"),
            keys_type(name="hash_keys"),
            neib_ent_type(name="neighbor_entries"),
            bary_type(name="barycentric_coords"),
            valid_entries_type(name="valid_entries"),
            n_valid_type(name="n_valid")
        ]

        # Two sets of entries can't be meaningfully compared without also
        # having the corresponding keys. Since we can only define per-output
        # comparisons, we have to hope that any time someone compares two
        # tables for equality, they will check all outputs.
        out_vars[0].tag.values_eq_approx = lambda e1, e2: True
        out_vars[2].tag.values_eq_approx = lambda e1, e2: True

        # The number of valid entries between two equivalent tables may be
        # different since it includes duplicates.
        out_vars[5].tag.values_eq_approx = lambda n1, n2: True

        def keys_comparison(k1, k2):
            k1 = [tuple(k) for k in np.asarray(k1)]
            k2 = [tuple(k) for k in np.asarray(k2)]
            return set(k1) == set(k2)

        out_vars[1].tag.values_eq_approx = keys_comparison

        def valid_entries_comparison(e1, e2):
            e1 = np.asarray(e1)
            e2 = np.asarray(e2)
            return len(np.unique(e1)) == len(np.unique(e2))

        out_vars[4].tag.values_eq_approx = valid_entries_comparison

        return Apply(self, [points, dim], out_vars)
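The keys comparison above treats a key table as an unordered set of rows; an illustrative check of that behavior:

import numpy as np

def keys_comparison(k1, k2):
    k1 = [tuple(k) for k in np.asarray(k1)]
    k2 = [tuple(k) for k in np.asarray(k2)]
    return set(k1) == set(k2)

assert keys_comparison([[1, 2], [3, 4]], [[3, 4], [1, 2]])  # same rows, reordered
assert not keys_comparison([[1, 2]], [[5, 6]])              # different rows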
Example #19
 def make_node(self, x, w, v, gw, gv):
     x, w, v, gw, gv = map(as_tensor_variable, (x, w, v, gw, gv))
     assert x.ndim == 2
     assert w.ndim == 1
     assert v.ndim == 2
     assert gw.ndim == 1
     assert gv.ndim == 2
     out_dtype = theano.scalar.upcast(x.dtype, w.dtype, v.dtype, gw.dtype,
                                      gv.dtype)
     out = theano.tensor.matrix(dtype=out_dtype)
     return Apply(self, [x, w, v, gw, gv], [out])
Example #20
 def make_node(self, _x):
     warnings.warn(
         "DeprecationWarning: theano.tensor.nlinalg.AllocDiag "
         "is deprecated, please use theano.tensor.AllocDiag "
         "instead.",
         category=DeprecationWarning,
     )
     x = as_tensor_variable(_x)
     if x.type.ndim != 1:
         raise TypeError("AllocDiag only works on vectors", _x)
     return Apply(self, [x], [theano.tensor.matrix(dtype=x.type.dtype)])
Example #21
 def make_node(self, slc, stop=None, step=None):
     # make_node must accept the node's own inputs so that an
     # optimization can rebuild an equivalent Apply elsewhere in
     # the graph.
     if isinstance(slc, slice):
         assert stop is None
         assert step is None
         inp = [slc.start, slc.stop, slc.step]
     else:
         inp = [slc, stop, step]
     return Apply(self, list(map(as_int_none_variable, inp)), [slicetype()])
Example #22
 def make_node(self, x):
     x = as_tensor_variable(x)
     assert x.ndim == 2
     # NumPy's linalg.eigh may return either double- or single-
     # precision eigenvalues depending on the installed version of
     # LAPACK.  Rather than trying to reproduce the (rather
     # involved) logic, we just probe linalg.eigh with a trivial
     # input.
     w_dtype = self._numop([[np.dtype(x.dtype).type()]])[0].dtype.name
     w = theano.tensor.vector(dtype=w_dtype)
     v = theano.tensor.matrix(dtype=x.dtype)
     return Apply(self, [x], [w, v])
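The probe in the comment, spelled out as standalone NumPy (the printed dtype depends on the local LAPACK build):

import numpy as np

w, v = np.linalg.eigh([[np.dtype("float32").type()]])
print(w.dtype.name)  # 'float32' on most builds, possibly 'float64' on others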
Example #23
    def make_node(self, a):
        assert isinstance(a, (tuple, list))
        a2 = []
        for elem in a:
            if not isinstance(elem, Variable):
                elem = tt.as_tensor_variable(elem)
            a2.append(elem)
        if not all(a2[0].type == elem.type for elem in a2):
            raise TypeError(
                "MakeList needs all input variables to be of the same type.")
        tl = TypedListType(a2[0].type)()

        return Apply(self, a2, [tl])
Example #24
 def make_node(self, pvals, unis, n=1):
     pvals = tt.as_tensor_variable(pvals)
     unis = tt.as_tensor_variable(unis)
     if pvals.ndim != 2:
         raise NotImplementedError("pvals ndim should be 2", pvals.ndim)
     if unis.ndim != 1:
         raise NotImplementedError("unis ndim should be 1", unis.ndim)
     if self.odtype == "auto":
         odtype = pvals.dtype
     else:
         odtype = self.odtype
     out = tt.tensor(dtype=odtype, broadcastable=pvals.type.broadcastable)
     return Apply(self, [pvals, unis, as_scalar(n)], [out])
Example #25
 def make_node(self, a, val):
     a = basic.as_tensor_variable(a)
     val = basic.as_tensor_variable(val)
     if a.ndim < 2:
         raise TypeError("%s: first parameter must have at least"
                         " two dimensions" % self.__class__.__name__)
     elif val.ndim != 0:
         raise TypeError(
             f"{self.__class__.__name__}: second parameter must be a scalar"
         )
     val = basic.cast(val, dtype=upcast(a.dtype, val.dtype))
     if val.dtype != a.dtype:
         raise TypeError("%s: type of second parameter must be the same as"
                         " the first's" % self.__class__.__name__)
     return Apply(self, [a, val], [a.type()])
Example #26
    def make_node(self, a, s=None):
        a = tt.as_tensor_variable(a)
        if a.ndim < 2:
            raise TypeError(
                "%s: input must have dimension >= 2, with first dimension batches"
                % self.__class__.__name__)

        if s is None:
            s = a.shape[1:]
            s = tt.as_tensor_variable(s)
        else:
            s = tt.as_tensor_variable(s)
            if s.dtype not in tt.integer_dtypes:
                raise TypeError("%s: length of the transformed axis must be"
                                " of type integer" % self.__class__.__name__)
        return Apply(self, [a, s], [self.output_type(a)()])
Example #27
    def make_node(self, a, s=None):
        a = tt.as_tensor_variable(a)
        if a.ndim < 3:
            raise TypeError(
                f"{self.__class__.__name__}: input must have dimension >= 3, with "
                + "first dimension batches and last real/imag parts")

        if s is None:
            s = a.shape[1:-1]
            s = tt.set_subtensor(s[-1], (s[-1] - 1) * 2)
            s = tt.as_tensor_variable(s)
        else:
            s = tt.as_tensor_variable(s)
            if s.dtype not in tt.integer_dtypes:
                raise TypeError("%s: length of the transformed axis must be"
                                " of type integer" % self.__class__.__name__)
        return Apply(self, [a, s], [self.output_type(a)()])
Example #28
    def make_node(self, x, dy, scale, x_mean, x_invstd, epsilon=1e-4):
        x = as_tensor_variable(x)
        dy = as_tensor_variable(dy)
        scale = as_tensor_variable(scale)
        x_mean = as_tensor_variable(x_mean)
        x_invstd = as_tensor_variable(x_invstd)
        epsilon = as_tensor_variable(epsilon)

        # Upcast the non-scalar inputs to a common dtype;
        # keep the scalar epsilon's dtype as-is.
        x, dy, scale, x_mean, x_invstd = as_common_dtype(x, dy, scale, x_mean, x_invstd)
        assert x.ndim == dy.ndim == scale.ndim == x_mean.ndim == x_invstd.ndim
        return Apply(
            self,
            [x, dy, scale, x_mean, x_invstd, epsilon],
            [x.type(), scale.type(), scale.type()],
        )
Example #29
    def make_node(self, ten4, neib_shape, neib_step=None):
        """
        Parameters
        ----------
        ten4 : a list of lists of images
            ten4 is of shape (list 1 dim, list 2 dim, row, col).
        neib_shape
            (r,c) where r is the height of the neighborhood in rows and c is
            the width of the neighborhood in columns.
        neib_step
            (dr,dc) where dr is the number of rows to skip between patches and
            dc is the number of columns to skip. When None, this is the same as
            neib_shape (patches are disjoint).

        Returns
        -------
        matrix
            A 2D matrix, written using the following pattern::

                idx = 0
                for i in range(list 1 dim)
                    for j in range(list 2 dim)
                        for k in <image column coordinates>
                            for l in <image row coordinates>
                                output[idx,:]
                                     = flattened version of ten4[i,j,l:l+r,k:k+c]
                                idx += 1

            .. note:: The op isn't necessarily implemented internally with these
                for loops, they're just the easiest way to describe the output
                pattern.

        """
        ten4 = tt.as_tensor_variable(ten4)
        neib_shape = tt.as_tensor_variable(neib_shape)
        if neib_step is None:
            neib_step = neib_shape
        else:
            neib_step = tt.as_tensor_variable(neib_step)

        assert ten4.ndim == 4
        assert neib_shape.ndim == 1
        assert neib_step.ndim == 1

        return Apply(self, [ten4, neib_shape, neib_step],
                     [tt.matrix(dtype=ten4.type.dtype)])
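A pure-NumPy sketch of the output pattern described in the docstring, restricted to disjoint patches (neib_step == neib_shape); it mirrors the documented loop order and is illustrative, not the Op's actual implementation:

import numpy as np

def images2neibs_ref(ten4, neib_shape):
    r, c = neib_shape
    d1, d2, rows, cols = ten4.shape
    out = []
    for i in range(d1):
        for j in range(d2):
            for k in range(0, cols, c):      # image column coordinates
                for l in range(0, rows, r):  # image row coordinates
                    out.append(ten4[i, j, l:l + r, k:k + c].ravel())
    return np.stack(out)

x = np.arange(16).reshape(1, 1, 4, 4)
print(images2neibs_ref(x, (2, 2)).shape)  # (4, 4): four flattened 2x2 patches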
Example #30
    def make_node(self, o, x, y, xIdx, yIdx, alpha=None):
        """
        Compute the dot product of the specified pieces of vectors
        and matrices.

        The parameter types are actually their expected shapes
        relative to each other.

        Parameters
        ----------
        o : xBlocks, yBlocks, xSize, ySize
        x : batch, xWin, xSize
        y : batch, yWin, ySize
        xIdx : batch, xWin
            indexes of the x blocks
        yIdx : batch, yWin
            indexes of the y blocks

        Returns
        -------
        (xBlocks, yBlocks, xSize, ySize)
            outer(x[i], y[j]) + o[i, j]

        Notes
        -----
        - `batch` is the number of examples in a minibatch (batch size).
        - `xBlocks` is the total number of blocks in x.
        - `xSize` is the size of each of these x blocks.
        - `xWin` is the number of blocks that will be used as x. Which blocks
          will be used is specified in `xIdx`.
        - `yBlocks` is the number of possible y blocks.
        - `ySize` is the size of each of these y blocks.
        - `yWin` is the number of y blocks that will actually be computed.
          Which blocks will be computed is specified in `yIdx`.

        """
        one = theano.tensor.constant(np.asarray(1.0, dtype="float32"))
        o = theano.tensor.as_tensor_variable(o)
        x = theano.tensor.as_tensor_variable(x)
        y = theano.tensor.as_tensor_variable(y)

        if alpha is None:
            alpha = one

        return Apply(self, [o, x, y, xIdx, yIdx, alpha], [o.type()])
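A NumPy reference of the accumulation the docstring describes (illustrative only, not the Op's implementation):

import numpy as np

def sparse_block_outer_ref(o, x, y, xIdx, yIdx, alpha=1.0):
    # o: (xBlocks, yBlocks, xSize, ySize); x: (batch, xWin, xSize);
    # y: (batch, yWin, ySize); xIdx: (batch, xWin); yIdx: (batch, yWin).
    out = o.copy()
    for b in range(x.shape[0]):
        for i, xi in enumerate(xIdx[b]):
            for j, yj in enumerate(yIdx[b]):
                out[xi, yj] += alpha * np.outer(x[b, i], y[b, j])
    return out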