Example #1
 def make_node(self, input, axis=-1):
     input = theano.tensor.as_tensor_variable(input)
     if (axis is None
             or (isinstance(axis, theano.Constant) and axis.data is None)):
         axis = theano.Constant(theano.gof.generic, None)
         bcast = [False]
     else:
         axis = theano.tensor.as_tensor_variable(axis)
         bcast = input.type.broadcastable
     return theano.Apply(
         self, [input, axis],
         [theano.tensor.TensorType(dtype="int64", broadcastable=bcast)()])
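For orientation, here is a minimal self-contained sketch (ours, not from any of the sources collected here) showing where a make_node like these fits in a complete custom Op: make_node builds the Apply node with typed inputs and outputs, and perform fills in the numeric computation. DoubleOp and its behavior are purely illustrative:

import numpy
import theano
import theano.tensor as T

class DoubleOp(theano.Op):
    """Toy Op that doubles its input, to show where make_node fits in."""
    __props__ = ()

    def make_node(self, x):
        x = T.as_tensor_variable(x)
        # Declare a single output of the same type as the input.
        return theano.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        output_storage[0][0] = x * 2

x = T.vector("x")
f = theano.function([x], DoubleOp()(x))
print(f(numpy.asarray([1.0, 2.0], dtype=theano.config.floatX)))  # -> [2. 4.]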
Example #2
    def make_node(self, Z, c, y0, i, freq, W_re, *args):
        """
    :param Z: {input,output,forget} gate + cell state. 3d (time,batch,dim*4)
    :param c: initial cell state. 2d (batch,dim)
    :param y0: output of t = -1 (for recursion at t = 0). 2d (batch,dim)
    :param i: index. 2d (time,batch) -> 0 or 1
    :param W_re: recurrent matrix. 2d (dim,dim*4)
    :param freq: call frequency to custom function. int
    :param args: custom_inputs + initial_state_vars: other inputs for the custom function
    """
        from returnn.util.basic import have_gpu
        assert have_gpu()

        assert len(args) == (self._get_num_custom_vars() +
                             self._get_num_state_vars()), self.recurrent_transform
        custom_inputs = args[:self._get_num_custom_vars()]
        initial_state_vars = args[self._get_num_custom_vars():]

        custom_inputs = [
            gpu_contiguous(as_cuda_ndarray_variable(x)) for x in custom_inputs
        ]
        initial_state_vars = [
            gpu_contiguous(as_cuda_ndarray_variable(x))
            for x in initial_state_vars
        ]
        Z = gpu_contiguous(as_cuda_ndarray_variable(Z))
        c = gpu_contiguous(as_cuda_ndarray_variable(c))
        y0 = gpu_contiguous(as_cuda_ndarray_variable(y0))
        i = gpu_contiguous(as_cuda_ndarray_variable(T.cast(i, 'float32')))
        W_re = gpu_contiguous(as_cuda_ndarray_variable(W_re))
        self.freq = gpu_contiguous(as_cuda_ndarray_variable(freq))
        assert Z.dtype == "float32"
        assert c.dtype == "float32"
        assert y0.dtype == "float32"
        assert W_re.dtype == "float32"
        for x in custom_inputs:
            assert x.dtype == "float32"
        for x in initial_state_vars:
            assert x.dtype == "float32"
        assert Z.ndim == 3
        assert c.ndim == 2
        assert y0.ndim == 2
        assert i.ndim == 2
        assert W_re.ndim == 2

        seq_state_vars = [
            self._seq_var_for_initial_state_var(x) for x in initial_state_vars
        ]
        return theano.Apply(
            self,
            [Z, c, y0, i, freq, W_re] + custom_inputs + initial_state_vars,
            # results: (output) Y, (gates and cell state) H, (final cell state) d, state vars sequences
            [Z.type(), Z.type(), c.type()] + seq_state_vars)
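The gpu_contiguous(as_cuda_ndarray_variable(...)) double wrap above recurs in most of the GPU examples below: it first makes the input a CudaNdarray variable on the GPU, then forces a C-contiguous layout so the CUDA kernels can index with plain strides. A tiny helper sketch, assuming a CUDA-enabled classic Theano (the helper name is ours):

from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable,
                                           gpu_contiguous)

def to_gpu_contig(x):
    # GPU variable first, then force a C-contiguous memory layout.
    return gpu_contiguous(as_cuda_ndarray_variable(x))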
Example #3
    def make_node(self, x):
        assert x.dtype == 'float32'
        if not isinstance(x.type, CudaNdarrayType):
            raise TypeError('x must be a CudaNdarrayType', x)

        if x.ndim > GpuCumsum.SUPPORTED_NDIMS:
            raise NotImplementedError('Only cumsum on 1D, 2D and 3D arrays is supported right now!')

        if self.axis >= x.ndim or self.axis < -x.ndim:
            raise ValueError('axis(={0}) out of bounds'.format(self.axis))

        return theano.Apply(self, [x], [x.type()])
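An aside of ours, not part of the source: once the bounds check above passes, a negative axis is typically normalized NumPy-style inside make_node before being used to index:

# Illustrative fragment only: map a valid negative axis to its positive equivalent.
axis = self.axis if self.axis >= 0 else self.axis + x.ndim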
Example #4
    def make_node(self, V, d, WShape, dCdH):
        V_ = T.as_tensor_variable(V)
        d_ = T.as_tensor_variable(d)
        WShape_ = T.as_tensor_variable(WShape)
        dCdH_ = T.as_tensor_variable(dCdH)

        return theano.Apply(
            self,
            inputs=[V_, d_, WShape_, dCdH_],
            outputs=[
                T.TensorType(V_.dtype, (False, False, False, False, False))()
            ])
Example #5
 def make_node(self, posteriors, seq_lengths):
   """
   :param numpy.ndarray posteriors:
   :param numpy.ndarray seq_lengths:
   :return:
   """
   # We get the posteriors here from the Network output function,
   # which should be softmax.
   posteriors = theano.tensor.as_tensor_variable(posteriors)
   seq_lengths = theano.tensor.as_tensor_variable(seq_lengths)
   assert seq_lengths.ndim == 1  # vector of sequence lengths
   return theano.Apply(op=self, inputs=[posteriors, seq_lengths], outputs=[T.fvector(), posteriors.type()])
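When the Apply declares several outputs, as here, calling the Op returns a matching list of variables that can be unpacked. A toy sketch of the same pattern (MeanAndCentered is hypothetical, not the op above):

import theano
import theano.tensor as T

class MeanAndCentered(theano.Op):
    """Toy two-output Op: per-column mean plus the centered matrix."""
    __props__ = ()

    def make_node(self, x):
        x = T.as_tensor_variable(x)
        return theano.Apply(self, [x], [T.fvector(), x.type()])

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        m = x.mean(axis=0).astype("float32")
        output_storage[0][0] = m
        output_storage[1][0] = x - m

x = T.fmatrix("x")
mean, centered = MeanAndCentered()(x)  # two outputs -> two variables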
Example #6
  def make_node(self, *args):
    """
    For Theano.

    :param args:
    :return:
    """
    assert len(args) == len(self.in_info)
    args = [self._convert_input_var(arg, info) for arg, info in zip(args, self.in_info)]
    outputs = [self.tensor_type(dtype=info.get("dtype", "float32"), ndim=info["ndim"])()
               for info in self.out_info]
    return theano.Apply(self, args, outputs)
Example #7
    def make_node(self, inp1, inp2):
        inp1 = basic_ops.gpu_contiguous(
            basic_ops.as_cuda_ndarray_variable(inp1))
        inp2 = basic_ops.gpu_contiguous(
            basic_ops.as_cuda_ndarray_variable(inp2))

        assert inp1.dtype == "float32"
        assert inp2.dtype == "float32"
        assert inp1.ndim == 4  # (batch, a, b, real/imag)
        assert inp2.ndim == 4

        return theano.Apply(self, [inp1, inp2], [self.output_type(inp1)()])
Example #8
 def make_node(self, x, y, len_x, len_y):
     x = theano.tensor.as_tensor_variable(x)
     assert x.ndim == 3  # tensor: nframes x nseqs x dim
     y = theano.tensor.as_tensor_variable(y)
     assert y.ndim == 2  # matrix: nseqs x max_labelling_length
     len_x = theano.tensor.as_tensor_variable(len_x)
     len_y = theano.tensor.as_tensor_variable(len_y)
     assert len_x.ndim == 1  # vector of sequence lengths
     assert len_x.dtype == "int32"
     assert len_y.ndim == 1  # vector of sequence lengths
     assert len_y.dtype == "int32"
     return theano.Apply(self, [x, y, len_x, len_y], [T.ftensor3()])
Example #9
    def make_node(self, inp1, inp2):
        inp1 = cuda.basic_ops.gpu_contiguous(
            cuda.basic_ops.as_cuda_ndarray_variable(inp1))
        inp2 = cuda.basic_ops.gpu_contiguous(
            cuda.basic_ops.as_cuda_ndarray_variable(inp2))

        assert inp1.dtype == "float32"
        assert inp2.dtype == "float32"
        assert inp1.ndim == 2
        assert inp2.ndim == 2

        return theano.Apply(self, [inp1, inp2], [self.output_type(inp1)()])
Example #10
    def make_node(self, x, y, seq_lengths):
        x = theano.tensor.as_tensor_variable(x)
        assert x.ndim == 3  # tensor: nframes x nseqs x dim
        y = theano.tensor.as_tensor_variable(y)
        assert y.ndim == 2  # matrix: nseqs x max_labelling_length
        seq_lengths = theano.tensor.as_tensor_variable(seq_lengths)
        assert seq_lengths.ndim == 1  # vector of sequence lengths
        assert seq_lengths.dtype == "int32"

        return theano.Apply(
            self, [x, y, seq_lengths],
            [T.fvector(), T.ftensor3(), T.fmatrix()])
Example #11
 def make_node(self, sizes, factor):
     sizes = T.as_tensor_variable(sizes)
     assert sizes.dtype == "float32"
     assert sizes.ndim == 2
     factor = T.as_tensor_variable(factor)
     assert factor.dtype == "float32"
     assert factor.ndim == 0
     return theano.Apply(
         self, [sizes, factor],
         [sizes.type(),
          sizes.type(),
          sizes.type(),
          T.fvector()])
Example #12
 def make_node(self, *inputs):
     in_args = [as_tensor_variable(i) for i in inputs]
     if any(i.dtype != "float64" for i in in_args):
         raise ValueError("float64 dtypes are required for LimbDark op; "
                          "got:\n{0}".format([i.dtype for i in inputs]))
     out_args = [
         in_args[1].type(),
         tt.TensorType(dtype="float64",
                       broadcastable=[False] * (in_args[1].ndim + 1))(),
         in_args[1].type(),
         in_args[2].type(),
     ]
     return theano.Apply(self, in_args, out_args)
Example #13
    def make_node(self, W, b, d, H, RShape=None):
        W_ = as_cuda_ndarray_variable(W)
        b_ = as_cuda_ndarray_variable(b)
        d_ = T.as_tensor_variable(d)
        H_ = as_cuda_ndarray_variable(H)
        if RShape:
            RShape_ = T.as_tensor_variable(RShape)
        else:
            RShape_ = T.as_tensor_variable([-1, -1, -1])

        return theano.Apply(self, inputs=[W_, b_, d_, H_, RShape_],
                            outputs=[CudaNdarrayType(dtype=H_.dtype,
                                                     broadcastable=(False,)*5)()])
Example #14
 def make_node(self, A):
     ctx_name = infer_context_name(A)
     A = as_gpuarray_variable(A, ctx_name)
     A = gpu_contiguous(A)
     if A.ndim != 2:
         raise LinAlgError("Matrix rank error")
     if A.dtype != "float32":
         raise TypeError("only `float32` is supported for now")
     if self.complete:
         return theano.Apply(
             self,
             [A],
             # return R, Q
             [A.type(), A.type()],
         )
     else:
         return theano.Apply(
             self,
             [A],
             # return R
             [A.type()],
         )
Example #15
    def make_node(self, inps):
        inp, dy = inps

        if (dy.type.ndim not in (1, 2)
                or dy.type.dtype not in T.float_dtypes):
            raise ValueError('dy must be a 1-d or 2-d tensor of floats. Got ',
                             dy.type)
        if dy.ndim == 1:
            dy = T.shape_padleft(dy, n_ones=1)
        elif dy.ndim == 2:
            if inp.ndim == 1:
                dy = T.sum(dy, axis=0)

        return theano.Apply(self, [dy], [dy.type()])
Example #16
 def make_node(self, x, y, rcond):
     x = theano.tensor.as_tensor_variable(x)
     y = theano.tensor.as_tensor_variable(y)
     rcond = theano.tensor.as_tensor_variable(rcond)
     return theano.Apply(
         self,
         [x, y, rcond],
         [
             theano.tensor.matrix(),
             theano.tensor.dvector(),
             theano.tensor.lscalar(),
             theano.tensor.dvector(),
         ],
     )
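The four output types declared here line up with what numpy.linalg.lstsq returns (solution, residuals, rank, singular values), so a plausible perform, sketched under that assumption, would be:

import numpy

def perform(self, node, inputs, output_storage):
    x, y, rcond = inputs
    # numpy.linalg.lstsq returns (solution, residuals, rank, singular_values)
    for storage, value in zip(output_storage, numpy.linalg.lstsq(x, y, rcond)):
        storage[0] = value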
Example #17
    def make_node(self, V, d, WShape, dCdH):
        """
        :param V: visible
        :param d: strides
        :param WShape: shapes of the weights -> shape of this op output
        :param dCdH: other input with what V will be convolved.
        """
        V_ = as_cuda_ndarray_variable(V)
        d_ = T.as_tensor_variable(d)
        WShape_ = T.as_tensor_variable(WShape)
        dCdH_ = as_cuda_ndarray_variable(dCdH)

        return theano.Apply(
            self, inputs=[V_, d_, WShape_, dCdH_],
            outputs=[CudaNdarrayType(dtype=V_.dtype, broadcastable=(False,) * 5)()])
Example #18
 def make_node(self, X, DY, regions_y, regions_x):
     X = gpu_contiguous(as_cuda_ndarray_variable(X))
     assert X.dtype == "float32"
     assert X.ndim == 4
     DY = gpu_contiguous(as_cuda_ndarray_variable(DY))
     assert DY.dtype == "float32"
     assert DY.ndim == 4
     regions_y = gpu_contiguous(as_cuda_ndarray_variable(regions_y))
     assert regions_y.dtype == "float32"
     assert regions_y.ndim == 2
     regions_x = gpu_contiguous(as_cuda_ndarray_variable(regions_x))
     assert regions_x.dtype == "float32"
     assert regions_x.ndim == 2, regions_x.ndim
     return theano.Apply(self, [X, DY, regions_y, regions_x], [X.type()])
Example #19
    def make_node(self, activations, labels, input_lengths):
        context_name = infer_context_name(activations)
        t_activations = as_gpuarray_variable(activations,
                                             context_name=context_name)
        # Ensure activations array is C-contiguous
        t_activations = gpu_contiguous(t_activations)

        # Labels and input lengths are always on the CPU
        t_labels = tt.as_tensor_variable(labels)
        t_input_lengths = tt.as_tensor_variable(input_lengths)

        if t_activations.type.dtype != "float32":
            raise TypeError("activations must use the float32 type.")

        if t_activations.ndim != 3:
            raise ValueError("activations must have 3 dimensions.")

        if t_labels.type.dtype != "int32":
            raise TypeError("labels must use the int32 type.")

        if t_labels.ndim != 2:
            raise ValueError("labels must have 2 dimensions.")

        if t_input_lengths.type.dtype != "int32":
            raise TypeError("input_lengths must use the int32 type.")

        if t_input_lengths.ndim != 1:
            raise ValueError("input_lengths must have 1 dimension.")

        costs = GpuArrayType(dtype="float32",
                             broadcastable=(False, ),
                             context_name=context_name)()
        outputs = [costs]

        if self.compute_grad:
            gradients = GpuArrayType(
                dtype="float32",
                broadcastable=(
                    False,
                    False,
                    False,
                ),
                context_name=context_name,
            )()
            outputs += [gradients]

        return theano.Apply(self,
                            inputs=[t_activations, t_labels, t_input_lengths],
                            outputs=outputs)
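Making the output list depend on a constructor flag, as compute_grad does above, is a common pattern. A CPU-only sketch of the same idea (CostOp, its flag handling, and the dummy numbers are ours, not the GPU CTC op's):

import numpy
import theano
import theano.tensor as T

class CostOp(theano.Op):
    __props__ = ("compute_grad",)

    def __init__(self, compute_grad=True):
        self.compute_grad = compute_grad

    def make_node(self, x):
        x = T.as_tensor_variable(x)
        outputs = [T.fvector()]        # costs: always present
        if self.compute_grad:
            outputs.append(x.type())   # gradients: only when requested
        return theano.Apply(self, [x], outputs)

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        output_storage[0][0] = x.sum(axis=1)          # dummy per-row "cost"
        if self.compute_grad:
            output_storage[1][0] = numpy.ones_like(x)  # dummy gradient

x = T.fmatrix("x")
costs, grads = CostOp(compute_grad=True)(x)  # list of two variables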
Example #20
    def make_node(self,
                  # alpha, j_bias,
                  voltage, refractory_time, input_current, dt):
        orig_inputs = [voltage, refractory_time, input_current, dt]
        # Wrap in list() so this also works where map() is lazy (Python 3);
        # theano.Apply expects a list of variables.
        tsor_inputs = list(map(theano.tensor.as_tensor_variable, orig_inputs))

        new_voltage = voltage.type()
        new_refractory_time = refractory_time.type()
        spiked = voltage.type()  # XXX should be ints?
        outputs = [new_voltage, new_refractory_time, spiked]

        return theano.Apply(self, tsor_inputs, outputs)
Example #21
 def make_node(self, x, x2, x3, x4):
     # Check that the theano version has support for __props__.
     # The next line looks like it has a typo ('_props' vs __props__),
     # but checking for the derived '_props' attribute is in fact a way
     # to detect that the theano version is recent enough to support
     # the use of __props__.
     assert hasattr(
         self, '_props'
     ), "Your version of theano is too old to support __props__."
     x = tensor.as_tensor_variable(x)
     x2 = tensor.as_tensor_variable(x2)
     x3 = tensor.as_tensor_variable(x3)
     x4 = tensor.as_tensor_variable(x4)
     return theano.Apply(self, [x, x2, x3, x4],
                         [tensor.fvector().type(),
                          tensor.imatrix().type()])
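Background on the assert above: when __props__ is supported, Theano derives __eq__ and __hash__ from the listed attributes (and exposes a derived _props helper, which is what the hasattr check probes), so identically parameterized Ops compare equal and can be merged in the graph. A minimal illustration with a hypothetical ScaleOp:

import theano
import theano.tensor as T

class ScaleOp(theano.Op):
    __props__ = ("factor",)

    def __init__(self, factor):
        self.factor = factor

    def make_node(self, x):
        x = T.as_tensor_variable(x)
        return theano.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        # Keep the input dtype so the result matches the declared output type.
        output_storage[0][0] = x * x.dtype.type(self.factor)

assert ScaleOp(2.0) == ScaleOp(2.0)   # same props -> equal and same hash
assert ScaleOp(2.0) != ScaleOp(3.0)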
Example #22
  def make_node(self, Z, V_h, c, i):
    Z = gpu_contiguous(as_cuda_ndarray_variable(Z))
    V_h = gpu_contiguous(as_cuda_ndarray_variable(V_h))
    c = gpu_contiguous(as_cuda_ndarray_variable(c))
    i = gpu_contiguous(as_cuda_ndarray_variable(i))
    assert Z.dtype == "float32"
    assert V_h.dtype == "float32"
    assert c.dtype == 'float32'
    assert c.ndim == 2
    assert Z.ndim == 2
    assert i.ndim == 1
    assert V_h.ndim == 2

    # results: (output) Y, (gates and cell state) H, (final cell state) d
    return theano.Apply(self, [Z, V_h, c, i], [Z.type(), Z.type(), c.type()])
Example #23
 def make_node(self, X, regions_y, regions_x, out_size):
     X = gpu_contiguous(as_cuda_ndarray_variable(X))
     assert X.dtype == "float32"
     assert X.ndim == 4
     regions_y = gpu_contiguous(as_cuda_ndarray_variable(regions_y))
     assert regions_y.dtype == "float32"
     assert regions_y.ndim == 2
     regions_x = gpu_contiguous(as_cuda_ndarray_variable(regions_x))
     assert regions_x.dtype == "float32"
     assert regions_x.ndim == 2, regions_x.ndim
     out_size = T.as_tensor_variable(out_size)
     assert out_size.dtype == "float32"
     assert out_size.ndim == 1
     return theano.Apply(self, [X, regions_y, regions_x, out_size],
                         [X.type()])
Example #24
    def make_node(self, V_f, V_b, c_f, c_b, idx_f, idx_b, Dd_f, Dd_b, DY_f,
                  DY_b, Y_f, Y_b, H_f, H_b):
        V_f = gpu_contiguous(as_cuda_ndarray_variable(V_f))
        V_b = gpu_contiguous(as_cuda_ndarray_variable(V_b))
        c_f = gpu_contiguous(as_cuda_ndarray_variable(c_f))
        c_b = gpu_contiguous(as_cuda_ndarray_variable(c_b))
        DY_f = gpu_contiguous(as_cuda_ndarray_variable(DY_f))
        DY_b = gpu_contiguous(as_cuda_ndarray_variable(DY_b))
        idx_f = gpu_contiguous(
            as_cuda_ndarray_variable(T.cast(idx_f, 'float32')))
        idx_b = gpu_contiguous(
            as_cuda_ndarray_variable(T.cast(idx_b, 'float32')))
        Dd_f = gpu_contiguous(as_cuda_ndarray_variable(Dd_f))
        Dd_b = gpu_contiguous(as_cuda_ndarray_variable(Dd_b))
        assert V_f.dtype == "float32"
        assert V_b.dtype == "float32"
        assert DY_f.dtype == 'float32'
        assert DY_b.dtype == 'float32'
        assert Y_f.dtype == 'float32'
        assert Y_b.dtype == 'float32'
        assert H_f.dtype == 'float32'
        assert H_b.dtype == 'float32'
        assert c_f.dtype == 'float32'
        assert c_b.dtype == 'float32'
        assert V_f.ndim == 2
        assert V_b.ndim == 2
        assert DY_f.ndim == 3
        assert DY_b.ndim == 3
        assert Y_f.ndim == 3
        assert Y_b.ndim == 3
        assert H_f.ndim == 3
        assert H_b.ndim == 3
        assert c_f.ndim == 2
        assert c_b.ndim == 2
        assert idx_f.ndim == 2
        assert idx_b.ndim == 2

        return theano.Apply(self, [
            V_f, V_b, c_f, c_b, idx_f, idx_b, Dd_f, Dd_b, DY_f, DY_b, Y_f, Y_b,
            H_f, H_b
        ], [
            H_f.type(),
            H_b.type(),
            V_f.type(),
            V_b.type(),
            c_f.type(),
            c_b.type()
        ])
Example #25
        def make_node(self, img):
            assert hasattr(self, '_props'), "Your version of theano is too old " \
                "to support __props__."
            # Theano's CudaNdarray supports strides, but using them would
            # require writing C code that calls the functions of
            # sandbox/cuda/cuda_ndarray.cuh and passes all the strides to
            # the kernel to do the correct computation. Instead, enforce
            # contiguous arrays.
            cu_img = cuda.basic_ops.gpu_contiguous(
                cuda.basic_ops.as_cuda_ndarray_variable(img))
            assert cu_img.dtype == 'float32'

            # N x nchannels x nbins
            output = cuda.CudaNdarrayType(
                dtype='float32',
                broadcastable=[False, False, False])()
            return theano.Apply(self, [cu_img], [output])
Example #26
 def make_node(self, array, start_idxs, batch_lens, beam_width, pad_left,
               pad_right):
     array = T.as_tensor_variable(array)
     start_idxs = T.as_tensor_variable(start_idxs)
     batch_lens = T.as_tensor_variable(batch_lens)
     beam_width = T.as_tensor_variable(beam_width)
     pad_left = T.as_tensor_variable(pad_left)
     pad_right = T.as_tensor_variable(pad_right)
     assert array.ndim >= 2
     assert start_idxs.ndim == 1
     assert batch_lens.ndim == 1
     assert beam_width.ndim == 0
     return theano.Apply(
         self,
         [array, start_idxs, batch_lens, beam_width, pad_left, pad_right],
         [array.type()])
Example #27
 def make_node(self, V, W, b, d):
     """
         :param V: Visible unit, input
         :param W: Weights, filter
         :param b: bias
         :param d: strides when moving the filter over the input
     """
     V_ = as_cuda_ndarray_variable(V)
     W_ = as_cuda_ndarray_variable(W)
     b_ = as_cuda_ndarray_variable(b)
     d_ = T.as_tensor_variable(d)
     broad = (V_.broadcastable[0], W_.broadcastable[0], False, False, False)
     return theano.Apply(
         self,
         inputs=[V_, W_, b_, d_],
         outputs=[CudaNdarrayType(dtype=V_.dtype, broadcastable=broad)()])
Example #28
    def make_node(self, x1, x2, x3, x4):
        assert hasattr(
            self, '_props'
        ), "Your version of theano is too old to support __props__."
        x1 = tensor.as_tensor_variable(x1)
        x2 = tensor.as_tensor_variable(x2)
        x3 = tensor.as_tensor_variable(x3)
        x4 = tensor.as_tensor_variable(x4)
        out = [
            tensor.fmatrix().type(),
            tensor.itensor3().type(),
            tensor.imatrix().type(),
            tensor.fmatrix().type()
        ]

        return theano.Apply(self, [x1, x2, x3, x4], out)
Example #29
    def make_node(self, W, b, d, H, RShape=None):
        """
        :param W: Weights, filter
        :param b: bias, shape == (W.shape[0],)
        :param d: strides when moving the filter over the input
        :param H: The output of Conv3D
        """
        W_ = T.as_tensor_variable(W)
        b_ = T.as_tensor_variable(b)
        d_ = T.as_tensor_variable(d)
        H_ = T.as_tensor_variable(H)
        if RShape:
            RShape_ = T.as_tensor_variable(RShape)
        else:
            RShape_ = T.as_tensor_variable([-1, -1, -1])

        return theano.Apply(
            self, inputs=[W_, b_, d_, H_, RShape_],
            outputs=[T.TensorType(H_.dtype, (False,) * 5)()])
Example #30
    def make_node(self, inputs):
        inlist = []

        self.fixed_values = OrderedDict()
        self.varnames = []

        for k, v in inputs.items():
            varname = k.split('_')[-1]   # split off the dataset-name prefix
            if isinstance(v, FreeRV):
                self.varnames.append(varname)
                inlist.append(tt.as_tensor_variable(v))
            else:
                self.fixed_values[varname] = v

        outv = tt.as_tensor_variable(num.zeros((2, 2)))
        outlist = [outv.type()]
        return theano.Apply(self, inlist, outlist)