Example #1
 def make_node(self, dy, sm):
     dy = as_cuda_ndarray_variable(dy)
     sm = as_cuda_ndarray_variable(sm)
     assert dy.ndim == 4
     assert sm.ndim == 4
     return Apply(self, [dy, sm], [sm.type.make_variable()])
Example #2
 def make_node(self, input):
     input = as_gpuarray_variable(input,
                                  context_name=infer_context_name(input))
     return Apply(self, [input], [input.type()])
Example #3
 def make_node(self, x):
     x = as_tensor_variable(x)
     return Apply(self, [x], [x.type()])
Example #4
 def make_node(self, a, b):
     return Apply(
         self, [scalar.as_scalar(a), scalar.as_scalar(b)], [scalar.float64()]
     )
Example #5
 def make_node(self):
     return Apply(self, [], [scalar.uint32()])
Example #6
 def make_node(self, dnll, sm, y_idx):
     dnll = as_gpuarray_variable(dnll)
     sm = as_gpuarray_variable(sm)
     y_idx = as_gpuarray_variable(y_idx)
     return Apply(self, [dnll, sm, y_idx], [sm.type()])
Example #7
 def make_node(self, x, b):
     x = as_gpuarray_variable(x)
     b = as_gpuarray_variable(b)
     return Apply(self, [x, b], [x.type()])
Example #8
 def make_node(self, A, s, m, A2, s2, m2):
     return Apply(self, [A, s, m, A2, s2, m2], [s.type()])
Example #9
 def make_node(self, rstate, size):
     # error checking slightly redundant here, since
     # this op should not be called directly.
     #
     # call through MRG_RandomStreams instead.
     return Apply(self, [rstate, size], [rstate.type(), self.output_type()])
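As the comment notes, this op is not meant to be applied directly; random variables are normally requested through MRG_RandomStreams, which builds and updates the rstate input itself. A minimal usage sketch (assuming a standard Theano installation; the seed and shape are arbitrary):

 import theano
 from theano.sandbox.rng_mrg import MRG_RandomStreams

 # The streams object creates and updates the MRG random state internally,
 # so user code never constructs the rstate/size inputs by hand.
 srng = MRG_RandomStreams(seed=1234)
 u = srng.uniform(size=(2, 2))   # symbolic (2, 2) uniform draw
 f = theano.function([], u)
 print(f())                      # a fresh sample on each call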
Example #10
 def make_node(self, x):
     if not isinstance(x.type, CudaNdarrayType):
         raise TypeError()
     if not x.type.ndim == 4:
         raise TypeError()
     return Apply(self, [x], [x.type()])
Example #11
 def make_node(self, x, z, gz):
     return Apply(self, [x, z, gz], [x.type()])
Example #12
 def make_node(self, z, a, x, y, b):
     # the more complicated error checking performed by tensor.gemm is assumed to already
     # have been done
     return Apply(self, [z, a, x, y, b], [z.type()])
Example #13
 def make_node(self):
     return Apply(self, [], [CDataType("cudnnPoolingDescriptor_t")()])
Example #14
 def make_node(self):
     return Apply(self, [], [Generic()()])
Example #15
 def make_node(self, value, *shape):
     res = Alloc.make_node(self, value, *shape)
     value = as_gpuarray_variable(value)
     otype = GpuArrayType(dtype=res.outputs[0].dtype,
                          broadcastable=res.outputs[0].broadcastable)
     return Apply(self, [value] + res.inputs[1:], [otype()])
Example #16
    def make_node(self, o, W, h, inputIdx, outputIdx):
        """
        Compute the dot product of the specified pieces of vectors
        and matrices.

        The parameter types are actually their expected shapes
        relative to each other.

        Parameters
        ----------
        o : batch, oWin, oSize
            output vector
        W : iBlocks, oBlocks, iSize, oSize
            weight matrix
        h : batch, iWin, iSize
            input from lower layer (sparse)
        inputIdx : batch, iWin
            indexes of the input blocks
        outputIdx : batch, oWin
            indexes of the output blocks

        Returns
        -------
        (batch, oWin, oSize)
            dot(W[i, j], h[i]) + o[j]

        Notes
        -----
        - `batch` is the number of examples in a minibatch (batch size).
        - `iBlocks` is the total number of blocks in the input (from lower
          layer).
        - `iSize` is the size of each of these input blocks.
        - `iWin` is the number of blocks that will be used as inputs. Which
          blocks will be used is specified in `inputIdx`.
        - `oBlocks` is the number of possible output blocks.
        - `oSize` is the size of each of these output blocks.
        - `oWin` is the number of output blocks that will actually be computed.
          Which blocks will be computed is specified in `outputIdx`.

        """
        o = theano.tensor.as_tensor_variable(o)
        W = theano.tensor.as_tensor_variable(W)
        h = theano.tensor.as_tensor_variable(h)
        inputIdx = theano.tensor.as_tensor_variable(inputIdx)
        outputIdx = theano.tensor.as_tensor_variable(outputIdx)

        if o.ndim != 3:
            raise TypeError("The output o must be a 2D tensor")
        if W.ndim != 4:
            raise TypeError("The weight matrix W must be a 4D tensor")
        if h.ndim != 3:
            raise TypeError("The input h must be a 3D tensor")
        if inputIdx.ndim != 2:
            raise TypeError("The input indices inputIdx must be a 2D tensor")
        if outputIdx.ndim != 2:
            raise TypeError("The output indices outputIdx must be a 2D tensor")

        assert inputIdx.type.dtype in discrete_dtypes
        assert outputIdx.type.dtype in discrete_dtypes

        return Apply(self, [o, W, h, inputIdx, outputIdx], [o.type()])
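The shape conventions in the docstring can be checked with a plain NumPy reference computation (a sketch only, not the op's implementation; all sizes below are arbitrary):

 import numpy as np

 batch, iBlocks, oBlocks, iSize, oSize, iWin, oWin = 2, 5, 4, 3, 6, 2, 3

 o = np.zeros((batch, oWin, oSize))
 W = np.random.randn(iBlocks, oBlocks, iSize, oSize)
 h = np.random.randn(batch, iWin, iSize)
 inputIdx = np.random.randint(0, iBlocks, size=(batch, iWin))
 outputIdx = np.random.randint(0, oBlocks, size=(batch, oWin))

 # out[b, j] = o[b, j] + sum_i dot(h[b, i], W[inputIdx[b, i], outputIdx[b, j]])
 out = o.copy()
 for b in range(batch):
     for j in range(oWin):
         for i in range(iWin):
             out[b, j] += np.dot(h[b, i], W[inputIdx[b, i], outputIdx[b, j]])

 assert out.shape == (batch, oWin, oSize)  # matches the advertised output type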
Example #17
 def make_node(self, x, shp):
     x = as_gpuarray_variable(x)
     res = host_from_gpu(x).reshape(shp, ndim=self.ndim)
     otype = GpuArrayType(dtype=res.dtype, broadcastable=res.broadcastable)
     return Apply(self, [x, shp], [otype()])
Example #18
 def make_node(self, dnll, sm, y_idx):
     ctx_name = infer_context_name(dnll, sm, y_idx)
     dnll = as_gpuarray_variable(dnll, ctx_name)
     sm = as_gpuarray_variable(sm, ctx_name)
     y_idx = as_gpuarray_variable(y_idx, ctx_name)
     return Apply(self, [dnll, sm, y_idx], [sm.type()])
Example #19
 def make_node(self, x):
     x = as_gpuarray_variable(x)
     return Apply(self, [x], [x.type()])
Example #20
 def make_node(self, x):
     x = as_gpuarray_variable(x, infer_context_name(x))
     return Apply(self, [x], [x.type()])
Example #21
 def make_node(self, i):
     return Apply(self, [i], [CDataType('void *', 'py_decref')()])
Example #22
 def make_node(self, x, b):
     ctx_name = infer_context_name(x, b)
     x = as_gpuarray_variable(x, ctx_name)
     b = as_gpuarray_variable(b, ctx_name)
     return Apply(self, [x, b], [x.type()])
Example #23
 def make_node(self, i):
     return Apply(self, [i], [CDataType("void *", "py_decref")()])
Example #24
 def make_node(self, input):
     input = as_gpuarray_variable(input)
     return Apply(self, [input], [input.type()])
Example #25
 def make_node(self, c):
     return Apply(self, [c], [TensorType("float32", (False,))()])
Example #26
 def make_node(self, x, y):
     res = Dot22.make_node(self, x, y)
     x = as_gpuarray_variable(x)
     y = as_gpuarray_variable(y)
     assert x.dtype == y.dtype
     return Apply(self, [x, y], [x.type()])
Example #27
    def make_node(self, val):
        from theano.scalar import as_scalar
        from theano import Apply

        val = as_scalar(val).astype('uint64')
        return Apply(self, [val], [self.rtype()])
Example #28
 def make_node(self, input):
     res = DimShuffle.make_node(self, input)
     otype = GpuArrayType(dtype=res.outputs[0].type.dtype,
                          broadcastable=res.outputs[0].type.broadcastable)
     input = as_gpuarray_variable(input)
     return Apply(self, [input], [otype()])
Example #29
 def make_node(self, x):
     if not isinstance(x.type, GpuArrayType):
         raise TypeError(x)
     return Apply(self, [x], [
         tensor.TensorType(dtype=x.dtype, broadcastable=x.broadcastable)()
     ])
Example #30
 def make_node(self, x):
     x = as_cuda_ndarray_variable(x)
     assert x.ndim == 4
     return Apply(self, [x], [x.type()])