def make_node(self, dy, sm):
    """Build the Apply node; both inputs must be 4D CUDA ndarrays.

    The output variable has the same type as ``sm``.
    """
    dy = as_cuda_ndarray_variable(dy)
    sm = as_cuda_ndarray_variable(sm)
    assert dy.ndim == 4
    assert sm.ndim == 4
    return Apply(self, [dy, sm], [sm.type.make_variable()])
def make_node(self, input):
    """Coerce ``input`` to a GPU array in its inferred context.

    The output shares the input's type.
    """
    ctx = infer_context_name(input)
    input = as_gpuarray_variable(input, context_name=ctx)
    return Apply(self, [input], [input.type()])
def make_node(self, x):
    """Wrap ``x`` as a tensor variable; the output has the same type."""
    x = as_tensor_variable(x)
    return Apply(self, [x], [x.type()])
def make_node(self, a, b):
    """Wrap ``a`` and ``b`` as scalars; the result is a float64 scalar."""
    inputs = [scalar.as_scalar(a), scalar.as_scalar(b)]
    return Apply(self, inputs, [scalar.float64()])
def make_node(self):
    """No inputs; produces a single uint32 scalar output."""
    return Apply(self, [], [scalar.uint32()])
def make_node(self, dnll, sm, y_idx):
    """Build the Apply node; the output variable has ``sm``'s type.

    All three inputs are coerced to GPU arrays in a single context
    inferred from them, so they end up on the same device — this mirrors
    the pattern used by the other GPU ops in this file (the variant of
    this method that passes ``ctx_name``).
    """
    ctx_name = infer_context_name(dnll, sm, y_idx)
    dnll = as_gpuarray_variable(dnll, ctx_name)
    sm = as_gpuarray_variable(sm, ctx_name)
    y_idx = as_gpuarray_variable(y_idx, ctx_name)
    return Apply(self, [dnll, sm, y_idx], [sm.type()])
def make_node(self, x, b):
    """Build the Apply node; the output mirrors ``x``'s type.

    Both inputs are moved to a shared GPU context inferred from them,
    matching the corrected context-aware variant of this method used by
    the other GPU ops in this file.
    """
    ctx_name = infer_context_name(x, b)
    x = as_gpuarray_variable(x, ctx_name)
    b = as_gpuarray_variable(b, ctx_name)
    return Apply(self, [x, b], [x.type()])
def make_node(self, A, s, m, A2, s2, m2):
    """Six inputs passed through untouched; the output shares ``s``'s type."""
    inputs = [A, s, m, A2, s2, m2]
    return Apply(self, inputs, [s.type()])
def make_node(self, rstate, size):
    """Build the Apply node for this RNG op.

    Error checking is slightly redundant here, since this op should not
    be called directly — call through MRG_RandomStreams instead.
    """
    outputs = [rstate.type(), self.output_type()]
    return Apply(self, [rstate, size], outputs)
def make_node(self, x):
    """Validate that ``x`` is a 4D CudaNdarray and build the Apply node.

    Raises
    ------
    TypeError
        If ``x`` is not a CudaNdarrayType or is not 4-dimensional.
    """
    # The original raised bare TypeError() with no message; include the
    # offending type/ndim so failures are diagnosable.
    if not isinstance(x.type, CudaNdarrayType):
        raise TypeError("x must be a CudaNdarrayType, got %s" % x.type)
    if x.type.ndim != 4:
        raise TypeError("x must be 4D, got ndim=%d" % x.type.ndim)
    return Apply(self, [x], [x.type()])
def make_node(self, x, z, gz):
    """Three inputs passed through as-is; the output mirrors ``x``'s type."""
    return Apply(self, [x, z, gz], [x.type()])
def make_node(self, z, a, x, y, b):
    """Build the gemm-style Apply node; the output has ``z``'s type.

    The more complicated error checking performed by tensor.gemm is
    assumed to already have been done.
    """
    return Apply(self, [z, a, x, y, b], [z.type()])
def make_node(self):
    """No inputs; the output is an opaque cuDNN pooling descriptor."""
    out_type = CDataType("cudnnPoolingDescriptor_t")
    return Apply(self, [], [out_type()])
def make_node(self):
    """No inputs; single Generic-typed output."""
    return Apply(self, [], [Generic()()])
def make_node(self, value, *shape):
    """Build a GPU Alloc node.

    Delegates shape/broadcast validation to ``Alloc.make_node``, then
    rebuilds the node with a GPU input and a GpuArrayType output.
    """
    res = Alloc.make_node(self, value, *shape)
    value = as_gpuarray_variable(value)
    out_var = res.outputs[0]
    otype = GpuArrayType(dtype=out_var.dtype,
                         broadcastable=out_var.broadcastable)
    return Apply(self, [value] + res.inputs[1:], [otype()])
def make_node(self, o, W, h, inputIdx, outputIdx):
    """
    Compute the dot product of the specified pieces of vectors
    and matrices.

    The parameter types are actually their expected shapes
    relative to each other.

    Parameters
    ----------
    o : batch, oWin, oSize
        output vector
    W : iBlocks, oBlocks, iSize, oSize
        weight matrix
    h : batch, iWin, iSize
        input from lower layer (sparse)
    inputIdx : batch, iWin
        indexes of the input blocks
    outputIdx : batch, oWin
        indexes of the output blocks

    Returns
    -------
    (batch, oWin, oSize)
        dot(W[i, j], h[i]) + o[j]

    Notes
    -----
    - `batch` is the number of examples in a minibatch (batch size).
    - `iBlocks` is the total number of blocks in the input (from lower
        layer).
    - `iSize` is the size of each of these input blocks.
    - `iWin` is the number of blocks that will be used as inputs. Which
        blocks will be used is specified in `inputIdx`.
    - `oBlocks` is the number or possible output blocks.
    - `oSize` is the size of each of these output blocks.
    - `oWin` is the number of output blocks that will actually be computed.
        Which blocks will be computed is specified in `outputIdx`.

    """
    o = theano.tensor.as_tensor_variable(o)
    W = theano.tensor.as_tensor_variable(W)
    h = theano.tensor.as_tensor_variable(h)
    inputIdx = theano.tensor.as_tensor_variable(inputIdx)
    outputIdx = theano.tensor.as_tensor_variable(outputIdx)

    # BUG FIX: the message previously said "2D tensor" although the
    # check requires (and the docstring documents) a 3D output.
    if o.ndim != 3:
        raise TypeError("The output o must be a 3D tensor")
    if W.ndim != 4:
        raise TypeError("The weight matrix W must be a 4D tensor")
    if h.ndim != 3:
        raise TypeError("The input h must be a 3D tensor")
    if inputIdx.ndim != 2:
        raise TypeError("The input indices inputIdx must be a 2D tensor")
    if outputIdx.ndim != 2:
        raise TypeError("The output indices outputIdx must be a 2D tensor")

    assert inputIdx.type.dtype in discrete_dtypes
    assert outputIdx.type.dtype in discrete_dtypes

    return Apply(self, [o, W, h, inputIdx, outputIdx], [o.type()])
def make_node(self, x, shp):
    """Build a GPU reshape node.

    The output dtype/broadcastable pattern is inferred by performing the
    reshape symbolically on a host copy of ``x``.
    """
    x = as_gpuarray_variable(x)
    res = host_from_gpu(x).reshape(shp, ndim=self.ndim)
    otype = GpuArrayType(dtype=res.dtype,
                         broadcastable=res.broadcastable)
    return Apply(self, [x, shp], [otype()])
def make_node(self, dnll, sm, y_idx):
    """Move all three inputs to a common GPU context; output has ``sm``'s type."""
    ctx = infer_context_name(dnll, sm, y_idx)
    dnll, sm, y_idx = [as_gpuarray_variable(v, ctx)
                       for v in (dnll, sm, y_idx)]
    return Apply(self, [dnll, sm, y_idx], [sm.type()])
def make_node(self, x):
    """Coerce ``x`` to a GPU array; the output shares its type.

    The GPU context is inferred from ``x`` itself, matching the
    context-aware variant of this method used elsewhere in this file.
    """
    x = as_gpuarray_variable(x, infer_context_name(x))
    return Apply(self, [x], [x.type()])
def make_node(self, x):
    """Coerce ``x`` to a GPU array in its inferred context; same output type."""
    ctx = infer_context_name(x)
    x = as_gpuarray_variable(x, ctx)
    return Apply(self, [x], [x.type()])
def make_node(self, i):
    """Single input ``i``; output is an opaque pointer released via py_decref."""
    otype = CDataType('void *', 'py_decref')
    return Apply(self, [i], [otype()])
def make_node(self, x, b):
    """Place ``x`` and ``b`` in a shared GPU context; output mirrors ``x``'s type."""
    ctx = infer_context_name(x, b)
    x, b = as_gpuarray_variable(x, ctx), as_gpuarray_variable(b, ctx)
    return Apply(self, [x, b], [x.type()])
def make_node(self, i):
    """Single input ``i``; output is an opaque pointer released via py_decref."""
    otype = CDataType("void *", "py_decref")
    return Apply(self, [i], [otype()])
def make_node(self, input):
    """Coerce ``input`` to a GPU array; the output shares its type.

    The GPU context is inferred from the input itself, matching the
    context-aware variant of this method used elsewhere in this file.
    """
    input = as_gpuarray_variable(input,
                                 context_name=infer_context_name(input))
    return Apply(self, [input], [input.type()])
def make_node(self, c):
    """Single input ``c``; the output is a 1-D float32 vector."""
    out_type = TensorType("float32", (False,))
    return Apply(self, [c], [out_type()])
def make_node(self, x, y):
    """Run Dot22's validation, then build a GPU node; output has ``x``'s type."""
    # Parent make_node performs the shape/type checks; its result is
    # otherwise unused here.
    Dot22.make_node(self, x, y)
    x = as_gpuarray_variable(x)
    y = as_gpuarray_variable(y)
    assert x.dtype == y.dtype
    return Apply(self, [x, y], [x.type()])
def make_node(self, val):
    """Cast ``val`` to a uint64 scalar and build the Apply node."""
    from theano import Apply
    from theano.scalar import as_scalar
    val = as_scalar(val).astype('uint64')
    return Apply(self, [val], [self.rtype()])
def make_node(self, input):
    """Use DimShuffle's type inference, then build the GPU Apply node."""
    res = DimShuffle.make_node(self, input)
    inferred = res.outputs[0].type
    otype = GpuArrayType(dtype=inferred.dtype,
                         broadcastable=inferred.broadcastable)
    input = as_gpuarray_variable(input)
    return Apply(self, [input], [otype()])
def make_node(self, x):
    """Build a GPU-to-host transfer node.

    Raises
    ------
    TypeError
        If ``x`` is not a GpuArrayType variable.
    """
    if not isinstance(x.type, GpuArrayType):
        raise TypeError(x)
    host_type = tensor.TensorType(dtype=x.dtype,
                                  broadcastable=x.broadcastable)
    return Apply(self, [x], [host_type()])
def make_node(self, x):
    """``x`` must be a 4D CUDA ndarray; the output shares its type."""
    x = as_cuda_ndarray_variable(x)
    assert x.ndim == 4
    return Apply(self, [x], [x.type()])