def ensure_float(val, name):
    """Check that `val` is (or can be wrapped as) a float32 scalar Variable."""
    if not isinstance(val, Variable):
        val = constant(val)
    if hasattr(val, 'ndim') and val.ndim == 0:
        val = as_scalar(val)
    if not isinstance(val.type, theano.scalar.Scalar):
        raise TypeError("%s: expected a scalar value" % (name,))
    if not val.type.dtype == 'float32':
        raise TypeError("%s: type is not float32" % (name,))
    return val

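# Hedged usage sketch for ensure_float: assumes Theano is installed and that
# Variable, constant, and as_scalar are in scope, as in the module this helper
# comes from. A plain Python float would typically become float64 and raise,
# so the value is wrapped as numpy.float32 first.
import numpy

lr = ensure_float(numpy.float32(0.01), 'learning_rate')
assert lr.type.dtype == 'float32'
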
def wrapper(*args):
    # Apply `tensor_op` elementwise to the stored values of a sparse matrix,
    # leaving the sparsity structure (indices, indptr, shape) untouched.
    x = as_sparse_variable(args[0])
    xs = [scalar.as_scalar(arg) for arg in args[1:]]
    data, ind, ptr, shape = csm_properties(x)
    data = tensor_op(data, *xs)
    return CSM(x.format)(data, ind, ptr, shape)

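# The wrapper above matches the inner function of theano.sparse's
# structured_monoid decorator, which lifts a dense elementwise op onto the
# stored values of a CSR/CSC matrix. Hedged usage sketch, assuming a Theano
# build with theano.sparse available:
from theano import sparse

x_sp = sparse.csr_matrix('x', dtype='float64')
# sigmoid applied only to explicitly stored entries; zeros stay implicit
y_sp = sparse.structured_sigmoid(x_sp)
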
def make_node(self, kern, topgrad, desc, h, w):
    kern = as_cuda_ndarray_variable(kern)
    topgrad = as_cuda_ndarray_variable(topgrad)
    if kern.type.ndim != 4:
        raise TypeError('kern must be 4D tensor')
    if topgrad.type.ndim != 4:
        raise TypeError('topgrad must be 4D tensor')
    if (not isinstance(desc.type, CDataType) or
            desc.type.ctype != 'cudnnConvolutionDescriptor_t'):
        raise TypeError('desc must be cudnnConvolutionDescriptor_t')
    h = as_scalar(h)
    w = as_scalar(w)
    broadcastable = [topgrad.type.broadcastable[0],
                     kern.type.broadcastable[1],
                     False, False]
    return Apply(self, [kern, topgrad, desc, h, w],
                 [CudaNdarrayType(broadcastable)()])

def ensure_dt(val, default, name, dtype):
    """Like ensure_float, but fall back to `default` when `val` is None and
    cast to `dtype` instead of raising on a dtype mismatch."""
    if val is None:
        val = default.clone()
    if not isinstance(val, Variable):
        val = constant(val)
    if hasattr(val, 'ndim') and val.ndim == 0:
        val = as_scalar(val)
    if not isinstance(val.type, theano.scalar.Scalar):
        raise TypeError("%s: expected a scalar value" % (name,))
    if not val.type.dtype == dtype:
        val = val.astype(dtype)
    return val

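# Hedged usage sketch for ensure_dt (same scope assumptions as ensure_float):
# passing None selects a clone of the default, and a dtype mismatch is cast
# rather than rejected.
import numpy

default_momentum = constant(numpy.float32(0.9))
m = ensure_dt(None, default_momentum, 'momentum', 'float32')
assert m.type.dtype == 'float32'
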
def make_node(self, pvals, unis, n=1):
    pvals = tt.as_tensor_variable(pvals)
    unis = tt.as_tensor_variable(unis)
    if pvals.ndim != 2:
        raise NotImplementedError("pvals ndim should be 2", pvals.ndim)
    if unis.ndim != 1:
        raise NotImplementedError("unis ndim should be 1", unis.ndim)
    if self.odtype == "auto":
        # In this variant, 'auto' resolves to int64.
        odtype = "int64"
    else:
        odtype = self.odtype
    out = tt.tensor(dtype=odtype, broadcastable=pvals.type.broadcastable)
    return Apply(self, [pvals, unis, as_scalar(n)], [out])

def make_node(self, pvals, unis, n=1):
    pvals = T.as_tensor_variable(pvals)
    unis = T.as_tensor_variable(unis)
    if pvals.ndim != 2:
        raise NotImplementedError('pvals ndim should be 2', pvals.ndim)
    if unis.ndim != 1:
        raise NotImplementedError('unis ndim should be 1', unis.ndim)
    if self.odtype == 'auto':
        # Variant: 'auto' resolves to pvals.dtype rather than int64.
        odtype = pvals.dtype
    else:
        odtype = self.odtype
    out = T.tensor(dtype=odtype, broadcastable=pvals.type.broadcastable)
    return Apply(self, [pvals, unis, as_scalar(n)], [out])

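# The two make_node variants above follow the multinomial samplers in
# theano.sandbox.multinomial; they differ only in what odtype == 'auto'
# resolves to (int64 vs. pvals.dtype). Hedged usage sketch, assuming a Theano
# version that ships theano.sandbox.multinomial:
import theano.tensor as T
from theano.sandbox.multinomial import MultinomialFromUniform

pvals = T.fmatrix('pvals')  # (batch, n_outcomes), rows sum to 1
unis = T.fvector('unis')    # one uniform(0, 1) draw per row
samples = MultinomialFromUniform('auto')(pvals, unis)  # per-row sample counts
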
def make_node(self, pvals, unis, n):
    assert pvals.dtype == "float32"
    assert unis.dtype == "float32"
    ctx_name = infer_context_name(pvals, unis)
    pvals = as_gpuarray_variable(pvals, ctx_name)
    unis = as_gpuarray_variable(unis, ctx_name)
    if pvals.ndim != 2:
        raise NotImplementedError("pvals ndim should be 2", pvals.ndim)
    if unis.ndim != 1:
        raise NotImplementedError("unis ndim should be 1", unis.ndim)
    if self.odtype == "auto":
        odtype = "int64"
    else:
        odtype = self.odtype
    assert odtype == "int64", odtype
    # The output's broadcastable pattern is the transpose of pvals'.
    br = (pvals.broadcastable[1], pvals.broadcastable[0])
    out = GpuArrayType(broadcastable=br,
                       dtype=odtype,
                       context_name=ctx_name)()
    return Apply(self, [pvals, unis, as_scalar(n)], [out])

def make_node(self, val):
    from theano.scalar import as_scalar
    from theano import Apply
    val = as_scalar(val).astype('uint64')
    return Apply(self, [val], [self.rtype()])

def make_node(self, a, b):
    return Apply(self,
                 [scalar.as_scalar(a), scalar.as_scalar(b)],
                 [scalar.float64()])

def make_node(self, x, scal):
    x = as_tensor_variable(x)
    scal = as_scalar(scal)
    return Apply(self, [x, scal], [x.type()])

def make_node(self, input):
    input = scalar.as_scalar(input)
    output = input.type()
    return Apply(self, [input], [output])

def make_node(self, x):
    x = t.as_scalar(x)
    return theano.Apply(self, [x], [x.type()])

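# End-to-end sketch of the minimal make_node pattern in the last two snippets.
# Hedged: ScalarIdentity is a toy Op invented for illustration, not from the
# Theano codebase; assumes Theano is installed.
import theano.scalar as scalar
from theano.gof import Apply, Op

class ScalarIdentity(Op):
    __props__ = ()

    def make_node(self, x):
        # as_scalar wraps constants / 0-d values into a scalar Variable
        x = scalar.as_scalar(x)
        return Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        output_storage[0][0] = inputs[0]

xv = scalar.float32('x')
yv = ScalarIdentity()(xv)
assert yv.type == xv.type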