Example #1
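# Wrap a raw value as a Theano Variable, reduce 0-d values to a scalar, and verify it is float32.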
def ensure_float(val, name):
    if not isinstance(val, Variable):
        val = constant(val)
    if hasattr(val, 'ndim') and val.ndim == 0:
        val = as_scalar(val)
    if not isinstance(val.type, theano.scalar.Scalar):
        raise TypeError("%s: expected a scalar value" % (name,))
    if not val.type.dtype == 'float32':
        raise TypeError("%s: type is not float32" % (name,))
    return val
Example #2
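        # Apply the wrapped tensor_op to the data of a sparse variable and rebuild the result in the same sparse format.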
        def wrapper(*args):
            x = as_sparse_variable(args[0])
            
            xs = [scalar.as_scalar(arg) for arg in args[1:]]

            data, ind, ptr, shape = csm_properties(x)

            data = tensor_op(data, *xs)

            return CSM(x.format)(data, ind, ptr, shape)
Example #3
File: dnn.py Project: c0g/Theano
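    # Validate the kernel/topgrad tensors and the cuDNN convolution descriptor, then wrap h and w with as_scalar.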
    def make_node(self, kern, topgrad, desc, h, w):
        kern = as_cuda_ndarray_variable(kern)
        topgrad = as_cuda_ndarray_variable(topgrad)
        if kern.type.ndim != 4:
            raise TypeError('kern must be 4D tensor')
        if topgrad.type.ndim != 4:
            raise TypeError('topgrad must be 4D tensor')

        if not isinstance(desc.type, CDataType) \
                or desc.type.ctype != 'cudnnConvolutionDescriptor_t':
            raise TypeError('desc must be cudnnConvolutionDescriptor_t')

        h = as_scalar(h)
        w = as_scalar(w)

        broadcastable = [topgrad.type.broadcastable[0],
                         kern.type.broadcastable[1],
                         False, False]

        return Apply(self, [kern, topgrad, desc, h, w],
                     [CudaNdarrayType(broadcastable)()])
Example #4
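# Same scalar coercion as ensure_float, but with a default for None and an astype() cast to the requested dtype.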
def ensure_dt(val, default, name, dtype):
    if val is None:
        val = default.clone()
    if not isinstance(val, Variable):
        val = constant(val)
    if hasattr(val, 'ndim') and val.ndim == 0:
        val = as_scalar(val)
    if not isinstance(val.type, theano.scalar.Scalar):
        raise TypeError("%s: expected a scalar value" % (name,))
    if not val.type.dtype == dtype:
        val = val.astype(dtype)
    return val
Example #5
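    # Multinomial-style node: pvals must be 2-D and unis 1-D; n is wrapped with as_scalar, and 'auto' resolves the output dtype to int64.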
    def make_node(self, pvals, unis, n=1):
        pvals = tt.as_tensor_variable(pvals)
        unis = tt.as_tensor_variable(unis)
        if pvals.ndim != 2:
            raise NotImplementedError("pvals ndim should be 2", pvals.ndim)
        if unis.ndim != 1:
            raise NotImplementedError("unis ndim should be 1", unis.ndim)
        if self.odtype == "auto":
            odtype = "int64"
        else:
            odtype = self.odtype
        out = tt.tensor(dtype=odtype, broadcastable=pvals.type.broadcastable)
        return Apply(self, [pvals, unis, as_scalar(n)], [out])
Example #6
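    # Variant of the node above where 'auto' resolves the output dtype to pvals.dtype instead of int64.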
    def make_node(self, pvals, unis, n=1):
        pvals = T.as_tensor_variable(pvals)
        unis = T.as_tensor_variable(unis)
        if pvals.ndim != 2:
            raise NotImplementedError('pvals ndim should be 2', pvals.ndim)
        if unis.ndim != 1:
            raise NotImplementedError('unis ndim should be 1', unis.ndim)
        if self.odtype == 'auto':
            odtype = pvals.dtype
        else:
            odtype = self.odtype
        out = T.tensor(dtype=odtype, broadcastable=pvals.type.broadcastable)
        return Apply(self, [pvals, unis, as_scalar(n)], [out])
Example #7
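    # GPU version: move pvals and unis to the GPU context, check shapes, build an int64 GpuArrayType output, and wrap n with as_scalar.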
    def make_node(self, pvals, unis, n):
        assert pvals.dtype == "float32"
        assert unis.dtype == "float32"
        ctx_name = infer_context_name(pvals, unis)

        pvals = as_gpuarray_variable(pvals, ctx_name)
        unis = as_gpuarray_variable(unis, ctx_name)

        if pvals.ndim != 2:
            raise NotImplementedError("pvals ndim should be 2", pvals.ndim)
        if unis.ndim != 1:
            raise NotImplementedError("unis ndim should be 1", unis.ndim)
        if self.odtype == "auto":
            odtype = "int64"
        else:
            odtype = self.odtype
        assert odtype == "int64", odtype
        br = (pvals.broadcastable[1], pvals.broadcastable[0])
        out = GpuArrayType(broadcastable=br, dtype=odtype, context_name=ctx_name)()

        return Apply(self, [pvals, unis, as_scalar(n)], [out])
Example #8
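    # Coerce val to a scalar and cast it to uint64 before building the Apply node.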
    def make_node(self, val):
        from theano.scalar import as_scalar
        from theano import Apply

        val = as_scalar(val).astype('uint64')
        return Apply(self, [val], [self.rtype()])
Example #9
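    # Both inputs are coerced with scalar.as_scalar; the output is a float64 scalar.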
    def make_node(self, a, b):
        return Apply(self, [scalar.as_scalar(a), scalar.as_scalar(b)], [scalar.float64()])
Example #10
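    # x is coerced to a tensor variable and scal to a scalar; the output shares x's type.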
    def make_node(self, x, scal):
        x = as_tensor_variable(x)
        scal = as_scalar(scal)
        return Apply(self, [x, scal], [x.type()])
Example #11
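    # The single input is coerced to a scalar and the output reuses its type.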
    def make_node(self, input):
        input = scalar.as_scalar(input)
        output = input.type()
        return Apply(self, [input], [output])
Example #12
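    # Minimal case: coerce x to a scalar and return an Apply node whose output has the same type.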
    def make_node(self, x):
        x = t.as_scalar(x)
        return theano.Apply(self, [x], [x.type()])