Example #1
0
def may_share_memory(a, b, raise_other_type=True):
    """Return whether ``a`` and ``b`` may share memory.

    Dispatches to the matching Type implementation for same-kind pairs
    (ndarray/ndarray, CudaNdarray/CudaNdarray, GpuArray/GpuArray,
    sparse/sparse).  Mixed GPU / non-GPU pairs never alias.

    Parameters
    ----------
    a, b
        Objects to test; must each be an ndarray, scipy.sparse matrix,
        CudaNdarray or GpuArray.
    raise_other_type : bool
        If True (default), raise TypeError when either argument is of an
        unsupported type; otherwise return False for such inputs.
    """
    nd_a = isinstance(a, np.ndarray)
    nd_b = isinstance(b, np.ndarray)
    if nd_a and nd_b:
        return TensorType.may_share_memory(a, b)

    cuda_a = _is_cuda(a)
    cuda_b = _is_cuda(b)
    if cuda_a and cuda_b:
        return CudaNdarrayType.may_share_memory(a, b)

    gpua_a = _is_gpua(a)
    gpua_b = _is_gpua(b)
    if gpua_a and gpua_b:
        return gpuarray.pygpu.gpuarray.may_share_memory(a, b)

    sparse_a = _is_sparse(a)
    sparse_b = _is_sparse(b)
    known_a = nd_a or sparse_a or cuda_a or gpua_a
    known_b = nd_b or sparse_b or cuda_b or gpua_b
    if not (known_a and known_b):
        if raise_other_type:
            raise TypeError("may_share_memory support only ndarray"
                            " and scipy.sparse, CudaNdarray or GpuArray type")
        return False

    # One side is a GPU object, the other is not: no aliasing possible.
    if cuda_a or cuda_b or gpua_a or gpua_b:
        return False
    return SparseType.may_share_memory(a, b)
def sparse_constructor(value,
                       name=None,
                       strict=False,
                       allow_downcast=None,
                       borrow=False,
                       format=None):
    """
    SharedVariable Constructor for SparseType.

    Wraps a scipy sparse matrix in a SparseTensorSharedVariable.  Unless
    ``borrow`` is True, the value is deep-copied so later mutation of the
    caller's matrix does not affect the shared variable.  When ``format``
    is None, the format of ``value`` itself is used.

    Raises
    ------
    TypeError
        If ``value`` is not a scipy.sparse matrix.
    """
    if not isinstance(value, scipy.sparse.spmatrix):
        raise TypeError(
            "Expected a sparse matrix in the sparse shared variable constructor. Received: ",
            value.__class__)

    fmt = value.format if format is None else format
    var_type = SparseType(format=fmt, dtype=value.dtype)
    stored = value if borrow else copy.deepcopy(value)
    return SparseTensorSharedVariable(type=var_type,
                                      value=stored,
                                      name=name,
                                      strict=strict,
                                      allow_downcast=allow_downcast)
Example #3
0
 def make_node(self, n, p, shape):
     """Coerce n, p and shape to tensor variables and build the Apply node.

     The single output is a sparse variable with this op's dtype and
     format.
     """
     inputs = [tensor.as_tensor_variable(v) for v in (n, p, shape)]
     out = SparseType(dtype=self.dtype,
                      format=self.format).make_variable()
     return gof.Apply(self, inputs, [out])
Example #4
0
def may_share_memory(a, b, raise_other_type=True):
    """Return True if ``a`` and ``b`` may share memory, else False.

    Supported argument types are numpy ndarrays, scipy.sparse matrices,
    CudaNdarrays and GpuArrays.  Same-kind pairs are delegated to the
    corresponding Type's check; a GPU object paired with anything else
    cannot alias.  Unsupported types raise TypeError unless
    ``raise_other_type`` is False, in which case False is returned.
    """
    a_is_nd = isinstance(a, np.ndarray)
    b_is_nd = isinstance(b, np.ndarray)
    if a_is_nd and b_is_nd:
        return TensorType.may_share_memory(a, b)

    a_is_cuda = _is_cuda(a)
    b_is_cuda = _is_cuda(b)
    if a_is_cuda and b_is_cuda:
        return CudaNdarrayType.may_share_memory(a, b)

    a_is_gpua = _is_gpua(a)
    b_is_gpua = _is_gpua(b)
    if a_is_gpua and b_is_gpua:
        return gpuarray.pygpu.gpuarray.may_share_memory(a, b)

    recognized_a = a_is_nd or _is_sparse(a) or a_is_cuda or a_is_gpua
    recognized_b = b_is_nd or _is_sparse(b) or b_is_cuda or b_is_gpua
    if not recognized_a or not recognized_b:
        if raise_other_type:
            raise TypeError("may_share_memory support only ndarray"
                            " and scipy.sparse, CudaNdarray or GpuArray type")
        return False

    # Exactly one side is on the GPU here, so they cannot share memory.
    if a_is_cuda or b_is_cuda or a_is_gpua or b_is_gpua:
        return False
    return SparseType.may_share_memory(a, b)
Example #5
0
 def make_node(self, x, y):
     """Build an Apply node for two sparse inputs of identical dtype/format.

     Raises NotImplementedError when the dtypes or sparse formats of the
     two inputs differ.
     """
     x = as_sparse_variable(x)
     y = as_sparse_variable(y)
     if x.type.dtype != y.type.dtype:
         raise NotImplementedError()
     if x.type.format != y.type.format:
         raise NotImplementedError()
     out_type = SparseType(dtype=x.type.dtype, format=x.type.format)
     return gof.Apply(self, [x, y], [out_type.make_variable()])
Example #6
0
    def make_node(self, n, p, shape):
        """Build the Apply node after coercing and dtype-checking the inputs.

        ``n`` and ``shape`` must have discrete dtypes and ``p`` a float
        dtype; the single output is a sparse variable with this op's
        dtype and format.
        """
        n, p, shape = [tensor.as_tensor_variable(v) for v in (n, p, shape)]

        # Dtype sanity checks, following the file's assert-based convention.
        assert n.dtype in discrete_dtypes
        assert p.dtype in float_dtypes
        assert shape.dtype in discrete_dtypes

        out = SparseType(dtype=self.dtype, format=self.format)()
        return gof.Apply(self, [n, p, shape], [out])
Example #7
0
    def make_node(self, x, y):
        """Build an Apply node for a sparse ``x`` and a 1-d dense ``y``.

        ``y`` must be one-dimensional; a dtype mismatch between ``x`` and
        ``y`` raises NotImplementedError.  The output is a sparse variable
        matching ``x``'s dtype and format.
        """
        x = as_sparse_variable(x)
        y = tensor.as_tensor_variable(y)

        assert y.type.ndim == 1

        if x.type.dtype != y.type.dtype:
            raise NotImplementedError()
        out = SparseType(dtype=x.type.dtype,
                         format=x.type.format).make_variable()
        return gof.Apply(self, [x, y], [out])
Example #8
0
def may_share_memory(a, b, raise_other_type=True):
    """Return whether ``a`` and ``b`` may share memory.

    Accepts numpy ndarrays, scipy.sparse matrices and CudaNdarrays.
    Other types raise TypeError, or yield False when
    ``raise_other_type`` is False.
    """
    a_ndarray = isinstance(a, numpy.ndarray)
    b_ndarray = isinstance(b, numpy.ndarray)
    a_sparse = _is_sparse(a)
    b_sparse = _is_sparse(b)
    a_cuda = _is_cuda(a)
    b_cuda = _is_cuda(b)

    a_known = a_ndarray or a_sparse or a_cuda
    b_known = b_ndarray or b_sparse or b_cuda
    if not (a_known and b_known):
        if raise_other_type:
            raise TypeError("may_share_memory support only ndarray and scipy.sparse and CudaNdarray type")
        return False

    if a_ndarray and b_ndarray:
        return TensorType.may_share_memory(a, b)
    if a_cuda and b_cuda:
        # Imported lazily so the CUDA backend is only required when used.
        from theano.sandbox.cuda.type import CudaNdarrayType
        return CudaNdarrayType.may_share_memory(a, b)
    if a_cuda or b_cuda:
        # GPU object paired with a host object: no aliasing possible.
        return False
    return SparseType.may_share_memory(a, b)
Example #9
0
def may_share_memory(a, b, raise_other_type=True):
    """Return True when ``a`` and ``b`` may share memory, False otherwise.

    Only numpy ndarrays, scipy.sparse matrices and CudaNdarrays are
    supported; any other type raises TypeError unless
    ``raise_other_type`` is False.
    """
    a_nd, b_nd = isinstance(a, numpy.ndarray), isinstance(b, numpy.ndarray)
    a_sp, b_sp = _is_sparse(a), _is_sparse(b)
    a_cu, b_cu = _is_cuda(a), _is_cuda(b)

    supported_a = a_nd or a_sp or a_cu
    supported_b = b_nd or b_sp or b_cu
    if not supported_a or not supported_b:
        if raise_other_type:
            raise TypeError("may_share_memory support only ndarray"
                            " and scipy.sparse and CudaNdarray type")
        return False

    if a_nd and b_nd:
        return TensorType.may_share_memory(a, b)
    if a_cu and b_cu:
        # Lazy import keeps the CUDA backend optional.
        from theano.sandbox.cuda.type import CudaNdarrayType
        return CudaNdarrayType.may_share_memory(a, b)
    if a_cu or b_cu:
        # A CudaNdarray cannot alias a host-memory object.
        return False
    return SparseType.may_share_memory(a, b)
Example #10
0
 def make_node(self, x):
     """Wrap ``x`` as a sparse variable and build an Apply whose output
     keeps ``x``'s format but uses this op's ``out_type`` dtype."""
     x = as_sparse_variable(x)
     out = SparseType(dtype=self.out_type,
                      format=x.format).make_variable()
     return gof.Apply(self, [x], [out])