Code Example #1
def may_share_memory(a, b, raise_other_type=True):
    # _is_cuda, _is_gpua and _is_sparse are helpers defined alongside
    # this function. Same-type pairs are delegated to the matching
    # type's own check.
    a_ndarray = isinstance(a, np.ndarray)
    b_ndarray = isinstance(b, np.ndarray)
    if a_ndarray and b_ndarray:
        return TensorType.may_share_memory(a, b)
    a_cuda = _is_cuda(a)
    b_cuda = _is_cuda(b)
    if a_cuda and b_cuda:
        return CudaNdarrayType.may_share_memory(a, b)
    a_gpua = _is_gpua(a)
    b_gpua = _is_gpua(b)
    if a_gpua and b_gpua:
        return gpuarray.pygpu.gpuarray.may_share_memory(a, b)

    # Reject operands that are none of the supported types.
    a_sparse = _is_sparse(a)
    b_sparse = _is_sparse(b)
    if (not (a_ndarray or a_sparse or a_cuda or a_gpua)
            or not (b_ndarray or b_sparse or b_cuda or b_gpua)):
        if raise_other_type:
            raise TypeError("may_share_memory support only ndarray"
                            " and scipy.sparse, CudaNdarray or GpuArray type")
        return False

    # A device-side array can never alias a host-side object.
    if a_cuda or b_cuda or a_gpua or b_gpua:
        return False
    # At this point at least one operand is sparse (a sparse/sparse or
    # sparse/ndarray pair); SparseType handles both combinations.
    return SparseType.may_share_memory(a, b)
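For context, a hedged usage sketch of this helper. The import path below is an assumption based on where the function lives in classic Theano (theano/misc/may_share_memory.py); verify it against your version.

import numpy as np
from theano.misc.may_share_memory import may_share_memory  # assumed path

a = np.arange(6).reshape(2, 3)
print(may_share_memory(a, a[0]))        # True: a[0] is a view into a
print(may_share_memory(a, a.copy()))    # False: distinct buffers
print(may_share_memory(a, "x", raise_other_type=False))  # False instead of raising
# may_share_memory(a, "x")              # would raise TypeError by default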
Code Example #2
def tensor_constructor(value,
                       name=None,
                       strict=False,
                       allow_downcast=None,
                       borrow=False,
                       broadcastable=None,
                       target='cpu'):
    """
    SharedVariable Constructor for TensorType.

    Notes
    -----
    Regarding the inference of the broadcastable pattern...
    The default is to assume that the value might be resized in any
    dimension, so the default broadcastable is ``(False,)*len(value.shape)``.
    The optional `broadcastable` argument will override this default.

    """
    if target != 'cpu':
        raise TypeError('not for cpu')

    if not isinstance(value, numpy.ndarray):
        raise TypeError()

    # If no broadcastable pattern is given, default to assuming the value
    # might be resized in any dimension in the future.
    if broadcastable is None:
        broadcastable = (False,) * len(value.shape)
    tensor_type = TensorType(value.dtype, broadcastable=broadcastable)
    return TensorSharedVariable(type=tensor_type,
                                value=numpy.array(value, copy=(not borrow)),
                                name=name,
                                strict=strict,
                                allow_downcast=allow_downcast)
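A hedged usage sketch: this constructor is normally reached through theano.shared, which dispatches on the value's type and forwards keyword arguments such as broadcastable.

import numpy as np
import theano

s = theano.shared(np.zeros((3, 1)))    # default: nothing broadcastable
print(s.broadcastable)                 # (False, False)

s2 = theano.shared(np.zeros((3, 1)), broadcastable=(False, True))
print(s2.broadcastable)                # (False, True)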
Code Example #3
File: sharedvar.py Project: yubow/Theano
def scalar_constructor(value, name=None, strict=False, allow_downcast=None):
    """SharedVariable constructor for scalar values. Default: int64 or float64.

    :note: We implement this using 0-d tensors for now.

    """
    if not isinstance(value, (numpy.number, float, int, complex)):
        raise TypeError()
    try:
        dtype = value.dtype
    except Exception:
        dtype = numpy.asarray(value).dtype

    dtype = str(dtype)
    value = theano._asarray(value, dtype=dtype)
    tensor_type = TensorType(dtype=str(value.dtype), broadcastable=[])

    try:
        # Do not pass the dtype to asarray because we want this to fail if
        # strict is True and the types do not match.
        rval = ScalarSharedVariable(type=tensor_type,
                                    value=numpy.array(value, copy=True),
                                    name=name,
                                    strict=strict,
                                    allow_downcast=allow_downcast)
        return rval
    except Exception:
        traceback.print_exc()
        raise
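A short sketch of the dtype inference above: NumPy scalars carry a .dtype attribute, while plain Python scalars fall through to numpy.asarray, which typically yields int64 or float64 on 64-bit platforms (hence the docstring's defaults).

import numpy
print(numpy.asarray(1).dtype)      # int64 on most 64-bit platforms
print(numpy.asarray(1.0).dtype)    # float64
print(numpy.float32(1.0).dtype)    # float32 -- read directly from value.dtype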
Code Example #4
File: sharedvar.py Project: luke14free/Theano-PyMC
def scalar_constructor(value,
                       name=None,
                       strict=False,
                       allow_downcast=None,
                       borrow=False,
                       target="cpu"):
    """
    SharedVariable constructor for scalar values. Default: int64 or float64.

    Notes
    -----
    We implement this using 0-d tensors for now.

    We ignore the borrow parameter, since ``value`` is converted to an
    ndarray (a new object) in any case. This still respects the
    semantics of borrow: it is only a hint to Theano that the caller's
    object may be reused.

    """
    if target != "cpu":
        raise TypeError("not for cpu")

    if not isinstance(value, (np.number, float, int, complex)):
        raise TypeError()
    try:
        dtype = value.dtype
    except Exception:
        dtype = np.asarray(value).dtype

    dtype = str(dtype)
    value = _asarray(value, dtype=dtype)
    tensor_type = TensorType(dtype=str(value.dtype), broadcastable=[])

    try:
        # Do not pass the dtype to asarray because we want this to fail if
        # strict is True and the types do not match.
        rval = ScalarSharedVariable(
            type=tensor_type,
            value=np.array(value, copy=True),
            name=name,
            strict=strict,
            allow_downcast=allow_downcast,
        )
        return rval
    except Exception:
        traceback.print_exc()
        raise
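A hedged usage sketch: theano.shared dispatches scalar values to this constructor, and, as the note above says, the result is a 0-d tensor.

import numpy as np
import theano

s = theano.shared(np.float32(2.0))
print(s.type)         # TensorType(float32, scalar)
print(s.ndim)         # 0 -- implemented as a 0-d tensor
print(s.get_value())  # 2.0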
Code Example #5
def may_share_memory(a, b, raise_other_type=True):
    a_ndarray = isinstance(a, numpy.ndarray)
    b_ndarray = isinstance(b, numpy.ndarray)
    a_sparse = _is_sparse(a)
    b_sparse = _is_sparse(b)
    a_cuda = _is_cuda(a)
    b_cuda = _is_cuda(b)

    # Reject operands that are none of the supported types.
    if (not (a_ndarray or a_sparse or a_cuda)
            or not (b_ndarray or b_sparse or b_cuda)):
        if raise_other_type:
            raise TypeError("may_share_memory support only ndarray"
                            " and scipy.sparse and CudaNdarray type")
        return False

    # Same-type pairs are delegated to the matching type's own check.
    if a_ndarray and b_ndarray:
        return TensorType.may_share_memory(a, b)
    if a_cuda and b_cuda:
        # Imported lazily so the CUDA backend is only loaded when needed.
        from theano.sandbox.cuda.type import CudaNdarrayType
        return CudaNdarrayType.may_share_memory(a, b)
    # A CudaNdarray can never alias a host-side object.
    if a_cuda or b_cuda:
        return False
    return SparseType.may_share_memory(a, b)
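For the ndarray branch, a small sketch of the underlying check; in classic Theano, TensorType.may_share_memory delegates to numpy.may_share_memory (an assumption worth verifying against your version).

import numpy

base = numpy.arange(10)
view = base[2:5]                                  # shares base's buffer
print(numpy.may_share_memory(base, view))         # True
print(numpy.may_share_memory(base, base.copy()))  # False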
Code Example #6
File: test_ops.py Project: rpgoldman/Theano-PyMC
    def test_rebroadcast(self):
        rng = np.random.RandomState(3453)
        # Rebroadcast: each (axis, flag) pair marks whether that axis is
        # broadcastable in the output.
        adtens4 = dtensor4()
        adict = [(0, False), (1, True), (2, False), (3, True)]
        adtens4_val = rng.rand(2, 1, 3, 1).astype(config.floatX)
        self._compile_and_check(
            [adtens4],
            [Rebroadcast(*adict)(adtens4)],
            [adtens4_val],
            Rebroadcast,
            warn=False,
        )

        adtens4_bro = TensorType("float64", (True, True, True, False))()
        bdict = [(0, True), (1, False), (2, False), (3, False)]
        adtens4_bro_val = rng.rand(1, 1, 1, 3).astype(config.floatX)
        self._compile_and_check(
            [adtens4_bro],
            [Rebroadcast(*bdict)(adtens4_bro)],
            [adtens4_bro_val],
            Rebroadcast,
        )
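To make the test concrete, a hedged sketch of what Rebroadcast does; the import of Rebroadcast from theano.tensor.basic is an assumption based on classic Theano and may differ across forks.

import numpy as np
import theano
from theano.tensor import dtensor4
from theano.tensor.basic import Rebroadcast  # assumed location

x = dtensor4()                            # broadcastable: (False,) * 4
y = Rebroadcast((1, True), (3, True))(x)  # mark axes 1 and 3 broadcastable
print(y.broadcastable)                    # (False, True, False, True)

f = theano.function([x], y)
f(np.zeros((2, 1, 3, 1)))                 # ok: axes 1 and 3 have length 1
# f(np.zeros((2, 2, 3, 1)))               # would raise: axis 1 is not length 1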