import numpy
import scipy.sparse
import theano
from theano.misc.may_share_memory import may_share_memory


def test_may_share_memory_scipy():
    a = scipy.sparse.csc_matrix(scipy.sparse.eye(5, 3))
    b = scipy.sparse.csc_matrix(scipy.sparse.eye(4, 3))

    def as_ar(a):
        return theano._asarray(a, dtype='int32')

    for a_, b_, rep in [(a, a, True), (b, b, True), (a, b, False),
                        (a, a.data, True), (a, a.indptr, True),
                        (a, a.indices, True), (a, as_ar(a.shape), False),
                        (a.data, a, True), (a.indptr, a, True),
                        (a.indices, a, True), (as_ar(a.shape), a, False),
                        (b, b.data, True), (b, b.indptr, True),
                        (b, b.indices, True), (b, as_ar(b.shape), False),
                        (b.data, b, True), (b.indptr, b, True),
                        (b.indices, b, True), (as_ar(b.shape), b, False),
                        (b.data, a, False), (b.indptr, a, False),
                        (b.indices, a, False), (as_ar(b.shape), a, False)]:

        assert may_share_memory(a_, b_) == rep
        assert may_share_memory(b_, a_) == rep

    # Test that a TypeError is raised when needed.
    for a_, b_, rep in [(a, (0,), False), (a, 1, False), (a, None, False)]:
        assert may_share_memory(a_, b_, False) == rep
        assert may_share_memory(b_, a_, False) == rep
        try:
            may_share_memory(a_, b_)
            raise Exception("An error was expected")
        except TypeError:
            pass
        try:
            may_share_memory(b_, a_)
            raise Exception("An error was expected")
        except TypeError:
            pass
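
# A minimal sketch (not from the test suite) of why the sparse cases above
# hold: a csc_matrix is backed by its .data/.indptr/.indices ndarrays, so an
# in-place write through .data is visible through the matrix itself, while
# .shape is a plain tuple that as_ar copies into a fresh array.
def _demo_csc_backing():
    m = scipy.sparse.csc_matrix(scipy.sparse.eye(3, 3))
    m.data[0] = 42.0
    # The matrix reads through the very same buffer.
    assert m[0, 0] == 42.0
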
def test_may_share_memory():
    a = numpy.random.rand(5, 4)
    b = numpy.random.rand(5, 4)
    va = a.view()
    vb = b.view()
    ra = a.reshape((4, 5))
    rb = b.reshape((4, 5))
    ta = a.T
    tb = b.T

    for a_, b_, rep in [(a, a, True), (b, b, True), (a, b, False),
                        (a, a[0], True), (a, a[:, 0], True), (a, a.T, True),
                        (a, (0,), False), (a, 1, False), (a, None, False),
                        (a, va, True), (b, vb, True), (va, b, False),
                        (a, vb, False), (a, ra, True), (b, rb, True),
                        (ra, b, False), (a, rb, False), (a, ta, True),
                        (b, tb, True), (ta, b, False), (a, tb, False)]:

        assert may_share_memory(a_, b_, False) == rep
        assert may_share_memory(b_, a_, False) == rep

    # Test that a TypeError is raised when needed.
    for a_, b_, rep in [(a, (0,), False), (a, 1, False), (a, None, False)]:
        assert may_share_memory(a_, b_, False) == rep
        assert may_share_memory(b_, a_, False) == rep
        try:
            may_share_memory(a_, b_)
            raise Exception("An error was expected")
        except TypeError:
            pass
        try:
            may_share_memory(b_, a_)
            raise Exception("An error was expected")
        except TypeError:
            pass
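
# Usage sketch (an illustration, not part of the original tests) of the third
# positional argument exercised above: passing False makes may_share_memory
# tolerate non-array types and return False, while the default raises a
# TypeError, as the try/except blocks check.
def _demo_raise_flag():
    x = numpy.zeros(3)
    assert not may_share_memory(x, None, False)
    try:
        may_share_memory(x, None)
        raise Exception("An error was expected")
    except TypeError:
        pass
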
Example No. 4
def may_share_memory_core(a, b):
    # Common checks for two distinct arrays of shape (5, 4); the reshapes
    # below assume that shape.
    va = a.view()
    vb = b.view()
    ra = a.reshape((4, 5))
    rb = b.reshape((4, 5))
    ta = a.T
    tb = b.T

    for a_, b_, rep in [(a, a, True), (b, b, True), (a, b, False),
                        (a, a[0], True), (a, a[:, 0], True), (a, a.T, True),
                        (a, (0,), False), (a, 1, False), (a, None, False),
                        (a, va, True), (b, vb, True), (va, b, False),
                        (a, vb, False), (a, ra, True), (b, rb, True),
                        (ra, b, False), (a, rb, False), (a, ta, True),
                        (b, tb, True), (ta, b, False), (a, tb, False)]:

        assert may_share_memory(a_, b_, False) == rep
        assert may_share_memory(b_, a_, False) == rep

    # Test that a TypeError is raised when needed.
    for a_, b_, rep in [(a, (0,), False), (a, 1, False), (a, None, False)]:
        assert may_share_memory(a_, b_, False) == rep
        assert may_share_memory(b_, a_, False) == rep
        try:
            may_share_memory(a_, b_)
            raise Exception("An error was expected")
        except TypeError:
            pass
        try:
            may_share_memory(b_, a_)
            raise Exception("An error was expected")
        except TypeError:
            pass
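
# A plausible driver for the helper above (a sketch; the original callers are
# not shown in this listing): feed it two independent 5x4 ndarrays.
def test_may_share_memory_ndarray():
    a = numpy.random.rand(5, 4)
    b = numpy.random.rand(5, 4)
    may_share_memory_core(a, b)
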
Example No. 5
def test_may_share_memory_cuda():
    # Assumed import (the module-level import is not shown in this listing):
    # `cuda` is Theano's old GPU backend, theano.sandbox.cuda.
    from theano.sandbox import cuda
    from theano.misc.may_share_memory import may_share_memory

    a = cuda.CudaNdarray(numpy.zeros((3, 4), dtype="float32"))
    b = cuda.CudaNdarray(numpy.zeros((3, 4), dtype="float32"))
    na = numpy.zeros((3, 4))
    nb = numpy.zeros((3, 4))
    va = a.view()
    vb = b.view()
    ra = a.reshape((4, 3))
    rb = b.reshape((4, 3))

    # Can't test the transpose, as assigning ta._strides is not implemented.
    # A manual transpose of a would look like:
    # ta = a.reshape((4, 3))
    # ta._strides = (ta._strides[1], ta._strides[0])  # not implemented
    # elem_size = numpy.zeros(0, dtype=a.dtype).dtype.itemsize
    # ta.gpudata += ta.size * elem_size

    for a_, b_, rep in [
        (a, a, True),
        (b, b, True),
        (a, b, False),
        (a, na, False),
        (b, nb, False),
        (na, b, False),
        (nb, a, False),
        (a, va, True),
        (b, vb, True),
        (va, b, False),
        (a, vb, False),
        (a, ra, True),
        (b, rb, True),
        (ra, b, False),
        (a, rb, False),
    ]:
        assert may_share_memory(a_, b_) == rep
        assert may_share_memory(b_, a_) == rep

    # Test that a TypeError is raised when needed.
    for a_, b_, rep in [(a, (0,), False), (a, 1, False), (a, None, False)]:
        assert may_share_memory(a_, b_, False) == rep
        assert may_share_memory(b_, a_, False) == rep
        try:
            may_share_memory(a_, b_)
            raise Exception("An error was expected")
        except TypeError:
            pass
        try:
            may_share_memory(b_, a_)
            raise Exception("An error was expected")
        except TypeError:
            pass
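
# A guard sketch for machines without a GPU (an assumption: the old backend
# exposes cuda_available, and Theano-era tests used nose's SkipTest):
def _skip_without_cuda():
    from nose.plugins.skip import SkipTest
    from theano.sandbox import cuda
    if not cuda.cuda_available:
        raise SkipTest("CUDA not available; skipping CudaNdarray checks")
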
Example No. 7
        def test_inplace_set_value(self):
            """
            We test that if the SharedVariable implement it we do inplace set_value
            We also test this for partial inplace modification when accessing the internal of theano.
            """
            dtype = self.dtype
            if dtype is None:
                dtype = theano.config.floatX

            shp = (100//4, 1024)  # 100KB

            x = np.zeros(shp, dtype=dtype)
            x = self.cast_value(x)
            x_shared = self.shared_constructor(x, borrow=True)

            old_data = x_shared.container.storage[0]
            nd = np.ones(shp, dtype=dtype)

            if x.__class__.__name__ != 'csr_matrix':
                # Sparse matrices don't support in-place assignment.
                x_shared.container.value[:] = nd
                assert (np.asarray(x_shared.get_value(borrow=True)) == nd).all()
                # This should always share value!
                assert may_share_memory(old_data, x_shared.container.storage[0])
                assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))

                nd[0] += 1
                x_shared.container.value[0] = nd[0]
                assert (np.asarray(x_shared.get_value(borrow=True)[0]) == nd[0]).all()
                assert (np.asarray(x_shared.get_value(borrow=True)[1:]) == nd[1:]).all()
                # This should always share value!
                assert may_share_memory(old_data, x_shared.container.storage[0])
                assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))

            if x.__class__.__name__ != 'csr_matrix':
                # Sparse matrices don't support in-place assignment.
                nd += 1
                # THIS DOESN'T DO WHAT WE EXPECT: the contents are not
                # updated for GpuArray, but they are for ndarray.
                x_shared.get_value(borrow=True)[:] = nd
                assert may_share_memory(old_data, x_shared.container.storage[0])
                x_shared.get_value(borrow=True)

            # Test by set_value with borrow=False
            nd += 1
            old_data = x_shared.container.storage[0]
            x_shared.set_value(nd, borrow=False)
            assert np.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
                    self.ref_fct(self.cast_value(nd)))
            assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace

            # Test by set_value with borrow=False when the new data is cast.
            # Specifically useful for gpu data.
            nd += 1
            old_data = x_shared.container.storage[0]
            x_shared.set_value(self.cast_value(nd), borrow=False)
            assert np.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
                    self.ref_fct(self.cast_value(nd)))
            assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_cast_value_inplace

            # Test by set_value with borrow=True
            nd += 1
            old_data = x_shared.container.storage[0]
            x_shared.set_value(nd.copy(), borrow=True)
            assert np.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
                    self.ref_fct(self.cast_value(nd)))
            assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace

            # Test by set_value with borrow=True when new data cast.
            nd += 1
            old_data = x_shared.container.storage[0]
            x_shared.set_value(self.cast_value(nd.copy()), borrow=True)
            assert np.allclose(self.ref_fct(x_shared.get_value(borrow=True)), self.ref_fct(self.cast_value(nd)))
            assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_cast_value_inplace
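
# A borrow-semantics sketch (illustration only, assuming the standard
# theano.shared constructor): with borrow=True, get_value can hand back the
# internal buffer, so in-place writes through it are visible to the shared
# variable, which is what the storage[0] checks above rely on.
def _demo_borrow():
    s = theano.shared(numpy.zeros(4))
    buf = s.get_value(borrow=True)
    buf[0] = 1.0
    assert s.get_value()[0] == 1.0
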
Example No. 8
        def test_inplace_set_value(self):
            # Test that set_value is done in place when the SharedVariable
            # implementation supports it, including partial in-place
            # modification when accessing Theano's internals.

            dtype = self.dtype
            if dtype is None:
                dtype = theano.config.floatX

            shp = (100 // 4, 1024)  # 100KB

            x = np.zeros(shp, dtype=dtype)
            x = self.cast_value(x)
            x_shared = self.shared_constructor(x, borrow=True)

            old_data = x_shared.container.storage[0]
            nd = np.ones(shp, dtype=dtype)

            if x.__class__.__name__ != "csr_matrix":
                # Sparse matrices don't support in-place assignment.
                x_shared.container.value[:] = nd
                assert (np.asarray(
                    x_shared.get_value(borrow=True)) == nd).all()
                # This should always share value!
                assert may_share_memory(old_data,
                                        x_shared.container.storage[0])
                assert may_share_memory(
                    old_data,
                    x_shared.get_value(borrow=True, return_internal_type=True))

                nd[0] += 1
                x_shared.container.value[0] = nd[0]
                assert (np.asarray(
                    x_shared.get_value(borrow=True)[0]) == nd[0]).all()
                assert (np.asarray(
                    x_shared.get_value(borrow=True)[1:]) == nd[1:]).all()
                # This should always share value!
                assert may_share_memory(old_data,
                                        x_shared.container.storage[0])
                assert may_share_memory(
                    old_data,
                    x_shared.get_value(borrow=True, return_internal_type=True))

            if x.__class__.__name__ != "csr_matrix":
                # Sparse matrices don't support in-place assignment.
                nd += 1
                # THIS DOESN'T DO WHAT WE EXPECT: the contents are not
                # updated for GpuArray, but they are for ndarray.
                x_shared.get_value(borrow=True)[:] = nd
                assert may_share_memory(old_data,
                                        x_shared.container.storage[0])
                x_shared.get_value(borrow=True)

            # Test by set_value with borrow=False
            nd += 1
            old_data = x_shared.container.storage[0]
            x_shared.set_value(nd, borrow=False)
            assert np.allclose(
                self.ref_fct(x_shared.get_value(borrow=True)),
                self.ref_fct(self.cast_value(nd)),
            )
            assert (may_share_memory(
                old_data,
                x_shared.container.storage[0]) == self.set_value_inplace)

            # Test by set_value with borrow=False when the new data is cast.
            # Specifically useful for gpu data.
            nd += 1
            old_data = x_shared.container.storage[0]
            x_shared.set_value(self.cast_value(nd), borrow=False)
            assert np.allclose(
                self.ref_fct(x_shared.get_value(borrow=True)),
                self.ref_fct(self.cast_value(nd)),
            )
            assert (may_share_memory(
                old_data,
                x_shared.container.storage[0]) == self.set_cast_value_inplace)

            # Test by set_value with borrow=True
            nd += 1
            old_data = x_shared.container.storage[0]
            x_shared.set_value(nd.copy(), borrow=True)
            assert np.allclose(
                self.ref_fct(x_shared.get_value(borrow=True)),
                self.ref_fct(self.cast_value(nd)),
            )
            assert (may_share_memory(
                old_data,
                x_shared.container.storage[0]) == self.set_value_inplace)

            # Test by set_value with borrow=True when new data cast.
            nd += 1
            old_data = x_shared.container.storage[0]
            x_shared.set_value(self.cast_value(nd.copy()), borrow=True)
            assert np.allclose(
                self.ref_fct(x_shared.get_value(borrow=True)),
                self.ref_fct(self.cast_value(nd)),
            )
            assert (may_share_memory(
                old_data,
                x_shared.container.storage[0]) == self.set_cast_value_inplace)
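
# A restatement sketch of the container indirection checked above (using only
# calls the assertions themselves use): a shared variable's current value
# lives in container.storage[0], a one-element list, so comparing storage[0]
# before and after set_value is how these tests detect in-place updates.
def _demo_container(x_shared):
    inner = x_shared.container.storage[0]
    assert may_share_memory(
        inner, x_shared.get_value(borrow=True, return_internal_type=True))
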
Example No. 9
        def test_inplace_set_value(self):
            """
            We test that if the SharedVariable implement it we do inplace set_value
            We also test this for partial inplace modification when accessing the internal of theano.
            """
            dtype = self.dtype
            if dtype is None:
                dtype = theano.config.floatX

            shp = (100 // 4, 1024)  # 100KB

            x = numpy.zeros(shp, dtype=dtype)
            x = self.cast_value(x)
            x_shared = self.shared_constructor(x, borrow=True)

            old_data = x_shared.container.storage[0]
            nd = numpy.ones(shp, dtype=dtype)

            if x.__class__.__name__ != 'csr_matrix':
                # Sparse matrices don't support in-place assignment.
                x_shared.container.value[:] = nd
                assert (numpy.asarray(x_shared.get_value(borrow=True)) == nd).all()
                # This should always share value!
                assert may_share_memory(old_data, x_shared.container.storage[0])
                assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))

                nd[0] += 1
                x_shared.container.value[0] = nd[0]
                assert (numpy.asarray(x_shared.get_value(borrow=True)[0]) == nd[0]).all()
                assert (numpy.asarray(x_shared.get_value(borrow=True)[1:]) == nd[1:]).all()
                # This should always share value!
                assert may_share_memory(old_data, x_shared.container.storage[0])
                assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))

            if x.__class__.__name__ != 'csr_matrix':
                # Sparse matrices don't support in-place assignment.
                nd += 1
                # THIS DOESN'T DO WHAT WE EXPECT: the contents are not updated
                # for CudaNdarray, but they are for ndarray.
                x_shared.get_value(borrow=True)[:] = nd
                # assert (numpy.asarray(x_shared.get_value(borrow=True)) != nd).all()
                assert may_share_memory(old_data, x_shared.container.storage[0])
                x_shared.get_value(borrow=True)

            # Test by .value
            # As we know that .value is deprecated, we filter out the warning
            warnings.filterwarnings(
                    action='ignore',
                    message='The .value property of shared variables is deprecated.'
                    )
            try:
                nd += 1
                old_data = x_shared.container.storage[0]
                x_shared.value = nd
                assert numpy.allclose(self.ref_fct(x_shared.value), self.ref_fct(self.cast_value(nd)))
                assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace
            finally:
                # Restore the default behavior.
                # TODO There is a cleaner way to do this in Python 2.6, once
                # Theano drops support of Python 2.4 and 2.5.
                warnings.filterwarnings(
                    action='default',
                    message='The .value property of shared variables is deprecated.'
                    )

            # Test by set_value with borrow=False
            nd += 1
            old_data = x_shared.container.storage[0]
            x_shared.set_value(nd, borrow=False)
            assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
                    self.ref_fct(self.cast_value(nd)))
            assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace

            # Test by set_value with borrow=False when the new data is cast.
            # Specifically useful for gpu data.
            nd += 1
            old_data = x_shared.container.storage[0]
            x_shared.set_value(self.cast_value(nd), borrow=False)
            assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
                    self.ref_fct(self.cast_value(nd)))
            assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_casted_value_inplace

            # Test by set_value with borrow=True
            nd += 1
            old_data = x_shared.container.storage[0]
            x_shared.set_value(nd.copy(), borrow=True)
            assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),
                    self.ref_fct(self.cast_value(nd)))
            assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace

            # Test by set_value with borrow=True when the new data is cast.
            nd += 1
            old_data = x_shared.container.storage[0]
            x_shared.set_value(self.cast_value(nd.copy()), borrow=True)
            assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)), self.ref_fct(self.cast_value(nd)))
            assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_casted_value_inplace
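
# A modern alternative (a sketch; stdlib-only) to the filterwarnings/restore
# pairing used in the .value test above: scope the filter with a context
# manager so the default behavior is restored automatically.
import warnings

def _set_value_silently(x_shared, nd):
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        x_shared.value = nd  # deprecated property; warning suppressed here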