Example #1
    def test_main(self):
        dtypes = [
            'float32', 'float64', 'int32', 'int64', 'uint8', 'int8', 'bool'
        ]

        places = [fluid.CPUPlace()]
        if fluid.core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
            places.append(fluid.CUDAPinnedPlace())

        for p in places:
            for dtype in dtypes:
                np_arr = np.reshape(
                    np.array(six.moves.range(np.prod(self.shape))).astype(
                        dtype), self.shape)

                # Round-trip: copy the array onto the target place, then
                # read it back as numpy.
                t = fluid.LoDTensor()
                t.set(np_arr, p)

                ret_np_arr = np.array(t)
                self.assertEqual(np_arr.shape, ret_np_arr.shape)
                self.assertEqual(np_arr.dtype, ret_np_arr.dtype)

                all_equal = np.all(np_arr == ret_np_arr)
                self.assertTrue(all_equal)
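
For reference, the same round-trip outside a test class. This is a minimal sketch, assuming a Paddle build where the legacy fluid API is importable; the shape and dtype are illustrative:

    import numpy as np
    import paddle.fluid as fluid

    # Pinned host memory only exists in CUDA builds; fall back to plain CPU.
    place = (fluid.CUDAPinnedPlace()
             if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace())

    np_arr = np.arange(6, dtype='float32').reshape(2, 3)
    t = fluid.LoDTensor()
    t.set(np_arr, place)  # copy the array onto the chosen place

    # Reading the tensor back must preserve shape, dtype, and values.
    assert np.array(t).shape == np_arr.shape
    assert (np.array(t) == np_arr).all()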
Example #2
    def test_input_cuda_pinned_var(self):
        with fluid.dygraph.guard():
            data = np.random.random((2, 80, 16128)).astype('float32')
            # Build an imperative VarBase that lives in pinned host memory.
            var = core.VarBase(value=data,
                               name='',
                               persistable=False,
                               place=fluid.CUDAPinnedPlace(),
                               zero_copy=False)
            # var.shape[1] == 80, so the last axis is cut to 80 entries.
            sliced = var[:, 10:, :var.shape[1]]
            self.assertEqual(sliced.shape, [2, 70, 80])
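
The same slicing also works through the public paddle.to_tensor API used by the later examples. A sketch, assuming a CUDA-enabled build (otherwise the pinned place is unavailable and the CPU fallback is used):

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    place = (fluid.CUDAPinnedPlace()
             if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace())

    data = np.random.random((2, 80, 16128)).astype('float32')
    x = paddle.to_tensor(data, place=place)
    sliced = x[:, 10:, :x.shape[1]]  # x.shape[1] == 80
    print(sliced.shape)              # [2, 70, 80]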
Example #3
    def _set_var(self, var, ndarray):
        # Overwrite the tensor behind `var` in the global scope, keeping it
        # on whatever place (CPU, pinned host memory, or a GPU) it already
        # occupies.
        t = global_scope().find_var(var.name).get_tensor()
        p = t._place()
        if p.is_cpu_place():
            place = fluid.CPUPlace()
        elif p.is_cuda_pinned_place():
            place = fluid.CUDAPinnedPlace()
        else:
            # Recover the device id so the data stays on the same GPU.
            p = fluid.core.Place()
            p.set_place(t._place())
            place = fluid.CUDAPlace(p.gpu_device_id())

        t.set(ndarray, place)
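
The place predicates used above can be checked directly on any tensor; a small sketch, assuming the same fluid build:

    import numpy as np
    import paddle.fluid as fluid

    t = fluid.LoDTensor()
    t.set(np.zeros((2, 2), dtype='float32'), fluid.CPUPlace())

    p = t._place()
    print(p.is_cpu_place())          # True
    print(p.is_cuda_pinned_place())  # False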
Example #4
    def func_tensor_tolist(self):
        places = [fluid.CPUPlace()]
        if fluid.core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
            places.append(fluid.CUDAPinnedPlace())

        for p in places:
            np_arr = np.reshape(
                np.array(six.moves.range(np.prod(self.shape))), self.shape)
            expectlist = np_arr.tolist()

            # Tensor.tolist() must match numpy's nested-list conversion.
            t = paddle.to_tensor(np_arr, place=p)
            tensorlist = t.tolist()

            self.assertEqual(tensorlist, expectlist)
Example #5
    def test_tensor_fill_true(self):
        typelist = ['float32', 'float64', 'int32', 'int64', 'float16']
        places = [fluid.CPUPlace()]
        if fluid.core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
            places.append(fluid.CUDAPinnedPlace())

        for p in places:
            np_arr = np.reshape(
                np.array(six.moves.range(np.prod(self.shape))), self.shape)
            for dtype in typelist:
                tensor = paddle.to_tensor(np_arr, place=p, dtype=dtype)
                target = tensor.numpy()
                target[...] = 0

                # zero_() zeros the tensor in place; compare against the
                # all-zeros reference built above.
                tensor.zero_()
                self.assertEqual((tensor.numpy() == target).all().item(), True)
Example #6
    def _feed_random_data(self, use_gpu, as_lodtensor=False):
        print("feed random data")
        feed = {}
        if use_gpu and as_lodtensor:
            # Pinned host memory speeds up the host-to-device copy.
            place = fluid.CUDAPinnedPlace()
        for var in self.feed_vars:
            if var.type != fluid.core.VarDesc.VarType.LOD_TENSOR:
                raise TypeError("Feed data of non LoDTensor is not supported.")

            shape = var.shape
            dtype = self.convert_dtype(var.dtype, to_string=True)
            data = np.random.random(shape).astype(dtype)
            if use_gpu and as_lodtensor:
                tensor = fluid.core.LoDTensor()
                tensor.set(data, place)
                feed[var.name] = tensor
            else:
                feed[var.name] = data
        return feed
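
On the consuming side, a feed dict like this is passed to an executor. Below is a sketch of that end of the pipeline, assuming Paddle 2.x with the legacy static-graph fluid API still available; the one-op program (y = x * 2) is illustrative only:

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()

    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        x = fluid.data(name='x', shape=[2, 3], dtype='float32')
        y = x * 2.0

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)

    # A LoDTensor held in pinned host memory is fed like a numpy array.
    t = fluid.core.LoDTensor()
    t.set(np.ones((2, 3), dtype='float32'),
          fluid.CUDAPinnedPlace() if fluid.core.is_compiled_with_cuda()
          else fluid.CPUPlace())
    out, = exe.run(main_prog, feed={'x': t}, fetch_list=[y])
    print(out)  # all twos, shape (2, 3)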
Example #7
    def func_test_tensor_fill_true(self):
        typelist = ['float32', 'float64', 'int32', 'int64', 'float16']
        places = [fluid.CPUPlace()]
        if fluid.core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
            places.append(fluid.CUDAPinnedPlace())

        for idx, p in enumerate(places):
            if idx == 0:
                paddle.set_device('cpu')
            else:
                paddle.set_device('gpu')
            np_arr = np.reshape(
                np.array(six.moves.range(np.prod(self.shape))), self.shape)
            for dtype in typelist:
                var = 1.
                tensor = paddle.to_tensor(np_arr, place=p, dtype=dtype)
                target = tensor.numpy()
                target[...] = var

                # fill_ writes the Python scalar in place, cast to the
                # tensor's dtype.
                tensor.fill_(var)
                self.assertEqual((tensor.numpy() == target).all(), True)
Example #8
    def func_test_tensor_fill_backward(self):
        typelist = ['float32']
        places = [fluid.CPUPlace()]
        if fluid.core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
            places.append(fluid.CUDAPinnedPlace())

        for idx, p in enumerate(places):
            if idx == 0:
                paddle.set_device('cpu')
            else:
                paddle.set_device('gpu')
            np_arr = np.reshape(
                np.array(six.moves.range(np.prod(self.shape))), self.shape)
            for dtype in typelist:
                var = 1
                tensor = paddle.to_tensor(np_arr, place=p, dtype=dtype)
                tensor.stop_gradient = False
                y = tensor * 2
                y.fill_(var)
                loss = y.sum()
                loss.backward()

                # fill_ overwrites y in place, so no gradient flows back
                # through the overwritten values: y.grad is all zeros.
                self.assertEqual((y.grad.numpy() == 0).all().item(), True)