Example #1
0
 def test_host_alloc_mapped(self):
     # A mapped array is visible from both host and device, so a device-side
     # memset should be observable directly through the host view.
     mapped = cuda.mapped_array(10, dtype=np.uint32)
     mapped.fill(123)
     self.assertTrue((mapped == 123).all())
     driver.device_memset(mapped, 0, driver.device_memory_size(mapped))
     self.assertTrue((mapped == 0).all())
     self.assertEqual(sum(mapped != 0), 0)
Example #2
0
 def test_host_alloc_pinned(self):
     # Pinned (page-locked) host memory is NOT device-mapped: clearing the
     # device copy must not affect the host array until an explicit copy back.
     pinned = cuda.pinned_array(10, dtype=np.uint32)
     pinned.fill(123)
     self.assertTrue((pinned == 123).all())
     on_device = cuda.to_device(pinned)
     driver.device_memset(on_device, 0, driver.device_memory_size(on_device))
     # Host view is unchanged until we copy the device data back.
     self.assertTrue((pinned == 123).all())
     on_device.copy_to_host(pinned)
     self.assertTrue((pinned == 0).all())
 def test_host_alloc_pinned(self):
     # NOTE(review): this is a duplicate definition of test_host_alloc_pinned
     # in this listing; in a single module the later def would shadow the
     # earlier one.
     host_ary = cuda.pinned_array(10, dtype=np.uint32)
     host_ary.fill(123)
     self.assertTrue(all(host_ary == 123))
     dev_ary = cuda.to_device(host_ary)
     nbytes = driver.device_memory_size(dev_ary)
     driver.device_memset(dev_ary, 0, nbytes)
     # Pinned memory is not mapped, so the host copy still holds 123.
     self.assertTrue(all(host_ary == 123))
     dev_ary.copy_to_host(host_ary)
     self.assertTrue(all(host_ary == 0))
Example #4
0
    def __init__(self,
                 shape,
                 strides,
                 dtype,
                 stream=0,
                 writeback=None,
                 gpu_data=None):
        """
        Construct a device ndarray descriptor and (if needed) its GPU buffer.

        Args
        ----

        shape
            array shape; an int is treated as a 1-D shape.
        strides
            array strides; an int is treated as a 1-tuple. Must have one
            entry per dimension of ``shape``. (Presumably byte strides, as
            they are combined with ``dtype.itemsize`` below — confirm.)
        dtype
            data type as np.dtype coercible object.
        stream
            cuda stream.
        writeback
            Deprecated.
        gpu_data
            user provided device memory for the ndarray data buffer; when
            omitted and the array is non-empty, memory is allocated from the
            current context.
        """
        # Normalize scalar shape/strides into 1-tuples so callers may pass
        # plain ints for 1-D arrays.
        if isinstance(shape, int):
            shape = (shape, )
        if isinstance(strides, int):
            strides = (strides, )
        dtype = np.dtype(dtype)
        self.ndim = len(shape)
        if len(strides) != self.ndim:
            raise ValueError('strides not match ndim')
        # Layout-only helper array: tracks shape/strides/itemsize without
        # owning any host data (base pointer 0).
        self._dummy = dummyarray.Array.from_desc(0, shape, strides,
                                                 dtype.itemsize)
        self.shape = tuple(shape)
        self.strides = tuple(strides)
        self.dtype = dtype
        # Total element count; the product of an empty shape is 1 (scalar-like).
        self.size = int(functools.reduce(operator.mul, self.shape, 1))
        # prepare gpu memory
        if self.size > 0:
            if gpu_data is None:
                # No buffer supplied: compute the required byte size from the
                # layout and allocate it in the current CUDA context.
                self.alloc_size = _driver.memory_size_from_info(
                    self.shape, self.strides, self.dtype.itemsize)
                gpu_data = devices.get_context().memalloc(self.alloc_size)
            else:
                # Adopt the caller-supplied device buffer and record its size.
                self.alloc_size = _driver.device_memory_size(gpu_data)
        else:
            # Make NULL pointer for empty allocation
            gpu_data = _driver.MemoryPointer(context=devices.get_context(),
                                             pointer=c_void_p(0),
                                             size=0)
            self.alloc_size = 0

        self.gpu_data = gpu_data

        self.__writeback = writeback  # should deprecate the use of this
        self.stream = stream
 def test_host_alloc_mapped(self):
     # Mapped host memory shares storage with the device: a device-side
     # memset is immediately visible through the host array.
     buf = cuda.mapped_array(10, dtype=np.uint32)
     buf.fill(123)
     self.assertTrue((buf == 123).all())
     size_in_bytes = driver.device_memory_size(buf)
     driver.device_memset(buf, 0, size_in_bytes)
     self.assertTrue((buf == 0).all())