def test_empty_tensor_to_host(self):
    with DeviceArray(shape=(5, 2, 0, 3, 0), dtype=np.float32) as buf:
        assert util.volume(buf.shape) == 0

        host_buf = np.empty(tuple(), dtype=np.float32)
        assert util.volume(host_buf.shape) == 1

        # copy_to should reshape the scalar host buffer to match the empty device array.
        host_buf = buf.copy_to(host_buf)
        assert host_buf.shape == buf.shape
        assert host_buf.nbytes == 0
        assert util.volume(host_buf.shape) == 0
def test_large_allocation(self):
    dtype = np.byte
    # See if we can allocate 3GB (bigger than the max value of a signed 32-bit int).
    shape = (3 * 1024 * 1024 * 1024,)
    with DeviceArray(shape=shape, dtype=dtype) as buf:
        assert buf.allocated_nbytes == util.volume(shape) * np.dtype(dtype).itemsize
def check_empty_tensor_expand(runner, shapes):
    shape = shapes["new_shape"]
    feed_dict = {
        "data": np.zeros(shape=(2, 0, 3, 0), dtype=np.float32),
        "new_shape": np.array(shape, dtype=np.int32),
    }
    outputs = runner.infer(feed_dict)
    # Empty tensor will still be empty after broadcast
    assert outputs["expanded"].shape == shape
    assert util.volume(outputs["expanded"].shape) == 0
def resize(self, shape):
    """
    Resizes or reshapes the array to the specified shape.

    If the allocated memory region is already large enough, no reallocation is performed.

    Args:
        shape (Tuple[int]): The new shape.
    """
    nbytes = util.volume(shape) * np.dtype(self.dtype).itemsize
    if nbytes > self.allocated_nbytes:
        self.free()
        self.allocate(nbytes)
    self.shape = shape
def test_volume(case):
    it, vol = case
    assert util.volume(it) == vol
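# The snippets in this section all lean on util.volume. Below is a minimal sketch of the
# behavior these tests assume: the product of all dimensions, with the empty product
# defined as 1, so a scalar shape `()` has volume 1 and any zero-sized dimension gives
# volume 0. This is only an illustration of the assumed semantics, not the library's
# actual implementation; the standalone `volume` name here exists purely for the sketch.
from functools import reduce
from operator import mul


def volume(shape):
    # Multiply all dimensions together; an empty shape yields the identity, 1.
    return reduce(mul, shape, 1)


assert volume(tuple()) == 1           # scalar shape: one element
assert volume((5, 2, 0, 3, 0)) == 0   # any zero dimension: empty tensor
assert volume((3, 1024, 1024, 1024)) == 3 * 1024 ** 3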
def nbytes(self):
    """
    The number of bytes in the memory region.
    """
    return util.volume(self.shape) * np.dtype(self.dtype).itemsize
def arange(shape):
    return np.arange(util.volume(shape)).reshape(shape)