def test_empty_tensor_to_host(self):
    buf = DeviceBuffer(shape=(5, 2, 0, 3, 0), dtype=np.float32)
    assert misc.volume(buf.shape) == 0

    host_buf = np.empty(tuple(), dtype=np.float32)
    assert misc.volume(host_buf.shape) == 1

    host_buf = buf.copy_to(host_buf)
    assert host_buf.shape == buf.shape
    assert host_buf.nbytes == 0
    assert misc.volume(host_buf.shape) == 0
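# A minimal sketch of what misc.volume computes, inferred from the assertions
# above rather than taken from the library's actual source: the product of all
# dimensions, where an empty shape yields the multiplicative identity, 1, and
# any zero-length dimension yields 0.
from functools import reduce

def volume(obj):
    return reduce(lambda x, y: x * y, obj, 1)

assert volume(tuple()) == 1
assert volume((5, 2, 0, 3, 0)) == 0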
def copy_to(self, host_buffer, stream=None):
    """
    Copies from this device buffer to the provided host buffer.
    The host buffer must be contiguous in memory (see np.ascontiguousarray).

    Args:
        host_buffer (numpy.ndarray): The host buffer to copy into.
        stream (Stream): A Stream instance (see util/cuda.py).
                Performs a synchronous copy if no stream is provided.

    Returns:
        numpy.ndarray: The host buffer, possibly reallocated if the
                provided buffer was too small.
    """
    nbytes = misc.volume(self.shape) * np.dtype(self.dtype).itemsize
    self._check_dtype_matches(host_buffer)

    # Resize in place where possible; otherwise fall back to allocating
    # a fresh host buffer of the right shape.
    try:
        host_buffer.resize(self.shape, refcheck=False)
    except ValueError:
        host_buffer = np.empty(self.shape, dtype=np.dtype(self.dtype))

    # Skip the device-to-host copy entirely for empty tensors (nbytes == 0).
    if nbytes:
        host_ptr = host_buffer.ctypes.data_as(ctypes.c_void_p)
        wrapper().dtoh(dst=host_ptr, src=self._ptr, nbytes=nbytes,
                       stream=try_get_stream_handle(stream))
        host_buffer = host_buffer.reshape(self.shape)
    return host_buffer
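# Hypothetical usage sketch for copy_to, assuming the DeviceBuffer constructor
# (as used in the test above) allocates device memory up front. No stream is
# passed, so the copy is synchronous.
buf = DeviceBuffer(shape=(2, 3), dtype=np.float32)
host = np.empty((1,), dtype=np.float32)   # deliberately the wrong size
host = buf.copy_to(host)                  # resized/reallocated to buf.shape
assert host.shape == (2, 3)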
def check_empty_tensor_expand(runner, shapes):
    shape = shapes["new_shape"]
    feed_dict = {
        "data": np.array((0, )),
        "new_shape": np.array(shape, dtype=np.int32),
    }
    outputs = runner.infer(feed_dict)
    # Empty tensor will still be empty after broadcast
    assert outputs["expanded"].shape == shape
    assert misc.volume(outputs["expanded"].shape) == 0
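# A standalone NumPy illustration of the property checked above, without the
# runner: broadcasting a one-element array to a shape that contains
# zero-length dimensions produces an empty tensor.
expanded = np.broadcast_to(np.array((0, )), (2, 0, 3, 0))
assert expanded.shape == (2, 0, 3, 0)
assert expanded.size == 0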
def resize(self, shape):
    nbytes = misc.volume(shape) * np.dtype(self.dtype).itemsize
    # Grow-only strategy: reallocate only when the new shape needs more
    # space than is currently allocated; shrinking reuses the allocation.
    if nbytes > self.allocated_nbytes:
        self.free()
        self.allocate(nbytes)
    self.shape = shape
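# Hypothetical walk-through of the grow-only behavior above, assuming resize
# is a method of the same DeviceBuffer class used in the tests and that the
# constructor allocates for the initial shape.
buf = DeviceBuffer(shape=(4, 4), dtype=np.float32)   # 64 bytes allocated
buf.resize((2, 2))    # needs 16 bytes; the existing allocation is reused
buf.resize((8, 8))    # needs 256 bytes; free() and allocate() run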
def test_volume(case):
    it, vol = case
    assert misc.volume(it) == vol
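# One hypothetical way the `case` argument could be supplied, via a pytest
# fixture; the original suite's case list is not shown, so these pairs are
# illustrative (the first two follow from the assertions in
# test_empty_tensor_to_host).
import pytest

VOLUME_CASES = [
    (tuple(), 1),           # empty shape -> multiplicative identity
    ((5, 2, 0, 3, 0), 0),   # any zero-length dimension -> volume 0
    ((2, 3, 4), 24),
]

@pytest.fixture(params=VOLUME_CASES)
def case(request):
    return request.param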