def test_dynamic_tensor_shapes():
    """A row written past the initial extent is reachable via negative indexing."""
    tensor = DynamicTensor(
        create_store("./data/test/test_dynamic_tensor_5"),
        mode="w",
        shape=(5, None, None),
        max_shape=(5, 100, 100),
        dtype="int32",
    )
    # Seed sample 0 with a 5x10 block, then write a longer row at index 6,
    # which grows the dynamic dimension.
    tensor[0] = np.ones((5, 10), dtype="int32")
    tensor[0, 6] = 2 * np.ones((20,), dtype="int32")
    # The last row is the one just written at index 6.
    assert tensor[0, -1].tolist() == [2] * 20
    tensor.close()
def test_dynamic_tensor_2():
    """Growing one row stretches the reported shape of the whole sample."""
    tensor = DynamicTensor(
        create_store("./data/test/test_dynamic_tensor_2"),
        mode="w",
        shape=(5, None, None),
        max_shape=(5, 100, 100),
        dtype="int32",
    )
    tensor[0] = np.ones((10, 10), dtype="int32")
    assert tensor[0, 5].tolist() == [1] * 10
    assert tensor[0, 5, :].tolist() == [1] * 10
    # Writing a 20-wide row at index 6 widens the sample; earlier rows are
    # zero-padded up to the new width.
    tensor[0, 6] = 2 * np.ones((20,), dtype="int32")
    assert tensor[0, 5, :].tolist() == [1] * 10 + [0] * 10
    assert tensor.get_shape(0).tolist() == [10, 20]
    assert tensor.get_shape(slice(0, 1)).tolist() == [1, 10, 20]
def _generate_storage_tensors(self):
    """Create a DynamicTensor on storage for each flat tensor spec and yield it.

    Yields ``(tensor_path, DynamicTensor)`` pairs, one per entry in
    ``self._flat_tensors``.
    """
    for t_dtype, t_path in self._flat_tensors:
        # t_path starts with a separator; strip it before joining.
        path = posixpath.join(self._path, t_path[1:])
        # Marker directory flagging this tensor as dynamic on storage.
        self._fs.makedirs(posixpath.join(path, "--dynamic--"))
        storage = get_storage_map(
            self._fs,
            path,
            self._cache,
            self.lock_cache,
            storage_cache=self._storage_cache,
        )
        meta = MetaStorage(t_path, storage, self._fs_map, self)
        yield t_path, DynamicTensor(
            fs_map=meta,
            mode=self._mode,
            shape=self._shape + t_dtype.shape,
            max_shape=self._shape + t_dtype.max_shape,
            dtype=_get_dynamic_tensor_dtype(t_dtype),
            chunks=t_dtype.chunks,
            compressor=_get_compressor(t_dtype.compressor),
        )
def test_dynamic_tensor_shape_none():
    """Constructing a DynamicTensor without a shape must raise TypeError.

    Bug fix: the original try/except had no ``else`` branch, so the test
    passed silently even if no exception was raised at all.
    """
    try:
        DynamicTensor(
            create_store("./data/test/test_dynamic_tensor_shape_none"),
            mode="w",
            dtype="int32",
        )
    except TypeError as ex:
        assert "shape cannot be none" in str(ex)
    else:
        assert False, "expected TypeError when shape is omitted"
def test_dynamic_tensor():
    """A partial slice assignment is readable back via negative indexing."""
    tensor = DynamicTensor(
        create_store("./data/test/test_dynamic_tensor"),
        mode="w",
        shape=(5, 100, 100),
        max_shape=(5, 100, 100),
        dtype="int32",
    )
    # Fill the bottom-right 20x20 corner of sample 0.
    tensor[0, 80:, 80:] = np.ones((20, 20), dtype="int32")
    # Row -5 (== 95) lies inside the written corner.
    assert tensor[0, -5, 90:].tolist() == [1] * 10
def test_dynamic_tensor_4():
    """A multi-sample slice write round-trips through a 4-D dynamic tensor."""
    tensor = DynamicTensor(
        create_store("./data/test/test_dynamic_tensor_6"),
        mode="w",
        shape=(5, None, None, None),
        max_shape=(5, 100, 100, 10),
        dtype="int32",
    )
    block = np.ones((2, 20, 10), dtype="int32")
    tensor[0, 6:8] = block
    assert (tensor[0, 6:8] == block).all()
def test_dynamic_tensor_3():
    """Independent writes to different index ranges stay separate."""
    tensor = DynamicTensor(
        create_store("./data/test/test_dynamic_tensor_3"),
        mode="w",
        shape=(5, None, None, None),
        max_shape=(5, 100, 100, 100),
        dtype="int32",
    )
    tensor[0, 5] = np.ones((20, 30), dtype="int32")
    tensor[0, 6:8, 5:9] = 5 * np.ones((2, 4, 30), dtype="int32")
    # First write is intact...
    assert tensor[0, 5, 7].tolist() == [1] * 30
    # ...and the sliced write landed where expected.
    assert tensor[0, 7, 8].tolist() == [5] * 30
def _open_storage_tensors(self):
    """Open the existing DynamicTensor for each flat tensor spec and yield it.

    Yields ``(tensor_path, DynamicTensor)`` pairs, one per entry in
    ``self._flat_tensors``.
    """
    for t_dtype, t_path in self._flat_tensors:
        # t_path starts with a separator; strip it before joining.
        path = posixpath.join(self._path, t_path[1:])
        storage = get_storage_map(
            self._fs,
            path,
            self._cache,
            self.lock_cache,
            storage_cache=self._storage_cache,
        )
        yield t_path, DynamicTensor(
            fs_map=MetaStorage(t_path, storage, self._fs_map),
            mode=self._mode,
            # FIXME We don't need argument below here
            shape=self._shape + t_dtype.shape,
        )
def test_read_and_append_modes():
    """Data written in append mode must be readable after reopening read-only.

    Bug fix: ``t.get_shape(0) == (100, 100)`` was a bare expression with no
    ``assert``, so the shape check was a no-op. It is now asserted using the
    ``.tolist()`` comparison pattern used by the other tests in this file
    (a bare ``assert`` on an array comparison would be ambiguous).
    """
    t = DynamicTensor(
        create_store("./data/test/test_read_and_append_modes"),
        mode="a",
        shape=(5, 100, 100),
        max_shape=(5, 100, 100),
        dtype="int32",
    )
    t[0, 80:, 80:] = np.ones((20, 20), dtype="int32")
    assert t[0, -5, 90:].tolist() == [1] * 10
    t.flush()
    t.close()
    # Reopen the same store read-only; shape/dtype come from stored metadata.
    t = DynamicTensor(
        create_store("./data/test/test_read_and_append_modes", overwrite=False),
        mode="r",
    )
    assert t.get_shape(0).tolist() == [100, 100]
    assert t[0, -5, 90:].tolist() == [1] * 10
    t.close()