def _fsstore_from_path(path: Path, mode: str = "a") -> FSStore:
    storage_options = {}
    if hasattr(path, "_kwargs"):
        upath = cast(UPath, path)
        storage_options = upath._kwargs.copy()
        storage_options.pop("_url", None)
        return FSStore(url=str(upath), mode=mode, **storage_options)
    return FSStore(url=str(path), mode=mode, **storage_options)
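# A minimal usage sketch for the helper above; the S3 URL and anon flag
# are placeholders, and universal_pathlib (upath) is assumed installed.
from pathlib import Path

from upath import UPath

# Plain local path: no extra storage options are forwarded.
local_store = _fsstore_from_path(Path("data/example.zarr"), mode="r")

# A UPath carries its fsspec storage options in the private _kwargs dict;
# the helper strips the internal "_url" entry and forwards the rest.
remote_store = _fsstore_from_path(
    UPath("s3://example-bucket/example.zarr", anon=True), mode="r"
)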
def nested_store(self, mode: str = None) -> FSStore:
    """
    Not ideal. Stores should remain hidden

    TODO: could also check dimension_separator
    """
    path = self.__path
    if mode is None:
        mode = self.__mode
    kwargs = {
        "key_separator": "/",  # TODO: in 2.8 "dimension_separator"
        "normalize_keys": True,
    }

    mkdir = True
    if "r" in mode or path.startswith("http"):
        # Could be simplified on the fsspec side
        mkdir = False
    if mkdir:
        kwargs["auto_mkdir"] = True

    store = FSStore(
        path,
        mode=mode,
        **kwargs,
    )  # TODO: open issue for using Path
    LOGGER.debug(f"Created nested FSStore({path}, mode={mode}, {kwargs})")
    return store
def init_store(self, path: str, mode: str = "r") -> FSStore:
    """
    Not ideal. Stores should remain hidden

    TODO: could also check dimension_separator
    """
    kwargs = {
        "dimension_separator": "/",
        "normalize_keys": True,
    }

    mkdir = True
    if "r" in mode or path.startswith("http"):
        # Could be simplified on the fsspec side
        mkdir = False
    if mkdir:
        kwargs["auto_mkdir"] = True

    store = FSStore(
        path,
        mode=mode,
        **kwargs,
    )  # TODO: open issue for using Path
    LOGGER.debug(f"Created nested FSStore({path}, {mode}, {kwargs})")
    return store
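# A short demo of what dimension_separator="/" buys, assuming zarr 2.8+
# and a throwaway local directory: chunk keys nest ("0/0") instead of
# staying flat ("0.0").
import tempfile

import numpy as np
import zarr
from zarr.storage import FSStore

path = tempfile.mkdtemp()
store = FSStore(path, mode="w", dimension_separator="/",
                normalize_keys=True, auto_mkdir=True)
arr = zarr.open_array(store=store, mode="w", shape=(4, 4),
                      chunks=(2, 2), dtype="f8")
arr[:] = np.arange(16).reshape(4, 4)
print(sorted(store))  # ['.zarray', '0/0', '0/1', '1/0', '1/1']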
def test_fsstore(dataset):
    """
    Use FSStore to open the dataset fixture.

    Legacy nested datasets without the dimension_separator
    metadata are not expected to be openable.
    """
    failure = "nested_legacy" in dataset
    verify(Array(store=FSStore(dataset)), failure)
def flat_store(self, mode: str = None) -> FSStore:
    path = self.__path
    if mode is None:
        mode = self.__mode
    store = FSStore(path, mode=mode)
    LOGGER.debug(f"Created legacy flat FSStore({path}, mode={mode})")
    return store
def _open_store(name: str) -> FSStore:
    """
    Create an FSStore instance that supports nested storage of chunks.
    """
    return FSStore(
        name,
        auto_mkdir=True,
        key_separator="/",
        normalize_keys=False,
        mode="w",
    )
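# A usage sketch for _open_store, assuming a scratch path; key_separator
# is the pre-2.8 spelling of dimension_separator, so chunks land in
# nested subdirectories (scratch.zarr/image/0/0, ...).
import numpy as np
import zarr

store = _open_store("scratch.zarr")
root = zarr.group(store=store)
image = root.create_dataset("image", shape=(256, 256),
                            chunks=(64, 64), dtype="u2")
image[:] = np.random.randint(0, 2 ** 16, size=(256, 256), dtype="u2")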
def normalize_store_arg(store, clobber=False, storage_options=None, mode='w'):
    if store is None:
        return dict()
    elif isinstance(store, str):
        mode = mode if clobber else "r"
        if "://" in store or "::" in store:
            return FSStore(store, mode=mode, **(storage_options or {}))
        elif storage_options:
            raise ValueError("storage_options passed with non-fsspec path")
        if store.endswith('.zip'):
            return ZipStore(store, mode=mode)
        elif store.endswith('.n5'):
            return N5Store(store)
        else:
            return DirectoryStore(store)
    else:
        return store
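# A quick tour of normalize_store_arg's dispatch; the paths are
# placeholders and the S3 branch assumes s3fs is installed.
from zarr.storage import DirectoryStore, FSStore, N5Store, ZipStore

assert isinstance(normalize_store_arg(None), dict)
assert isinstance(normalize_store_arg("data.zarr"), DirectoryStore)
assert isinstance(normalize_store_arg("data.n5"), N5Store)
assert isinstance(normalize_store_arg("data.zip", clobber=True), ZipStore)

# anything with a protocol goes through fsspec as an FSStore, and
# storage_options are only legal on that branch
s3 = normalize_store_arg("s3://example-bucket/data.zarr",
                         storage_options={"anon": True})
assert isinstance(s3, FSStore)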
def create_tile_directory(self, series, resolution, width, height):
    dimension_separator = '/'
    if not self.nested:
        dimension_separator = '.'
    self.zarr_store = FSStore(
        self.slide_directory,
        dimension_separator=dimension_separator,
        normalize_keys=True,
        auto_mkdir=True
    )
    self.zarr_group = zarr.group(store=self.zarr_store)
    self.zarr_group.attrs['bioformats2raw.layout'] = LAYOUT_VERSION

    # important to explicitly set the chunk size to 1 for non-XY dims
    # setting to None may cause all planes to be chunked together
    # ordering is TZCYX and hard-coded since Z and T are not present
    self.zarr_group.create_dataset(
        "%s/%s" % (str(series), str(resolution)),
        shape=(1, 1, 3, height, width),
        chunks=(1, 1, 1, self.tile_height, self.tile_width),
        dtype='B')
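# A self-contained sketch pairing create_tile_directory's layout with a
# tile write; the 32x32 image with 16x16 RGB tiles is hypothetical.
import numpy as np
import zarr
from zarr.storage import FSStore

store = FSStore("slide.zarr", dimension_separator='/',
                normalize_keys=True, auto_mkdir=True)
group = zarr.group(store=store)
dataset = group.create_dataset("0/0", shape=(1, 1, 3, 32, 32),
                               chunks=(1, 1, 1, 16, 16), dtype='B')
# write one RGB tile at grid position (row 0, col 1); tiles match the
# chunk shape, so each write touches exactly one chunk file per channel
tile = np.full((3, 16, 16), 255, dtype='B')
dataset[0, 0, :, 0:16, 16:32] = tile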
def create_store():
    path = tempfile.mkdtemp()
    atexit.register(atexit_rmtree, path)
    store = FSStore(path, key_separator='/', auto_mkdir=True)
    return store, None
def create_store():
    path = tempfile.mkdtemp()
    atexit.register(atexit_rmtree, path)
    store = FSStore(path)
    return store, None
#!/usr/bin/env python
import zarr
from zarr.storage import FSStore

store = FSStore("http://localhost:8000")
group = zarr.group(store=store)
print(group.attrs["example"])
test = group.test
print(test.attrs["image"])
print(test[:])
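# A companion sketch that writes the layout the script above expects and
# then serves it over HTTP; the attribute values are placeholders.
import numpy as np
import zarr
from zarr.storage import FSStore

store = FSStore("served", mode="w", auto_mkdir=True)
group = zarr.group(store=store)
group.attrs["example"] = "hello over http"
test = group.create_dataset("test", shape=(4,), chunks=(2,), dtype="i4")
test.attrs["image"] = "placeholder"
test[:] = np.arange(4)

# then serve the directory for the reader script:
#     python -m http.server 8000 --directory served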
def test_consolidate_metadata(with_chunk_store, zarr_version, listable,
                              monkeypatch, stores_from_path):

    # setup initial data
    if stores_from_path:
        store = tempfile.mkdtemp()
        atexit.register(atexit_rmtree, store)
        if with_chunk_store:
            chunk_store = tempfile.mkdtemp()
            atexit.register(atexit_rmtree, chunk_store)
        else:
            chunk_store = None
        version_kwarg = {'zarr_version': zarr_version}
    else:
        if zarr_version == 2:
            store = MemoryStore()
            chunk_store = MemoryStore() if with_chunk_store else None
        elif zarr_version == 3:
            store = MemoryStoreV3()
            chunk_store = MemoryStoreV3() if with_chunk_store else None
        version_kwarg = {}
    path = 'dataset' if zarr_version == 3 else None
    z = group(store, chunk_store=chunk_store, path=path, **version_kwarg)

    # Reload the actual store implementation in case str
    store_to_copy = z.store

    z.create_group('g1')
    g2 = z.create_group('g2')
    g2.attrs['hello'] = 'world'
    arr = g2.create_dataset('arr', shape=(20, 20), chunks=(5, 5), dtype='f8')
    assert 16 == arr.nchunks
    assert 0 == arr.nchunks_initialized
    arr.attrs['data'] = 1
    arr[:] = 1.0
    assert 16 == arr.nchunks_initialized

    if stores_from_path:
        # get the actual store class for use with consolidate_metadata
        store_class = z._store
    else:
        store_class = store

    if zarr_version == 3:
        # error on v3 if path not provided
        with pytest.raises(ValueError):
            consolidate_metadata(store_class, path=None)

        with pytest.raises(ValueError):
            consolidate_metadata(store_class, path='')

    # perform consolidation
    out = consolidate_metadata(store_class, path=path)
    assert isinstance(out, Group)
    assert ['g1', 'g2'] == list(out)

    if not stores_from_path:
        if zarr_version == 2:
            assert isinstance(out._store, ConsolidatedMetadataStore)
            assert '.zmetadata' in store
            meta_keys = ['.zgroup',
                         'g1/.zgroup',
                         'g2/.zgroup',
                         'g2/.zattrs',
                         'g2/arr/.zarray',
                         'g2/arr/.zattrs']
        else:
            assert isinstance(out._store, ConsolidatedMetadataStoreV3)
            assert 'meta/root/consolidated/.zmetadata' in store
            meta_keys = ['zarr.json',
                         meta_root + 'dataset.group.json',
                         meta_root + 'dataset/g1.group.json',
                         meta_root + 'dataset/g2.group.json',
                         meta_root + 'dataset/g2/arr.array.json',
                         'meta/root/consolidated.group.json']
        for key in meta_keys:
            del store[key]

    # https://github.com/zarr-developers/zarr-python/issues/993
    # Make sure we can still open consolidated on an unlistable store:
    if not listable:
        fs_memory = pytest.importorskip("fsspec.implementations.memory")
        monkeypatch.setattr(fs_memory.MemoryFileSystem, "isdir",
                            lambda x, y: False)
        monkeypatch.delattr(fs_memory.MemoryFileSystem, "ls")
        fs = fs_memory.MemoryFileSystem()
        if zarr_version == 2:
            store_to_open = FSStore("", fs=fs)
        else:
            store_to_open = FSStoreV3("", fs=fs)
        # copy original store to new unlistable store
        store_to_open.update(store_to_copy)
    else:
        store_to_open = store

    # open consolidated
    z2 = open_consolidated(store_to_open, chunk_store=chunk_store,
                           path=path, **version_kwarg)
    assert ['g1', 'g2'] == list(z2)
    assert 'world' == z2.g2.attrs['hello']
    assert 1 == z2.g2.arr.attrs['data']
    assert (z2.g2.arr[:] == 1.0).all()
    assert 16 == z2.g2.arr.nchunks
    if listable:
        assert 16 == z2.g2.arr.nchunks_initialized
    else:
        with pytest.raises(NotImplementedError):
            _ = z2.g2.arr.nchunks_initialized

    if stores_from_path:
        # path string is not a BaseStore subclass so cannot be used to
        # initialize a ConsolidatedMetadataStore.
        if zarr_version == 2:
            with pytest.raises(ValueError):
                cmd = ConsolidatedMetadataStore(store)
        elif zarr_version == 3:
            with pytest.raises(ValueError):
                cmd = ConsolidatedMetadataStoreV3(store)
    else:
        # tests del/write on the store
        if zarr_version == 2:
            cmd = ConsolidatedMetadataStore(store)
            with pytest.raises(PermissionError):
                del cmd['.zgroup']
            with pytest.raises(PermissionError):
                cmd['.zgroup'] = None
        else:
            cmd = ConsolidatedMetadataStoreV3(store)
            with pytest.raises(PermissionError):
                del cmd[meta_root + 'dataset.group.json']
            with pytest.raises(PermissionError):
                cmd[meta_root + 'dataset.group.json'] = None

        # test getsize on the store
        assert isinstance(getsize(cmd), Integral)

    # test new metadata are not writeable
    with pytest.raises(PermissionError):
        z2.create_group('g3')
    with pytest.raises(PermissionError):
        z2.create_dataset('spam', shape=42, chunks=7, dtype='i4')
    with pytest.raises(PermissionError):
        del z2['g2']

    # test consolidated metadata are not writeable
    with pytest.raises(PermissionError):
        z2.g2.attrs['hello'] = 'universe'
    with pytest.raises(PermissionError):
        z2.g2.arr.attrs['foo'] = 'bar'

    # test the data are writeable
    z2.g2.arr[:] = 2
    assert (z2.g2.arr[:] == 2).all()

    # test invalid modes
    with pytest.raises(ValueError):
        open_consolidated(store, chunk_store=chunk_store, mode='a', path=path)
    with pytest.raises(ValueError):
        open_consolidated(store, chunk_store=chunk_store, mode='w', path=path)
    with pytest.raises(ValueError):
        open_consolidated(store, chunk_store=chunk_store, mode='w-', path=path)

    # make sure keyword arguments are passed through without error
    open_consolidated(
        store,
        chunk_store=chunk_store,
        path=path,
        cache_attrs=True,
        synchronizer=None,
        **version_kwarg,
    )
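# Distilled from the test above: a minimal consolidate/open_consolidated
# round trip on a v2 MemoryStore.
import zarr
from zarr.storage import MemoryStore

store = MemoryStore()
root = zarr.group(store)
arr = root.create_dataset('arr', shape=(20, 20), chunks=(5, 5), dtype='f8')
arr.attrs['data'] = 1
arr[:] = 1.0

zarr.consolidate_metadata(store)    # writes the '.zmetadata' key
z2 = zarr.open_consolidated(store)  # metadata reads hit only '.zmetadata'
assert z2.arr.attrs['data'] == 1
z2.arr[:] = 2.0                     # data stays writeable; metadata does not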
def init_store(self, path: str, mode: str = "r") -> FSStore:
    store = FSStore(path, mode=mode, dimension_separator=".")
    LOGGER.debug(f"Created legacy flat FSStore({path}, {mode})")
    return store