Example #1
def test_normalize_store_arg_v3(tmpdir):

    fn = tmpdir.join('store.zip')
    store = normalize_store_arg(str(fn), zarr_version=3, mode='w')
    assert isinstance(store, ZipStoreV3)
    assert 'zarr.json' in store

    # can't pass storage_options to non-fsspec store
    with pytest.raises(ValueError):
        normalize_store_arg(str(fn),
                            zarr_version=3,
                            mode='w',
                            storage_options={"some": "kwargs"})

    if have_fsspec:
        path = tempfile.mkdtemp()
        store = normalize_store_arg("file://" + path, zarr_version=3, mode='w')
        assert isinstance(store, FSStoreV3)
        assert 'zarr.json' in store

    fn = tmpdir.join('store.n5')
    with pytest.raises(NotImplementedError):
        normalize_store_arg(str(fn), zarr_version=3, mode='w')

    # error on zarr_version=3 with a v2 store
    with pytest.raises(ValueError):
        normalize_store_arg(KVStore(dict()), zarr_version=3, mode='w')

    # error on zarr_version=2 with a v3 store
    with pytest.raises(ValueError):
        normalize_store_arg(KVStoreV3(dict()), zarr_version=2, mode='w')
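For contrast with the v3-specific assertions above, here is a minimal sketch of the zarr v2 defaults of the same helper, assuming the zarr 2.x public storage API (a plain directory path yields a DirectoryStore, a '.zip' path yields a ZipStore):

from zarr.storage import DirectoryStore, ZipStore, normalize_store_arg


def test_normalize_store_arg_v2_sketch(tmpdir):
    # a plain directory path maps to a DirectoryStore by default
    store = normalize_store_arg(str(tmpdir.join('store')), mode='w')
    assert isinstance(store, DirectoryStore)

    # a '.zip' suffix maps to a ZipStore
    zstore = normalize_store_arg(str(tmpdir.join('store.zip')), mode='w')
    assert isinstance(zstore, ZipStore)
    zstore.close()  # flush the zip file records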
Example #2
def save_array(store: StoreLike,
               arr,
               *,
               zarr_version=None,
               path=None,
               **kwargs):
    """Convenience function to save a NumPy array to the local file system, following a
    similar API to the NumPy save() function.

    Parameters
    ----------
    store : MutableMapping or string
        Store or path to directory in file system or name of zip file.
    arr : ndarray
        NumPy array with data to save.
    zarr_version : {2, 3, None}, optional
        The zarr protocol version to use when saving. The default value of None
        will attempt to infer the version from `store` if possible, otherwise
        it will fall back to 2.
    path : str or None, optional
        The path within the store where the array will be saved.
    kwargs
        Passed through to :func:`create`, e.g., compressor.

    Examples
    --------
    Save an array to a directory on the file system (uses a :class:`DirectoryStore`)::

        >>> import zarr
        >>> import numpy as np
        >>> arr = np.arange(10000)
        >>> zarr.save_array('data/example.zarr', arr)
        >>> zarr.load('data/example.zarr')
        array([   0,    1,    2, ..., 9997, 9998, 9999])

    Save an array to a single file (uses a :class:`ZipStore`)::

        >>> zarr.save_array('data/example.zip', arr)
        >>> zarr.load('data/example.zip')
        array([   0,    1,    2, ..., 9997, 9998, 9999])

    """
    may_need_closing = _might_close(store)
    _store: BaseStore = normalize_store_arg(store,
                                            mode="w",
                                            zarr_version=zarr_version)
    path = _check_and_update_path(_store, path)
    try:
        _create_array(arr,
                      store=_store,
                      overwrite=True,
                      zarr_version=zarr_version,
                      path=path,
                      **kwargs)
    finally:
        if may_need_closing:
            # needed to ensure zip file records are written
            _store.close()
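A short usage sketch of the kwargs pass-through described in the docstring; compressor is not a parameter of save_array itself but is forwarded via **kwargs to create(). The path and codec settings below are illustrative only:

import numpy as np
import zarr
from numcodecs import Blosc

arr = np.arange(10000)
# compressor is forwarded through **kwargs to create()
zarr.save_array('data/compressed_example.zarr', arr,
                compressor=Blosc(cname='zstd', clevel=3))
loaded = zarr.load('data/compressed_example.zarr')
assert (loaded == arr).all()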
Example #3
def _normalize_store_arg(store,
                         *,
                         storage_options=None,
                         mode="r",
                         zarr_version=None):
    if zarr_version is None:
        zarr_version = getattr(store, '_store_version', DEFAULT_ZARR_VERSION)
    if store is None:
        return MemoryStore() if zarr_version == 2 else MemoryStoreV3()
    return normalize_store_arg(store,
                               storage_options=storage_options,
                               mode=mode,
                               zarr_version=zarr_version)
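The zarr_version defaulting above relies on reading an optional _store_version attribute from the store. A minimal sketch of the same pattern, assuming the default protocol version is 2 as in the helper; the getattr fallback covers stores that do not define the attribute at all:

from zarr.storage import MemoryStore

DEFAULT_ZARR_VERSION = 2  # assumed default, mirroring the helper above

# a plain dict has no _store_version attribute, so the fallback applies
assert getattr(dict(), '_store_version', DEFAULT_ZARR_VERSION) == 2

# a v2 store either carries _store_version == 2 or falls back to the default
assert getattr(MemoryStore(), '_store_version', DEFAULT_ZARR_VERSION) == 2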
Example #4
def load(store: StoreLike, zarr_version=None, path=None):
    """Load data from an array or group into memory.

    Parameters
    ----------
    store : MutableMapping or string
        Store or path to directory in file system or name of zip file.
    zarr_version : {2, 3, None}, optional
        The zarr protocol version to use when loading. The default value of
        None will attempt to infer the version from `store` if possible,
        otherwise it will fall back to 2.
    path : str or None, optional
        The path within the store from which to load.

    Returns
    -------
    out
        If the store contains an array, out will be a numpy array. If the store contains
        a group, out will be a dict-like object where keys are array names and values
        are numpy arrays.

    See Also
    --------
    save, savez

    Notes
    -----
    If loading data from a group of arrays, data will not be immediately loaded into
    memory. Rather, arrays will be loaded into memory as they are requested.

    """
    # handle polymorphic store arg
    _store = normalize_store_arg(store, zarr_version=zarr_version)
    path = _check_and_update_path(_store, path)
    if contains_array(_store, path=path):
        return Array(store=_store, path=path)[...]
    elif contains_group(_store, path=path):
        grp = Group(store=_store, path=path)
        return LazyLoader(grp)
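A brief sketch of the lazy group behaviour noted above: loading a group returns a dict-like LazyLoader, and each member array is only read when accessed. Paths are illustrative:

import numpy as np
import zarr

a1 = np.arange(10)
a2 = np.ones(10)
zarr.save_group('data/lazy_example.zarr', foo=a1, bar=a2)

loaded = zarr.load('data/lazy_example.zarr')
# each array is read from the store only at this point
assert (loaded['foo'] == a1).all()
assert (loaded['bar'] == a2).all()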
Example #5
def open(store: StoreLike = None,
         mode: str = "a",
         *,
         zarr_version=None,
         path=None,
         **kwargs):
    """Convenience function to open a group or array using file-mode-like semantics.

    Parameters
    ----------
    store : Store or string, optional
        Store or path to directory in file system or name of zip file.
    mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
        Persistence mode: 'r' means read only (must exist); 'r+' means
        read/write (must exist); 'a' means read/write (create if doesn't
        exist); 'w' means create (overwrite if exists); 'w-' means create
        (fail if exists).
    zarr_version : {2, 3, None}, optional
        The zarr protocol version to use. The default value of None will attempt
        to infer the version from `store` if possible, otherwise it will fall
        back to 2.
    path : str or None, optional
        The path within the store to open.
    **kwargs
        Additional parameters are passed through to :func:`zarr.creation.open_array` or
        :func:`zarr.hierarchy.open_group`.

    Returns
    -------
    z : :class:`zarr.core.Array` or :class:`zarr.hierarchy.Group`
        Array or group, depending on what exists in the given store.

    See Also
    --------
    zarr.creation.open_array, zarr.hierarchy.open_group

    Examples
    --------

    Storing data in a directory 'data/example.zarr' on the local file system::

        >>> import zarr
        >>> store = 'data/example.zarr'
        >>> zw = zarr.open(store, mode='w', shape=100, dtype='i4')  # open new array
        >>> zw
        <zarr.core.Array (100,) int32>
        >>> za = zarr.open(store, mode='a')  # open existing array for reading and writing
        >>> za
        <zarr.core.Array (100,) int32>
        >>> zr = zarr.open(store, mode='r')  # open existing array read-only
        >>> zr
        <zarr.core.Array (100,) int32 read-only>
        >>> gw = zarr.open(store, mode='w')  # open new group, overwriting previous data
        >>> gw
        <zarr.hierarchy.Group '/'>
        >>> ga = zarr.open(store, mode='a')  # open existing group for reading and writing
        >>> ga
        <zarr.hierarchy.Group '/'>
        >>> gr = zarr.open(store, mode='r')  # open existing group read-only
        >>> gr
        <zarr.hierarchy.Group '/' read-only>

    """

    # handle polymorphic store arg
    # we pass storage options explicitly, since normalize_store_arg might construct
    # a store if the input is a fsspec-compatible URL
    _store: BaseStore = normalize_store_arg(
        store,
        storage_options=kwargs.pop("storage_options", {}),
        mode=mode,
        zarr_version=zarr_version,
    )
    # path = _check_and_update_path(_store, path)
    path = normalize_storage_path(path)
    kwargs['path'] = path

    if mode in {'w', 'w-', 'x'}:
        if 'shape' in kwargs:
            return open_array(_store, mode=mode, **kwargs)
        else:
            return open_group(_store, mode=mode, **kwargs)

    elif mode == "a":
        if "shape" in kwargs or contains_array(_store, path):
            return open_array(_store, mode=mode, **kwargs)
        else:
            return open_group(_store, mode=mode, **kwargs)

    else:
        if contains_array(_store, path):
            return open_array(_store, mode=mode, **kwargs)
        elif contains_group(_store, path):
            return open_group(_store, mode=mode, **kwargs)
        else:
            raise PathNotFoundError(path)
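A sketch of the mode dispatch implemented above: with 'w' and a shape you get an array, without a shape you get a group, and 'r' reopens whatever exists read-only. The store path is illustrative:

import zarr

# mode='w' with a shape creates (or overwrites) an array
z = zarr.open('data/open_example.zarr', mode='w',
              shape=(100,), chunks=(10,), dtype='i4')
z[:] = 42

# mode='w' without a shape creates (or overwrites) a group at the same location
g = zarr.open('data/open_example.zarr', mode='w')
g.create_dataset('x', shape=(5,), dtype='f8')

# mode='r' reopens whatever now exists, read-only
g2 = zarr.open('data/open_example.zarr', mode='r')
assert 'x' in g2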
Example #6
def save_group(store: StoreLike,
               *args,
               zarr_version=None,
               path=None,
               **kwargs):
    """Convenience function to save several NumPy arrays to the local file system, following a
    similar API to the NumPy savez()/savez_compressed() functions.

    Parameters
    ----------
    store : MutableMapping or string
        Store or path to directory in file system or name of zip file.
    args : ndarray
        NumPy arrays with data to save.
    zarr_version : {2, 3, None}, optional
        The zarr protocol version to use when saving. The default value of None
        will attempt to infer the version from `store` if possible, otherwise
        it will fall back to 2.
    path : str or None, optional
        Path within the store where the group will be saved.
    kwargs
        Additional NumPy arrays with data to save, keyed by the names under
        which they will be stored.

    Examples
    --------
    Save several arrays to a directory on the file system (uses a
    :class:`DirectoryStore`)::

        >>> import zarr
        >>> import numpy as np
        >>> a1 = np.arange(10000)
        >>> a2 = np.arange(10000, 0, -1)
        >>> zarr.save_group('data/example.zarr', a1, a2)
        >>> loader = zarr.load('data/example.zarr')
        >>> loader
        <LazyLoader: arr_0, arr_1>
        >>> loader['arr_0']
        array([   0,    1,    2, ..., 9997, 9998, 9999])
        >>> loader['arr_1']
        array([10000,  9999,  9998, ...,     3,     2,     1])

    Save several arrays using named keyword arguments::

        >>> zarr.save_group('data/example.zarr', foo=a1, bar=a2)
        >>> loader = zarr.load('data/example.zarr')
        >>> loader
        <LazyLoader: bar, foo>
        >>> loader['foo']
        array([   0,    1,    2, ..., 9997, 9998, 9999])
        >>> loader['bar']
        array([10000,  9999,  9998, ...,     3,     2,     1])

    Store several arrays in a single zip file (uses a :class:`ZipStore`)::

        >>> zarr.save_group('data/example.zip', foo=a1, bar=a2)
        >>> loader = zarr.load('data/example.zip')
        >>> loader
        <LazyLoader: bar, foo>
        >>> loader['foo']
        array([   0,    1,    2, ..., 9997, 9998, 9999])
        >>> loader['bar']
        array([10000,  9999,  9998, ...,     3,     2,     1])

    Notes
    -----
    Default compression options will be used.

    """
    if len(args) == 0 and len(kwargs) == 0:
        raise ValueError('at least one array must be provided')
    # handle polymorphic store arg
    may_need_closing = _might_close(store)
    _store: BaseStore = normalize_store_arg(store,
                                            mode="w",
                                            zarr_version=zarr_version)
    path = _check_and_update_path(_store, path)
    try:
        grp = _create_group(_store,
                            path=path,
                            overwrite=True,
                            zarr_version=zarr_version)
        for i, arr in enumerate(args):
            k = 'arr_{}'.format(i)
            grp.create_dataset(k,
                               data=arr,
                               overwrite=True,
                               zarr_version=zarr_version)
        for k, arr in kwargs.items():
            grp.create_dataset(k,
                               data=arr,
                               overwrite=True,
                               zarr_version=zarr_version)
    finally:
        if may_need_closing:
            # needed to ensure zip file records are written
            _store.close()
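A short sketch of the naming rules implemented above: positional arrays are stored as arr_0, arr_1, ..., while keyword arrays keep their keys, and both can be mixed in one call. The path is illustrative:

import numpy as np
import zarr

a = np.arange(5)
b = np.zeros(5)
zarr.save_group('data/group_example.zarr', a, b, scale=np.full(5, 2.5))

loaded = zarr.load('data/group_example.zarr')
assert (loaded['arr_0'] == a).all()
assert (loaded['arr_1'] == b).all()
assert (loaded['scale'] == 2.5).all()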
Example #7
def open_consolidated(store: StoreLike,
                      metadata_key=".zmetadata",
                      mode="r+",
                      **kwargs):
    """Open group using metadata previously consolidated into a single key.

    This is an optimised method for opening a Zarr group, where instead of
    traversing the group/array hierarchy by accessing the metadata keys at
    each level, a single key contains all of the metadata for everything.
    This can be a significant optimisation for remote data sources, where the
    overhead of accessing a key is large compared to the time to read the data.

    The group accessed must have already had its metadata consolidated into a
    single key using the function :func:`consolidate_metadata`.

    This optimised method only works in modes which do not change the
    metadata, although the data may still be written/updated.

    Parameters
    ----------
    store : MutableMapping or string
        Store or path to directory in file system or name of zip file.
    metadata_key : str
        Key to read the consolidated metadata from. The default (.zmetadata)
        corresponds to the default used by :func:`consolidate_metadata`.
    mode : {'r', 'r+'}, optional
        Persistence mode: 'r' means read only (must exist); 'r+' means
        read/write (must exist), although only writes to data are allowed;
        changes to metadata, including creation of new arrays or groups,
        are not allowed.
    **kwargs
        Additional parameters are passed through to :func:`zarr.creation.open_array` or
        :func:`zarr.hierarchy.open_group`.

    Returns
    -------
    g : :class:`zarr.hierarchy.Group`
        Group instance, opened with the consolidated metadata.

    See Also
    --------
    consolidate_metadata

    """

    # normalize parameters
    zarr_version = kwargs.get('zarr_version', None)
    store = normalize_store_arg(store,
                                storage_options=kwargs.get("storage_options"),
                                mode=mode,
                                zarr_version=zarr_version)
    if mode not in {'r', 'r+'}:
        raise ValueError(
            "invalid mode, expected either 'r' or 'r+'; found {!r}".format(
                mode))

    path = kwargs.pop('path', None)
    if store._store_version == 2:
        ConsolidatedStoreClass = ConsolidatedMetadataStore
    else:
        ConsolidatedStoreClass = ConsolidatedMetadataStoreV3
        # default is to store within 'consolidated' group on v3
        if not metadata_key.startswith('meta/root/'):
            metadata_key = 'meta/root/consolidated/' + metadata_key
        if not path:
            raise ValueError(
                "path must be provided to open a Zarr 3.x consolidated store")

    # setup metadata store
    meta_store = ConsolidatedStoreClass(store, metadata_key=metadata_key)

    # pass through
    chunk_store = kwargs.pop('chunk_store', None) or store
    return open(store=meta_store,
                chunk_store=chunk_store,
                mode=mode,
                path=path,
                **kwargs)
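A typical consolidate-then-reopen flow for a zarr v2 store, sketched under the assumption of a local directory store; the hierarchy and path are illustrative:

import numpy as np
import zarr

# build a small hierarchy, then consolidate its metadata into '.zmetadata'
root = zarr.open_group('data/consolidated_example.zarr', mode='w')
root.create_dataset('temperature', data=np.arange(12).reshape(3, 4))
zarr.consolidate_metadata('data/consolidated_example.zarr')

# reopen via the single consolidated key; metadata reads now hit one key only
g = zarr.open_consolidated('data/consolidated_example.zarr', mode='r')
assert g['temperature'].shape == (3, 4)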
Example #8
def consolidate_metadata(store: BaseStore,
                         metadata_key=".zmetadata",
                         *,
                         path=''):
    """
    Consolidate all metadata for groups and arrays within the given store
    into a single resource and put it under the given key.

    This produces a single object in the backend store, containing all the
    metadata read from all the zarr-related keys that can be found. After
    metadata have been consolidated, use :func:`open_consolidated` to open
    the root group in optimised, read-only mode, using the consolidated
    metadata to reduce the number of read operations on the backend store.

    Note that if the metadata in the store is changed after this
    consolidation, then the metadata read by :func:`open_consolidated`
    would be incorrect unless this function is called again.

    .. note:: This is an experimental feature.

    Parameters
    ----------
    store : MutableMapping or string
        Store or path to directory in file system or name of zip file.
    metadata_key : str
        Key to put the consolidated metadata under.
    path : str or None
        Path corresponding to the group that is being consolidated. Not required
        for zarr v2 stores.

    Returns
    -------
    g : :class:`zarr.hierarchy.Group`
        Group instance, opened with the new consolidated metadata.

    See Also
    --------
    open_consolidated

    """
    store = normalize_store_arg(store, mode="w")

    version = store._store_version

    if version == 2:

        def is_zarr_key(key):
            return (key.endswith('.zarray') or key.endswith('.zgroup')
                    or key.endswith('.zattrs'))

    else:

        sfx = _get_metadata_suffix(store)  # type: ignore

        def is_zarr_key(key):
            return (key.endswith('.array' + sfx)
                    or key.endswith('.group' + sfx) or key == 'zarr.json')

        # cannot create a group without a path in v3
        # so create /meta/root/consolidated group to store the metadata
        if 'consolidated' not in store:
            _create_group(store, path='consolidated')
        if not metadata_key.startswith('meta/root/'):
            metadata_key = 'meta/root/consolidated/' + metadata_key
        # path = 'consolidated'

    out = {
        'zarr_consolidated_format': 1,
        'metadata':
        {key: json_loads(store[key])
         for key in store if is_zarr_key(key)}
    }
    store[metadata_key] = json_dumps(out)
    return open_consolidated(store, metadata_key=metadata_key, path=path)
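A sketch of what ends up under the consolidated key for a v2 store, based on the JSON layout built in the function body above ('zarr_consolidated_format' plus a 'metadata' mapping of every .zgroup/.zarray/.zattrs key); the store path is illustrative:

import json

import numpy as np
import zarr

store = zarr.DirectoryStore('data/meta_example.zarr')
root = zarr.group(store=store, overwrite=True)
root.create_dataset('x', data=np.arange(4))
zarr.consolidate_metadata(store)

# the consolidated key holds all zarr metadata keys as one JSON document
meta = json.loads(store['.zmetadata'])
assert meta['zarr_consolidated_format'] == 1
assert '.zgroup' in meta['metadata']
assert 'x/.zarray' in meta['metadata']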
Example #9
def open_array(store=None,
               mode="a",
               shape=None,
               chunks=True,
               dtype=None,
               compressor="default",
               fill_value=0,
               order="C",
               synchronizer=None,
               filters=None,
               cache_metadata=True,
               cache_attrs=True,
               path=None,
               object_codec=None,
               chunk_store=None,
               storage_options=None,
               partial_decompress=False,
               write_empty_chunks=True,
               *,
               zarr_version=None,
               dimension_separator=None,
               **kwargs):
    """Open an array using file-mode-like semantics.

    Parameters
    ----------
    store : MutableMapping or string, optional
        Store or path to directory in file system or name of zip file.
    mode : {'r', 'r+', 'a', 'w', 'w-'}, optional
        Persistence mode: 'r' means read only (must exist); 'r+' means
        read/write (must exist); 'a' means read/write (create if doesn't
        exist); 'w' means create (overwrite if exists); 'w-' means create
        (fail if exists).
    shape : int or tuple of ints, optional
        Array shape.
    chunks : int or tuple of ints, optional
        Chunk shape. If True, will be guessed from `shape` and `dtype`. If
        False, will be set to `shape`, i.e., single chunk for the whole array.
        If an int, the chunk size in each dimension will be given by the value
        of `chunks`. Default is True.
    dtype : string or dtype, optional
        NumPy dtype.
    compressor : Codec, optional
        Primary compressor.
    fill_value : object, optional
        Default value to use for uninitialized portions of the array.
    order : {'C', 'F'}, optional
        Memory layout to be used within each chunk.
    synchronizer : object, optional
        Array synchronizer.
    filters : sequence, optional
        Sequence of filters to use to encode chunk data prior to compression.
    cache_metadata : bool, optional
        If True, array configuration metadata will be cached for the
        lifetime of the object. If False, array metadata will be reloaded
        prior to all data access and modification operations (may incur
        overhead depending on storage and data access pattern).
    cache_attrs : bool, optional
        If True (default), user attributes will be cached for attribute read
        operations. If False, user attributes are reloaded from the store prior
        to all attribute read operations.
    path : string, optional
        Array path within store.
    object_codec : Codec, optional
        A codec to encode object arrays, only needed if dtype=object.
    chunk_store : MutableMapping or string, optional
        Store or path to directory in file system or name of zip file.
    storage_options : dict
        If using an fsspec URL to create the store, these will be passed to
        the backend implementation. Ignored otherwise.
    partial_decompress : bool, optional
        If True, and if the chunk_store is an FSStore and the compression used
        is Blosc, chunks will be partially read and decompressed when reading
        data from the array, where possible.
    write_empty_chunks : bool, optional
        If True (default), all chunks will be stored regardless of their
        contents. If False, each chunk is compared to the array's fill value
        prior to storing. If a chunk is uniformly equal to the fill value, then
        that chunk is not stored, and the store entry for that chunk's key
        is deleted. This setting enables sparser storage, as only chunks with
        non-fill-value data are stored, at the expense of overhead associated
        with checking the data of each chunk.

        .. versionadded:: 2.11

    zarr_version : {None, 2, 3}, optional
        The zarr protocol version of the array to be opened. If None, it will
        be inferred from ``store`` or ``chunk_store`` if they are provided,
        otherwise defaulting to 2.
    dimension_separator : {None, '.', '/'}, optional
        Can be used to specify whether the array is in a flat ('.') or nested
        ('/') format. If None, the appropriate value will be read from `store`
        when present. If not found there, it defaults to '.' when
        ``zarr_version == 2`` and to '/' otherwise.

    Returns
    -------
    z : zarr.core.Array

    Examples
    --------
    >>> import numpy as np
    >>> import zarr
    >>> z1 = zarr.open_array('data/example.zarr', mode='w', shape=(10000, 10000),
    ...                      chunks=(1000, 1000), fill_value=0)
    >>> z1[:] = np.arange(100000000).reshape(10000, 10000)
    >>> z1
    <zarr.core.Array (10000, 10000) float64>
    >>> z2 = zarr.open_array('data/example.zarr', mode='r')
    >>> z2
    <zarr.core.Array (10000, 10000) float64 read-only>
    >>> np.all(z1[:] == z2[:])
    True

    Notes
    -----
    There is no need to close an array. Data are automatically flushed to the
    file system.

    """

    # use same mode semantics as h5py
    # r : read only, must exist
    # r+ : read/write, must exist
    # w : create, delete if exists
    # w- or x : create, fail if exists
    # a : read/write if exists, create otherwise (default)

    if zarr_version is None and store is None:
        zarr_version = getattr(chunk_store, '_store_version',
                               DEFAULT_ZARR_VERSION)

    # handle polymorphic store arg
    store = normalize_store_arg(store,
                                storage_options=storage_options,
                                mode=mode,
                                zarr_version=zarr_version)
    zarr_version = getattr(store, '_store_version', DEFAULT_ZARR_VERSION)
    if chunk_store is not None:
        chunk_store = normalize_store_arg(chunk_store,
                                          storage_options=storage_options,
                                          mode=mode,
                                          zarr_version=zarr_version)

    # respect the dimension separator specified in a store, if present
    if dimension_separator is None:
        if hasattr(store, '_dimension_separator'):
            dimension_separator = store._dimension_separator
        else:
            dimension_separator = '.' if zarr_version == 2 else '/'

    if zarr_version == 3 and path is None:
        path = 'array'  # TODO: raise ValueError instead?

    path = normalize_storage_path(path)

    # API compatibility with h5py
    compressor, fill_value = _kwargs_compat(compressor, fill_value, kwargs)

    # ensure fill_value of correct type
    if fill_value is not None:
        fill_value = np.array(fill_value, dtype=dtype)[()]

    # ensure store is initialized

    if mode in ['r', 'r+']:
        if not contains_array(store, path=path):
            if contains_group(store, path=path):
                raise ContainsGroupError(path)
            raise ArrayNotFoundError(path)

    elif mode == 'w':
        init_array(store,
                   shape=shape,
                   chunks=chunks,
                   dtype=dtype,
                   compressor=compressor,
                   fill_value=fill_value,
                   order=order,
                   filters=filters,
                   overwrite=True,
                   path=path,
                   object_codec=object_codec,
                   chunk_store=chunk_store,
                   dimension_separator=dimension_separator)

    elif mode == 'a':
        if not contains_array(store, path=path):
            if contains_group(store, path=path):
                raise ContainsGroupError(path)
            init_array(store,
                       shape=shape,
                       chunks=chunks,
                       dtype=dtype,
                       compressor=compressor,
                       fill_value=fill_value,
                       order=order,
                       filters=filters,
                       path=path,
                       object_codec=object_codec,
                       chunk_store=chunk_store,
                       dimension_separator=dimension_separator)

    elif mode in ['w-', 'x']:
        if contains_group(store, path=path):
            raise ContainsGroupError(path)
        elif contains_array(store, path=path):
            raise ContainsArrayError(path)
        else:
            init_array(store,
                       shape=shape,
                       chunks=chunks,
                       dtype=dtype,
                       compressor=compressor,
                       fill_value=fill_value,
                       order=order,
                       filters=filters,
                       path=path,
                       object_codec=object_codec,
                       chunk_store=chunk_store,
                       dimension_separator=dimension_separator)

    # determine read only status
    read_only = mode == 'r'

    # instantiate array
    z = Array(store,
              read_only=read_only,
              synchronizer=synchronizer,
              cache_metadata=cache_metadata,
              cache_attrs=cache_attrs,
              path=path,
              chunk_store=chunk_store,
              write_empty_chunks=write_empty_chunks)

    return z
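The h5py-like mode semantics listed in the comment above, sketched on a local path (illustrative); 'r+' requires the array to exist, and 'w-' refuses to clobber it:

import numpy as np
import zarr
from zarr.errors import ContainsArrayError

z = zarr.open_array('data/open_array_example.zarr', mode='w',
                    shape=(100,), chunks=(10,), dtype='f8', fill_value=-1)
z[:10] = np.arange(10)

# 'r+' opens read/write but only if the array already exists
z2 = zarr.open_array('data/open_array_example.zarr', mode='r+')
z2[10] = 99.0

# 'w-' (or 'x') fails rather than overwrite an existing array
try:
    zarr.open_array('data/open_array_example.zarr', mode='w-', shape=(100,))
except ContainsArrayError:
    pass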
Example #10
def create(shape,
           chunks=True,
           dtype=None,
           compressor='default',
           fill_value=0,
           order='C',
           store=None,
           synchronizer=None,
           overwrite=False,
           path=None,
           chunk_store=None,
           filters=None,
           cache_metadata=True,
           cache_attrs=True,
           read_only=False,
           object_codec=None,
           dimension_separator=None,
           write_empty_chunks=True,
           *,
           zarr_version=None,
           **kwargs):
    """Create an array.

    Parameters
    ----------
    shape : int or tuple of ints
        Array shape.
    chunks : int or tuple of ints, optional
        Chunk shape. If True, will be guessed from `shape` and `dtype`. If
        False, will be set to `shape`, i.e., single chunk for the whole array.
        If an int, the chunk size in each dimension will be given by the value
        of `chunks`. Default is True.
    dtype : string or dtype, optional
        NumPy dtype.
    compressor : Codec, optional
        Primary compressor.
    fill_value : object
        Default value to use for uninitialized portions of the array.
    order : {'C', 'F'}, optional
        Memory layout to be used within each chunk.
    store : MutableMapping or string
        Store or path to directory in file system or name of zip file.
    synchronizer : object, optional
        Array synchronizer.
    overwrite : bool, optional
        If True, delete all pre-existing data in `store` at `path` before
        creating the array.
    path : string, optional
        Path under which array is stored.
    chunk_store : MutableMapping, optional
        Separate storage for chunks. If not provided, `store` will be used
        for storage of both chunks and metadata.
    filters : sequence of Codecs, optional
        Sequence of filters to use to encode chunk data prior to compression.
    cache_metadata : bool, optional
        If True, array configuration metadata will be cached for the
        lifetime of the object. If False, array metadata will be reloaded
        prior to all data access and modification operations (may incur
        overhead depending on storage and data access pattern).
    cache_attrs : bool, optional
        If True (default), user attributes will be cached for attribute read
        operations. If False, user attributes are reloaded from the store prior
        to all attribute read operations.
    read_only : bool, optional
        True if array should be protected against modification.
    object_codec : Codec, optional
        A codec to encode object arrays, only needed if dtype=object.
    dimension_separator : {'.', '/'}, optional
        Separator placed between the dimensions of a chunk.

        .. versionadded:: 2.8

    write_empty_chunks : bool, optional
        If True (default), all chunks will be stored regardless of their
        contents. If False, each chunk is compared to the array's fill value
        prior to storing. If a chunk is uniformly equal to the fill value, then
        that chunk is not stored, and the store entry for that chunk's key
        is deleted. This setting enables sparser storage, as only chunks with
        non-fill-value data are stored, at the expense of overhead associated
        with checking the data of each chunk.

        .. versionadded:: 2.11

    zarr_version : {None, 2, 3}, optional
        The zarr protocol version of the created array. If None, it will be
        inferred from ``store`` or ``chunk_store`` if they are provided,
        otherwise defaulting to 2.

    Returns
    -------
    z : zarr.core.Array

    Examples
    --------

    Create an array with default settings::

        >>> import zarr
        >>> z = zarr.create((10000, 10000), chunks=(1000, 1000))
        >>> z
        <zarr.core.Array (10000, 10000) float64>

    Create an array with some different configuration options::

        >>> from numcodecs import Blosc
        >>> compressor = Blosc(cname='zstd', clevel=1, shuffle=Blosc.BITSHUFFLE)
        >>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype='i1', order='F',
        ...                 compressor=compressor)
        >>> z
        <zarr.core.Array (10000, 10000) int8>

    To create an array with object dtype requires a filter that can handle Python object
    encoding, e.g., `MsgPack` or `Pickle` from `numcodecs`::

        >>> from numcodecs import MsgPack
        >>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype=object,
        ...                 object_codec=MsgPack())
        >>> z
        <zarr.core.Array (10000, 10000) object>

    Example with some filters, and also storing chunks separately from metadata::

        >>> from numcodecs import Quantize, Adler32
        >>> store, chunk_store = dict(), dict()
        >>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype='f8',
        ...                 filters=[Quantize(digits=2, dtype='f8'), Adler32()],
        ...                 store=store, chunk_store=chunk_store)
        >>> z
        <zarr.core.Array (10000, 10000) float64>

    """
    if zarr_version is None and store is None:
        zarr_version = getattr(chunk_store, '_store_version',
                               DEFAULT_ZARR_VERSION)

    # handle polymorphic store arg
    store = normalize_store_arg(store, zarr_version=zarr_version)
    zarr_version = getattr(store, '_store_version', DEFAULT_ZARR_VERSION)

    # API compatibility with h5py
    compressor, fill_value = _kwargs_compat(compressor, fill_value, kwargs)

    # optional array metadata
    if dimension_separator is None:
        dimension_separator = getattr(store, "_dimension_separator", None)
    else:
        store_separator = getattr(store, "_dimension_separator", None)
        if store_separator not in (None, dimension_separator):
            raise ValueError(
                f"Specified dimension_separator: {dimension_separator}"
                f"conflicts with store's separator: "
                f"{store_separator}")
    dimension_separator = normalize_dimension_separator(dimension_separator)

    if zarr_version > 2 and path is None:
        raise ValueError("path must be supplied to initialize a zarr v3 array")

    # initialize array metadata
    init_array(store,
               shape=shape,
               chunks=chunks,
               dtype=dtype,
               compressor=compressor,
               fill_value=fill_value,
               order=order,
               overwrite=overwrite,
               path=path,
               chunk_store=chunk_store,
               filters=filters,
               object_codec=object_codec,
               dimension_separator=dimension_separator)

    # instantiate array
    z = Array(store,
              path=path,
              chunk_store=chunk_store,
              synchronizer=synchronizer,
              cache_metadata=cache_metadata,
              cache_attrs=cache_attrs,
              read_only=read_only,
              write_empty_chunks=write_empty_chunks)

    return z
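A small sketch of the dimension_separator check above, assuming a NestedDirectoryStore, which fixes '/' as its chunk-key separator: omitting the argument inherits the store's separator, while requesting a conflicting '.' raises ValueError. The path is illustrative:

import zarr
from zarr.storage import NestedDirectoryStore

store = NestedDirectoryStore('data/nested_example.zarr')

# separator is inherited from the store ('/')
z = zarr.create((100, 100), chunks=(10, 10), store=store)

# asking for '.' conflicts with the store's '/' and raises ValueError
try:
    zarr.create((100, 100), chunks=(10, 10), store=store,
                overwrite=True, dimension_separator='.')
except ValueError:
    pass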