Example #1
def cleanup_cube_bag(cube, store, blocksize=100):
    """
    Remove unused keys from cube datasets.

    .. important::
        All untracked keys which start with the cube's `uuid_prefix` followed by the `KTK_CUBE_UUID_SEPERATOR`
        (e.g. `my_cube_uuid++seed...`) will be deleted by this routine. These keys may be leftovers from past
        overwrites or index updates.

    Parameters
    ----------
    cube: Cube
        Cube specification.
    store: Callable[[], simplekv.KeyValueStore]
        KV store factory.
    blocksize: int
        Number of keys to delete at once.

    Returns
    -------
    bag: dask.bag.Bag
        A dask bag that performs the given operation. May contain multiple partitions.
    """
    check_store_factory(store)
    check_blocksize(blocksize)

    store_obj = store()

    datasets = discover_datasets_unchecked(uuid_prefix=cube.uuid_prefix, store=store)
    keys = get_keys_to_clean(cube.uuid_prefix, datasets, store_obj)

    return db.from_sequence(seq=sorted(keys), partition_size=blocksize).map_partitions(
        _delete, store=store
    )
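
A minimal usage sketch (not part of the original example), assuming the functions above are in scope (in kartothek they typically live in ``kartothek.io.dask.bag_cube``) and that the cube sits behind a ``storefact`` store factory; the store URL, column names, and ``uuid_prefix`` are placeholders.

from functools import partial

from storefact import get_store_from_url
from kartothek.core.cube.cube import Cube

# Placeholder store location and cube layout -- adjust to your setup.
store_factory = partial(get_store_from_url, "hfs:///tmp/cube_store")
cube = Cube(
    dimension_columns=["x"],
    partition_columns=["p"],
    uuid_prefix="my_cube",
)

# Build the cleanup bag lazily, then execute it to delete untracked keys.
bag = cleanup_cube_bag(cube=cube, store=store_factory, blocksize=50)
bag.compute()
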
Example #2
def copy_cube_bag(
    cube,
    src_store: StoreFactory,
    tgt_store: StoreFactory,
    blocksize: int = 100,
    overwrite: bool = False,
    datasets: Optional[Union[Iterable[str], Dict[str, DatasetMetadata]]] = None,
):
    """
    Copy cube from one store to another.

    Parameters
    ----------
    cube
        Cube specification.
    src_store
        Source KV store.
    tgt_store
        Target KV store.
    blocksize
        Number of keys to copy at once.
    overwrite
        Whether datasets that already exist in the target store should be overwritten.
    datasets
        Datasets to copy; must all be part of the cube. May be either the result of :func:`~kartothek.api.discover.discover_datasets`,
        a list of Ktk_cube dataset IDs, or ``None`` (in which case the entire cube will be copied).

    Returns
    -------
    bag: dask.bag.Bag
        A dask bag that performs the given operation. May contain multiple partitions.
    """
    check_store_factory(src_store)
    check_store_factory(tgt_store)
    check_blocksize(blocksize)
    assert_stores_different(
        src_store, tgt_store, cube.ktk_dataset_uuid(cube.seed_dataset)
    )

    keys = get_copy_keys(
        cube=cube,
        src_store=src_store,
        tgt_store=tgt_store,
        overwrite=overwrite,
        datasets=datasets,
    )

    return db.from_sequence(seq=sorted(keys), partition_size=blocksize).map_partitions(
        copy_keys, src_store=src_store, tgt_store=tgt_store
    )
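
A hedged usage sketch for ``copy_cube_bag``: the source/target store URLs and the ``"seed"`` dataset ID are assumptions, and ``cube`` is the specification built in the previous sketch.

from functools import partial

from storefact import get_store_from_url

# Placeholder source and target store factories.
src_factory = partial(get_store_from_url, "hfs:///tmp/cube_src")
tgt_factory = partial(get_store_from_url, "hfs:///tmp/cube_tgt")

bag = copy_cube_bag(
    cube=cube,              # Cube specification, e.g. from the previous sketch
    src_store=src_factory,
    tgt_store=tgt_factory,
    blocksize=100,
    overwrite=False,
    datasets=["seed"],      # copy only the (assumed) seed dataset
)
bag.compute()
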
Example #3
def collect_stats_bag(
    cube: Cube,
    store: StoreFactory,
    datasets: Optional[Union[Iterable[str], Dict[str, DatasetMetadata]]] = None,
    blocksize: int = 100,
):
    """
    Collect statistics for given cube.

    Parameters
    ----------
    cube
        Cube specification.
    store
        KV store that preserves the cube.
    datasets
        Datasets to query; must all be part of the cube. May be either the result of :func:`~kartothek.api.discover.discover_datasets`,
        a list of Ktk_cube dataset IDs, or ``None`` (in which case auto-discovery will be used).
    blocksize
        Number of partitions to scan at once.

    Returns
    -------
    bag: dask.bag.Bag
        A dask bag that returns a single result of the form ``Dict[str, Dict[str, int]]`` and contains statistics per
        ktk_cube dataset ID.
    """
    check_store_factory(store)
    check_blocksize(blocksize)

    if not isinstance(datasets, dict):
        datasets = discover_datasets_unchecked(
            uuid_prefix=cube.uuid_prefix,
            store=store,
            filter_ktk_cube_dataset_ids=datasets,
        )

    all_metapartitions = get_metapartitions_for_stats(datasets)

    return (
        db.from_sequence(seq=all_metapartitions, partition_size=blocksize)
        .map_partitions(collect_stats_block, store=store)
        .reduction(
            perpartition=_obj_to_list,
            aggregate=_reduce_stats,
            split_every=False,
            out_type=db.Bag,
        )
    )
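
Since the bag reduces to a single element, ``compute()`` returns a one-item list. A small sketch of reading the result, reusing the ``cube`` and ``store_factory`` placeholders from the earlier sketches; the exact counter keys depend on ``collect_stats_block``.

stats_bag = collect_stats_bag(cube=cube, store=store_factory, blocksize=100)
(stats,) = stats_bag.compute()  # single Dict[str, Dict[str, int]]

# Statistics are keyed by ktk_cube dataset ID.
for dataset_id, counters in sorted(stats.items()):
    print(dataset_id, counters)
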
Example #4
def delete_cube_bag(
    cube: Cube,
    store: StoreFactory,
    blocksize: int = 100,
    datasets: Optional[Union[Iterable[str], Dict[str, DatasetMetadata]]] = None,
):
    """
    Delete cube from store.

    .. important::
        This routine only deletes tracked files. Garbage and leftovers from old cubes and failed operations are NOT
        removed.

    Parameters
    ----------
    cube
        Cube specification.
    store
        KV store.
    blocksize
        Number of keys to delete at once.
    datasets
        Datasets to delete; must all be part of the cube. May be either the result of :func:`~kartothek.api.discover.discover_datasets`,
        a list of Ktk_cube dataset IDs, or ``None`` (in which case the entire cube will be deleted).

    Returns
    -------
    bag: dask.bag.Bag
        A dask bag that performs the given operation. May contain multiple partitions.
    """
    check_store_factory(store)
    check_blocksize(blocksize)

    if not isinstance(datasets, dict):
        datasets = discover_datasets_unchecked(
            uuid_prefix=cube.uuid_prefix,
            store=store,
            filter_ktk_cube_dataset_ids=datasets,
        )

    keys = set()
    for ktk_cube_dataset_id in sorted(datasets.keys()):
        ds = datasets[ktk_cube_dataset_id]
        keys |= get_dataset_keys(ds)

    return db.from_sequence(seq=sorted(keys), partition_size=blocksize).map_partitions(
        _delete, store=store
    )
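
A usage sketch for partial deletion, again with placeholder names: ``"enrich"`` is a hypothetical ktk_cube dataset ID; passing ``datasets=None`` instead would delete the entire cube.

bag = delete_cube_bag(
    cube=cube,
    store=store_factory,
    blocksize=100,
    datasets=["enrich"],  # hypothetical dataset ID; None deletes everything
)
bag.compute()
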
Example #5
def query_cube_bag_internal(
    cube,
    store,
    conditions,
    datasets,
    dimension_columns,
    partition_by,
    payload_columns,
    blocksize,
):
    """
    Query cube.

    For detailed documentation, see :func:`~kartothek.io.eager_cube.query_cube`.

    Parameters
    ----------
    cube: Cube
        Cube specification.
    store: Callable[[], simplekv.KeyValueStore]
        Factory for the KV store that preserves the cube.
    conditions: Union[None, Condition, Iterable[Condition], Conjunction]
        Conditions that should be applied, optional.
    datasets: Union[None, Iterable[str], Dict[str, kartothek.core.dataset.DatasetMetadata]]
        Datasets to query; must all be part of the cube. May be either the result of :func:`~kartothek.api.discover.discover_datasets`,
        a list of Ktk_cube dataset IDs, or ``None`` (in which case auto-discovery will be used).
    dimension_columns: Union[None, str, Iterable[str]]
        Dimension columns of the query; may result in a projection. If not provided, the dimension columns from the
        cube specification will be used.
    partition_by: Union[None, str, Iterable[str]]
        Column(s) by which logical partitions should be formed. If not provided, a single partition will be generated.
    payload_columns: Union[None, str, Iterable[str]]
        Which columns apart from ``dimension_columns`` and ``partition_by`` should be returned.
    blocksize: int
        Partition size of the bag.

    Returns
    -------
    empty: pandas.DataFrame
        Empty DataFrame with correct dtypes and column order.
    bag: dask.bag.Bag
        Bag of 1-sized partitions of non-empty DataFrames, ordered by ``partition_by``. Columns of the DataFrames are
        alphabetically ordered. Data types are provided on a best-effort basis (they are restored from the preserved
        data, but may differ due to pandas NULL handling, e.g. integer columns may become floats).
    """
    check_store_factory(store)
    check_blocksize(blocksize)

    intention, empty, groups = plan_query(
        cube=cube,
        store=store,
        conditions=conditions,
        datasets=datasets,
        dimension_columns=dimension_columns,
        partition_by=partition_by,
        payload_columns=payload_columns,
    )

    b = (
        db.from_sequence(seq=groups, partition_size=blocksize)
        .map(load_group, store=store, cube=cube)
        .filter(_not_empty)
    )

    if not intention.partition_by:
        b = (
            b.reduction(
                perpartition=list,
                aggregate=_collect_dfs,
                split_every=False,
                out_type=db.Bag,
            )
            .map(
                _quick_concat_or_none,
                dimension_columns=intention.dimension_columns,
                partition_columns=cube.partition_columns,
            )
            .filter(_not_none)
        )
    return empty, b
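
A sketch of how a caller might combine the two return values, assuming the defaults (no conditions, no ``partition_by``) and the placeholder ``cube``/``store_factory`` from the earlier sketches; per the docstring, the bag elements are DataFrames, so an empty result falls back to ``empty``.

import pandas as pd

empty, bag = query_cube_bag_internal(
    cube=cube,
    store=store_factory,
    conditions=None,
    datasets=None,
    dimension_columns=None,
    partition_by=None,
    payload_columns=None,
    blocksize=1,
)

dfs = bag.compute()  # list of non-empty DataFrames (at most one without partition_by)
result = pd.concat(dfs, ignore_index=True) if dfs else empty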