# Example 1
def write_zarr(df, tag):
    """Write *df* (datetime rows x site_code columns) to a zarr store on S3.

    The target key is derived from *tag* via ``get_file_name``. Any existing
    zarr object at that key is deleted first, then the dataframe is converted
    to an xarray Dataset chunked as one chunk per dimension and written out.

    :param df: dataframe of streamflow values; rows are datetimes, columns
        are site codes (inferred from the dims passed to
        ``convert_df_to_dataset`` — confirm against that helper).
    :param tag: identifier used to build the S3 path/key.
    """
    path, key = get_file_name(tag, '')
    # Clear any stale zarr object at this key before writing.
    delete_item_s3(key + '/', zarr=True)
    ds = convert_df_to_dataset(df, 'site_code', 'datetime', 'streamflow', {
        'datetime': df.shape[0],
        'site_code': df.shape[1]
    })
    # Reuse `path` from the get_file_name call above; the original called
    # get_file_name(tag, '') a second time with identical arguments.
    zarr_store = load_s3_zarr_store(path)
    ds.to_zarr(zarr_store)
def write_out_chunks(chunks_dfs, out_file, out_format):
    """Concatenate chunk dataframes column-wise and append them to *out_file*.

    :param chunks_dfs: iterable of dataframes to join along axis 1.
    :param out_file: destination path (csv) or S3 zarr location (zarr).
    :param out_format: either ``'csv'`` or ``'zarr'``.
    :raises ValueError: if *out_format* is not one of the supported formats.
    """
    combined = pd.concat(chunks_dfs, axis=1)

    # Dispatch on the requested output format.
    if out_format == 'csv':
        append_to_csv_column_wise(combined, out_file)
    elif out_format == 'zarr':
        store = load_s3_zarr_store(out_file)
        append_to_zarr(combined, store)
    else:
        raise ValueError("output_format should be 'csv' or 'zarr'")
# Example 3
def load_zarr_discharge():
    """Open the 15-minute discharge zarr store on S3 as an xarray Dataset.

    Reads from the ``15min_discharge`` prefix under the module-level
    ``bucket_name``.
    """
    store_path = f'{bucket_name}/15min_discharge'
    return xr.open_zarr(load_s3_zarr_store(store_path))
# Example 4
def read_zarr(tag):
    """Load the ``streamflow`` variable from the zarr store named by *tag*.

    :param tag: identifier used by ``get_file_name`` to build the S3 path.
    :return: the ``streamflow`` DataArray, eagerly loaded into memory.
    """
    store_path = get_file_name(tag, '')[0]
    dataset = xr.open_zarr(load_s3_zarr_store(store_path))
    return dataset['streamflow'].load()