Example #1
def test_dropped_dimension():
    test_size = (5, 15, 20)
    blocks = Blocks(test_size,
                    shape_block=(3, 7),
                    padding=(1, 2),
                    crop=((1, 1), (0, 0), (0, 0)))
    np.testing.assert_equal(blocks.drop_dim(1).shape_full, (5, 20))
Example #2
def dff(dataset: SplitDataset,
        baseline_stack,
        output_dir=None,
        n_jobs=20,
        **kwargs):
    """Calculates change over baseline
    :param dataset:
    :param baseline_stack: F stack for the (F_i - F) / F calculation
    :param output_dir:
    :param n_jobs:
    :return:
    """
    old_dataset = Blocks(shape_full=dataset.shape,
                         shape_block=dataset.shape_block)

    new_dataset = EmptySplitDataset(
        root=output_dir or dataset.root.parent,
        name="dff",
        shape_full=dataset.shape,
        shape_block=dataset.shape_block,
    )

    Parallel(n_jobs=n_jobs)(
        delayed(_dff)(
            dataset,
            old_block,
            str(new_dataset.root / new_dataset.files[i_block]),
            baseline_stack,
            **kwargs,
        )
        for i_block, (_, old_block) in enumerate(old_dataset.slices(as_tuples=True))
    )

    return new_dataset.finalize()
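
A minimal usage sketch for dff; `ds` and `baseline` are assumed to already exist and are not defined in the source, and the keyword values are illustrative. The extra keywords are forwarded through **kwargs to the _dff worker shown in Example #6:

# ds: a 4D (t, z, y, x) SplitDataset on disk; baseline: a (z, y, x) numpy array
# matching one time point of ds (both assumed).
dff_ds = dff(ds, baseline, n_jobs=8, multiplier=128, subtract=0)
# dff_ds is the finalized "dff" SplitDataset, written next to the input dataset
# (its root's parent) unless output_dir is given.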
Example #3
def test_cartesian_blocks():
    test_size = (20, 20)
    a = np.ones(test_size)
    blocks = Blocks(test_size, shape_block=(3, 7), padding=(1, 2))
    for idx, block in blocks.slices():
        a[block] = 0
    np.testing.assert_array_equal(a, np.zeros(test_size))
Example #4
def downsample(
    dataset: SplitDataset,
    downsampling=(1, 1, 2, 2),
    proc_block_shape=None,
    crop=None,
    output_dir=None,
    n_jobs=20,
    method=np.sum,
):
    """Downsamples a dataset

    :param dataset:
    :param downsampling: tuple of 4 (original, original, ds_factor, ds_factor)
    :param crop:
    :param output_dir:
    :param n_jobs:
    :param method:
    :return:
    """
    crop = crop or tuple((0, 0) for _ in dataset.shape)
    old_dataset = Blocks(
        shape_full=dataset.shape,
        crop=crop,
        shape_block=proc_block_shape
        if proc_block_shape else dataset.shape_block,
    )

    shape_downsampled = tuple(
        sc // ds for sc, ds in zip(old_dataset.shape_cropped, downsampling))
    block_size_downsampled = tuple(
        sb // ds for (sb, ds) in zip(old_dataset.shape_block, downsampling))

    new_dataset = EmptySplitDataset(
        root=output_dir or dataset.root.parent,
        name="downsampled",
        shape_full=shape_downsampled,
        shape_block=block_size_downsampled,
    )

    Parallel(n_jobs=n_jobs)(
        delayed(_downsample_block)(
            dataset,
            old_block,
            str(new_dataset.root / new_dataset.files[i_block]),
            downsampling,
            method,
        )
        for i_block, (_, old_block) in enumerate(old_dataset.slices(as_tuples=True))
    )

    return new_dataset.finalize()
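
A short usage sketch for downsample under the same assumptions (an existing 4D (t, z, y, x) SplitDataset `ds`; values are illustrative):

# Halve the y and x resolution by summing 2x2 pixel bins, leaving t and z untouched.
ds_small = downsample(ds, downsampling=(1, 1, 2, 2), n_jobs=8, method=np.sum)
# ds_small is the finalized "downsampled" SplitDataset; each dimension of the
# (cropped) input is divided by the corresponding downsampling factor.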
Example #5
def test_merging_rois():
    test_0 = np.array([
        [-1, -1, -1, -1, -1],
        [-1, 0, 0, 0, -1],
        [0, 0, 0, -1, 1],
        [-1, -1, 2, 2, 2],
        [-1, -1, 2, -1, -1],
    ])[None, :, :].astype(np.int32)
    test_1 = np.array([
        [-1, -1, -1, -1, -1],
        [-1, 0, 0, 0, -1],
        [0, 0, 0, 1, 1],
        [-1, -1, 2, 2, 2],
        [-1, 2, 2, 0, 0],
    ])[None, :, :].astype(np.int32)
    bl = Blocks((1, 5, 8), (1, 3, 3), padding=(0, 2, 2))
    block_arrays = np.array([[[test_0, test_1]]])
    labels = merge_rois(bl, block_arrays)
    np.testing.assert_array_equal(
        labels,
        np.array([[
            [-1, -1, -1, -1, -1, -1, -1, -1],
            [-1, 0, 0, 0, 2, 2, 2, -1],
            [0, 0, 0, 2, 2, 2, 3, 3],
            [-1, -1, 1, 1, 1, 4, 4, 4],
            [-1, -1, 1, -1, 4, 4, -1, -1],
        ]]),
    )
Example #6
def _dff(
    dataset,
    block,
    dest_filename,
    baseline_stack,
    multiplier=128,
    output_type=np.int16,
    subtract=0,
):
    stack = dataset[Blocks.block_to_slices(block)]
    # crop the corresponding slice of the baseline
    baseline_sel = baseline_stack[Blocks.block_to_slices(block)[1:]]
    dffi = (multiplier * (stack - baseline_sel) /
            np.maximum(baseline_sel - subtract, 1)).astype(output_type)
    fl.save(dest_filename, dict(stack_4D=dffi), compression="blosc")
    return None
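
For reference, the core arithmetic of _dff on plain numpy arrays, with toy values that are not from the source:

import numpy as np

stack = np.array([[120.0, 80.0],
                  [90.0, 110.0]])                 # 2 time points x 2 pixels
baseline = np.array([100.0, 100.0])               # per-pixel baseline F
multiplier, subtract = 128, 0
dffi = (multiplier * (stack - baseline) /
        np.maximum(baseline - subtract, 1)).astype(np.int16)
# dffi == [[ 25, -25], [-12,  12]]: a +/-20% change maps to +/-25 (128 * 0.2,
# truncated by the int16 cast), a +/-10% change to +/-12.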
Example #7
def _corr_map_plane(dataset, block, out_file, time_lims, window_size):
    if time_lims is None:
        time_slice = slice(None)
    else:
        time_slice = slice(*time_lims)
    vid = dataset[(time_slice, ) + Blocks.block_to_slices(block)]
    cmap = ca.correlation_map(vid, window_size)
    fl.save(out_file, dict(stack_3D=cmap))
Example #8
def _time_percentile(dataset,
                     block,
                     out_file,
                     method="mean",
                     percentile=50,
                     time_slice=None):
    if time_slice is None:
        time_slice = slice(None)
    else:
        time_slice = slice(*time_slice)

    vid = dataset[(time_slice, ) + Blocks.block_to_slices(block)]
    if method == "percentile":
        fl.save(out_file, dict(stack_3D=np.percentile(vid, percentile, 0)))
    elif method == "mean":
        fl.save(out_file, dict(stack_3D=np.mean(vid, 0)))
    else:
        raise AssertionError(f"Invalid method {method}")
Example #9
def _align_and_shift(
    dataset,
    block,
    ref,
    out_file,
    shift_plane,
    prefilter_sigma,
    upsample_factor,
    max_shift,
):
    stack = dataset[Blocks.block_to_slices(block)]
    shifted, shifts = align_single_planes_sobel(
        stack,
        np.fft.fftn(ref),
        prefilter_sigma=prefilter_sigma,
        upsample_factor=upsample_factor,
        maxshift=max_shift,
        offset=-shift_plane,
    )
    fl.save(out_file,
            dict(stack_4D=shifted, shifts=shifts),
            compression="blosc")

    print("Saved {}...".format(out_file))
Example #10
def _apply_shifts(dataset, block, out_file, shifts, shift_times):
    vid = dataset[Blocks.block_to_slices(block)]
    aligned = shift_stack(vid, range(block[0][0], block[0][1]), shifts,
                          shift_times)
    print(out_file)
    fl.save(out_file, dict(stack_4D=aligned, shifts=shifts))
Example #11
def _extract_traces_coords(dataset, block, out_file, coords, **kwargs):
    vid = dataset[Blocks.block_to_slices(block)]
    traces = extract_traces_around_points(vid, coords, **kwargs)
    fl.save(out_file, traces)
Example #12
def _extract_rois_block(dataset, block, out_file, rois):
    vid = dataset[Blocks.block_to_slices(block)]
    traces = ca.extract_traces(vid, rois)
    fl.save(out_file, traces)
Example #13
def _grow_block_rois(dataset, cmap_dataset, block, out_file, time_lims,
                     **kwargs):
    vid = dataset[(slice(*time_lims), ) + Blocks.block_to_slices(block)]
    cmap = cmap_dataset[Blocks.block_to_slices(block)]
    rois = ca.grow_rois(vid, cmap, **kwargs)
    fl.save(out_file, dict(stack_3D=rois))
Example #14
def _downsample_block(dataset, old_block, filename, factor, method):
    original = dataset[Blocks.block_to_slices(old_block)]
    downsampled = block_reduce(original, factor, method)
    ndims = len(downsampled.shape)
    fl.save(filename, {f"stack_{ndims}D": downsampled})
Example #15
# Inner helper of run_in_blocks (see Example #16): `function` and `fl` come from
# the enclosing scope, so this snippet is not runnable on its own.
def wrap_function(ds, *args, filename, new_block, **kwargs):
    original = ds[Blocks.block_to_slices(new_block)]
    processed = function(original, *args, **kwargs)
    fl.save(filename, {"stack_{}D".format(processed.ndim): processed})
Example #16
def run_in_blocks(function,
                  dataset: SplitDataset,
                  *extra_args,
                  per_block_args=None,
                  output_dir=None,
                  output_shape_full=None,
                  output_shape_block=None,
                  process_shape_block=None,
                  n_jobs=20,
                  output_name=None,
                  **kwargs):
    """

    Runs a function over a split dataset in parallel

    :param function: the function to be applied (e.g. delta f over f or regression)
    :param dataset: the split dataset
    :param extra_args: the other positional arguments to the function
    :param per_block_args: a dictionary or list of extra arguments
    :param output_dir: (optional) the output directory
    :param output_shape_full: the output shape, if it will be different\
    :param process_shape_block: the size of block to process
    :param output_shape_block: the output block size, if different
    :param n_jobs: number of jobs to parallelize to
    :param output_name: the name of the output dataset, the function name is used
        if left blank
    :param kwargs: extra keyword arguments to the function
    :return: the processed dataset
    """

    # TODO avoid duplication of execution on first block
    # TODO figure out output_shape_full
    process_shape_block = process_shape_block or dataset.shape_block

    # Automatically determine the output shape
    processing_blocks = Blocks(shape_full=dataset.shape_full,
                               shape_block=process_shape_block)

    _, new_block = list(processing_blocks.slices(as_tuples=True))[0]

    if output_shape_block is None:
        processed = function(
            dataset[Blocks.block_to_slices(new_block)], *extra_args,
            *([] if per_block_args is None else per_block_args[0]), **kwargs)
        output_shape_block = processed.shape

    new_dataset = EmptySplitDataset(
        root=output_dir or dataset.root.parent,
        name=output_name or function.__name__,
        shape_full=output_shape_full or dataset.shape,
        shape_block=output_shape_block or process_shape_block,
        resolution=dataset.resolution,
    )

    def wrap_function(ds, *args, filename, new_block, **kwargs):
        original = ds[Blocks.block_to_slices(new_block)]
        processed = function(original, *args, **kwargs)
        fl.save(filename, {"stack_{}D".format(processed.ndim): processed})

    Parallel(n_jobs=n_jobs)(
        delayed(wrap_function)(
            dataset,
            *extra_args,
            *([] if per_block_args is None else per_block_args[i_block]),
            new_block=new_block,
            filename=str(new_dataset.root / new_dataset.files[i_block]),
            **kwargs,
        )
        for i_block, (_, new_block) in enumerate(processing_blocks.slices(as_tuples=True))
    )
    return new_dataset.finalize()
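
A hedged usage sketch for run_in_blocks, applying a per-block spatial filter; the dataset `ds`, the choice of scipy.ndimage.gaussian_filter as `function`, and the sigma value are illustrative assumptions:

from scipy.ndimage import gaussian_filter

# Smooth every block of an existing SplitDataset `ds` (assumed); 2.0 is passed
# through *extra_args as gaussian_filter's sigma. Since gaussian_filter preserves
# shape, output_shape_block is inferred from the first processed block.
smoothed = run_in_blocks(gaussian_filter, ds, 2.0, n_jobs=8, output_name="smoothed")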
Example #17
def _average_block(dataset, block, start, trial_duration, n_trials, out_file):
    vid = dataset[(slice(start, start + trial_duration * n_trials), ) +
                  Blocks.block_to_slices(block)[1:]]
    vid_trials = np.reshape(vid, (n_trials, trial_duration) + vid.shape[1:])

    fl.save(out_file, dict(stack_4D=np.sum(vid_trials.astype(np.uint32), 0)))