Example #1
 def save_data(self):
     if self.base_dir is not None:
         if self.dc is not None:
             # save the stimulus movie if it is generated
             movie, movie_times = self.window_display.widget_display.get_movie()
             if movie is not None:
                 if self.stim_movie_format == "h5":
                     movie_dict = dict(
                         movie=np.stack(movie, 0), movie_times=movie_times
                     )
                     fl.save(
                         self.filename_base() + "stim_movie.h5",
                         movie_dict,
                         compression="blosc",
                     )
                 elif self.stim_movie_format == "mp4":
                     imageio.mimwrite(
                         self.filename_base() + "stim_movie.mp4",
                         movie,
                         fps=30,
                         quality=None,
                         ffmpeg_params=[
                             "-pix_fmt",
                             "yuv420p",
                             "-profile:v",
                             "baseline",
                             "-level",
                             "3",
                         ],
                     )
                 else:
                     raise Exception(
                         "Tried to write the stimulus video into an unsupported format"
                     )
     super().save_data()
Example #2
def extract_traces(dataset: SplitDataset,
                   rois,
                   output_dir=None,
                   block_duration=40):
    new_dataset = EmptySplitDataset(
        shape_full=dataset.shape,
        shape_block=(block_duration, ) + dataset.shape[1:],
        root=output_dir or dataset.root.parent,
        name="traces",
    )
    Parallel(n_jobs=20)(delayed(_extract_rois_block)(
        dataset,
        new_block,
        str(new_dataset.root / new_dataset.files[i_block]),
        rois=rois,
    ) for i_block, (_,
                    new_block) in enumerate(new_dataset.slices(
                        as_tuples=True)))

    trace_dset = new_dataset.finalize()
    traces = np.concatenate(
        [fl.load(str(f), "/traces") for f in trace_dset.files.flatten()], 1)
    first_file = trace_dset.files.flatten()[0]
    coords = fl.load(str(first_file), "/coords")
    areas = fl.load(str(first_file), "/areas")
    trace_data = dict(traces=traces, coords=coords, areas=areas)
    fl.save(str(trace_dset.root.parent / "traces.h5"), trace_data)

    return trace_data
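
Since extract_traces writes traces.h5 as a plain dict, individual arrays can be read back by group path, as the fl.load tests further down demonstrate. A minimal read-back sketch (the file path is an assumption, not part of the example above):

import flammkuchen as fl

# Hypothetical path to the file written by extract_traces above.
trace_file = "/data/experiment/traces.h5"

# Load a single array by its group path, or several groups at once.
traces = fl.load(trace_file, "/traces")
coords, areas = fl.load(trace_file, ["/coords", "/areas"])
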
Example #3
def extract_traces_coords(dataset: SplitDataset,
                          coords,
                          output_dir=None,
                          block_duration=60,
                          n_jobs=5,
                          **kwargs):
    new_dataset = EmptySplitDataset(
        shape_full=dataset.shape,
        shape_block=(block_duration, ) + dataset.shape[1:],
        root=output_dir or dataset.root.parent,
        name="traces",
    )
    Parallel(n_jobs=n_jobs)(
        delayed(_extract_traces_coords)(dataset,
                                        new_block,
                                        str(new_dataset.root /
                                            new_dataset.files[i_block]),
                                        coords=coords,
                                        **kwargs)
        for i_block, (
            _, new_block) in enumerate(new_dataset.slices(as_tuples=True)))

    trace_dset = new_dataset.finalize()
    traces = np.concatenate(
        [fl.load(str(f), "/traces") for f in trace_dset.files.flatten()], 1)
    trace_data = dict(traces=traces, coords=coords)
    fl.save(str(trace_dset.root.parent / "traces.h5"), trace_data)

    return trace_data
Example #4
def save_to_split_dataset(
    data,
    root_name,
    block_size=None,
    crop=None,
    padding=0,
    prefix="",
    compression="blosc",
):
    """Function to save block of data into a split_dataset."""

    new_name = prefix + ("_cropped" if crop is not None else "")
    padding = (data.padding if padding is not None
               and isinstance(data, Blocks) else padding)
    blocks = EmptySplitDataset(
        shape_full=data.shape,
        shape_block=data.shape_block if block_size is None else block_size,
        crop=crop,
        padding=padding,
        root=root_name,
        name=new_name,
    )
    for filename, (idxs, slices) in zip(blocks.files, blocks.slices()):
        fl.save(
            str(blocks.root / filename),
            {"stack_{}D".format(len(blocks.shape_cropped)): data[slices]},
            compression=compression,
        )

    return blocks.finalize()
Example #5
 def test_compression_true(self):
     rs = np.random.RandomState(1234)
     with tmp_filename() as fn:
         x = rs.normal(size=(1000, 5))
         for comp in [None, True, 'blosc', 'zlib', ('zlib', 5)]:
             fl.save(fn, x, compression=comp)
             x1 = fl.load(fn)
             assert (x == x1).all()
Example #6
def _corr_map_plane(dataset, block, out_file, time_lims, window_size):
    if time_lims is None:
        time_slice = slice(None)
    else:
        time_slice = slice(*time_lims)
    vid = dataset[(time_slice, ) + Blocks.block_to_slices(block)]
    cmap = ca.correlation_map(vid, window_size)
    fl.save(out_file, dict(stack_3D=cmap))
Example #7
 def return_cached(*args, **kwargs):
     f_name = filename or func.__name__
     output_file = output_dir / (f_name + ".h5")
     if output_file.is_file():
         return fl.load(output_file)
     else:
         res = func(*args, **kwargs)
         fl.save(output_file, res)
         return res
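
The snippet above is the inner closure of a file-based caching helper: it recomputes only when the target .h5 file is missing. A plausible sketch of the enclosing decorator, assuming the closure variables output_dir and filename come from an outer function (the name cache_to_h5 is hypothetical, not from the source):

from pathlib import Path
import functools

import flammkuchen as fl


def cache_to_h5(output_dir, filename=None):
    """Hypothetical decorator factory: cache a function's result to an .h5 file."""
    output_dir = Path(output_dir)

    def decorator(func):
        @functools.wraps(func)
        def return_cached(*args, **kwargs):
            f_name = filename or func.__name__
            output_file = output_dir / (f_name + ".h5")
            if output_file.is_file():
                return fl.load(output_file)
            res = func(*args, **kwargs)
            fl.save(output_file, res)
            return res

        return return_cached

    return decorator
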
Example #8
 def save_chunk(self):
     self.logger.log_message("saved chunk")
     fl.save(
         Path(self.save_parameters.output_dir) /
         "original/{:04d}.h5".format(self.i_chunk),
         {"stack_4D": self.current_data[:self.i_in_chunk, :, :, :]},
         compression="blosc",
     )
     self.i_in_chunk = 0
     self.i_chunk += 1
Example #9
def _merge_rois(rois: SplitDataset):
    def load_array(x):
        return fl.load(str(x), "/stack_3D")

    vfunc = np.vectorize(load_array, otypes=[object])  # np.object alias was removed in NumPy 1.24
    loaded = vfunc(rois.files)
    merged_rois = merging.merge_rois(rois, loaded)
    fl.save(str(rois.root.parent / "merged_rois.h5"),
            dict(stack_3D=merged_rois))
    return merged_rois
Example #10
    def test_load_multiple_groups(self):
        with tmp_filename() as fn:
            x = dict(one=np.ones(10), two='string', three=200)
            fl.save(fn, x)

            one, three = fl.load(fn, ['/one', '/three'])
            np.testing.assert_array_equal(one, x['one'])
            assert three == x['three']

            three, two = fl.load(fn, ['/three', '/two'])
            assert three == x['three']
            assert two == x['two']
Example #11
def to_h5(results: Dict, task: str, method_details: Dict, args: Dict):
    """Export results in a single .h5 file for submission. 
    """
    output = {
        "method_results": results,
        "task": task,
        "method_details": method_details,
        "args": args
    }
    dd.save(f"{task}-{method_details['MethodTitle']}.h5",
            output,
            compression="blosc")
Example #12
    def test_load_group(self):
        with tmp_filename() as fn:
            x = dict(one=np.ones(10), two='string')
            fl.save(fn, x)

            one = fl.load(fn, '/one')
            np.testing.assert_array_equal(one, x['one'])
            two = fl.load(fn, '/two')
            assert two == x['two']

            full = fl.load(fn, '/')
            np.testing.assert_array_equal(x['one'], full['one'])
            assert x['two'] == full['two']
Example #13
    def complete_plane(self):
        fl.save(
            Path(self.save_parameters.output_dir) /
            "original/{:04d}.h5".format(self.i_block),
            {"stack_4D": self.current_data},
            compression="blosc",
        )
        self.i_block += 1

        if self.i_block % self.save_parameters.notification_frequency == 0 and \
                self.save_parameters.notification_email != "None":
            self.send_email_update(frame=self.current_data[self.i_in_plane - 1,
                                                           0, :, :])

        self.i_in_plane = 0
Example #14
    def test_force_pickle(self):
        with tmp_filename() as fn:
            x = dict(one=dict(two=np.arange(10)),
                     three='string')
            xf = dict(one=dict(two=x['one']['two']),
                      three=x['three'])

            fl.save(fn, xf)
            xs = fl.load(fn)

            np.testing.assert_array_equal(x['one']['two'], xs['one']['two'])
            assert x['three'] == xs['three']

            # Try direct loading one
            two = fl.load(fn, '/one/two')
            np.testing.assert_array_equal(x['one']['two'], two)
Example #15
def _dff(
    dataset,
    block,
    dest_filename,
    baseline_stack,
    multiplier=128,
    output_type=np.int16,
    subtract=0,
):
    stack = dataset[Blocks.block_to_slices(block)]
    baseline_sel = baseline_stack[Blocks.block_to_slices(
        block)[1:]]  # crop the corresponding slice of the baseline
    dffi = (multiplier * (stack - baseline_sel) /
            np.maximum(baseline_sel - subtract, 1)).astype(output_type)
    fl.save(dest_filename, dict(stack_4D=dffi), compression="blosc")
    return None
Example #16
def _time_percentile(dataset,
                     block,
                     out_file,
                     method="mean",
                     percentile=50,
                     time_slice=None):
    if time_slice is None:
        time_slice = slice(None)
    else:
        time_slice = slice(*time_slice)

    vid = dataset[(time_slice, ) + Blocks.block_to_slices(block)]
    if method == "percentile":
        fl.save(out_file, dict(stack_3D=np.percentile(vid, percentile, 0)))
    elif method == "mean":
        fl.save(out_file, dict(stack_3D=np.mean(vid, 0)))
    else:
        raise AssertionError(f"Invalid method {method}")
Example #17
 def save_block(self, i, array_to_write):
     """
     :param i:
     :param array_to_write:
     :return:
     """
     if not array_to_write.shape == self.shape_block:
         print("Array size smaller than block dim")
     fname = "{:03d}.h5".format(i)
     if self.verbose:
         print("Saving {}".format(str(self.path / fname)))
     self.files.append(fname)
     to_save = dict(
         stack_4D=array_to_write,
         position=self.block_starts[self.linear_to_cartesian(i) +
                                    (slice(None), )],
     )
     fl.save(str(self.path / fname), to_save, compression="blosc")
Example #18
    def save_block_data(self, n, data, verbose=False):
        """Optional method to save data in a block. Often we don't use it,
        as we directly save data in the parallelized function. Might be good to
        find ways of centralizing saving here?
        :param n: index of the block we are saving to;
        :param data: data to be poured into the block;
        :param verbose:
        :return:
        """
        fname = "{:04d}.h5".format(n)
        if verbose:
            print("Saving ", str(self.root / fname))

            if data.shape != self.shape_block:
                print(" - data has different dimension from block!")

        to_save = {"stack": data}

        fl.save(str(self.root / fname), to_save, compression="blosc")
Example #19
    def test_load_slice(self):
        with tmp_filename() as fn:
            x = np.arange(3 * 4 * 5).reshape((3, 4, 5))
            fl.save(fn, dict(x=x))

            s = fl.aslice[:2]
            xs = fl.load(fn, '/x', sel=s)
            np.testing.assert_array_equal(xs, x[s])

            s = fl.aslice[:, 1:3]
            xs = fl.load(fn, '/x', sel=s)
            np.testing.assert_array_equal(xs, x[s])

            xs = fl.load(fn, sel=s, unpack=True)
            np.testing.assert_array_equal(xs, x[s])

            fl.save(fn, x)
            xs = fl.load(fn, sel=s)
            np.testing.assert_array_equal(xs, x[s])
Example #20
    def apply_crop(self, crop):
        """Take out the data with a crop"""
        # TODO there is the crop attribute, which is a lazy crop; this should actually
        #  return a non-cropped dataset
        ds_cropped = EmptySplitDataset(
            shape_full=self.shape,
            shape_block=self.shape_block,
            padding=self.padding,
            crop=crop,
            root=self.root.parent,
            name=self.root.name + "_cropped",
        )
        # the slices iterator does not return just the slices, but also the indices
        for (i_slice, block_slices), file_name in zip(ds_cropped.slices(),
                                                      ds_cropped.files):
            fl.save(
                str(ds_cropped.root / file_name),  # write into the new cropped dataset
                {"stack": self[block_slices]},
            )

        return ds_cropped.finalize()
Example #21
def _align_and_shift(
    dataset,
    block,
    ref,
    out_file,
    shift_plane,
    prefilter_sigma,
    upsample_factor,
    max_shift,
):
    stack = dataset[Blocks.block_to_slices(block)]
    shifted, shifts = align_single_planes_sobel(
        stack,
        np.fft.fftn(ref),
        prefilter_sigma=prefilter_sigma,
        upsample_factor=upsample_factor,
        maxshift=max_shift,
        offset=-shift_plane,
    )
    fl.save(out_file,
            dict(stack_4D=shifted, shifts=shifts),
            compression="blosc")

    print("Saved {}...".format(out_file))
Example #22
if args.day2 is not None: day2 = args.day2
if args.hour1 is not None: hour1 = args.hour1
if args.hour2 is not None: hour2 = args.hour2

date = datetime(year, month1, day1, hour1)

while date < datetime(year, month2, day2, hour2):
    print('processing ', date)
    outfile = date.strftime('TPP%y%m%d%H.hdf5')
    fullname = os.path.join(maindir, date.strftime('%Y/%m'), outfile)
    fdd = ECMWF('FULL-EA', date)
    fdd._get_T()
    fdd._mkp()
    fdd.close()
    fde = fdd.shift2west(-20)
    fdf = fde.extract(lonRange=[-10, 160], latRange=[0, 50], varss='All')
    fdf._CPT()
    fdf._WMO()
    tpp = {}
    tpp['Twmo'] = fdf.d2d['Twmo']
    tpp['pwmo'] = fdf.d2d['pwmo']
    tpp['Tcold'] = fdf.d2d['Tcold']
    tpp['pcold'] = fdf.d2d['pcold']
    tpp['nlon'] = fdf.nlon
    tpp['nlat'] = fdf.nlat
    tpp['lats'] = fdf.attr['lats']
    tpp['lons'] = fdf.attr['lons']
    tpp['date'] = fdd.date
    fl.save(fullname, tpp, compression='zlib')
    date += timedelta(hours=3)
Example #23
def align_volumes_with_filtering(
    dataset,
    output_dir=None,
    ref_window_halfsize=25,
    fft_reference=None,
    register_every=100,
    reg_halfwin=30,
    prefilter_sigma=3.3,
    block_size=120,
    n_jobs=10,
    verbose=False,
):
    """Aligns a dataset with prefiltering, by taking averages

    :param dataset: the input dataset
    :param output_dir: (optional, not recommended) the output folder
    :param ref_window_halfsize: the length of the time-average taken for the reference
    :param fft_reference: (optional) a fourier transform of a reference stack,
        if not supplied, one will be calculated from the middle of the dataset
    :param register_every: how many frames apart are the points which will
        be registered to the reference
    :param reg_halfwin: the length of the time window to take the average for
        registration
    :param prefilter_sigma: the width of the filter for sobel-prefiltering before
        the alignment
    :param block_size: the duration (in frames) of the aligned blocks
    :param n_jobs: number of parallel jobs
    :param verbose: if True, print progress messages
    :return: the finalized, aligned dataset
    """
    time_middle = dataset.shape[0] // 2

    # prepare the destination
    new_dataset = EmptySplitDataset(
        root=output_dir or dataset.root.parent,
        name="aligned",
        shape_full=dataset.shape,
        shape_block=(block_size, ) + dataset.shape_block[1:],
    )

    # calculate the reference around the temporal middle of the dataset if
    # a reference is not provided.

    if fft_reference is None:
        fft_reference = np.fft.fftn(
            sobel_stack(
                np.mean(
                    dataset[time_middle - ref_window_halfsize:time_middle +
                            ref_window_halfsize, :, :, :, ],
                    0,
                ),
                prefilter_sigma,
            ))

    # set the frames at which the registration happens. Other shifts will
    # be interpolated
    if verbose:
        print("finding shifts...")
    shift_centres = range(reg_halfwin, dataset.shape[0] - reg_halfwin,
                          register_every)
    shift_times = np.array(list(shift_centres))

    # find the shifts in parallel
    shifts = Parallel(n_jobs=n_jobs)(
        delayed(_get_shifts)(dataset, t - reg_halfwin, t +
                             reg_halfwin, fft_reference, prefilter_sigma)
        for t in shift_centres)
    shifts = np.stack(shifts, 0)

    if verbose:
        print("Saving shifts...")
    # save the shifts
    fl.save(
        str(new_dataset.root / "shifts_sobel.h5"),
        dict(
            shift_times=shift_times,
            shifts=shifts,
            parameters=dict(
                ref_window_halfsize=ref_window_halfsize,
                fft_reference=fft_reference,
                register_every=register_every,
                reg_halfwin=reg_halfwin,
                prefilter_sigma=prefilter_sigma,
                block_size=block_size,
            ),
        ),
    )

    if verbose:
        print("Applying shifts...")
    # apply them in parallel
    Parallel(n_jobs=n_jobs)(delayed(_apply_shifts)(
        dataset,
        new_block,
        str(new_dataset.root / new_dataset.files[i_block]),
        shifts,
        shift_times,
    ) for i_block, (_,
                    new_block) in enumerate(new_dataset.slices(
                        as_tuples=True)))
    return new_dataset.finalize()
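
A minimal usage sketch for align_volumes_with_filtering (the raw_ds object and the inspection of the shifts file are assumptions; in the source, dataset is a SplitDataset of the raw recording):

# Hypothetical usage; raw_ds stands for an existing SplitDataset of the raw recording.
aligned = align_volumes_with_filtering(raw_ds, block_size=120, n_jobs=10, verbose=True)

# The computed shifts are saved next to the aligned blocks (see the fl.save call above),
# presumably under the aligned dataset's root.
shifts_info = fl.load(str(aligned.root / "shifts_sobel.h5"))
print(shifts_info["shifts"].shape, shifts_info["shift_times"].shape)
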
Example #24
def _apply_shifts(dataset, block, out_file, shifts, shift_times):
    vid = dataset[Blocks.block_to_slices(block)]
    aligned = shift_stack(vid, range(block[0][0], block[0][1]), shifts,
                          shift_times)
    print(out_file)
    fl.save(out_file, dict(stack_4D=aligned, shifts=shifts))
Example #25
def align_2p_volume(
    dataset,
    output_dir=None,
    reference=None,
    n_frames_ref=10,
    across_planes=None,
    prefilter_sigma=3.3,
    upsample_factor=10,
    max_shift=15,
    n_jobs=20,
    verbose=True,
):
    """Function for complete alignment of two-photon, planar acquired stack

    :param dataset: input H5Dataset
    :param output_dir: optional, output destination directory, subdirectory aligned will appear
    :param reference: optional, reference to align to
    :param n_frames_ref: number of frames to take as reference mean, if reference is being calculated
    :param across_planes: bool, True by default if reference is not provided, whether to align across planes
    :param prefilter_sigma: feature size to filter for better alignment. if < 0 no filtering will take place
    :param upsample_factor: granularity of subpixel shift
    :param max_shift: maximum shift allowed
    :param n_jobs: number of parallel jobs
    :param verbose: if True, print progress messages
    :return: the finalized, aligned dataset
    """

    # prepare the destination
    new_dataset = EmptySplitDataset(
        root=output_dir or dataset.root.parent,
        name="aligned",
        shape_full=dataset.shape,
        shape_block=(dataset.shape_block[0], 1) + dataset.shape_block[2:],
    )

    if verbose:
        print("Calculating filtered reference")
    if reference is None:
        t_mid = dataset.shape[0] // 2
        reference = dataset[t_mid:t_mid + n_frames_ref, :, :, :].mean(0)
        if across_planes is None:
            across_planes = False
    else:
        if across_planes is None:
            across_planes = True

    sob_ref = sobel_stack(reference, prefilter_sigma)

    n_planes = reference.shape[0]
    shifts_planes = np.zeros((n_planes, 2))

    centre_plane = int(n_planes // 2)

    if across_planes:
        if verbose:
            print("Registering across planes...")
        # Find between-planes shifts
        for i in range(centre_plane, reference.shape[0] - 1):
            s, _, _ = phase_cross_correlation(reference[i, :, :],
                                              reference[i + 1, :, :], 10)
            shifts_planes[i + 1, :] = shifts_planes[i, :] + s
        for i in range(centre_plane, 0, -1):
            s, _, _ = phase_cross_correlation(reference[i, :, :],
                                              reference[i - 1, :, :], 10)
            shifts_planes[i - 1, :] = shifts_planes[i, :] + s

    fl.save(dataset.root.parent / "shifts.h5", shifts_planes)

    if verbose:
        print("Aligning individual planes...")
    Parallel(n_jobs=n_jobs)(delayed(_align_and_shift)(
        dataset,
        new_block,
        sob_ref[i_block:i_block + 1, :, :],
        str(new_dataset.root / new_dataset.files[i_block]),
        shifts_planes[i_block, :],
        prefilter_sigma,
        upsample_factor,
        max_shift,
    ) for i_block, (_,
                    new_block) in enumerate(new_dataset.slices(
                        as_tuples=True)))

    return new_dataset.finalize()
Example #26
        np.concatenate([rel_scores, amp_scores, reord_rel], 0).T,
        columns=[f"rel_{i}" for i in range(n_stims)] +
        [f"amp_{i}"
         for i in range(n_stims)] + [f"rel_reord_{i}" for i in range(n_stims)],
    )

    df["cid"] = [f"{fid}_{i:05.0f}" for i in range(n_cells)]  # cell ID
    df["gen"] = gen  # genotype
    df["gen_long"] = gen_long  # genotype
    df["fid"] = fid  # fish ID
    df["in_tectum"] = in_tectum  # if ROI is in tectum
    df["max_rel"] = np.nanmax(rel_scores, 0)  # maximum reliability
    df["max_rel_i"] = np.argmax(rel_scores, 0)  # maximum reliability position
    df["max_amp"] = np.nanmax(amp_scores, 0)  # maximum amplitude

    df["z"] = coords[:, 0]  # vertical pos, planes
    df["x"] = coords[:, 1]  # a-p pos, pixels
    df["y"] = coords[:, 2]  # l-r pos, pixels

    # rigid coordinate translation using manually defined translations:
    offsets = all_offsets[path.name]
    coords -= offsets
    df["z_trasf"] = coords[:, 0] * z_res  # vertical pos
    df["x_trasf"] = coords[:, 1] * PX_SIZE  # a-p pos, um
    df["y_trasf"] = coords[:, 2] * PX_SIZE  # l-r pos, um

    fl.save(path / "cell_df.h5", df)

# Save for quick loading in cumulative plots:
fl.save(IMAGING_DATA_MASTER_PATH / "stim_pos.h5", unique_stim_pos)
Example #27
 def save(self, filename, compression='blosc'):
     flammkuchen.save(filename, self, compression=compression)
Example #28
        Returns
        -------

        """
        return (self.cells[tup] for tup in self.cells
                if self.cells[tup] != -1 and self.cell_distance(cell, tup))

    def update(self, point, index):
        """updates the grid with the new point

        Parameters
        ----------
        point :

        index :


        Returns
        -------

        """
        self.cells[self.cellify(point)] = index

    def __str__(self):
        return self.cells.__str__()


if __name__ == "__main__":
    bg = 255 - poisson_disk_background((640, 640), 12, 2)
    fl.save("poisson_dense.h5", bg)
Example #29
def _extract_traces_coords(dataset, block, out_file, coords, **kwargs):
    vid = dataset[Blocks.block_to_slices(block)]
    traces = extract_traces_around_points(vid, coords, **kwargs)
    fl.save(out_file, traces)
Example #30
def _extract_rois_block(dataset, block, out_file, rois):
    vid = dataset[Blocks.block_to_slices(block)]
    traces = ca.extract_traces(vid, rois)
    fl.save(out_file, traces)