Example #1
def process_framestack(frames,
                       min_area=9,
                       verbose=False,
                       do_dfof_denoising=True,
                       baseline_fn=multi_scale_simple_baseline,
                       baseline_kw=dict(smooth_levels=(10, 20, 40, 80)),
                       pipeline=simple_pipeline_,
                       labeler=percentile_label,
                       labeler_kw=None):
    """
    Default pipeline for processing a stack of Ca-fluorescence frames to find astrocytic Ca events.
    Input: F(t): temporal stack of frames (Nframes x Nx x Ny)
    Output: collection of three frame stacks: thresholded event signals (fsx), ΔF/F0 signals
            (optionally denoised, fs_dfof), and the baseline F0(t) (fs_f0):
            fseq.FStackColl([fsx, fs_dfof, fs_f0])
    """
    from imfun import fseq
    if verbose:
        print('calculating baseline F0(t)')
    #fs_f0 = get_baseline_frames(frames[:],baseline_fn=baseline_fn, baseline_kw=baseline_kw)
    fs_f0 = calculate_baseline_pca_asym(frames[:], verbose=True, niter=20)
    fs_f0 = fseq.from_array(fs_f0)
    fs_f0.meta['channel'] = 'F0'

    dfof = frames / fs_f0.data - 1

    if do_dfof_denoising:
        if verbose:
            print('filtering ΔF/F0 data')
        dfof = patch_pca_denoise2(dfof,
                                  spatial_filter=3,
                                  temporal_filter=1,
                                  npc=5)
    fs_dfof = fseq.from_array(dfof)
    fs_dfof.meta['channel'] = 'ΔF_over_F0'

    if verbose:
        print('detecting events')
    ## todo: decide, whether we actually need the cleaning step.
    ## another idea: use dfof for detection to avoid FP, use dfof_cleaned for reconstruction because of better SNR?
    ##               but need to show that FP is lower, TP is OK and FN is low for this combo
    ## motivation:   using filters in dfof_cleaned introduces spatial correlations, which may lead to higher FP
    ##               (with low amplitude though). Alternative option would be to guess a correct amplitude threshold
    ##               afterwards
    ## note: but need to test that on real data, e.g. on slices with OGB and gcamp
    fsx = make_enh4(dfof,
                    nhood=2,
                    kind='pca',
                    pipeline=pipeline,
                    labeler=labeler,
                    labeler_kw=labeler_kw)
    coll_ = EventCollection(fsx.data, min_area=min_area)
    meta = fsx.meta
    fsx = fseq.from_array(fsx.data * (coll_.to_filtered_array() > 0),
                          meta=meta)
    fscoll = fseq.FStackColl([fsx, fs_dfof, fs_f0])
    return fscoll
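A minimal usage sketch (not part of the original snippet), assuming the function above is in scope and imfun/ucats and their dependencies are installed; the synthetic Poisson stack below merely stands in for a real Ca-imaging recording.

import numpy as np

# synthetic (Nframes x Nx x Ny) stack as a stand-in for real data
frames = np.random.poisson(lam=50.0, size=(200, 64, 64)).astype(np.float32)
coll = process_framestack(frames, verbose=True)
events, dfof, f0 = coll.stacks   # thresholded events, ΔF/F0, baseline F0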
Example #2
def load_timelapse(self, name):
    "Loads a TXY record"
    numframes = self.get_size('T')
    axes = self.get_axes()
    reader = bioformats.ImageReader(name)
    kw = dict(series=self.index, rescale=False)
    # note: the range starts at 1, so frame 0 of the series is skipped
    images = np.array(
        [reader.read(t=i, **kw) for i in range(1, numframes)])
    out = fseq.from_array(images)
    out.meta['axes'] = axes
    return out
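A standalone sketch of the same read pattern (not part of the original class): python-bioformats normally needs a running Java VM via javabridge, and the file name, series index and frame count below are placeholders.

import javabridge
import bioformats
import numpy as np
from imfun import fseq

javabridge.start_vm(class_path=bioformats.JARS)
try:
    reader = bioformats.ImageReader('recording.oib')   # hypothetical file
    kw = dict(series=0, rescale=False)
    # read the first 100 time points into a TXY array and wrap it as a frame sequence
    images = np.array([reader.read(t=i, **kw) for i in range(100)])
    fs = fseq.from_array(images)
finally:
    javabridge.kill_vm()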
Example #3
def load_record(name, channel_name='fluo', with_plot=True, ca_channel=1):
    name_low = name.lower()
    read_as_array = True
    if endswith_any(name_low, ('.tif', '.tiff')):
        reader = tifffile
    elif endswith_any(name_low, ('.lsm',)):
        reader = fseq.from_lsm
        read_as_array = False
    elif endswith_any(name_low, ('.oib',)):
        reader = fseq.from_oif
        read_as_array = False
    elif endswith_any(name_low, ('.czi',)):
        reader = czifile
    elif endswith_any(name_low, ('.mes',)):
        reader = fseq.from_mes
        read_as_array = False
    else:
        # todo: raise an exception instead of returning None
        print("Can't find an appropriate reader for the input file format")
        return

    if read_as_array:
        frames = np.squeeze(reader.imread(name))
        fs = fseq.from_array(frames)
    else:
        fs = reader(name)
    fs.meta['channel'] = channel_name
    fs.meta['file_path'] = name

    if with_plot:
        fig, ax = plt.subplots(1, 2, gridspec_kw=dict(width_ratios=(1, 3)), figsize=(12, 3))
        if isinstance(fs, fseq.FStackColl) and len(fs.stacks) > 1:
            fsca = fs.stacks[ca_channel]
            fsca.meta['channel'] = channel_name
            fsca.meta['file_path'] = name
        else:
            fsca = fs

        mf_raw = fsca.mean_frame()

        mf = simple_rescale(mf_raw)
        bright_mask = mf > np.percentile(mf, 50)
        v = np.array([np.mean(f[bright_mask]) for f in fs])
        ax[0].imshow(mf, cmap='gray')
        ax[1].plot(v)
        ax[1].set_title(os.path.basename(name) + ': mean fluorescence')
        ax[1].set_xlabel('frame #')
        ax[1].grid(True)
    return fs
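A minimal usage sketch (not part of the original): the file name is a placeholder; load_record stores the channel name and file path in the returned stack's metadata.

import matplotlib.pyplot as plt

fs = load_record('slice1.tif', channel_name='fluo', with_plot=True)   # hypothetical file
if fs is not None:
    print(fs.meta['channel'], fs.meta['file_path'])
plt.show()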
Example #4
def make_enh4(frames,
              pipeline=simple_pipeline_,
              labeler=percentile_label,
              kind='pca',
              nhood=5,
              stride=2,
              mask_of_interest=None,
              pipeline_kw=None,
              labeler_kw=None):
    from imfun import fseq
    #coll = signals_from_array_pca_cluster(frames,stride=2,dbscan_eps=0.05,nhood=5,walpha=0.5)
    if kind.lower() == 'corr':
        coll = signals_from_array_correlation(
            frames,
            stride=stride,
            nhood=nhood,
            mask_of_interest=mask_of_interest)
    elif kind.lower() == 'pca':
        coll = signals_from_array_pca_cluster(
            frames,
            stride=stride,
            nhood=nhood,
            mask_of_interest=mask_of_interest,
            ncomp=2,
        )
    else:
        coll = signals_from_array_avg(frames,
                                      stride=stride,
                                      patch_size=nhood * 2 + 1,
                                      mask_of_interest=mask_of_interest)
    print('\nTime-signals grouped, processing (may take a long time) ...')
    if pipeline_kw is None:
        pipeline_kw = {}
    pipeline_kw.update(labeler=labeler, labeler_kw=labeler_kw)
    coll_enh = process_signals_parallel(
        coll,
        pipeline=pipeline,
        pipeline_kw=pipeline_kw,
    )
    print('Time-signals processed, recombining to video...')
    out = combine_weighted_signals(coll_enh, frames.shape)
    fsx = fseq.from_array(out)
    print('Done')
    fsx.meta['channel'] = '-'.join(['newrec4', kind])
    return fsx
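A minimal usage sketch (not part of the original): a small random array stands in for a real ΔF/F0 stack, and kind='pca' selects the PCA-clustering grouping branch shown above.

import numpy as np

dfof = 0.05 * np.random.randn(200, 64, 64).astype(np.float32)   # stand-in ΔF/F0 stack
fs_events = make_enh4(dfof, kind='pca', nhood=2, stride=2)
print(fs_events.meta['channel'])   # 'newrec4-pca'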
Example #5
def make_enh5(dfof,
              twindow=50,
              nhood=5,
              stride=2,
              temporal_filter=3,
              verbose=False):
    from imfun import fseq
    amask = activity_mask_median_filtering(dfof, nw=7, verbose=verbose)
    nsf = mad_std(dfof, axis=0)
    dfof_denoised = svd_denoise_tslices(dfof,
                                        twindow,
                                        mask_of_interest=amask,
                                        temporal_filter=temporal_filter,
                                        verbose=verbose)
    mask_active = dfof_denoised > nsf
    mask_active = (opening_of_closing(mask_active)
                   + ndi.median_filter(mask_active, 3)) > 0
    dfof_denoised2 = np.array(
        [avg_filter_greater(f, 0) for f in dfof_denoised * mask_active])
    fsx = fseq.from_array(dfof_denoised2)
    if verbose:
        print('Done')
    fsx.meta['channel'] = 'newrec5'
    return fsx
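A minimal usage sketch (not part of the original), again with a random array standing in for a real ΔF/F0 recording.

import numpy as np

dfof = 0.05 * np.random.randn(300, 64, 64).astype(np.float32)   # stand-in ΔF/F0 stack
fs_denoised = make_enh5(dfof, twindow=50, nhood=5, verbose=True)
print(fs_denoised.meta['channel'])   # 'newrec5'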
Example #6
def stabilize_motion(fs, args, nametag='', suff=None):
    "Try to remove motion artifacts by image registration"
    morphology_channel = args.morphology_channel
    if isinstance(fs, fseq.FStackColl) and len(fs.stacks) > 1:
        fsm = fs.stacks[morphology_channel]
    else:
        fsm = fs

    if suff is None:
        suff = ''
    models = [m if isinstance(m, str) else m[0] for m in args.stab_model]
    suff = suff + '-' + '-'.join(models) + '-ch-%d' % args.morphology_channel
    #warps_name = fs.meta['file_path']+suff+'.npy'
    #warps_name = nametag+suff+'-warps.npy'
    warps_name = '-'.join((nametag, suff, 'warps.npy'))
    print('warps name:', warps_name)
    fsm_filtered = None
    newframes = None


    # If previously calculated warps exist and no motion-correction movies are requested, apply them and return
    if os.path.exists(warps_name) and (not args.with_motion_movies):
        final_warps = ofreg.warps.from_dct_encoded(warps_name)
        fsc = apply_warps_and_crop(fs, final_warps, args.verbose, args.ncpu)
        return fsc, final_warps


    if args.verbose:
        print('Filtering data')

    # Outlier clipping (median-filter variants kept below for reference). TODO: make optional via arguments
    #fsm.frame_filters = [partial(ndimage.median_filter, size=3)]
    #fsm_filtered = fseq.from_array(fsm[:])
    #fsm_filtered = ndi.median_filter(fsm[:], size=(5,3,3)).astype(ucats._dtype_)
    fsm_filtered = ucats.utils.clip_outliers(fsm[:],0.05, 99.95).astype(ucats._dtype_)
    # Removing global trend
    #fsm_filtered = fsm_filtered - fsm_filtered.mean(axis=(1,2))[:,None,None]


    #fsm.frame_filters = []
    #if args.verbose > 1: print('done spatial median filter')

    from imfun.core import fnutils
    if args.motion_with_pca_denoise:
        fsm_filtered, pcf = preprocess_for_registration_1(
            fsm_filtered, with_adaptive_filter=args.motion_with_adaptive_filter)
        pcf = None
        if args.verbose > 1:
            print('done PCA-based denoising')
    else:
        pcf = None

    fsm_filtered = fsm_filtered.astype(ucats._dtype_)


    #fsm_filtered.frame_filters.append(lambda f: l2spline(f,1.5)-l2spline(f,30))
    #fsm_filtered = fseq.from_array(fsm_filtered[:])
    #if args.verbose>1: print('done flattening')
    if args.verbose: print('Done filtering')

    if os.path.exists(warps_name):
        print('Loading pre-calculated movement correction:', warps_name)
        final_warps = ofreg.warps.from_dct_encoded(warps_name)
    else:
        if args.verbose:
            print('No existing movement correction found, calculating...')
        operations = args.stab_model
        warp_history = []
        newframes = fsm_filtered
        for movement_model in operations:
            if not isinstance(movement_model,str):
                if len(movement_model)>1:
                    model, stab_type, model_params = movement_model
                else:
                    model, stab_type, model_params = movement_model[0], 'updated_template', {}
            else:
                model = movement_model
                model_params = {}
                stab_type = 'updated_template'

            if args.verbose > 1:
                print('correcting for {} using {} with params: {}'.format(model, stab_type, model_params))
            template = newframes[:10].mean(0)
            if stab_type == 'template':
                warps = stackreg.to_template(newframes, template, regfn=imgreg_dispatcher_[model],
                                             njobs=args.ncpu, **model_params)
            elif stab_type == 'updated_template':
                warps = stackreg.to_updated_template(newframes, template, njobs=args.ncpu,
                                                     regfn=imgreg_dispatcher_[model], **model_params)
            elif stab_type in ['multi', 'multi-templates', 'pca-templates']:
                templates, affs = fseq.frame_exemplars_pca_som(newframes,npc=len(fsm)//100+5)
                warps = stackreg.to_templates(newframes, templates, affs, regfn=imgreg_dispatcher_[model],
                                              njobs=args.ncpu,
                                              **model_params)
            warp_history.append(warps)
            newframes = ofreg.warps.map_warps(warps, newframes, njobs=args.ncpu).astype(ucats._dtype_)
            mx_warps = ucats.utils.max_shifts(warps, args.verbose)

        final_warps = [reduce(op.add, warpchain) for warpchain in zip(*warp_history)]
        ofreg.warps.to_dct_encoded(warps_name, final_warps)
        del warp_history, final_warps
        final_warps = ofreg.warps.from_dct_encoded(warps_name)
        # end else

    #mx_warps = ucats.max_shifts(final_warps, args.verbose)
    #fsc = ofreg.warps.map_warps(final_warps, fs, njobs=args.ncpu)
    #fsc.meta['file_path']=fs.meta['file_path']
    #fsc.meta['channel'] = fs.meta['channel']+'-sc'

    fsc = apply_warps_and_crop(fs, final_warps, args.verbose, args.ncpu)

    if isinstance(fs, fseq.FStackColl) and len(fs.stacks)>1:
        stacks = [fs.stacks[morphology_channel], fs.stacks[args.ca_channel]]
        stacks_c = [fsc.stacks[morphology_channel], fsc.stacks[args.ca_channel]]

        fs_show = fseq.FStackColl(stacks)
        fsc_show = fseq.FStackColl(stacks_c)

    else:
        fs_show = fs
        fsc_show = fsc


    if args.with_motion_movies:
        p1 = ui.Picker(fs_show)
        p2 = ui.Picker(fsc_show)
        clims = ui.harmonize_clims([p1,p2])
        p1.clims = clims
        p2.clims = clims
        pickers_list = [p1,p2]

        if (isinstance(fs, fseq.FStackColl) and len(fs.stacks) > 1) or (fsm_filtered is not None):
            if fsm_filtered is None:
                p3 = ui.Picker(fs.stacks[morphology_channel])
                newframes  = fsc.stacks[morphology_channel]
            else:
                p3 = ui.Picker(fseq.from_array(fsm_filtered))
                newframes = ofreg.warps.map_warps(final_warps, fsm_filtered)
            #print('------------------------------- New frames:', newframes)
            p4 = ui.Picker(fseq.from_array(newframes))
            #clims = ui.harmonize_clims([p3,p4])
            clims = [np.percentile(p3.frame_coll.stacks[0].data, (5,99.5))]
            p3.clims = clims
            p4.clims = clims
            pickers_list.extend([p3,p4])

        ui.pickers_to_movie(pickers_list, nametag + '-a-stabilization-%s.mp4' % suff,
                            codec=args.codec, writer=args.writer, titles=('raw', 'stabilized'))

    return fsc, final_warps # from stabilize_motion
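A minimal usage sketch (not part of the original script): stabilize_motion reads its options from an argparse-style namespace, so the attributes below mirror the ones accessed in the function body; the 'affine' model name is only a guess at a valid imgreg_dispatcher_ key, and `fs` is assumed to come from a loader such as load_record above.

from types import SimpleNamespace

args = SimpleNamespace(morphology_channel=0, ca_channel=1,
                       stab_model=['affine'],        # hypothetical registration model name
                       with_motion_movies=False,
                       motion_with_pca_denoise=False,
                       motion_with_adaptive_filter=False,
                       verbose=1, ncpu=4,
                       codec=None, writer='ffmpeg')

# `fs` would come from e.g. load_record() above
fs_stab, final_warps = stabilize_motion(fs, args, nametag='demo')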
Example #7
def process_record(fs, fname, series, args):
    #nametag = '-'.join((fname,series,args.suff,'threshold_%1.2f'%args.detection_var_threshold))
    nametag = '-'.join((fname, series, args.suff))
    threshold_tag = '-threshold_%1.2f' % args.detection_var_threshold
    print('nametag is:', nametag)
    print('threshold tag is:', threshold_tag)
    h5f = None
    # II.  Stabilize motion artifacts
    fsc, _ = stabilize_motion(fs, args, nametag)

    if isinstance(fsc, fseq.FStackColl) and len(fsc.stacks) > 1:
        fsc = fsc.stacks[args.ca_channel]

    if args.no_events:
        return

    # I. -- Correcting for gain and offset --
    frames = fsc.data.astype(np.float32)
    gain_est, offset_est = ucats.exponential_family.estimate_gain_and_offset(
        frames, 20, ntries=200, npatches=int(1e5),
        with_plot=True, save_to=nametag + '-gain-offset.png')
    frames_x = (frames - offset_est) / gain_est
    frames_x[frames_x < 0] = 0
    fsc_x = fseq.from_array(frames_x)

    np.save(nametag+'-gain-offset.npy', np.array((gain_est,offset_est)))
    # I.a -- Adaptive median filtering input data --
    # if args.detection_do_adaptive_median_filter:
    #     print("Performing adaptive median filtering of the raw fluorescence signal")
    #     frames = ucats.adaptive_median_filter(fsc.data.astype(float32),
    #                                           th=5, tsmooth=1, ssmooth=5)
    #     frames = frames.astype(float32)
    # else:
    #     frames = ucats.clip_outliers(frames,0.05, 99.95).astype(np.float32)


    # II. Process data
    F0 = None
    detected_name = nametag + threshold_tag +'-detected.h5'
    baseline_name = nametag+'-baseline.pickle'
    if False and (os.path.exists(detected_name)):  # this branch is currently disabled ('False and ...')
        #h5f = h5py.File(detected_name,'r')
        fsx = fseq.from_hdf5(detected_name)
        h5f = fsx.h5file
        print('loading existing results of event detection:', detected_name)
        if os.path.exists(baseline_name):
            print('loading existing fluorescence baseline frames')
            F0 = ucats.baselines.load_baseline_pickle(baseline_name)
        else:
            print('calculating baseline fluorescence')
            #F0 = ucats.calculate_baseline_pca_asym(frames, smooth=300, niter=20, verbose=args.verbose)
            _, F0 = ucats.denoising.block_svd_denoise_and_separate(frames_x, nhood=16, stride=16, min_comps=3,
                                                           baseline_smoothness=args.baseline_smoothness,
                                                           spatial_filter=3,
                                                           correct_spatial_components=False,
                                                           with_clusters=False)
            print('storing baseline fluorescence estimate')
            ucats.baselines.store_baseline_pickle(baseline_name,F0)

        F0 = fseq.from_array(F0)
        F0.meta['channel'] = 'F0'
        frec = F0.data

    else:
        print('Denoising frames and separating background via block-SVD in sqrt-ed data')
        #todo: take parameters from arguments to the script
        #print('Going in ~%d time-slices'% (2*np.ceil(len(frames)/args.detection_temporal_window)-1))
        #if args.detection_do_variance_stabilization:
        #    xt = ucats.Anscombe.transform(frames_x)
        #else:
        #    xt = frames_x
        xt = ucats.exponential_family.Anscombe.transform(frames_x)
        _process_kwargs = dict(
            tsvd_kw=dict(ssmooth=3,
                         tsmooth=5,
                         sstride=5,
                         do_pruning=True),
            second_stage_kw=dict(Nhood=100, clustering_algorithm="MiniBatchKMeans"),
            inverse_kw=dict(with_f0=True),
            do_second_stage=True)

        # args.detection_loc_nhood,
        #                        stride=args.detection_loc_stride,
        #                        spatial_filter=args.detection_spatial_filter,
        #                        min_comps=args.detection_min_components,
        #                        with_clusters = args.detection_use_clusters,
        #                        svd_detection_plow = args.detection_low_percentile*2,
        #                        baseline_smoothness=args.baseline_smoothness,
        #                        spatial_min_cluster_size=5)
        #fdelta, fb = multiscale_process_frames(xt, twindow=args.detection_temporal_window, **_process_kwargs )
        #fdelta, fb = ucats.block_svd_separate_tslices(xt,twindow=args.detection_temporal_window, **_process_kwargs)
        #_process_kwargs = dict(sstride=5, ssmooth=3, tsmooth=3, do_pruning=0,)
        D_t,F0_t = ucats.denoising.patch_svd_denoise_frames(xt,
                                                  save_coll=nametag+'-coll2.pz',
                                                  **_process_kwargs)
        D_t = np.clip(D_t, 2*np.sqrt(3/8), np.max(D_t))
        F0_t = np.clip(F0_t, 2*np.sqrt(3/8), np.max(F0_t))
        F0, frec = (ucats.exponential_family.Anscombe.inverse_transform(ucats.utils.adaptive_median_filter(x)) for x in (F0_t, D_t))
        frec[frec<0] = 0
        #dfosd = ucats.to_zscore_frames(D_t-F0_t)
        dfof = (frec/F0 - 1)*(F0 > 0.05)
        #dfosd = np.clip(dfosd, *np.percentile(dfosd, (0.5,99.5)))
        df_signals = dfof.reshape(len(F0),-1).T
        labels = np.array([ucats.detection1d.simple_label(v,tau=0.5,threshold=args.detection_var_threshold/100)
                           for v in tqdm(df_signals)]).T.reshape(F0_t.shape)
        #labels2 = ucats.activity_mask_median_filtering(D_t/F0_t-1, nw=5)
        #labels = ucats.refine_mask_by_percentile_filter(labels | labels2, niter=10,with_cleanup=True,min_obj_size=10)
        labels = ucats.utils.refine_mask_by_percentile_filter(labels, niter=5,with_cleanup=True,min_obj_size=10)


        # III. Calculate ΔF/F
        #print('Calculating relative fluorescence changes')
        #th1 = ucats.percentile_th_frames(fdelta,2.0)
        #print('Fdelta dynamic range:', fdelta.min(), fdelta.max())
        #print('Fbase  dynamic range:', fb.min(), fb.max())
        #print('mad std dynamic range:', ucats.mad_std(xt-fdelta-fb, axis=0).min(), ucats.mad_std(xt-fb-fdelta, axis=0).max())
        #correction_bias = ucats.find_bias_frames(xt-fdelta-fb,3,ucats.mad_std(xt-fdelta-fb,axis=0))
        #print('Correction bias dynamic range:', np.min(correction_bias), np.max(correction_bias))
        #if any(np.isnan(correction_bias)):
        #    correction_bias = np.zeros(correction_bias.shape)
        #correction_bias[np.isnan(correction_bias)] = 0 ## can't find out so far why there are nans in some cases there. there shouldn't be
        #fdelta = ucats.adaptive_median_filter(fdelta,ssmooth=3,keep_clusters=True) # todo: make this optional

        #frec = ucats.adaptive_median_filter(frec)
        dFoF = ucats.utils.adaptive_median_filter(frec/F0-1)
        dFoFx = dFoF*labels
        #del labels, labels2, D_t, F0_t

        #if args.detection_do_variance_stabilization:
        #    frames_dn,F0 = ucats.convert_from_varstab(fdelta, fb + 0*correction_bias)
        #else:
        #    frames_dn, F0 = fdelta, fb
        print('storing baseline fluorescence estimate')
        ucats.baselines.store_baseline_pickle(baseline_name,F0)

        #nsdt = ucats.std_median(fdelta,axis=0)
        #mask_pipeline = lambda m: ucats.threshold_object_size(ucats.expand_mask_by_median_filter(m,niter=3),9)
        #mask1 = fdelta >= th1
        #mask2 = frames_dn/F0 >= 0.01 # 1% change from baseline
        #mask_final = mask_pipeline(ucats.select_overlapping(mask1,mask2))
        #mask = ucats.opening_of_closing((fdelta > th)*(fdelta>nsdt)*(fdelta/fb > 0.025))
        #mask = ucats.opening_of_closing((fdelta > th)*(fdelta/fb > 0.025))
        #frames_dn *= mask_final
        #dfofx = frames_dn/F0
        dFoF[np.abs(F0)<1e-5] = 0
        print('Baseline dynamic range:', np.min(F0), np.max(F0))
        #F0 =  0.25*fb**2
        #frames_dn = 0.25*fdelta**2 + 0.5*fdelta*fb
        #del fb, fdelta,mask_final

        coll_ = ucats.events.EventCollection(dFoFx,
                                             threshold=args.event_segmentation_threshold,
                                             min_area=args.event_min_area,
                                             min_duration=args.event_min_duration,
                                             peak_threshold=args.event_peak_threshold)
        dFoFx = dFoFx * (coll_.to_filtered_array() > 0)
        channel_name = 'newrec9'
        # if args.detection_do_second_pass:
        #     print('Doing second pass...')
        #     dfof = (frames/F0).astype(float32)  - 1
        #     diff = dfof - dfofx
        #     mindiff = min(0, np.min(diff))
        #
        #     if args.detection_do_variance_stabilization:
        #         diff = 2*np.sqrt(diff - mindiff)
        #
        #     fcorr, b2 = ucats.block_svd_denoise_and_separate(diff, stride=2,nhood=5,baseline_smoothness=150)
        #
        #     if args.detection_do_variance_stabilization:
        #         offset = 2*(-mindiff)**0.5 if mindiff < 0 else 0
        #         fcorr,b2 = ucats.convert_from_varstab(fcorr, b2 + offset)
        #
        #     dfofx2 = dfofx + fcorr
        #
        #     coll_ = ucats.EventCollection(dfofx2,
        #                                   threshold=args.event_segmentation_threshold,
        #                                   min_area=args.event_min_area,
        #                                   min_duration=args.event_min_duration,
        #                                   peak_threshold=args.event_peak_threshold)
        #     dfofx2 = dfofx2*(             coll_.to_filtered_array()>0)
        #
        #     mx = ucats.select_overlapping(dfofx2>=0.025,dfofx[:]>=0.01)
        #     dfofx2[~mx] = 0
        #     channel_name = 'newrec8a'
        # else:
        #     dfofx2 = dfofx
        #     channel_name = 'newrec8'

        F0 = fseq.from_array(F0)
        F0.meta['channel'] = 'F0'

        #if args.no_skip_dark_areas:
        #    print('calculating well-stained and poorly-stained areas')
        #    colored_mask = dark_area_mask(F0.data.mean(0))
        #else:
        #    print('no color mask asked for')
        #    colored_mask = np.ones(F0[0].shape, np.bool)

        #f,ax = plt.subplots(1,1,figsize=(8,8));
        #ax.imshow(F0.data.mean(0),cmap='gray')
        #ax.imshow(ui.plots.mask4overlay2(colored_mask,alpha=0.5))
        #plt.tight_layout()
        #f.savefig(nametag+'-colored_mask.png')
        #plt.close(f)
        fsx = fseq.from_array(dFoFx)
        fsx.meta['channel'] = channel_name

        #meta = fsx.meta
        #fsx = fseq.from_array(fsx.data*(coll_.to_filtered_array()>0))
        #fsx.meta = meta
        #   del coll_
        print('--->Done')
        if args.do_save_enh:
            fsx.to_hdf5(detected_name, compress_level=3)


    # VI.  Make movies
    if args.verbose: print('Making movies of detected activity')
    #fsout = fseq.FStackColl([fsc,  fsx])
    #frames_out = F0.data*(asarray(fsx.data,float32)+1)
    #frames_out = F0.data
    frames_out = frec
    #frames_out = F0.data*(dfof_cleaned + 1)
    fsout = fseq.FStackColl([fseq.from_array(frames_out), fsx])
    p = ui.Picker(fsout)
    p.start()
    p0 = ui.Picker(fseq.FStackColl([fsc_x]))
    p0._ccmap = dict(b=None, i=None, r=None, g=0)
    bgclim = np.percentile(frames_out, (1, 99))
    bgclim[1] *= 1.25
    p0.clims[0] = bgclim
    p.clims[0] = bgclim
    #p.clims[1] = (0.025,0.25)
    mip = np.max(fsx.data, 0)
    p.clims[1] = (args.event_segmentation_threshold, np.mean(mip[mip > 0]))
    print('Testing clims: ', p.clims[1])
    p._ccmap = dict(b=None, i=None, r=1, g=0)
    #ui.pickers_to_movie([p],name+'-detected.mp4',writer='ffmpeg')
    ui.pickers_to_movie([p0, p], nametag + threshold_tag + '-b-detected.mp4',
                        titles=('raw', 'processed'),
                        fps=args.fps,
                        codec=args.codec,
                        bitrate=args.bitrate,
                        writer=args.writer)

    print('segmenting and animating events')
    events = ucats.events.EventCollection(np.asarray(fsx.data, dtype=np.float32),
                                          dfof_frames=fsc_x.data / F0.data - 1)
    if len(events.filtered_coll):
        events.to_csv(nametag + threshold_tag + '-events.csv')
    #animate_events(fsc.data, events,name+'-events-new4.mp4')
    animate_events(frames_out, events, args, nametag + threshold_tag + '-c-events.mp4')
    print('All done')
    if h5f:
        h5f.close()
    return # from process_record()
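A minimal usage sketch (not part of the original script): the attribute names below are taken from the accesses in this and the previous example, while all values (including the 'affine' model name) are purely illustrative; `fs` is assumed to come from a loader such as load_record above.

from types import SimpleNamespace

args = SimpleNamespace(
    suff='demo', verbose=1, ncpu=4,
    morphology_channel=0, ca_channel=1,
    stab_model=['affine'],                      # hypothetical registration model name
    with_motion_movies=False,
    motion_with_pca_denoise=False,
    motion_with_adaptive_filter=False,
    no_events=False,
    detection_var_threshold=5.0,
    baseline_smoothness=300,
    event_segmentation_threshold=0.05,
    event_min_area=9,
    event_min_duration=3,
    event_peak_threshold=0.1,
    do_save_enh=True,
    fps=25, codec=None, bitrate=-1, writer='ffmpeg')

# `fs` as returned by a loader; fname/series are used only to build output file names
process_record(fs, 'experiment1', 'Series001', args)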