offset_mov = -np.min(m_orig[:100])
m_orig.resize(1, 1, downsample_ratio).play(
    gain=10, offset=offset_mov, fr=30, magnification=1)

#%% start a cluster for parallel processing
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)

#%% MOTION CORRECTION
# first we create a motion correction object with the parameters specified
min_mov = cm.load(fname[0], subindices=range(200)).min()
# this will be subtracted from the movie to make it non-negative
mc = MotionCorrect(fname[0], min_mov, dview=dview, max_shifts=max_shifts,
                   niter_rig=niter_rig, splits_rig=splits_rig,
                   strides=strides, overlaps=overlaps, splits_els=splits_els,
                   upsample_factor_grid=upsample_factor_grid,
                   max_deviation_rigid=max_deviation_rigid,
                   shifts_opencv=True, nonneg_movie=True)
# note that the file is not loaded in memory

#%% Run rigid motion correction using NoRMCorre
mc.motion_correct_rigid(save_movie=True)

#%%
m_els = cm.load(mc.fname_tot_rig)
bord_px_els = np.max(np.ceil(np.abs(mc.shifts_rig))).astype(int)
# maximum shift to be used for trimming against NaNs

#%% compare with original movie
cm.concatenate([m_orig.resize(1, 1, downsample_ratio) + offset_mov,
                m_els.resize(1, 1, downsample_ratio)],
               axis=2).play(fr=60, gain=1, magnification=1, offset=0)  # press q to exit
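#%% (optional) inspect the estimated rigid shifts
# A minimal sketch, not part of the original script: plotting the per-frame
# rigid shifts is a quick sanity check that the correction converged and that
# bord_px_els is a sensible trimming margin. Assumes np and pl (pylab) are
# imported as in the other scripts in this file.
shifts = np.array(mc.shifts_rig)            # shape (n_frames, 2): x and y shifts
pl.figure()
pl.plot(shifts[:, 0], label='x shift (pixels)')
pl.plot(shifts[:, 1], label='y shift (pixels)')
pl.axhline(bord_px_els, linestyle='--', color='k', label='trim margin')
pl.xlabel('frame')
pl.ylabel('shift (pixels)')
pl.legend()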
# %% INITIALIZING
t1 = time.time()  # timer to see how changes to the functions affect run time on the same computer
# movie must be mostly positive for this to work
min_mov = cm.load(fname[0], subindices=range(400)).min()
mc_list = []
new_templ = None
for each_file in fname:
    # TODO: document how the MotionCorrect class works
    mc = MotionCorrect(each_file, min_mov, dview=dview, max_shifts=max_shifts,
                       niter_rig=niter_rig, splits_rig=splits_rig,
                       num_splits_to_process_rig=num_splits_to_process_rig,
                       strides=strides, overlaps=overlaps, splits_els=splits_els,
                       num_splits_to_process_els=num_splits_to_process_els,
                       upsample_factor_grid=upsample_factor_grid,
                       max_deviation_rigid=max_deviation_rigid,
                       shifts_opencv=True, nonneg_movie=True)
    mc.motion_correct_rigid(save_movie=True, template=new_templ)
    new_templ = mc.total_template_rig
    m_rig = cm.load(mc.fname_tot_rig)
    bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(int)
    pl.imshow(new_templ, cmap='gray')
    pl.pause(.1)
    mc_list.append(mc)
# this part is kept because it helps the user understand what is needed
# TODO: explain why the result differs from the notebooks; show screenshots 2, 3
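# %% (optional) inspect all corrected movies together
# A minimal sketch, not in the original script: load every rigid-corrected
# file accumulated in mc_list and play them side by side, reusing cm.load and
# cm.concatenate as elsewhere in this file. Assumes the files have matching
# dimensions and that downsample_ratio is defined as in the demo above.
m_rigs = [cm.load(mc_.fname_tot_rig) for mc_ in mc_list]
cm.concatenate([m_.resize(1, 1, downsample_ratio) for m_ in m_rigs],
               axis=2).play(fr=30, gain=2, magnification=1)  # press q to exit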
# TODO: document
# timer to see how changes to the functions affect run time on the same computer
min_mov = cm.load(fname[0], subindices=range(400)).min()
mc_list = []
new_templ = np.load('projections/median_projection.npy')
# new_templ = cm.load('projections/median_projection.tif')
for each_file in fname:
    # TODO: document how the MotionCorrect class works
    print(each_file)
    mc = MotionCorrect(each_file, min_mov, dview=dview, max_shifts=max_shifts,
                       niter_rig=niter_rig, splits_rig=splits_rig,
                       num_splits_to_process_rig=num_splits_to_process_rig,
                       strides=strides, overlaps=overlaps, splits_els=splits_els,
                       num_splits_to_process_els=num_splits_to_process_els,
                       upsample_factor_grid=upsample_factor_grid,
                       max_deviation_rigid=max_deviation_rigid,
                       shifts_opencv=True, nonneg_movie=True)
    mc.motion_correct_rigid(save_movie=True, template=new_templ)
    new_templ = mc.total_template_rig
    m_rig = cm.load(mc.fname_tot_rig)
    bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(int)
    pl.imshow(new_templ, cmap='gray')
    pl.pause(.1)
    mc_list.append(mc)
# this part is kept because it helps the user understand what is needed
# TODO: explain why the result differs from the notebooks
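# (optional) check how far each file's template drifted from the seed template
# A minimal sketch, not in the original script: correlate the final rigid
# template of every file with the median-projection seed loaded above,
# assuming the seed and the templates have the same shape.
seed_templ = np.load('projections/median_projection.npy')
for i, mc_ in enumerate(mc_list):
    t = mc_.total_template_rig
    corr = np.corrcoef(seed_templ.ravel(), t.ravel())[0, 1]
    print(f'file {i}: correlation with seed template = {corr:.3f}')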
def test_general():
    """General test of the pipeline with comparison against ground truth.

    A shorter version of the demo pipeline that calls comparison for the real
    test work. Compares: params_movie, params_cnmf, rigid correction, CNMF on
    patches, and CNMF on the full frame.

    Raises:
        Exception if the file cannot be read or no ground truth is available.
    """
    # \bug
    # \warning
    global params_movie
    global params_display
    fname = params_movie['fname']
    niter_rig = params_movie['niter_rig']
    max_shifts = params_movie['max_shifts']
    splits_rig = params_movie['splits_rig']
    num_splits_to_process_rig = params_movie['num_splits_to_process_rig']
    cwd = os.getcwd()
    fname = download_demo(fname[0])
    m_orig = cm.load(fname)
    min_mov = m_orig[:400].min()
    comp = comparison.Comparison()
    comp.dims = np.shape(m_orig)[1:]

    ################ RIGID CORRECTION #################
    t1 = time.time()
    mc = MotionCorrect(fname, min_mov, max_shifts=max_shifts, niter_rig=niter_rig,
                       splits_rig=splits_rig,
                       num_splits_to_process_rig=num_splits_to_process_rig,
                       shifts_opencv=True, nonneg_movie=True)
    mc.motion_correct_rigid(save_movie=True)
    m_rig = cm.load(mc.fname_tot_rig)
    bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(int)
    comp.comparison['rig_shifts']['timer'] = time.time() - t1
    comp.comparison['rig_shifts']['ourdata'] = mc.shifts_rig
    ###########################################

    if 'max_shifts' not in params_movie:
        fnames = params_movie['fname']
        border_to_0 = 0
    else:   # elif not params_movie.has_key('overlaps'):
        fnames = mc.fname_tot_rig
        border_to_0 = bord_px_rig
        m_els = m_rig
    idx_xy = None
    add_to_movie = -np.nanmin(m_els) + 1  # movie must be positive
    remove_init = 0
    downsample_factor = 1
    base_name = fname.split('/')[-1][:-4]
    name_new = cm.save_memmap_each(fnames, base_name=base_name,
                                   resize_fact=(1, 1, downsample_factor),
                                   remove_init=remove_init, idx_xy=idx_xy,
                                   add_to_movie=add_to_movie, border_to_0=border_to_0)
    name_new.sort()
    if len(name_new) > 1:
        fname_new = cm.save_memmap_join(
            name_new, base_name='Yr', n_chunks=params_movie['n_chunks'], dview=None)
    else:
        print('One file only, not saving!')
        fname_new = name_new[0]
    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    Y = np.reshape(Yr, dims + (T,), order='F')
    if np.min(images) < 0:
        # TODO: should do this in an automatic fashion, e.g. with a while loop that increases add_to_movie
        raise Exception('Movie too negative, add_to_movie should be larger')
    if np.sum(np.isnan(images)) > 0:
        # TODO: same here
        raise Exception('Movie contains NaN! You did not remove enough borders')
    Cn = cm.local_correlations(Y)
    Cn[np.isnan(Cn)] = 0
    p = params_movie['p']
    merge_thresh = params_movie['merge_thresh']
    rf = params_movie['rf']
    stride_cnmf = params_movie['stride_cnmf']
    K = params_movie['K']
    init_method = params_movie['init_method']
    gSig = params_movie['gSig']
    alpha_snmf = params_movie['alpha_snmf']
    if params_movie['is_dendrites'] == True:
        if params_movie['init_method'] != 'sparse_nmf':
            raise Exception('dendritic requires sparse_nmf')
        if params_movie['alpha_snmf'] is None:
            raise Exception('need to set a value for alpha_snmf')

    ################ CNMF PART PATCH #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1, k=K, gSig=gSig,
                    merge_thresh=params_movie['merge_thresh'], p=params_movie['p'],
                    dview=None, rf=rf, stride=stride_cnmf,
                    memory_fact=params_movie['memory_fact'],
                    method_init=init_method, alpha_snmf=alpha_snmf,
                    only_init_patch=params_movie['only_init_patch'],
                    gnb=params_movie['gnb'], method_deconvolution='oasis')
    comp.cnmpatch = copy.copy(cnm)
    cnm = cnm.fit(images)
    A_tot = cnm.A
    C_tot = cnm.C
    YrA_tot = cnm.YrA
    b_tot = cnm.b
    f_tot = cnm.f
    # DISCARDING
    print(('Number of components:' + str(A_tot.shape[-1])))
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_patch']
    # threshold on time variability
    fitness_min = params_movie['fitness_delta_min_patch']
    fitness_delta_min = params_movie['fitness_delta_min_patch']
    Npeaks = params_movie['Npeaks']
    traces = C_tot + YrA_tot
    idx_components, idx_components_bad = estimate_components_quality(
        traces, Y, A_tot, C_tot, b_tot, f_tot, final_frate=final_frate,
        Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min)
    #######
    A_tot = A_tot.tocsc()[:, idx_components]
    C_tot = C_tot[idx_components]
    comp.comparison['cnmf_on_patch']['timer'] = time.time() - t1
    comp.comparison['cnmf_on_patch']['ourdata'] = [A_tot.copy(), C_tot.copy()]
    ###########################################

    ################ CNMF PART FULL #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1, k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh,
                    p=p, Ain=A_tot, Cin=C_tot, f_in=f_tot, rf=None, stride=None,
                    method_deconvolution='oasis')
    cnm = cnm.fit(images)
    # DISCARDING
    A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_full']
    # threshold on time variability
    fitness_min = params_movie['fitness_delta_min_full']
    fitness_delta_min = params_movie['fitness_delta_min_full']
    Npeaks = params_movie['Npeaks']
    traces = C + YrA
    idx_components, idx_components_bad, fitness_raw, fitness_delta, r_values = estimate_components_quality(
        traces, Y, A, C, b, f, final_frate=final_frate, Npeaks=Npeaks,
        r_values_min=r_values_min, fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min, return_all=True)
    ##########
    A_tot_full = A_tot.tocsc()[:, idx_components]
    C_tot_full = C_tot[idx_components]
    comp.comparison['cnmf_full_frame']['timer'] = time.time() - t1
    comp.comparison['cnmf_full_frame']['ourdata'] = [A_tot_full.copy(), C_tot_full.copy()]
    ###########################################

    comp.save_with_compare(istruth=False, params=params_movie, Cn=Cn)
    log_files = glob.glob('*_LOG_*')
    try:
        for log_file in log_files:
            os.remove(log_file)
    except:
        print('Cannot remove log files')

    ############ assertions ##################
    pb = False
    if (comp.information['differences']['params_movie']):
        print("you need to set the same movie parameters as the ground truth to have a real "
              "comparison (use the comp.see() function to explore it)")
        pb = True
    if (comp.information['differences']['params_cnm']):
        print("you need to set the same CNMF parameters as the ground truth to have a real "
              "comparison (use the comp.see() function to explore it)")
        pb = True
    if (comp.information['diff']['rig']['isdifferent']):
        print("the rigid shifts are different from the ground truth")
        pb = True
    if (comp.information['diff']['cnmpatch']['isdifferent']):
        print("the CNMF on patches produces different results than the ground truth")
        pb = True
    if (comp.information['diff']['cnmfull']['isdifferent']):
        print("the CNMF full frame produces different results than the ground truth")
        pb = True

    assert (not pb)
def create():
    """Create the ground truth used by the comparison tests.

    A shorter version of the demo pipeline that calls comparison for the real
    test work.

    Prints:
        'we now have ground truth' on success, or
        'we were not able to read the file to compare it' on failure.
    """
    # \bug
    # \warning
    global params_movie
    global params_display
    fname = params_movie['fname']
    niter_rig = params_movie['niter_rig']
    max_shifts = params_movie['max_shifts']
    splits_rig = params_movie['splits_rig']
    num_splits_to_process_rig = params_movie['num_splits_to_process_rig']
    download_demo(fname[0])
    fname = os.path.join(caiman_datadir(), 'example_movies', fname[0])
    m_orig = cm.load(fname)
    min_mov = m_orig[:400].min()
    comp = comparison.Comparison()
    comp.dims = np.shape(m_orig)[1:]

    ################ RIGID CORRECTION #################
    t1 = time.time()
    mc = MotionCorrect(fname, min_mov, max_shifts=max_shifts, niter_rig=niter_rig,
                       splits_rig=splits_rig,
                       num_splits_to_process_rig=num_splits_to_process_rig,
                       shifts_opencv=True, nonneg_movie=True)
    mc.motion_correct_rigid(save_movie=True)
    m_rig = cm.load(mc.fname_tot_rig)
    bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(int)
    comp.comparison['rig_shifts']['timer'] = time.time() - t1
    comp.comparison['rig_shifts']['ourdata'] = mc.shifts_rig
    ###########################################

    if 'max_shifts' not in params_movie:
        fnames = params_movie['fname']
        border_to_0 = 0
    else:   # elif not params_movie.has_key('overlaps'):
        fnames = [mc.fname_tot_rig]
        border_to_0 = bord_px_rig
        m_els = m_rig
    idx_xy = None
    add_to_movie = -np.nanmin(m_els) + 1  # movie must be positive
    remove_init = 0
    downsample_factor = 1
    base_name = fname.split('/')[-1][:-4]
    name_new = cm.save_memmap_each(fnames, base_name=base_name,
                                   resize_fact=(1, 1, downsample_factor),
                                   remove_init=remove_init, idx_xy=idx_xy,
                                   add_to_movie=add_to_movie, border_to_0=border_to_0)
    name_new.sort()
    if len(name_new) > 1:
        fname_new = cm.save_memmap_join(
            name_new, base_name='Yr', n_chunks=params_movie['n_chunks'], dview=None)
    else:
        print('One file only, not saving!')
        fname_new = name_new[0]
    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    Y = np.reshape(Yr, dims + (T,), order='F')
    if np.min(images) < 0:
        # TODO: should do this in an automatic fashion with a while loop
        raise Exception('Movie too negative, add_to_movie should be larger')
    if np.sum(np.isnan(images)) > 0:
        # TODO: same here
        raise Exception('Movie contains NaN! You did not remove enough borders')
    Cn = cm.local_correlations(Y)
    Cn[np.isnan(Cn)] = 0
    p = params_movie['p']
    merge_thresh = params_movie['merge_thresh']
    rf = params_movie['rf']
    stride_cnmf = params_movie['stride_cnmf']
    K = params_movie['K']
    init_method = params_movie['init_method']
    gSig = params_movie['gSig']
    alpha_snmf = params_movie['alpha_snmf']
    if params_movie['is_dendrites'] == True:
        if params_movie['init_method'] != 'sparse_nmf':
            raise Exception('dendritic requires sparse_nmf')
        if params_movie['alpha_snmf'] is None:
            raise Exception('need to set a value for alpha_snmf')

    ################ CNMF PART PATCH #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1, k=K, gSig=gSig,
                    merge_thresh=params_movie['merge_thresh'], p=params_movie['p'],
                    dview=None, rf=rf, stride=stride_cnmf,
                    memory_fact=params_movie['memory_fact'],
                    method_init=init_method, alpha_snmf=alpha_snmf,
                    only_init_patch=params_movie['only_init_patch'],
                    gnb=params_movie['gnb'], method_deconvolution='oasis')
    comp.cnmpatch = copy.copy(cnm)
    cnm = cnm.fit(images)
    A_tot = cnm.A
    C_tot = cnm.C
    YrA_tot = cnm.YrA
    b_tot = cnm.b
    f_tot = cnm.f
    # DISCARDING
    print(('Number of components:' + str(A_tot.shape[-1])))
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_patch']
    # threshold on time variability
    fitness_min = params_movie['fitness_delta_min_patch']
    fitness_delta_min = params_movie['fitness_delta_min_patch']
    Npeaks = params_movie['Npeaks']
    traces = C_tot + YrA_tot
    idx_components, idx_components_bad = estimate_components_quality(
        traces, Y, A_tot, C_tot, b_tot, f_tot, final_frate=final_frate,
        Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min)
    #######
    A_tot = A_tot.tocsc()[:, idx_components]
    C_tot = C_tot[idx_components]
    comp.comparison['cnmf_on_patch']['timer'] = time.time() - t1
    comp.comparison['cnmf_on_patch']['ourdata'] = [A_tot.copy(), C_tot.copy()]
    ###########################################

    ################ CNMF PART FULL #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1, k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh,
                    p=p, Ain=A_tot, Cin=C_tot, f_in=f_tot, rf=None, stride=None,
                    method_deconvolution='oasis')
    cnm = cnm.fit(images)
    # DISCARDING
    A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_full']
    # threshold on time variability
    fitness_min = params_movie['fitness_delta_min_full']
    fitness_delta_min = params_movie['fitness_delta_min_full']
    Npeaks = params_movie['Npeaks']
    traces = C + YrA
    idx_components, idx_components_bad, fitness_raw, fitness_delta, r_values = estimate_components_quality(
        traces, Y, A, C, b, f, final_frate=final_frate, Npeaks=Npeaks,
        r_values_min=r_values_min, fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min, return_all=True)
    ###########
    A_tot_full = A_tot.tocsc()[:, idx_components]
    C_tot_full = C_tot[idx_components]
    comp.comparison['cnmf_full_frame']['timer'] = time.time() - t1
    comp.comparison['cnmf_full_frame']['ourdata'] = [A_tot_full.copy(), C_tot_full.copy()]
    ###########################################

    print(comp.dims)
    comp.save_with_compare(istruth=True, params=params_movie, Cn=Cn)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
mov_tmp = cm.load(nms[0], subindices=range(400))
if mov_tmp.shape[1:] != templ.shape:
    diffx, diffy = np.subtract(mov_tmp.shape[1:], templ.shape) // 2 + 1

vmin, vmax = np.percentile(templ, 5), np.percentile(templ, 95)
pl.imshow(templ, vmin=vmin, vmax=vmax)

min_mov = np.nanmin(mov_tmp)
mc_list = []
mc_templs_part = []
mc_templs = []
mc_fnames = []
for each_file in nms:
    # TODO: document how the MotionCorrect class works
    mc = MotionCorrect(each_file, min_mov, dview=dview, max_shifts=max_shifts,
                       niter_rig=niter_rig, splits_rig=splits_rig,
                       num_splits_to_process_rig=num_splits_to_process_rig,
                       shifts_opencv=True, nonneg_movie=True)
    mc.motion_correct_rigid(template=templ, save_movie=True)
    new_templ = mc.total_template_rig
    pl.imshow(new_templ, cmap='gray')
    pl.pause(.1)
    mc_list += mc.shifts_rig
    mc_templs_part += mc.templates_rig
    mc_templs += [mc.total_template_rig]
    mc_fnames += mc.fname_tot_rig

np.savez(os.path.join(fl, 'images/mot_corr_res.npz'), mc_list=mc_list,
         mc_templs_part=mc_templs_part, mc_fnames=mc_fnames, mc_templs=mc_templs)
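# A minimal sketch, not in the original script: the saved .npz can be read
# back later to recover the shifts and templates without rerunning the
# correction. allow_pickle=True is used in case the arrays were stored as
# object arrays.
res = np.load(os.path.join(fl, 'images/mot_corr_res.npz'), allow_pickle=True)
shifts_all = np.array(res['mc_list'])   # concatenated rigid shifts of all files
templates = res['mc_templs']            # final template of each file
print(list(res.files), shifts_all.shape)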
def main():
    pass  # For compatibility between running under Spyder and the CLI

    # %% Load demo movie and ROIs
    fnames = download_demo('demo_voltage_imaging.hdf5', 'volpy')            # file path to movie file (will download if not present)
    path_ROIs = download_demo('demo_voltage_imaging_ROIs.hdf5', 'volpy')    # file path to ROIs file (will download if not present)
    file_dir = os.path.split(fnames)[0]

    # %% dataset dependent parameters
    fr = 400                     # sample rate of the movie

    # motion correction parameters
    pw_rigid = False             # flag for pw-rigid motion correction
    gSig_filt = (3, 3)           # size of the high-pass spatial filter, in general gSig (see below);
                                 # change this one if the algorithm does not work
    max_shifts = (5, 5)          # maximum allowed rigid shift
    strides = (48, 48)           # start a new patch for pw-rigid motion correction every x pixels
    overlaps = (24, 24)          # overlap between patches (size of patch = strides + overlaps)
    max_deviation_rigid = 3      # maximum deviation allowed for a patch with respect to rigid shifts
    border_nan = 'copy'

    opts_dict = {
        'fnames': fnames,
        'fr': fr,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'gSig_filt': gSig_filt,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': border_nan
    }
    opts = volparams(params_dict=opts_dict)

    # %% play the movie (optional)
    # playing the movie using opencv. It requires loading the movie in memory.
    # To close the movie press q
    display_images = False
    if display_images:
        m_orig = cm.load(fnames)
        ds_ratio = 0.2
        moviehandle = m_orig.resize(1, 1, ds_ratio)
        moviehandle.play(q_max=99.5, fr=40, magnification=4)

    # %% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)

    # %% MOTION CORRECTION
    # first we create a motion correction object with the specified parameters
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    # Run correction
    do_motion_correction = True
    if do_motion_correction:
        mc.motion_correct(save_movie=True)
    else:
        mc_list = [file for file in os.listdir(file_dir) if
                   (os.path.splitext(os.path.split(fnames)[-1])[0] in file and '.mmap' in file)]
        mc.mmap_file = [os.path.join(file_dir, mc_list[0])]
        print(f'reuse previously saved motion corrected file: {mc.mmap_file}')

    # %% compare with original movie
    if display_images:
        m_orig = cm.load(fnames)
        m_rig = cm.load(mc.mmap_file)
        ds_ratio = 0.2
        moviehandle = cm.concatenate([m_orig.resize(1, 1, ds_ratio),
                                      m_rig.resize(1, 1, ds_ratio)], axis=2)
        moviehandle.play(fr=40, q_max=99.5, magnification=4)  # press q to exit

    # %% MEMORY MAPPING
    do_memory_mapping = True
    if do_memory_mapping:
        border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
        # you can include the boundaries of the FOV if you used the 'copy' option
        # during motion correction, although be careful about components near
        # the boundaries

        # memory map the file in order 'C'
        fname_new = cm.save_memmap_join(
            mc.mmap_file,
            base_name='memmap_' + os.path.splitext(os.path.split(fnames)[-1])[0],
            add_to_mov=border_to_0, dview=dview)  # exclude borders
    else:
        mmap_list = [file for file in os.listdir(file_dir) if
                     ('memmap_' + os.path.splitext(os.path.split(fnames)[-1])[0]) in file]
        fname_new = os.path.join(file_dir, mmap_list[0])
        print(f'reuse previously saved memory mapping file: {fname_new}')

    # %% SEGMENTATION
    # create summary images
    img = mean_image(mc.mmap_file[0], window=1000, dview=dview)
    img = (img - np.mean(img)) / np.std(img)

    gaussian_blur = False    # use gaussian blur when there is too much noise in the video
    Cn = local_correlations_movie_offline(mc.mmap_file[0], fr=fr, window=fr * 4,
                                          stride=fr * 4, winSize_baseline=fr,
                                          remove_baseline=True,
                                          gaussian_blur=gaussian_blur,
                                          dview=dview).max(axis=0)
    img_corr = (Cn - np.mean(Cn)) / np.std(Cn)
    summary_images = np.stack([img, img, img_corr], axis=0).astype(np.float32)
    # save the summary images; they are used in the GUI
    cm.movie(summary_images).save(fnames[:-5] + '_summary_images.tif')

    # %% three methods for segmentation
    methods_list = ['manual_annotation',  # manual annotation requires user-prepared annotated datasets in the same format as the demo ROIs
                    'gui_annotation',     # use the GUI to manually annotate neurons (still under development)
                    'maskrcnn']           # Mask R-CNN is a convolutional network trained to find neurons in summary images
    method = methods_list[1]
    if method == 'manual_annotation':
        with h5py.File(path_ROIs, 'r') as fl:
            ROIs = fl['mov'][()]

    elif method == 'gui_annotation':
        # run the volpy_gui file in the caiman/source_extraction/volpy folder,
        # load the summary images you have just saved, and save the ROIs to the video folder
        path_ROIs = caiman_datadir() + '/example_movies/volpy/gui_roi.hdf5'
        with h5py.File(path_ROIs, 'r') as fl:
            ROIs = fl['mov'][()]

    elif method == 'maskrcnn':
        # Important!! make sure keras is installed before using Mask R-CNN
        weights_path = download_model('mask_rcnn')
        ROIs = utils.mrcnn_inference(img=summary_images.transpose([1, 2, 0]),
                                     size_range=[5, 22],
                                     weights_path=weights_path,
                                     display_result=True)  # size_range selects the size range of masks to keep

    # %% restart cluster to clean up memory
    cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False, maxtasksperchild=1)

    # %% parameters for trace denoising and spike extraction
    ROIs = ROIs                          # regions of interest
    index = list(range(len(ROIs)))       # index of neurons
    weights = None                       # reuse spatial weights
    context_size = 35                    # number of pixels surrounding the ROI to censor from the background PCA
    flip_signal = True                   # Important!! flip the signal or not: True for the Voltron indicator, False for others
    hp_freq_pb = 1 / 3                   # parameter for the high-pass filter that removes photobleaching
    threshold_method = 'simple'          # 'simple' or 'adaptive_threshold'
    min_spikes = 10                      # minimal number of spikes to be found
    threshold = 3.5                      # threshold for finding spikes; increase it to find fewer spikes
    do_plot = False                      # plot spike detection details and the template for the last iteration
    ridge_bg = 0.5                       # ridge regression regularizer strength for background removal; a larger value means stronger regularization
    sub_freq = 20                        # frequency for subthreshold extraction
    weight_update = 'ridge'              # 'ridge' or 'NMF' for weight update
    n_iter = 2

    opts_dict = {
        'fnames': fname_new,
        'ROIs': ROIs,
        'index': index,
        'weights': weights,
        'context_size': context_size,
        'flip_signal': flip_signal,
        'hp_freq_pb': hp_freq_pb,
        'threshold_method': threshold_method,
        'min_spikes': min_spikes,
        'threshold': threshold,
        'do_plot': do_plot,
        'ridge_bg': ridge_bg,
        'sub_freq': sub_freq,
        'weight_update': weight_update,
        'n_iter': n_iter
    }
    opts.change_params(params_dict=opts_dict)

    # %% TRACE DENOISING AND SPIKE DETECTION
    vpy = VOLPY(n_processes=n_processes, dview=dview, params=opts)
    vpy.fit(n_processes=n_processes, dview=dview)

    # %% visualization
    display_images = True
    if display_images:
        print(np.where(vpy.estimates['locality'])[0])  # neurons that pass the locality test
        idx = np.where(vpy.estimates['locality'] > 0)[0]
        utils.view_components(vpy.estimates, img_corr, idx)

    # %% reconstructed movie
    # note that negative spatial weights are cut off
    if display_images:
        mv_all = utils.reconstructed_movie(vpy.estimates, fnames=mc.mmap_file,
                                           idx=idx, scope=(0, 1000),
                                           flip_signal=flip_signal)
        mv_all.play(fr=40)

    # %% save the result in .npy format
    save_result = True
    if save_result:
        np.save(os.path.join(file_dir, 'result_volpy_demo_voltage_imaging'), vpy.estimates)

    # %% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
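    # %% (optional) reload the saved estimates
    # A minimal sketch, not in the original demo: vpy.estimates is a plain
    # Python dict, so the .npy file written above has to be read back with
    # allow_pickle=True and .item() to recover the dict.
    if save_result:
        estimates = np.load(os.path.join(file_dir, 'result_volpy_demo_voltage_imaging.npy'),
                            allow_pickle=True).item()
        print(estimates.keys())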
#%% RUN ANALYSIS
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)

#%%
# movie must be mostly positive for this to work
min_mov = cm.load(fname, subindices=range(400)).min()

mc = MotionCorrect(fname, min_mov, dview=dview, max_shifts=max_shifts,
                   niter_rig=niter_rig, splits_rig=splits_rig,
                   num_splits_to_process_rig=num_splits_to_process_rig,
                   strides=strides, overlaps=overlaps, splits_els=splits_els,
                   num_splits_to_process_els=num_splits_to_process_els,
                   upsample_factor_grid=upsample_factor_grid,
                   max_deviation_rigid=max_deviation_rigid,
                   shifts_opencv=True, nonneg_movie=True)

#%%
mc.motion_correct_rigid(save_movie=True)  # load motion corrected movie

#%%
pl.imshow(mc.total_template_rig, cmap='gray')

#%% visualize templates
cm.movie(np.array(mc.templates_rig)).play(fr=10)
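#%% (optional) quick quality check of the rigid correction
# A minimal sketch, not part of the original script: the corrected movie saved
# by motion_correct_rigid can be loaded back and its mean image compared with
# the final template; a sharp mean image indicates good registration.
m_rig = cm.load(mc.fname_tot_rig)
pl.figure()
pl.subplot(1, 2, 1)
pl.imshow(np.mean(m_rig, axis=0), cmap='gray')
pl.title('mean of corrected movie')
pl.subplot(1, 2, 2)
pl.imshow(mc.total_template_rig, cmap='gray')
pl.title('final template')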
def run_motion_correction(cropping_file, dview):
    """
    This is the function for motion correction. Its goal is to take in a
    decoded and cropped .tif file, perform motion correction, and save the
    result as a .mmap file.

    This function is only runnable on the cn76 server because it requires
    parallel processing.

    Args:
        cropping_file: tif file after cropping
        dview: cluster

    Returns:
        row: pd.DataFrame object
            The row corresponding to the motion corrected analysis state.
    """
    # Get output file paths
    data_dir = os.environ['DATA_DIR_LOCAL'] + 'data/interim/motion_correction/'
    sql = ("SELECT mouse,session,trial,is_rest,decoding_v,cropping_v,motion_correction_v,"
           "input,home_path,decoding_main FROM Analysis WHERE cropping_main=? "
           "ORDER BY motion_correction_v")
    val = [cropping_file, ]
    cursor.execute(sql, val)
    result = cursor.fetchall()
    data = []
    inter = []
    for x in result:
        inter = x
    for y in inter:
        data.append(y)

    # Update the database
    if data[6] == 0:
        data[6] = 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}"
        output_meta_pkl_file_path = f'meta/metrics/{file_name}.pkl'
        sql1 = "UPDATE Analysis SET motion_correction_meta=?,motion_correction_v=? WHERE cropping_main=? "
        val1 = [output_meta_pkl_file_path, data[6], cropping_file]
        cursor.execute(sql1, val1)
    else:
        data[6] += 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}"
        output_meta_pkl_file_path = f'meta/metrics/{file_name}.pkl'
        sql2 = "INSERT INTO Analysis (motion_correction_meta,motion_correction_v) VALUES (?,?)"
        val2 = [output_meta_pkl_file_path, data[6]]
        cursor.execute(sql2, val2)
        database.commit()
        sql3 = ("UPDATE Analysis SET decoding_main=?,decoding_v=?,mouse=?,session=?,trial=?,is_rest=?,"
                "input=?,home_path=?,cropping_v=?,cropping_main=? "
                "WHERE motion_correction_meta=? AND motion_correction_v=?")
        val3 = [data[9], data[4], data[0], data[1], data[2], data[3], data[7],
                data[8], data[5], cropping_file, output_meta_pkl_file_path, data[6]]
        cursor.execute(sql3, val3)
    database.commit()
    output_meta_pkl_file_path_full = data_dir + output_meta_pkl_file_path

    # Calculate the movie minimum to subtract from the movie
    cropping_file_full = os.environ['DATA_DIR_LOCAL'] + cropping_file
    min_mov = np.min(cm.load(cropping_file_full))

    # Apply the parameters to the CaImAn algorithm
    sql5 = ("SELECT motion_correct,pw_rigid,save_movie_rig,gSig_filt,max_shifts,niter_rig,strides,"
            "overlaps,upsample_factor_grid,num_frames_split,max_deviation_rigid,shifts_opencv,"
            "use_conda,nonneg_movie,border_nan FROM Analysis WHERE cropping_main=?")
" val5 = [ cropping_file, ] cursor.execute(sql5, val5) myresult = cursor.fetchall() para = [] aux = [] for x in myresult: aux = x for y in aux: para.append(y) parameters = { 'motion_correct': para[0], 'pw_rigid': para[1], 'save_movie_rig': para[2], 'gSig_filt': (para[3], para[3]), 'max_shifts': (para[4], para[4]), 'niter_rig': para[5], 'strides': (para[6], para[6]), 'overlaps': (para[7], para[7]), 'upsample_factor_grid': para[8], 'num_frames_split': para[9], 'max_deviation_rigid': para[10], 'shifts_opencv': para[11], 'use_cuda': para[12], 'nonneg_movie': para[13], 'border_nan': para[14] } caiman_parameters = parameters.copy() caiman_parameters['min_mov'] = min_mov opts = params.CNMFParams(params_dict=caiman_parameters) # Rigid motion correction (in both cases) logging.info('Performing rigid motion correction') t0 = datetime.datetime.today() # Create a MotionCorrect object mc = MotionCorrect([cropping_file_full], dview=dview, **opts.get_group('motion')) # Perform rigid motion correction mc.motion_correct_rigid(save_movie=parameters['save_movie_rig'], template=None) dt = int( (datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes logging.info(f' Rigid motion correction finished. dt = {dt} min') # Obtain template, rigid shifts and border pixels total_template_rig = mc.total_template_rig shifts_rig = mc.shifts_rig # Save template, rigid shifts and border pixels in a dictionary meta_pkl_dict = { 'rigid': { 'template': total_template_rig, 'shifts': shifts_rig, } } sql = "UPDATE Analysis SET duration_rigid=? WHERE motion_correction_meta=? AND motion_correction_v=? " val = [dt, output_meta_pkl_file_path, data[6]] cursor.execute(sql, val) if parameters['save_movie_rig'] == 1: # Load the movie saved by CaImAn, which is in the wrong # directory and is not yet cropped logging.info(f' Loading rigid movie for cropping') m_rig = cm.load(mc.fname_tot_rig[0]) logging.info(f' Loaded rigid movie for cropping') # Get the cropping points determined by the maximal rigid shifts x_, _x, y_, _y = get_crop_from_rigid_shifts(shifts_rig) # Crop the movie logging.info( f' Cropping and saving rigid movie with cropping points: [x_, _x, y_, _y] = {[x_, _x, y_, _y]}' ) m_rig = m_rig.crop(x_, _x, y_, _y, 0, 0) meta_pkl_dict['rigid']['cropping_points'] = [x_, _x, y_, _y] sql = "UPDATE Analysis SET motion_correction_cropping_points_x1=?,motion_correction_cropping_points_x2=?,motion_correction_cropping_points_y1=?,motion_correction_cropping_points_y2=? WHERE motion_correction_meta=? AND motion_correction_v=? " val = [x_, _x, y_, _y, output_meta_pkl_file_path, data[6]] cursor.execute(sql, val) # Save the movie rig_role = 'alternate' if parameters['pw_rigid'] else 'main' fname_tot_rig = m_rig.save(data_dir + rig_role + '/' + file_name + '_rig' + '.mmap', order='C') logging.info(f' Cropped and saved rigid movie as {fname_tot_rig}') # Remove the remaining non-cropped movie os.remove(mc.fname_tot_rig[0]) sql = "UPDATE Analysis SET motion_correction_rig_role=? WHERE motion_correction_meta=? AND motion_correction_v=? " val = [fname_tot_rig, output_meta_pkl_file_path, data[6]] cursor.execute(sql, val) database.commit() # If specified in the parameters, apply piecewise-rigid motion correction if parameters['pw_rigid'] == 1: logging.info(f' Performing piecewise-rigid motion correction') t0 = datetime.datetime.today() # Perform non-rigid (piecewise rigid) motion correction. Use the rigid result as a template. 
        mc.motion_correct_pwrigid(save_movie=True, template=total_template_rig)
        # Obtain the template and filename
        total_template_els = mc.total_template_els
        fname_tot_els = mc.fname_tot_els[0]
        dt = int((datetime.datetime.today() - t0).seconds / 60)  # timedelta in minutes
        meta_pkl_dict['pw_rigid'] = {
            'template': total_template_els,
            'x_shifts': mc.x_shifts_els,
            'y_shifts': mc.y_shifts_els  # initially removed because they probably take up space
        }
        logging.info(f' Piecewise-rigid motion correction finished. dt = {dt} min')

        # Load the movie saved by CaImAn, which is in the wrong directory and is not yet cropped
        logging.info(' Loading pw-rigid movie for cropping')
        m_els = cm.load(fname_tot_els)
        logging.info(' Loaded pw-rigid movie for cropping')
        # Get the cropping points determined by the maximal rigid shifts
        x_, _x, y_, _y = get_crop_from_pw_rigid_shifts(np.array(mc.x_shifts_els),
                                                       np.array(mc.y_shifts_els))
        # Crop the movie
        logging.info(f' Cropping and saving pw-rigid movie with cropping points: '
                     f'[x_, _x, y_, _y] = {[x_, _x, y_, _y]}')
        m_els = m_els.crop(x_, _x, y_, _y, 0, 0)
        meta_pkl_dict['pw_rigid']['cropping_points'] = [x_, _x, y_, _y]

        # Save the movie
        fname_tot_els = m_els.save(data_dir + 'main/' + file_name + '_els' + '.mmap', order='C')
        logging.info(f'Cropped and saved pw-rigid movie as {fname_tot_els}')

        # Remove the remaining non-cropped movie
        os.remove(mc.fname_tot_els[0])

        sql = ("UPDATE Analysis SET motion_correction_main=?,motion_correction_cropping_points_x1=?,"
               "motion_correction_cropping_points_x2=?,motion_correction_cropping_points_y1=?,"
               "motion_correction_cropping_points_y2=?,duration_pw_rigid=? "
               "WHERE motion_correction_meta=? AND motion_correction_v=? ")
        val = [fname_tot_els, x_, _x, y_, _y, dt, output_meta_pkl_file_path, data[6]]
        cursor.execute(sql, val)
        database.commit()

    # Write the meta results dictionary to the pkl file
    with open(output_meta_pkl_file_path_full, 'wb') as pkl_file:
        pickle.dump(meta_pkl_dict, pkl_file)

    return fname_tot_els, data[6]
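# A minimal usage sketch, not part of the original module, assuming the
# database connection (`database`, `cursor`) is already open and that
# 'data/interim/cropping/main/example.tif' is a cropped file registered in the
# Analysis table; that path is purely illustrative.
if __name__ == '__main__':
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)
    fname_els, version = run_motion_correction('data/interim/cropping/main/example.tif', dview)
    print(f'motion-corrected file: {fname_els} (version {version})')
    cm.stop_server(dview=dview)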
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)

#%% MOTION CORRECTION
# first we create a motion correction object with the parameters specified
min_mov = cm.load(fname[0], subindices=range(200)).min()
# this will be subtracted from the movie to make it non-negative
mc = MotionCorrect(fname[0], min_mov, dview=dview, max_shifts=max_shifts,
                   niter_rig=niter_rig, splits_rig=splits_rig,
                   strides=strides, overlaps=overlaps, splits_els=splits_els,
                   upsample_factor_grid=upsample_factor_grid,
                   max_deviation_rigid=max_deviation_rigid,
                   shifts_opencv=True, nonneg_movie=True)
# note that the file is not loaded in memory

#%% Run piecewise-rigid motion correction using NoRMCorre
mc.motion_correct_pwrigid(save_movie=True)
m_els = cm.load(mc.fname_tot_els)
bord_px_els = np.ceil(np.maximum(np.max(np.abs(mc.x_shifts_els)),
                                 np.max(np.abs(mc.y_shifts_els)))).astype(int)
# maximum shift to be used for trimming against NaNs
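#%% (optional) memory map the corrected movie, trimming the border pixels
# A minimal sketch, not part of the original script: bord_px_els computed above
# is typically passed to the memory-mapping step so that the NaN border
# produced by the shifts is excluded. The keyword names follow the usage
# elsewhere in this file and may differ between CaImAn versions.
fname_new = cm.save_memmap(mc.fname_tot_els, base_name='memmap_', order='C',
                           border_to_0=bord_px_els)
Yr, dims, T = cm.load_memmap(fname_new)
images = np.reshape(Yr.T, [T] + list(dims), order='F')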
def main():
    pass  # For compatibility between running under Spyder and the CLI

    # %% Select file(s) to be processed (download if not present)
    fnames = fname

    # %% First set up some parameters for data and motion correction
    n_processes = 4
    # dataset dependent parameters
    nChannels = 1
    channelToKeep = 0
    fr = 30                         # imaging rate in frames per second
    decay_time = 0.4                # length of a typical transient in seconds
    dxy = (2., 2.)                  # spatial resolution in x and y in (um per pixel)
    # note the lower than usual spatial resolution here
    max_shift_um = (12., 12.)       # maximum shift in um
    patch_motion_um = (100., 100.)  # patch size for non-rigid correction in um

    # motion correction parameters
    pw_rigid = False                # flag to select rigid vs pw_rigid motion correction
    # maximum allowed rigid shift in pixels
    # max_shifts = [int(a/b) for a, b in zip(max_shift_um, dxy)]
    max_shifts = (20, 20)
    # start a new patch for pw-rigid motion correction every x pixels
    # strides = tuple([int(a/b) for a, b in zip(patch_motion_um, dxy)])
    strides = (32, 32)
    # overlap between patches (size of patch in pixels: strides + overlaps)
    overlaps = (18, 18)
    # maximum deviation allowed for a patch with respect to rigid shifts
    max_deviation_rigid = 5
    # estimate minimum of movie
    # min_mov = np.min(tif.imread(fname[0]))

    mc_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'dxy': dxy,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': 'min'
    }
    opts = params.CNMFParams(params_dict=mc_dict)

    # %% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=n_processes, single_thread=False)
    print('checkpoint 1: mcorrect start', flush=True)

    # %% MOTION CORRECTION
    # first we create a motion correction object with the specified parameters
    mc = MotionCorrect(fname, dview=dview, **opts.get_group('motion'))
    # note that the file is not loaded in memory

    # %% Run (piecewise-rigid) motion correction using NoRMCorre
    mc.motion_correct(save_movie=True, template=mc_temp)
    border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
    if pw_rigid:
        mmap_files = mc.fname_tot_els
    else:
        mmap_files = mc.fname_tot_rig
    # delete the mc object
    del mc

    print('mmap file list: {}'.format(mmap_files), flush=True)
    for n, mmap_file in enumerate(mmap_files):
        fname_new = cm.save_memmap([mmap_file], base_name='memmap_{}_{}'.format(n, jobid),
                                   order='C', border_to_0=border_to_0)
        (mov, frame_dims, num_frames) = cm.load_memmap(fname_new)
        mov = mov.reshape(list(frame_dims) + [num_frames]).transpose((2, 1, 0))
        mov = mov[channelToKeep::nChannels]
        # currently format output based on input (stack or single-file)
        # if in_file_ext[n] in ['.tif', '.tiff']:
        if '.tif' in outfile[n]:
            tif.imsave(outfile[n], mov, imagej=True)
        else:
            print("saving output {}".format(outfile[n]), flush=True)
            if not os.path.exists(outfile[n]):
                os.mkdir(outfile[n])
            for i in range(len(mov)):
                frame = mov[i]
                frame_file = os.path.join(outfile[n], 'frame{:06d}.tiff'.format(i))
                tif.imsave(frame_file, frame.astype('float32'), imagej=True)

    # remove all temp mmap files and temp tif files
    print('Save complete. Removing temporary files.', flush=True)
    if tmpMovPath is not None:
        os.remove(tmpMovPath)
    rem = [os.remove(x) for x in mmap_files]
    if isinstance(fname_new, list):
        remnew = [os.remove(x) for x in fname_new]
    else:
        remnew = os.remove(fname_new)
    print('Motion correction script complete.', flush=True)
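# A minimal sketch, not in the original script, for checking the exported
# frames: read back the first frame(s) written by the loop above with the same
# tifffile module (imported here as `tif`) and report shape/dtype. Assumes the
# given outfile entry was written as a frame directory rather than a single stack.
def check_exported_frames(frame_dir, n_check=1):
    for i in range(n_check):
        frame = tif.imread(os.path.join(frame_dir, 'frame{:06d}.tiff'.format(i)))
        print(frame_dir, i, frame.shape, frame.dtype)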
def run_alignment(mouse, sessions, motion_correction_v, cropping_v, dview):
    """
    This is the main function for the alignment step. It applies methods from
    the CaImAn package, used originally for motion correction, to do alignment.
    """
    for session in sessions:
        # Update the database
        file_name = f"mouse_{mouse}_session_{session}_alignment"
        sql1 = "UPDATE Analysis SET alignment_main=? WHERE mouse=? AND session=? AND motion_correction_v=? AND cropping_v=? "
        output_mmap_file_path = os.environ['DATA_DIR_LOCAL'] + f'data/interim/alignment/main/{file_name}.mmap'
        val1 = [output_mmap_file_path, mouse, session, motion_correction_v, cropping_v]
        cursor.execute(sql1, val1)

        # Determine the output .mmap file name
        sql = "SELECT motion_correction_main FROM Analysis WHERE mouse=? AND session=? AND motion_correction_v=? AND cropping_v=? "
        val = [mouse, session, motion_correction_v, cropping_v]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        input_mmap_file_list = []
        inter = []
        for x in result:
            inter += x
        for y in inter:
            input_mmap_file_list.append(y)

        sql = "SELECT motion_correction_cropping_points_x1 FROM Analysis WHERE mouse=? AND session=? AND motion_correction_v=? AND cropping_v=? "
        val = [mouse, session, motion_correction_v, cropping_v]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        x_ = []
        inter = []
        for i in result:
            inter += i
        for j in range(0, len(inter)):
            x_.append(inter[j])

        sql = "SELECT motion_correction_cropping_points_x2 FROM Analysis WHERE mouse=? AND session=? AND motion_correction_v=? AND cropping_v=? "
        val = [mouse, session, motion_correction_v, cropping_v]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        _x = []
        inter = []
        for i in result:
            inter += i
        for j in range(0, len(inter)):
            _x.append(inter[j])

        sql = "SELECT motion_correction_cropping_points_y1 FROM Analysis WHERE mouse=? AND session=? AND motion_correction_v=? AND cropping_v=?"
        val = [mouse, session, motion_correction_v, cropping_v]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        _y = []
        inter = []
        for i in result:
            inter += i
        for j in range(0, len(inter)):
            _y.append(inter[j])

        sql = "SELECT motion_correction_cropping_points_y2 FROM Analysis WHERE mouse=? AND session=? AND motion_correction_v=? AND cropping_v=?"
        val = [mouse, session, motion_correction_v, cropping_v]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        y_ = []
        inter = []
        for i in result:
            inter += i
        for j in range(0, len(inter)):
            y_.append(inter[j])

        new_x1 = max(x_)
        new_x2 = max(_x)
        new_y1 = max(y_)
        new_y2 = max(_y)
        m_list = []
        for i in range(len(input_mmap_file_list)):
            m = cm.load(input_mmap_file_list[i])
            m = m.crop(new_x1 - x_[i], new_x2 - _x[i], new_y1 - y_[i], new_y2 - _y[i], 0, 0)
            m_list.append(m)

        # Concatenate the movies using the concatenate function
        m_concat = cm.concatenate(m_list, axis=0)
        fname = m_concat.save(output_mmap_file_path, order='C')

        # MOTION CORRECTING EACH INDIVIDUAL MOVIE WITH RESPECT TO A TEMPLATE MADE OF THE FIRST MOVIE
        logging.info('Performing motion correction on all movies with respect to a template made of the first movie.')
        t0 = datetime.datetime.today()

        # alignment parameters
        sql5 = ("SELECT make_template_from_trial,gSig_filt,max_shifts,niter_rig,strides,overlaps,"
                "upsample_factor_grid,num_frames_split,max_deviation_rigid,shifts_opencv,"
                "use_conda,nonneg_movie,border_nan FROM Analysis WHERE alignment_main=?")
" val5 = [ file_name, ] cursor.execute(sql5, val5) myresult = cursor.fetchall() para = [] aux = [] for x in myresult: aux = x for y in aux: para.append(y) parameters = { 'make_template_from_trial': para[0], 'gSig_filt': (para[1], para[1]), 'max_shifts': (para[2], para[2]), 'niter_rig': para[3], 'strides': (para[4], para[4]), 'overlaps': (para[5], para[5]), 'upsample_factor_grid': para[6], 'num_frames_split': para[7], 'max_deviation_rigid': para[8], 'shifts_opencv': para[9], 'use_cuda': para[10], 'nonneg_movie': para[11], 'border_nan': para[12] } # Create a template of the first movie template_index = parameters['make_template_from_trial'] m0 = cm.load(input_mmap_file_list[1]) [x1, x2, y1, y2] = [x_, _x, y_, _y] for i in range(len(input_mmap_file_list)): m0 = m0.crop(new_x1 - x_[i], new_x2 - _x[i], new_y1 - y_[i], new_y2 - _y[i], 0, 0) m0_filt = cm.movie( np.array([ high_pass_filter_space(m_, parameters['gSig_filt']) for m_ in m0 ])) template0 = cm.motion_correction.bin_median( m0_filt.motion_correct( 5, 5, template=None)[0]) # may be improved in the future # Setting the parameters opts = params.CNMFParams(params_dict=parameters) # Create a motion correction object mc = MotionCorrect(fname, dview=dview, **opts.get_group('motion')) # Perform non-rigid motion correction mc.motion_correct(template=template0, save_movie=True) # Cropping borders x_ = math.ceil( abs(np.array(mc.shifts_rig)[:, 1].max() ) if np.array(mc.shifts_rig)[:, 1].max() > 0 else 0) _x = math.ceil( abs(np.array(mc.shifts_rig)[:, 1].min() ) if np.array(mc.shifts_rig)[:, 1].min() < 0 else 0) y_ = math.ceil( abs(np.array(mc.shifts_rig)[:, 0].max() ) if np.array(mc.shifts_rig)[:, 0].max() > 0 else 0) _y = math.ceil( abs(np.array(mc.shifts_rig)[:, 0].min() ) if np.array(mc.shifts_rig)[:, 0].min() < 0 else 0) # Load the motion corrected movie into memory movie = cm.load(mc.fname_tot_rig[0]) # Crop all movies to those border pixels movie.crop(x_, _x, y_, _y, 0, 0) sql1 = "UPDATE Analysis SET alignment_x1=?, alignment_x2 =?, alignment_y1=?, alignment_y2=? WHERE mouse = ? AND session=? AND motion_correction_v =? AND cropping_v=?" val1 = [ x_, _x, y_, _y, mouse, session, motion_correction_v, cropping_v ] cursor.execute(sql1, val1) # save motion corrected and cropped movie output_mmap_file_path_tot = movie.save( os.environ['DATA_DIR_LOCAL'] + f'data/interim/alignment/main/{file_name}.mmap', order='C') logging.info( f' Cropped and saved rigid movie as {output_mmap_file_path_tot}') # Remove the remaining non-cropped movie os.remove(mc.fname_tot_rig[0]) # Create a timeline and store it sql = "SELECT trial FROM Analysis WHERE mouse = ? AND session=? AND motion_correction_v =? AND cropping_v=?" val = [mouse, session, motion_correction_v, cropping_v] cursor.execute(sql, val) result = cursor.fetchall() trial_index_list = [] inter = [] for i in result: inter += i for j in range(0, len(inter)): trial_index_list.append(inter[j]) timeline = [[trial_index_list[0], 0]] timepoints = [0] for i in range(1, len(m_list)): m = m_list[i] timeline.append( [trial_index_list[i], timeline[i - 1][1] + m.shape[0]]) timepoints.append(timepoints[i - 1] + m.shape[0]) timeline_pkl_file_path = os.environ[ 'DATA_DIR'] + f'/interim/alignment/meta/timeline/{file_name}.pkl' with open(timeline_pkl_file_path, 'wb') as f: pickle.dump(timeline, f) sql1 = "UPDATE Analysis SET alignment_timeline=? WHERE mouse = ? AND session=?AND motion_correction_v =? AND cropping_v=? 
" val1 = [ timeline_pkl_file_path, mouse, session, motion_correction_v, cropping_v ] cursor.execute(sql1, val1) timepoints.append(movie.shape[0]) dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes sql1 = "UPDATE Analysis SET alignment_duration_concatenation=? WHERE mouse = ? AND session=?AND motion_correction_v =? AND cropping_v=? " val1 = [dt, mouse, session, motion_correction_v, cropping_v] cursor.execute(sql1, val1) logging.info(f' Performed concatenation. dt = {dt} min.') ## modify all motion correction file to the aligned version data_dir = os.environ['DATA_DIR'] + '/interim/motion_correction/main/' for i in range(len(input_mmap_file_list)): alignment_v = 1 aligned_movie = movie[timepoints[i]:timepoints[i + 1]] motion_correction_output_aligned = aligned_movie.save( data_dir + file_name + '_els' + '.mmap', order='C') sql1 = "UPDATE Analysis SET motion_correct_align=?, alignment_v=?WHERE motion_correction_meta=? AND motion_correction_v=?" val1 = [ motion_correction_output_aligned, alignment_v, input_mmap_file_list[i], motion_correction_v ] cursor.execute(sql1, val1) database.commit() return