def _run_motion_correction(self, file_name, max_shifts, strides, overlaps, upsample_factor_grid, max_deviation_rigid):
    """Run CaImAn rigid then piecewise-rigid motion correction on *file_name*.

    Returns the corrected file names (rigid and pw-rigid), the rigid
    shifts, the per-patch x/y shifts, and the pw-rigid template shape.
    """
    # Offset estimated from the first 1000 frames keeps the movie non-negative.
    movie_offset = np.nanmin(self.data_orig[:1000])
    corrector = MotionCorrect(
        file_name,
        movie_offset,
        max_shifts=max_shifts,
        niter_rig=1,
        splits_rig=56,
        strides=strides,
        overlaps=overlaps,
        splits_els=56,
        upsample_factor_grid=upsample_factor_grid,
        shifts_opencv=True,
        max_deviation_rigid=max_deviation_rigid,
        nonneg_movie=True,
    )
    # Rigid pass first; its template then seeds the piecewise-rigid pass.
    corrector.motion_correct_rigid(save_movie=True)
    corrector.motion_correct_pwrigid(save_movie=True, template=corrector.total_template_rig)
    return (
        corrector.fname_tot_rig,
        corrector.fname_tot_els,
        corrector.shifts_rig,
        corrector.x_shifts_els,
        corrector.y_shifts_els,
        corrector.total_template_els.shape,
    )
def preprocess_data(base, image_folder='z0', scale=0.5, name='movie.tif', edge=5, corrected_name='movie_corrected.tif'):
    """Walk *base*, convert/crop each matching image folder, and write a rigid
    motion-corrected, border-trimmed uint16 stack next to it."""
    for root, dirs, files in os.walk(base):
        # Only process directories whose path ends with the image folder name.
        tail = root[-1 * len(image_folder):]
        if tail != image_folder:
            continue
        print('Preprocessing ', root)
        parent = root[:(-1 - len(image_folder))]
        # Conversion may legitimately fail for this folder; skip it then.
        try:
            saved_path = convert_and_crop_stack(root, parent, scale, name)
        except (FileExistsError, FileNotFoundError):
            print(parent, ' cannot be preprocessed, skipping...')
            continue
        # Rigid motion correction, then trim `edge` pixels off every border.
        mc = MotionCorrect(saved_path, 0)
        mc.motion_correct_rigid()
        with tifffile.TiffWriter(parent + os.sep + corrected_name, imagej=True) as tif:
            frames = mc.apply_shifts_movie(saved_path)
            frames = frames[:, edge:-edge, edge:-edge]
            # Rescale into the uint16 range before writing the ImageJ TIFF.
            tif.save(np.array(frames * (2 ** 16), dtype='uint16'))
def mc_vids(vids_fpath, mc_rigid_template):
    """Motion-correct the videos in *vids_fpath*: rigid first, then
    piecewise-rigid when `doPwRigid` is set.

    Relies on module-level settings (dview, max_shifts, splits_rig,
    gSig_filt, border_nan, doPwRigid — defined outside this function).
    Returns the MotionCorrect object, the elapsed seconds, and the rigid
    shifts.
    """
    t_begin = time.time()
    # Estimate the movie minimum (after spatial high-pass filtering) so the
    # corrected output stays positive.
    filtered = [cm.motion_correction.high_pass_filter_space(frame, gSig_filt)
                for frame in cm.load(vids_fpath[0], subindices=range(400))]
    min_mov = np.array(filtered).min()
    mc = MotionCorrect(vids_fpath, min_mov, dview=dview, max_shifts=max_shifts,
                       niter_rig=1, splits_rig=splits_rig,
                       num_splits_to_process_rig=None, shifts_opencv=True,
                       nonneg_movie=True, gSig_filt=gSig_filt,
                       border_nan=border_nan, is3D=False)
    # When a pw-rigid pass follows, skip saving the intermediate rigid movie.
    mc.motion_correct_rigid(save_movie=(not doPwRigid), template=mc_rigid_template)
    shifts_rig = mc.shifts_rig
    template_rig = mc.total_template_rig
    if doPwRigid:
        mc.motion_correct_pwrigid(save_movie=True, template=template_rig)
        # Restore the rigid template, which the pw-rigid pass overwrites.
        mc.total_template_rig = template_rig
    elapsed = time.time() - t_begin
    logging.info('Motion correction done in %s', str(elapsed))
    return mc, elapsed, shifts_rig
def motion_correct_rigid(self, fname):
    """Rigidly motion-correct *fname* with CaImAn, storing the result on self.

    A local cluster is started for the correction and is always shut down
    again, even when the correction raises.

    Args:
        fname: list of movie file paths; the minimum of the first movie's
            first 200 frames is subtracted to keep the output non-negative.
    """
    dview = None
    try:
        c, dview, n_processes = cm.cluster.setup_cluster(
            backend='local', n_processes=None, single_thread=False)
        # number of iterations for rigid motion correction
        niter_rig = 1
        max_shifts = self.get_dict_param('max_shifts_rigid', 'tuple_int')
        # for parallelization split the movies in num_splits chunks across time
        splits_rig = self.get_dict_param('splits_rig', 'single_int')
        # this will be subtracted from the movie to make it non-negative
        min_mov = cm.load(fname[0], subindices=range(200)).min()
        mc = MotionCorrect(fname, min_mov, dview=dview, max_shifts=max_shifts,
                           niter_rig=niter_rig, splits_rig=splits_rig,
                           border_nan='copy', shifts_opencv=True,
                           nonneg_movie=True)
        mc.motion_correct_rigid(save_movie=True)
        self.motion_correct = mc
    finally:
        # The original `except Exception as e: raise e` added nothing;
        # try/finally alone guarantees the cluster is stopped while any
        # exception propagates unchanged.
        cm.cluster.stop_server(dview=dview)
def correct_single_movie(data_folder, identifier, dview):
    """Rigidly motion-correct the single movie in *data_folder* matching *identifier*.

    Saves the corrected movie, its mean/max projections, and the rigid
    shift offsets (HDF5), then removes CaImAn's intermediate file. Does
    nothing when zero or more than one matching .tif is found.
    """
    # ======================================= setup parameters =======================================
    niter_rig = 5                 # iterations for rigid motion correction
    max_shifts = (30, 30)         # maximum allowed rigid shift in pixels
    splits_rig = 56               # split the movie in chunks across time for parallelization
    strides = (48, 48)            # new patch every x pixels for pw-rigid correction
    overlaps = (24, 24)           # overlap between patches (patch size = strides + overlaps)
    splits_els = 56               # chunks across time for the elastic pass
    upsample_factor_grid = 4      # upsample factor to avoid smearing when merging patches
    max_deviation_rigid = 3       # max deviation of a patch from the rigid shifts
    shifts_opencv = True          # apply shifts the fast (slightly smoothing) opencv way
    nonneg_movie = False          # do not offset the saved movie to be non-negative
    # ======================================= setup parameters =======================================

    fname = [f for f in os.listdir(data_folder) if f[-4:] == '.tif' and identifier in f]
    if len(fname) == 0:
        print('\ndid not find movie file in directory: {}.'.format(data_folder))
        print('Do nothing.')
        return
    elif len(fname) > 1:
        fname.sort()
        print('\n')
        print('\n'.join(fname))
        warnings.warn('more than one movie file in directory: {}. skip ...'.format(data_folder))
        return
    else:
        fname = fname[0]

    print('\ncorrecting {} in directory {}.'.format(fname, data_folder))

    # if the data has very negative values compute an offset value
    offset_mov = 0.

    # create a motion correction object
    mc = MotionCorrect(os.path.join(data_folder, fname), offset_mov, dview=dview,
                       max_shifts=max_shifts, niter_rig=niter_rig,
                       splits_rig=splits_rig, strides=strides, overlaps=overlaps,
                       splits_els=splits_els,
                       upsample_factor_grid=upsample_factor_grid,
                       max_deviation_rigid=max_deviation_rigid,
                       shifts_opencv=shifts_opencv, nonneg_movie=nonneg_movie)
    mc.motion_correct_rigid(save_movie=True)

    # load motion corrected movie
    m_rig = cm.load(mc.fname_tot_rig)
    m_rig = m_rig.astype(np.int16)
    save_name = os.path.splitext(fname)[0] + '_corrected.tif'
    tf.imsave(os.path.join(data_folder, save_name), m_rig)
    tf.imsave(os.path.join(data_folder, 'corrected_mean_projection.tif'),
              np.mean(m_rig, axis=0).astype(np.float32))
    tf.imsave(os.path.join(data_folder, 'corrected_max_projection.tif'),
              np.max(m_rig, axis=0).astype(np.float32))

    # FIX: open the HDF5 file with an explicit mode and close it
    # deterministically (the original relied on h5py's deprecated default
    # mode and leaked the open handle).
    offsets = np.array([np.array(o) for o in mc.shifts_rig]).astype(np.float32)
    with h5py.File(os.path.join(data_folder, 'correction_offsets.hdf5'), 'a') as offset_f:
        offset_dset = offset_f.create_dataset(name='file_0000', data=offsets)
        offset_dset.attrs['format'] = 'height, width'
        offset_dset.attrs['path'] = os.path.join(data_folder, fname)

    os.remove(mc.fname_tot_rig[0])
def test_general():
    """General test of the pipeline with comparison against ground truth.

    A shorter version of the demo pipeline: rigid motion correction, CNMF
    on patches, CNMF on the full frame; each stage is compared against
    stored ground truth and the test asserts nothing diverged.

    Raises:
        Exception: when the movie is too negative or contains NaNs after
            border removal, or when dendritic settings are inconsistent.
        AssertionError: when any compared stage differs from ground truth.
    """
    global params_movie
    global params_diplay
    fname = params_movie['fname']
    niter_rig = params_movie['niter_rig']
    max_shifts = params_movie['max_shifts']
    splits_rig = params_movie['splits_rig']
    num_splits_to_process_rig = params_movie['num_splits_to_process_rig']
    cwd = os.getcwd()
    fname = download_demo(fname[0])
    m_orig = cm.load(fname)
    min_mov = m_orig[:400].min()
    comp = comparison.Comparison()
    comp.dims = np.shape(m_orig)[1:]

    # ################ RIG CORRECTION #################
    t1 = time.time()
    mc = MotionCorrect(fname, min_mov, max_shifts=max_shifts,
                       niter_rig=niter_rig, splits_rig=splits_rig,
                       num_splits_to_process_rig=num_splits_to_process_rig,
                       shifts_opencv=True, nonneg_movie=True)
    mc.motion_correct_rigid(save_movie=True)
    m_rig = cm.load(mc.fname_tot_rig)
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented replacement.
    bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(int)
    comp.comparison['rig_shifts']['timer'] = time.time() - t1
    comp.comparison['rig_shifts']['ourdata'] = mc.shifts_rig
    # ###########################################

    if 'max_shifts' not in params_movie:
        fnames = params_movie['fname']
        border_to_0 = 0
    else:  # elif not params_movie.has_key('overlaps'):
        fnames = mc.fname_tot_rig
        border_to_0 = bord_px_rig
        m_els = m_rig

    idx_xy = None
    add_to_movie = -np.nanmin(m_els) + 1  # movie must be positive
    remove_init = 0
    downsample_factor = 1
    base_name = fname[0].split('/')[-1][:-4]
    name_new = cm.save_memmap_each(fnames, base_name=base_name,
                                   resize_fact=(1, 1, downsample_factor),
                                   remove_init=remove_init, idx_xy=idx_xy,
                                   add_to_movie=add_to_movie,
                                   border_to_0=border_to_0)
    name_new.sort()

    if len(name_new) > 1:
        fname_new = cm.save_memmap_join(name_new, base_name='Yr',
                                        n_chunks=params_movie['n_chunks'],
                                        dview=None)
    else:
        logging.warning('One file only, not saving!')
        fname_new = name_new[0]

    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    Y = np.reshape(Yr, dims + (T, ), order='F')

    if np.min(images) < 0:
        # TODO: should do this in an automatic fashion with a while loop at the 367 line
        raise Exception('Movie too negative, add_to_movie should be larger')
    if np.sum(np.isnan(images)) > 0:
        # TODO: same here
        raise Exception(
            'Movie contains nan! You did not remove enough borders')

    Cn = cm.local_correlations(Y)
    Cn[np.isnan(Cn)] = 0

    p = params_movie['p']
    merge_thresh = params_movie['merge_thresh']
    rf = params_movie['rf']
    stride_cnmf = params_movie['stride_cnmf']
    K = params_movie['K']
    init_method = params_movie['init_method']
    gSig = params_movie['gSig']
    alpha_snmf = params_movie['alpha_snmf']

    if params_movie['is_dendrites'] == True:
        # FIX: `is not` on a string literal compares object identity, which
        # is implementation-dependent (SyntaxWarning on modern CPython);
        # use value comparison instead.
        if params_movie['init_method'] != 'sparse_nmf':
            raise Exception('dendritic requires sparse_nmf')
        if params_movie['alpha_snmf'] is None:
            raise Exception('need to set a value for alpha_snmf')

    # ################ CNMF PART PATCH #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1, k=K, gSig=gSig,
                    merge_thresh=params_movie['merge_thresh'],
                    p=params_movie['p'], dview=None, rf=rf,
                    stride=stride_cnmf,
                    memory_fact=params_movie['memory_fact'],
                    method_init=init_method, alpha_snmf=alpha_snmf,
                    only_init_patch=params_movie['only_init_patch'],
                    gnb=params_movie['gnb'], method_deconvolution='oasis')
    comp.cnmpatch = copy.copy(cnm)
    comp.cnmpatch.estimates = None
    cnm = cnm.fit(images)
    A_tot = cnm.estimates.A
    C_tot = cnm.estimates.C
    YrA_tot = cnm.estimates.YrA
    b_tot = cnm.estimates.b
    f_tot = cnm.estimates.f

    # DISCARDING
    logging.info(('Number of components:' + str(A_tot.shape[-1])))
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_patch']
    # threshold on time variability
    # NOTE(review): fitness_min reads the *delta* key; looks like it should
    # read 'fitness_min_patch' — confirm against params_movie before changing.
    fitness_min = params_movie['fitness_delta_min_patch']
    fitness_delta_min = params_movie['fitness_delta_min_patch']
    Npeaks = params_movie['Npeaks']
    traces = C_tot + YrA_tot
    idx_components, idx_components_bad = estimate_components_quality(
        traces, Y, A_tot, C_tot, b_tot, f_tot, final_frate=final_frate,
        Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min)
    # ######
    A_tot = A_tot.tocsc()[:, idx_components]
    C_tot = C_tot[idx_components]
    comp.comparison['cnmf_on_patch']['timer'] = time.time() - t1
    comp.comparison['cnmf_on_patch']['ourdata'] = [A_tot.copy(), C_tot.copy()]
    # ################### ########################

    # ################ CNMF PART FULL #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1, k=A_tot.shape, gSig=gSig,
                    merge_thresh=merge_thresh, p=p, Ain=A_tot, Cin=C_tot,
                    f_in=f_tot, rf=None, stride=None,
                    method_deconvolution='oasis')
    cnm = cnm.fit(images)
    # DISCARDING
    A, C, b, f, YrA, sn = (cnm.estimates.A, cnm.estimates.C, cnm.estimates.b,
                           cnm.estimates.f, cnm.estimates.YrA, cnm.estimates.sn)
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_full']
    # threshold on time variability
    fitness_min = params_movie['fitness_delta_min_full']
    fitness_delta_min = params_movie['fitness_delta_min_full']
    Npeaks = params_movie['Npeaks']
    traces = C + YrA
    idx_components, idx_components_bad, fitness_raw, fitness_delta, r_values = estimate_components_quality(
        traces, Y, A, C, b, f, final_frate=final_frate, Npeaks=Npeaks,
        r_values_min=r_values_min, fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min, return_all=True)
    # ########
    A_tot_full = A_tot.tocsc()[:, idx_components]
    C_tot_full = C_tot[idx_components]
    comp.comparison['cnmf_full_frame']['timer'] = time.time() - t1
    comp.comparison['cnmf_full_frame']['ourdata'] = [
        A_tot_full.copy(), C_tot_full.copy()
    ]
    # ################### ########################

    comp.save_with_compare(istruth=False, params=params_movie, Cn=Cn)
    log_files = glob.glob('*_LOG_*')
    try:
        for log_file in log_files:
            os.remove(log_file)
    # FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # file removal only reasonably fails with OSError.
    except OSError:
        logging.warning('Cannot remove log files')

    # ############ assertions ##################
    pb = False
    if (comp.information['differences']['params_movie']):
        logging.error(
            "you need to set the same movie parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)"
        )
        pb = True
    if (comp.information['differences']['params_cnm']):
        logging.warning(
            "you need to set the same cnmf parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)"
        )
        # pb = True
    if (comp.information['diff']['rig']['isdifferent']):
        logging.error("the rigid shifts are different from the groundtruth ")
        pb = True
    if (comp.information['diff']['cnmpatch']['isdifferent']):
        logging.error(
            "the cnmf on patch produces different results than the groundtruth "
        )
        pb = True
    if (comp.information['diff']['cnmfull']['isdifferent']):
        logging.error(
            "the cnmf full frame produces different results than the groundtruth "
        )
        pb = True
    assert (not pb)
def correct_single_movie(folder_path):
    """Rigidly motion-correct the single .tif movie found in *folder_path*.

    Saves the corrected movie, its mean/max projections, and the rigid
    shift offsets (HDF5), then removes CaImAn's intermediate file.

    Raises:
        LookupError: when zero or more than one .tif file is found.
    """
    # ======================================= setup parameters =======================================
    niter_rig = 5                 # iterations for rigid motion correction
    max_shifts = (30, 30)         # maximum allowed rigid shift in pixels
    splits_rig = 56               # split the movie in chunks across time for parallelization
    strides = (48, 48)            # new patch every x pixels for pw-rigid correction
    overlaps = (24, 24)           # overlap between patches (patch size = strides + overlaps)
    splits_els = 56               # chunks across time for the elastic pass
    upsample_factor_grid = 4      # upsample factor to avoid smearing when merging patches
    max_deviation_rigid = 3       # max deviation of a patch from the rigid shifts
    shifts_opencv = True          # apply shifts the fast (slightly smoothing) opencv way
    nonneg_movie = False          # do not offset the saved movie to be non-negative
    # ======================================= setup parameters =======================================

    offset_mov = 0.

    file_path = [f for f in os.listdir(folder_path) if f[-4:] == '.tif']
    if len(file_path) == 0:
        raise LookupError(
            'no tif file found in folder: {}'.format(folder_path))
    elif len(file_path) > 1:
        raise LookupError(
            'more than one tif files found in folder: {}'.format(folder_path))
    else:
        file_path = os.path.join(folder_path, file_path[0])

    # create a motion correction object
    mc = MotionCorrect(file_path, offset_mov, dview=None,
                       max_shifts=max_shifts, niter_rig=niter_rig,
                       splits_rig=splits_rig, strides=strides, overlaps=overlaps,
                       splits_els=splits_els,
                       upsample_factor_grid=upsample_factor_grid,
                       max_deviation_rigid=max_deviation_rigid,
                       shifts_opencv=shifts_opencv, nonneg_movie=nonneg_movie)
    mc.motion_correct_rigid(save_movie=True)

    # load motion corrected movie
    m_rig = cm.load(mc.fname_tot_rig)
    m_rig = m_rig.astype(np.int16)
    save_name = os.path.splitext(file_path)[0] + '_corrected.tif'
    tf.imsave(os.path.join(folder_path, save_name), m_rig)
    tf.imsave(os.path.join(folder_path, 'corrected_mean_projection.tif'),
              np.mean(m_rig, axis=0).astype(np.float32))
    tf.imsave(os.path.join(folder_path, 'corrected_max_projection.tif'),
              np.max(m_rig, axis=0).astype(np.float32))

    # FIX: open the HDF5 file with an explicit mode and close it
    # deterministically (the original relied on h5py's deprecated default
    # mode and leaked the open handle).
    offsets = np.array([np.array(o) for o in mc.shifts_rig]).astype(np.float32)
    with h5py.File(os.path.join(folder_path, 'correction_offsets.hdf5'), 'a') as offset_f:
        offset_dset = offset_f.create_dataset(name='file_0000', data=offsets)
        offset_dset.attrs['format'] = 'height, width'
        offset_dset.attrs['path'] = file_path

    os.remove(mc.fname_tot_rig[0])
def motion_correct(video_path, max_shift, patch_stride, patch_overlap, use_multiprocessing=True):
    """Motion-correct a multi-plane TIFF movie plane by plane.

    Each z-plane is written to a temporary single-plane TIFF, rigid- and
    then piecewise-rigid-corrected with CaImAn, and written back into a
    copy of the input movie. Temporary per-plane files, memmaps, and log
    files are cleaned up afterwards.

    Args:
        video_path: path to a (t, z, y, x) TIFF movie.
        max_shift: maximum allowed rigid shift in pixels.
        patch_stride: patch spacing for pw-rigid correction.
        patch_overlap: patch overlap for pw-rigid correction.
        use_multiprocessing: start a CaImAn cluster when True.

    Returns:
        (mc_video, new_video_path, mc_borders): corrected uint16 movie,
        path of the corrected copy, and per-plane border widths.
    """
    full_video_path = video_path
    directory = os.path.dirname(full_video_path)
    filename = os.path.basename(full_video_path)
    memmap_video = tifffile.memmap(video_path)

    if use_multiprocessing:
        # ipyparallel is unreliable on Windows; fall back to multiprocessing.
        if os.name == 'nt':
            backend = 'multiprocessing'
        else:
            backend = 'ipyparallel'
        # Create the cluster
        cm.stop_server()
        c, dview, n_processes = cm.cluster.setup_cluster(
            backend=backend, n_processes=None, single_thread=False)
    else:
        dview = None

    z_range = list(range(memmap_video.shape[1]))

    # Work on a copy so the original movie is never modified.
    new_video_path = os.path.join(directory, "mc_video_temp.tif")
    shutil.copyfile(video_path, new_video_path)
    mc_video = tifffile.memmap(new_video_path).astype(np.uint16)

    mc_borders = [None for z in z_range]

    counter = 0
    for z in z_range:
        print("Motion correcting plane z={}...".format(z))
        # Extract this plane into its own temporary file for CaImAn.
        video_path = os.path.join(
            directory,
            os.path.splitext(filename)[0] + "_z_{}_temp.tif".format(z))
        tifffile.imsave(video_path, memmap_video[:, z, :, :])

        mc_video[:, z, :, :] *= 0

        # --- PARAMETERS --- #
        params_movie = {'fname': video_path,
                        'max_shifts': (max_shift, max_shift),  # maximum allowed rigid shift
                        'niter_rig': 3,
                        'splits_rig': 1,  # split the movie in chunks across time
                        'num_splits_to_process_rig': None,  # if None, all splits are processed
                        'strides': (patch_stride, patch_stride),  # patch spacing for pw-rigid
                        'overlaps': (patch_overlap, patch_overlap),  # overlap between patches
                        'splits_els': 1,  # chunks across time for the elastic pass
                        'num_splits_to_process_els': [None],  # if None, all splits are processed
                        'upsample_factor_grid': 4,  # avoid smearing when merging patches
                        'max_deviation_rigid': 3,  # max patch deviation from rigid shift
                        }

        fname = params_movie['fname']
        niter_rig = params_movie['niter_rig']
        max_shifts = params_movie['max_shifts']
        splits_rig = params_movie['splits_rig']
        num_splits_to_process_rig = params_movie['num_splits_to_process_rig']
        strides = params_movie['strides']
        overlaps = params_movie['overlaps']
        splits_els = params_movie['splits_els']
        num_splits_to_process_els = params_movie['num_splits_to_process_els']
        upsample_factor_grid = params_movie['upsample_factor_grid']
        max_deviation_rigid = params_movie['max_deviation_rigid']

        # --- RIGID MOTION CORRECTION --- #
        # Load the original movie
        m_orig = tifffile.memmap(fname)
        min_mov = np.min(m_orig)  # movie must be mostly positive for this to work
        offset_mov = -min_mov

        mc = MotionCorrect(fname, min_mov, dview=dview, max_shifts=max_shifts,
                           niter_rig=niter_rig, splits_rig=splits_rig,
                           num_splits_to_process_rig=num_splits_to_process_rig,
                           strides=strides, overlaps=overlaps,
                           splits_els=splits_els,
                           num_splits_to_process_els=num_splits_to_process_els,
                           upsample_factor_grid=upsample_factor_grid,
                           max_deviation_rigid=max_deviation_rigid,
                           shifts_opencv=True, nonneg_movie=True,
                           border_nan='min')
        mc.motion_correct_rigid(save_movie=False)

        # --- ELASTIC MOTION CORRECTION --- #
        mc.motion_correct_pwrigid(save_movie=True,
                                  template=mc.total_template_rig,
                                  show_template=False)

        # Border width produced by the elastic shifts.
        # FIX: np.int was removed from NumPy (deprecated 1.20, removed 1.24);
        # the builtin int is the documented replacement.
        bord_px_els = np.ceil(np.maximum(np.max(np.abs(mc.x_shifts_els)),
                                         np.max(np.abs(mc.y_shifts_els)))).astype(int)

        fnames = mc.fname_tot_els  # name of the pw-rigidly corrected file
        border_to_0 = bord_px_els  # number of pixels to exclude
        fname_new = cm.save_memmap(fnames, base_name='memmap_z_{}'.format(z),
                                   order='C', border_to_0=bord_px_els)  # exclude borders

        # now load the file
        Yr, dims, T = cm.load_memmap(fname_new)
        d1, d2 = dims
        images = np.reshape(Yr.T, [T] + list(dims), order='F')

        mc_borders[z] = bord_px_els
        # Shift to non-negative before converting back to uint16.
        mc_video[:, z, :, :] = (images - np.amin(images)).astype(np.uint16)

        del m_orig
        os.remove(video_path)

        try:
            os.remove(mc.fname_tot_rig)
            os.remove(mc.fname_tot_els)
        # fname_tot_* may be a list (TypeError) or the file may be gone
        # (OSError); either way removal is best-effort here.
        except (OSError, TypeError):
            pass

        counter += 1

    if use_multiprocessing:
        if backend == 'multiprocessing':
            dview.close()
        else:
            try:
                dview.terminate()
            except Exception:
                dview.shutdown()
        cm.stop_server()

    # Clean up intermediate memmaps and CaImAn log files.
    mmap_files = glob.glob(os.path.join(directory, '*.mmap'))
    for mmap_file in mmap_files:
        try:
            os.remove(mmap_file)
        except OSError:
            pass

    log_files = glob.glob('Yr*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)

    return mc_video, new_video_path, mc_borders
#%%% MOTION CORRECTION
# first we create a motion correction object with the parameters specified
min_mov = cm.load(fname[0], subindices=range(200)).min()
# this will be subtracted from the movie to make it non-negative

mc = MotionCorrect(fname[0], min_mov, dview=dview, max_shifts=max_shifts,
                   niter_rig=niter_rig, splits_rig=splits_rig,
                   strides=strides, overlaps=overlaps, splits_els=splits_els,
                   upsample_factor_grid=upsample_factor_grid,
                   max_deviation_rigid=max_deviation_rigid,
                   shifts_opencv=True, nonneg_movie=True)
# note that the file is not loaded in memory

#%% Run piecewise-rigid motion correction using NoRMCorre
mc.motion_correct_rigid(save_movie=True)

#%%
m_els = cm.load(mc.fname_tot_rig)
# FIX: np.int was removed from NumPy (deprecated 1.20, removed 1.24);
# the builtin int is the documented replacement.
bord_px_els = np.max(np.ceil(np.abs(mc.shifts_rig))).astype(int)
# maximum shift to be used for trimming against NaNs

#%% compare with original movie
cm.concatenate([m_orig.resize(1, 1, downsample_ratio) + offset_mov,
                m_els.resize(1, 1, downsample_ratio)],
               axis=2).play(fr=60, gain=1, magnification=1, offset=0)  # press q to exit

#%% MEMORY MAPPING
# memory map the file in order 'C'
fnames = mc.fname_tot_rig  # name of the rigidly corrected file
border_to_0 = bord_px_els  # number of pixels to exclude
fname_new = cm.save_memmap(fnames, base_name='memmap_', order='C',
                           border_to_0=border_to_0)  # exclude borders
def test_general():
    """General test of the pipeline with comparison against ground truth.

    A shorter version of the demo pipeline: rigid motion correction, CNMF
    on patches, CNMF on the full frame; each stage is compared against
    stored ground truth and the test asserts nothing diverged.

    Raises:
        Exception: when the movie is too negative or contains NaNs after
            border removal, or when dendritic settings are inconsistent.
        AssertionError: when any compared stage differs from ground truth.
    """
    global params_movie
    global params_diplay
    fname = params_movie['fname']
    niter_rig = params_movie['niter_rig']
    max_shifts = params_movie['max_shifts']
    splits_rig = params_movie['splits_rig']
    num_splits_to_process_rig = params_movie['num_splits_to_process_rig']
    cwd = os.getcwd()
    fname = download_demo(fname[0])
    m_orig = cm.load(fname)
    min_mov = m_orig[:400].min()
    comp = comparison.Comparison()
    comp.dims = np.shape(m_orig)[1:]

    # ################ RIG CORRECTION #################
    t1 = time.time()
    mc = MotionCorrect(fname, min_mov, max_shifts=max_shifts,
                       niter_rig=niter_rig, splits_rig=splits_rig,
                       num_splits_to_process_rig=num_splits_to_process_rig,
                       shifts_opencv=True, nonneg_movie=True)
    mc.motion_correct_rigid(save_movie=True)
    m_rig = cm.load(mc.fname_tot_rig)
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented replacement.
    bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(int)
    comp.comparison['rig_shifts']['timer'] = time.time() - t1
    comp.comparison['rig_shifts']['ourdata'] = mc.shifts_rig
    # ###########################################

    if 'max_shifts' not in params_movie:
        fnames = params_movie['fname']
        border_to_0 = 0
    else:  # elif not params_movie.has_key('overlaps'):
        fnames = mc.fname_tot_rig
        border_to_0 = bord_px_rig
        m_els = m_rig

    idx_xy = None
    add_to_movie = -np.nanmin(m_els) + 1  # movie must be positive
    remove_init = 0
    downsample_factor = 1
    base_name = fname[0].split('/')[-1][:-4]
    name_new = cm.save_memmap_each(fnames, base_name=base_name, resize_fact=(
        1, 1, downsample_factor), remove_init=remove_init,
        idx_xy=idx_xy, add_to_movie=add_to_movie, border_to_0=border_to_0)
    name_new.sort()

    if len(name_new) > 1:
        fname_new = cm.save_memmap_join(
            name_new, base_name='Yr',
            n_chunks=params_movie['n_chunks'], dview=None)
    else:
        print('One file only, not saving!')
        fname_new = name_new[0]

    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    Y = np.reshape(Yr, dims + (T,), order='F')

    if np.min(images) < 0:
        # TODO: should do this in an automatic fashion with a while loop at the 367 line
        raise Exception('Movie too negative, add_to_movie should be larger')
    if np.sum(np.isnan(images)) > 0:
        # TODO: same here
        raise Exception(
            'Movie contains nan! You did not remove enough borders')

    Cn = cm.local_correlations(Y)
    Cn[np.isnan(Cn)] = 0

    p = params_movie['p']
    merge_thresh = params_movie['merge_thresh']
    rf = params_movie['rf']
    stride_cnmf = params_movie['stride_cnmf']
    K = params_movie['K']
    init_method = params_movie['init_method']
    gSig = params_movie['gSig']
    alpha_snmf = params_movie['alpha_snmf']

    if params_movie['is_dendrites'] == True:
        # FIX: `is not` on a string literal compares object identity, which
        # is implementation-dependent (SyntaxWarning on modern CPython);
        # use value comparison instead.
        if params_movie['init_method'] != 'sparse_nmf':
            raise Exception('dendritic requires sparse_nmf')
        if params_movie['alpha_snmf'] is None:
            raise Exception('need to set a value for alpha_snmf')

    # ################ CNMF PART PATCH #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1, k=K, gSig=gSig,
                    merge_thresh=params_movie['merge_thresh'],
                    p=params_movie['p'], dview=None, rf=rf,
                    stride=stride_cnmf,
                    memory_fact=params_movie['memory_fact'],
                    method_init=init_method, alpha_snmf=alpha_snmf,
                    only_init_patch=params_movie['only_init_patch'],
                    gnb=params_movie['gnb'], method_deconvolution='oasis')
    comp.cnmpatch = copy.copy(cnm)
    cnm = cnm.fit(images)
    A_tot = cnm.A
    C_tot = cnm.C
    YrA_tot = cnm.YrA
    b_tot = cnm.b
    f_tot = cnm.f

    # DISCARDING
    print(('Number of components:' + str(A_tot.shape[-1])))
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_patch']
    # threshold on time variability
    # NOTE(review): fitness_min reads the *delta* key; looks like it should
    # read 'fitness_min_patch' — confirm against params_movie before changing.
    fitness_min = params_movie['fitness_delta_min_patch']
    fitness_delta_min = params_movie['fitness_delta_min_patch']
    Npeaks = params_movie['Npeaks']
    traces = C_tot + YrA_tot
    idx_components, idx_components_bad = estimate_components_quality(
        traces, Y, A_tot, C_tot, b_tot, f_tot, final_frate=final_frate,
        Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min)
    # ######
    A_tot = A_tot.tocsc()[:, idx_components]
    C_tot = C_tot[idx_components]
    comp.comparison['cnmf_on_patch']['timer'] = time.time() - t1
    comp.comparison['cnmf_on_patch']['ourdata'] = [A_tot.copy(), C_tot.copy()]
    # ################### ########################

    # ################ CNMF PART FULL #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1, k=A_tot.shape, gSig=gSig,
                    merge_thresh=merge_thresh, p=p, Ain=A_tot, Cin=C_tot,
                    f_in=f_tot, rf=None, stride=None,
                    method_deconvolution='oasis')
    cnm = cnm.fit(images)
    # DISCARDING
    A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_full']
    # threshold on time variability
    fitness_min = params_movie['fitness_delta_min_full']
    fitness_delta_min = params_movie['fitness_delta_min_full']
    Npeaks = params_movie['Npeaks']
    traces = C + YrA
    idx_components, idx_components_bad, fitness_raw, fitness_delta, r_values = estimate_components_quality(
        traces, Y, A, C, b, f, final_frate=final_frate, Npeaks=Npeaks,
        r_values_min=r_values_min, fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min, return_all=True)
    # ########
    A_tot_full = A_tot.tocsc()[:, idx_components]
    C_tot_full = C_tot[idx_components]
    comp.comparison['cnmf_full_frame']['timer'] = time.time() - t1
    comp.comparison['cnmf_full_frame']['ourdata'] = [
        A_tot_full.copy(), C_tot_full.copy()]
    # ################### ########################

    comp.save_with_compare(istruth=False, params=params_movie, Cn=Cn)
    log_files = glob.glob('*_LOG_*')
    try:
        for log_file in log_files:
            os.remove(log_file)
    # FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # file removal only reasonably fails with OSError.
    except OSError:
        print('Cannot remove log files')

    # ############ assertions ##################
    pb = False
    if (comp.information['differences']['params_movie']):
        print("you need to set the same movie parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)")
        pb = True
    if (comp.information['differences']['params_cnm']):
        print("you need to set the same cnmf parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)")
        pb = True
    if (comp.information['diff']['rig']['isdifferent']):
        print("the rigid shifts are different from the groundtruth ")
        pb = True
    if (comp.information['diff']['cnmpatch']['isdifferent']):
        print("the cnmf on patch produces different results than the groundtruth ")
        pb = True
    if (comp.information['diff']['cnmfull']['isdifferent']):
        print("the cnmf full frame produces different results than the groundtruth ")
        pb = True
    assert (not pb)
# Rigidly motion-correct every movie in `nms` against the shared template
# and collect per-file shifts, templates, and corrected-file names.
# NOTE(review): `nms`, `min_mov`, `templ`, `mc_list`, `fl` and the MC
# parameters are defined earlier in the file (outside this excerpt) — confirm.
mc_templs_part = []
mc_templs = []
mc_fnames = []
for each_file in nms:
    #TODO: needinfo how the classes works
    mc = MotionCorrect(
        each_file, min_mov, dview=dview, max_shifts=max_shifts,
        niter_rig=niter_rig, splits_rig=splits_rig,
        num_splits_to_process_rig=num_splits_to_process_rig,
        shifts_opencv=True, nonneg_movie=True)
    # Correct against the provided template so all files share one alignment.
    mc.motion_correct_rigid(template=templ, save_movie=True)
    new_templ = mc.total_template_rig
    #TODO : needinfo
    # Quick visual check of the updated template.
    pl.imshow(new_templ, cmap='gray')
    pl.pause(.1)
    # Accumulate results across files.
    mc_list += mc.shifts_rig
    mc_templs_part += mc.templates_rig
    mc_templs += [mc.total_template_rig]
    mc_fnames += mc.fname_tot_rig
# Persist all motion-correction results in one archive.
np.savez(os.path.join(fl, 'images/mot_corr_res.npz'), mc_list=mc_list,
         mc_templs_part=mc_templs_part, mc_fnames=mc_fnames,
         mc_templs=mc_templs)
def run_motion_correction(cropping_file, dview):
    """Motion-correct a cropped .tif and save the result as a .mmap file.

    Reads the analysis parameters for `cropping_file` from the `Analysis` SQL
    table, bumps the motion-correction version, runs rigid (and optionally
    piecewise-rigid) motion correction with CaImAn, crops the corrected movie
    by the maximal shifts, saves it under DATA_DIR_LOCAL, and writes a metrics
    pickle. Only runnable where parallel processing (dview) is available.

    Args:
        cropping_file: path (relative to DATA_DIR_LOCAL) of the cropped tif.
        dview: ipyparallel/multiprocessing view used by CaImAn.

    Returns:
        (fname_tot_els, version): path of the saved pw-rigid .mmap and the new
        motion-correction version. fname_tot_els is None when pw_rigid is
        disabled (previously this raised NameError at the return).
    """
    # Get output file paths
    data_dir = os.environ['DATA_DIR_LOCAL'] + 'data/interim/motion_correction/'
    sql = "SELECT mouse,session,trial,is_rest,decoding_v,cropping_v,motion_correction_v,input,home_path,decoding_main FROM Analysis WHERE cropping_main=? ORDER BY motion_correction_v"
    val = [cropping_file, ]
    cursor.execute(sql, val)
    result = cursor.fetchall()
    data = []
    inter = []
    # Flatten the last matching row into `data` (ORDER BY keeps highest version last).
    for x in result:
        inter = x
    for y in inter:
        data.append(y)

    # Update the database: create/advance the motion-correction version and
    # register the metrics pickle path for this run.
    if data[6] == 0:
        data[6] = 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}"
        output_meta_pkl_file_path = f'meta/metrics/{file_name}.pkl'
        sql1 = "UPDATE Analysis SET motion_correction_meta=?,motion_correction_v=? WHERE cropping_main=? "
        val1 = [output_meta_pkl_file_path, data[6], cropping_file]
        cursor.execute(sql1, val1)
    else:
        data[6] += 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}"
        output_meta_pkl_file_path = f'meta/metrics/{file_name}.pkl'
        sql2 = "INSERT INTO Analysis (motion_correction_meta,motion_correction_v) VALUES (?,?)"
        val2 = [output_meta_pkl_file_path, data[6]]
        cursor.execute(sql2, val2)
        database.commit()
    # Copy the upstream (decoding/cropping) identifiers onto the new row.
    sql3 = "UPDATE Analysis SET decoding_main=?,decoding_v=?,mouse=?,session=?,trial=?,is_rest=?,input=?,home_path=?,cropping_v=?,cropping_main=? WHERE motion_correction_meta=? AND motion_correction_v=?"
    val3 = [data[9], data[4], data[0], data[1], data[2], data[3], data[7],
            data[8], data[5], cropping_file, output_meta_pkl_file_path, data[6]]
    cursor.execute(sql3, val3)
    database.commit()
    output_meta_pkl_file_path_full = data_dir + output_meta_pkl_file_path

    # Calculate movie minimum to subtract from movie (keeps it non-negative).
    cropping_file_full = os.environ['DATA_DIR_LOCAL'] + cropping_file
    min_mov = np.min(cm.load(cropping_file_full))

    # Fetch the motion-correction parameters stored for this cropping file.
    sql5 = "SELECT motion_correct,pw_rigid,save_movie_rig,gSig_filt,max_shifts,niter_rig,strides,overlaps,upsample_factor_grid,num_frames_split,max_deviation_rigid,shifts_opencv,use_conda,nonneg_movie, border_nan FROM Analysis WHERE cropping_main=? "
    val5 = [cropping_file, ]
    cursor.execute(sql5, val5)
    myresult = cursor.fetchall()
    para = []
    aux = []
    for x in myresult:
        aux = x
    for y in aux:
        para.append(y)
    parameters = {'motion_correct': para[0], 'pw_rigid': para[1],
                  'save_movie_rig': para[2],
                  'gSig_filt': (para[3], para[3]),
                  'max_shifts': (para[4], para[4]),
                  'niter_rig': para[5],
                  'strides': (para[6], para[6]),
                  'overlaps': (para[7], para[7]),
                  'upsample_factor_grid': para[8],
                  'num_frames_split': para[9],
                  'max_deviation_rigid': para[10],
                  'shifts_opencv': para[11],
                  'use_cuda': para[12],
                  'nonneg_movie': para[13],
                  'border_nan': para[14]}
    caiman_parameters = parameters.copy()
    caiman_parameters['min_mov'] = min_mov
    opts = params.CNMFParams(params_dict=caiman_parameters)

    # Rigid motion correction (performed in both cases).
    logging.info('Performing rigid motion correction')
    t0 = datetime.datetime.today()
    # Create a MotionCorrect object
    mc = MotionCorrect([cropping_file_full], dview=dview,
                       **opts.get_group('motion'))
    # Perform rigid motion correction
    mc.motion_correct_rigid(save_movie=parameters['save_movie_rig'],
                            template=None)
    dt = int((datetime.datetime.today() - t0).seconds / 60)  # timedelta in minutes
    logging.info(f' Rigid motion correction finished. dt = {dt} min')

    # Obtain template, rigid shifts and border pixels
    total_template_rig = mc.total_template_rig
    shifts_rig = mc.shifts_rig

    # Save template, rigid shifts and border pixels in a dictionary
    meta_pkl_dict = {
        'rigid': {
            'template': total_template_rig,
            'shifts': shifts_rig,
        }
    }
    sql = "UPDATE Analysis SET duration_rigid=? WHERE motion_correction_meta=? AND motion_correction_v=? "
    val = [dt, output_meta_pkl_file_path, data[6]]
    cursor.execute(sql, val)

    if parameters['save_movie_rig'] == 1:
        # Load the movie saved by CaImAn, which is in the wrong
        # directory and is not yet cropped
        logging.info(f' Loading rigid movie for cropping')
        m_rig = cm.load(mc.fname_tot_rig[0])
        logging.info(f' Loaded rigid movie for cropping')
        # Get the cropping points determined by the maximal rigid shifts
        x_, _x, y_, _y = get_crop_from_rigid_shifts(shifts_rig)
        logging.info(
            f' Cropping and saving rigid movie with cropping points: [x_, _x, y_, _y] = {[x_, _x, y_, _y]}'
        )
        m_rig = m_rig.crop(x_, _x, y_, _y, 0, 0)
        meta_pkl_dict['rigid']['cropping_points'] = [x_, _x, y_, _y]
        sql = "UPDATE Analysis SET motion_correction_cropping_points_x1=?,motion_correction_cropping_points_x2=?,motion_correction_cropping_points_y1=?,motion_correction_cropping_points_y2=? WHERE motion_correction_meta=? AND motion_correction_v=? "
        val = [x_, _x, y_, _y, output_meta_pkl_file_path, data[6]]
        cursor.execute(sql, val)
        # Save the movie: the rigid result is the main output only when
        # pw-rigid correction is disabled.
        rig_role = 'alternate' if parameters['pw_rigid'] else 'main'
        fname_tot_rig = m_rig.save(data_dir + rig_role + '/' + file_name +
                                   '_rig' + '.mmap', order='C')
        logging.info(f' Cropped and saved rigid movie as {fname_tot_rig}')
        # Remove the remaining non-cropped movie
        os.remove(mc.fname_tot_rig[0])
        sql = "UPDATE Analysis SET motion_correction_rig_role=? WHERE motion_correction_meta=? AND motion_correction_v=? "
        val = [fname_tot_rig, output_meta_pkl_file_path, data[6]]
        cursor.execute(sql, val)
        database.commit()

    # BUGFIX: previously `fname_tot_els` was only bound inside the pw_rigid
    # branch, so the final `return` raised NameError whenever pw_rigid != 1.
    fname_tot_els = None

    # If specified in the parameters, apply piecewise-rigid motion correction
    if parameters['pw_rigid'] == 1:
        logging.info(f' Performing piecewise-rigid motion correction')
        t0 = datetime.datetime.today()
        # Use the rigid result as a template.
        mc.motion_correct_pwrigid(save_movie=True, template=total_template_rig)
        total_template_els = mc.total_template_els
        fname_tot_els = mc.fname_tot_els[0]
        dt = int((datetime.datetime.today() - t0).seconds / 60)  # timedelta in minutes
        meta_pkl_dict['pw_rigid'] = {
            'template': total_template_els,
            'x_shifts': mc.x_shifts_els,
            'y_shifts': mc.y_shifts_els  # removed them initially because they take up space probably
        }
        logging.info(
            f' Piecewise-rigid motion correction finished. dt = {dt} min')
        # Load the movie saved by CaImAn, which is in the wrong
        # directory and is not yet cropped
        logging.info(f' Loading pw-rigid movie for cropping')
        m_els = cm.load(fname_tot_els)
        logging.info(f' Loaded pw-rigid movie for cropping')
        # Get the cropping points determined by the maximal shifts
        x_, _x, y_, _y = get_crop_from_pw_rigid_shifts(
            np.array(mc.x_shifts_els), np.array(mc.y_shifts_els))
        logging.info(
            f' Cropping and saving pw-rigid movie with cropping points: [x_, _x, y_, _y] = {[x_, _x, y_, _y]}'
        )
        m_els = m_els.crop(x_, _x, y_, _y, 0, 0)
        meta_pkl_dict['pw_rigid']['cropping_points'] = [x_, _x, y_, _y]
        # Save the movie
        fname_tot_els = m_els.save(data_dir + 'main/' + file_name + '_els' +
                                   '.mmap', order='C')
        logging.info(f'Cropped and saved rigid movie as {fname_tot_els}')
        # Remove the remaining non-cropped movie
        os.remove(mc.fname_tot_els[0])
        sql = "UPDATE Analysis SET motion_correction_main=?, motion_correction_cropping_points_x1=?,motion_correction_cropping_points_x2=?,motion_correction_cropping_points_y1=?,motion_correction_cropping_points_y2=?,duration_pw_rigid=? WHERE motion_correction_meta=? AND motion_correction_v=? "
        val = [fname_tot_els, x_, _x, y_, _y, dt, output_meta_pkl_file_path,
               data[6]]
        cursor.execute(sql, val)
        database.commit()

    # Write meta results dictionary to the pkl file (with-statement so the
    # handle is closed even if pickling fails).
    with open(output_meta_pkl_file_path_full, 'wb') as pkl_file:
        pickle.dump(meta_pkl_dict, pkl_file)

    return fname_tot_els, data[6]
def main(index, row, parameters, dview):
    '''
    Motion-correct the cropped .tif of one analysis state and save the result
    as a .mmap file. Only runnable on the cn76 server because it requires
    parallel processing.

    Args:
        index: tuple
            The index of the analysis state to be motion corrected.
        row: pd.DataFrame object
            The row corresponding to the analysis state to be motion corrected.
        parameters: dict
            Motion-correction parameters (pw_rigid, save_movie_rig, ...);
            mutated in place (save_movie_rig is forced True when pw_rigid is off).
        dview: cluster view used by CaImAn for parallel processing.

    Returns:
        index: tuple
            The index of the motion corrected analysis state.
        row: pd.DataFrame object
            The row corresponding to the motion corrected analysis state.
    '''
    # Forcing parameters: without pw-rigid, the rigid movie is the main output
    # and therefore must be saved.
    if not parameters['pw_rigid']:
        parameters['save_movie_rig'] = True

    # Get input file
    # NOTE(review): eval() on a string stored in the dataframe — safe only if
    # rows are trusted; consider ast.literal_eval.
    input_tif_file_path = eval(row.loc['cropping_output'])['main']
    if not os.path.isfile(input_tif_file_path):
        # Fall back to the conventional path for the cropping step.
        input_tif_file_path = src.pipeline.get_expected_file_path(
            'cropping', index, 'main/', 'tif')
    if not os.path.isfile(input_tif_file_path):
        logging.error('Cropping file not found. Cancelling motion correction.')
        return index, row

    # Get output file paths
    data_dir = 'data/interim/motion_correction/'
    # NOTE(review): `step_index` is not defined in this function — presumably a
    # module-level constant for the motion-correction step; confirm.
    file_name = src.pipeline.create_file_name(step_index, index)
    output_meta_pkl_file_path = data_dir + f'meta/metrics/{file_name}.pkl'

    # Create a dictionary with the output
    output = {
        'meta': {
            'analysis': {
                'analyst': os.environ['ANALYST'],
                'date': datetime.datetime.today().strftime("%m-%d-%Y"),
                'time': datetime.datetime.today().strftime("%H:%M:%S")
            },
            'metrics': {
                'other': output_meta_pkl_file_path
            }
        }
    }
    row.loc['motion_correction_parameters'] = str(parameters)

    # Calculate movie minimum to subtract from movie (keeps it non-negative).
    min_mov = np.min(cm.load(input_tif_file_path))

    # Apply the parameters to the CaImAn algorithm
    caiman_parameters = parameters.copy()
    caiman_parameters['min_mov'] = min_mov
    opts = params.CNMFParams(params_dict=caiman_parameters)

    # Rigid motion correction (in both cases)
    logging.info(f'{index} Performing rigid motion correction')
    t0 = datetime.datetime.today()
    # Create a MotionCorrect object
    mc = MotionCorrect([input_tif_file_path], dview=dview,
                       **opts.get_group('motion'))
    # Perform rigid motion correction
    mc.motion_correct_rigid(save_movie=parameters['save_movie_rig'],
                            template=None)
    dt = int((datetime.datetime.today() - t0).seconds / 60)  # timedelta in minutes
    logging.info(f'{index} Rigid motion correction finished. dt = {dt} min')

    # Obtain template, rigid shifts and border pixels
    total_template_rig = mc.total_template_rig
    shifts_rig = mc.shifts_rig

    # Save template, rigid shifts and border pixels in a dictionary
    meta_pkl_dict = {
        'rigid': {
            'template': total_template_rig,
            'shifts': shifts_rig,
        }
    }
    output['meta']['duration'] = {'rigid': dt}

    if parameters['save_movie_rig']:
        # Load the movie saved by CaImAn, which is in the wrong
        # directory and is not yet cropped
        logging.info(f'{index} Loading rigid movie for cropping')
        m_rig = cm.load(mc.fname_tot_rig[0])
        logging.info(f'{index} Loaded rigid movie for cropping')
        # Get the cropping points determined by the maximal rigid shifts
        x_, _x, y_, _y = get_crop_from_rigid_shifts(shifts_rig)
        # Crop the movie
        logging.info(
            f'{index} Cropping and saving rigid movie with cropping points: [x_, _x, y_, _y] = {[x_, _x, y_, _y]}'
        )
        m_rig = m_rig.crop(x_, _x, y_, _y, 0, 0)
        meta_pkl_dict['rigid']['cropping_points'] = [x_, _x, y_, _y]
        # Save the movie; rigid output is 'main' only when pw-rigid is off.
        rig_role = 'alternate' if parameters['pw_rigid'] else 'main'
        fname_tot_rig = m_rig.save(data_dir + rig_role + '/' + file_name +
                                   '_rig' + '.mmap', order='C')
        logging.info(
            f'{index} Cropped and saved rigid movie as {fname_tot_rig}')
        # Store the total path in output
        output[rig_role] = fname_tot_rig
        # Remove the remaining non-cropped movie
        os.remove(mc.fname_tot_rig[0])

    # If specified in the parameters, apply piecewise-rigid motion correction
    if parameters['pw_rigid']:
        logging.info(f'{index} Performing piecewise-rigid motion correction')
        t0 = datetime.datetime.today()
        # Perform non-rigid (piecewise rigid) motion correction.
        # Use the rigid result as a template.
        mc.motion_correct_pwrigid(save_movie=True, template=total_template_rig)
        # Obtain template and filename
        total_template_els = mc.total_template_els
        fname_tot_els = mc.fname_tot_els[0]
        dt = int((datetime.datetime.today() - t0).seconds / 60)  # timedelta in minutes
        meta_pkl_dict['pw_rigid'] = {
            'template': total_template_els,
            'x_shifts': mc.x_shifts_els,
            'y_shifts': mc.y_shifts_els  # removed them initially because they take up space probably
        }
        output['meta']['duration']['pw_rigid'] = dt
        logging.info(
            f'{index} Piecewise-rigid motion correction finished. dt = {dt} min'
        )
        # Load the movie saved by CaImAn, which is in the wrong
        # directory and is not yet cropped
        logging.info(f'{index} Loading pw-rigid movie for cropping')
        m_els = cm.load(fname_tot_els)
        logging.info(f'{index} Loaded pw-rigid movie for cropping')
        # Get the cropping points determined by the maximal shifts
        x_, _x, y_, _y = get_crop_from_pw_rigid_shifts(
            np.array(mc.x_shifts_els), np.array(mc.y_shifts_els))
        # Crop the movie
        logging.info(
            f'{index} Cropping and saving pw-rigid movie with cropping points: [x_, _x, y_, _y] = {[x_, _x, y_, _y]}'
        )
        m_els = m_els.crop(x_, _x, y_, _y, 0, 0)
        meta_pkl_dict['pw_rigid']['cropping_points'] = [x_, _x, y_, _y]
        # Save the movie
        fname_tot_els = m_els.save(data_dir + 'main/' + file_name + '_els' +
                                   '.mmap', order='C')
        logging.info(
            f'{index} Cropped and saved rigid movie as {fname_tot_els}')
        # Remove the remaining non-cropped movie
        os.remove(mc.fname_tot_els[0])
        # Store the total path in output
        output['main'] = fname_tot_els

    # Write meta results dictionary to the pkl file
    pkl_file = open(output_meta_pkl_file_path, 'wb')
    pickle.dump(meta_pkl_dict, pkl_file)
    pkl_file.close()

    # Write necessary variables to the trial index and row
    row.loc['motion_correction_output'] = str(output)
    row.loc['motion_correction_parameters'] = str(parameters)

    # Compute the basic metrics 'crispness'
    get_metrics(index, row, crispness=True)

    # Create source extraction images in advance:
    logging.info(f'{index} Creating corr and pnr images in advance')
    index, row = src.steps.source_extraction.get_corr_pnr(index, row)
    logging.info(f'{index} Created corr and pnr images')

    return index, row
# --- Batch rigid motion correction with template preview -----------------------
# Show the starting template with a robust (5th-95th percentile) display range.
vmin, vmax = np.percentile(templ, 5), np.percentile(templ, 95)
pl.imshow(templ, vmin=vmin, vmax=vmax)
# Minimum of the raw movie, subtracted inside MotionCorrect to keep it positive.
min_mov = np.nanmin(mov_tmp)
mc_list = []
mc_templs_part = []
mc_templs = []
mc_fnames = []
for each_file in nms:
    # TODO: needinfo how the classes works
    mc = MotionCorrect(each_file, min_mov, dview=dview, max_shifts=max_shifts,
                       niter_rig=niter_rig, splits_rig=splits_rig,
                       num_splits_to_process_rig=num_splits_to_process_rig,
                       shifts_opencv=True, nonneg_movie=True)
    mc.motion_correct_rigid(template=templ, save_movie=True)
    new_templ = mc.total_template_rig
    # TODO : needinfo — visual check of the per-file refined template
    pl.imshow(new_templ, cmap='gray')
    pl.pause(.1)
    # Accumulate per-file shifts, partial templates and output filenames.
    mc_list += mc.shifts_rig
    mc_templs_part += mc.templates_rig
    mc_templs += [mc.total_template_rig]
    mc_fnames += mc.fname_tot_rig
np.savez(os.path.join(fl, 'images/mot_corr_res.npz'), mc_list=mc_list,
         mc_templs_part=mc_templs_part, mc_fnames=mc_fnames,
         mc_templs=mc_templs)
print([os.path.split(nm)[-1] for nm in nms])
# File sizes in GB, truncated to 2 decimals.
# BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; use the
# builtin int, which is what the alias always resolved to.
print([int(os.path.getsize(nm) / 1e+9 * 100) / 100. for nm in nms])
def main():
    """Demo pipeline: motion correction + CNMF source extraction on a demo movie.

    Downloads the demo dataset if needed, runs (pw-)rigid motion correction,
    memory-maps the corrected movie, extracts components with CNMF on patches,
    evaluates and refines them, and cleans up the cluster and log files.
    All side effects are files on disk and (optional) plots.
    """
    pass  # For compatibility between running under Spyder and the CLI

    # %% First setup some parameters for data and motion correction
    # dataset dependent parameters
    fname = ['Sue_2x_3000_40_-46.tif']  # filename to be processed
    fr = 30                      # imaging rate in frames per second
    decay_time = 0.4             # length of a typical transient in seconds
    dxy = (2., 2.)               # spatial resolution in x and y in (um per pixel)
    max_shift_um = (12., 12.)    # maximum shift in um
    patch_motion_um = (100., 100.)  # patch size for non-rigid motion correction in um

    # motion correction parameters
    pwrigid_motion_correct = True  # flag to select rigid vs pw_rigid motion correction
    # maximum allowed rigid shift in pixels
    max_shifts = tuple([int(a / b) for a, b in zip(max_shift_um, dxy)])
    # for parallelization split the movies in num_splits chunks across time
    splits_rig = 56
    # start a new patch for pw-rigid motion correction every x pixels
    strides = tuple([int(a / b) for a, b in zip(patch_motion_um, dxy)])
    # overlap between patches (size of patch strides+overlaps)
    overlaps = (24, 24)
    # for parallelization split the movies in num_splits chunks across time
    splits_els = 56
    upsample_factor_grid = 4  # upsample factor to avoid smearing when merging patches
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3

    # %% download the dataset if it's not present in your folder
    if fname[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']:
        fname = [download_demo(fname[0])]

    # %% play the movie (requires loading it in memory; press q to close)
    display_images = False
    if display_images:
        m_orig = cm.load_movie_chain(fname)
        downsample_ratio = 0.2
        moviehandle = m_orig.resize(1, 1, downsample_ratio)
        moviehandle.play(q_max=99.5, fr=60, magnification=2)

    # %% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)

    # %%% MOTION CORRECTION
    # first we create a motion correction object with the parameters specified
    # this will be subtracted from the movie to make it non-negative
    min_mov = cm.load(fname[0], subindices=range(200)).min()
    mc = MotionCorrect(fname, min_mov, dview=dview, max_shifts=max_shifts,
                       splits_rig=splits_rig, strides=strides,
                       overlaps=overlaps, splits_els=splits_els,
                       border_nan='copy',
                       upsample_factor_grid=upsample_factor_grid,
                       max_deviation_rigid=max_deviation_rigid,
                       shifts_opencv=True, nonneg_movie=True)
    # note that the file is not loaded in memory

    # %% Run piecewise-rigid motion correction using NoRMCorre
    # BUGFIX (both branches): np.int was deprecated in NumPy 1.20 and removed
    # in 1.24; astype(int) is the supported equivalent.
    if pwrigid_motion_correct:
        mc.motion_correct_pwrigid(save_movie=True)
        m_els = cm.load(mc.fname_tot_els)
        bord_px_els = np.ceil(
            np.maximum(np.max(np.abs(mc.x_shifts_els)),
                       np.max(np.abs(mc.y_shifts_els)))).astype(int)
        fnames = mc.fname_tot_els  # name of the pw-rigidly corrected file.
    else:
        mc.motion_correct_rigid(save_movie=True)
        m_els = cm.load(mc.fname_tot_rig)
        bord_px_els = np.ceil(np.max(np.abs(mc.shifts_rig))).astype(int)
        fnames = mc.fname_tot_rig  # name of the rigidly corrected file.
    # maximum shift to be used for trimming against NaNs

    # %% compare with original movie
    if display_images:
        downsample_ratio = 0.2
        moviehandle = cm.concatenate(
            [m_orig.resize(1, 1, downsample_ratio) - min_mov,
             m_els.resize(1, 1, downsample_ratio)], axis=2)
        moviehandle.play(fr=60, q_max=99.5, magnification=2)  # press q to exit

    # %% MEMORY MAPPING
    # memory map the file in order 'C'
    border_to_0 = bord_px_els  # exclude borders due to motion correction
    # border_to_0 = 0 if mc.border_nan is 'copy' else bord_px_els
    # you can include boundaries if you used the 'copy' option in the motion
    # correction, although be careful about the components near the boundaries
    fname_new = cm.save_memmap(fnames, base_name='memmap_', order='C',
                               border_to_0=border_to_0)  # exclude borders

    # now load the file
    Yr, dims, T = cm.load_memmap(fname_new)
    # load frames in python format (T x X x Y)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')

    # %% restart cluster to clean up memory
    cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)

    # %% parameters for source extraction and deconvolution
    p = 1                # order of the autoregressive system
    gnb = 2              # number of global background components
    merge_thresh = 0.8   # merging threshold, max correlation allowed
    rf = 15              # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
    stride_cnmf = 6      # amount of overlap between the patches in pixels
    K = 4                # number of components per patch
    gSig = [4, 4]        # expected half size of neurons
    # initialization method (if analyzing dendritic data use 'sparse_nmf')
    method_init = 'greedy_roi'

    # parameters for component evaluation
    opts = params.CNMFParams(dims=dims, fr=fr, decay_time=decay_time,
                             method_init=method_init, gSig=gSig,
                             merge_thresh=merge_thresh, p=p, gnb=gnb, k=K,
                             rf=rf, stride=stride_cnmf, rolling_sum=True)

    # %% RUN CNMF ON PATCHES
    # First extract spatial and temporal components on patches and combine them
    # for this step deconvolution is turned off (p=0)
    opts.set('temporal', {'p': 0})
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm = cnm.fit(images)

    # %% plot contours of found components
    Cn = cm.local_correlations(images.transpose(1, 2, 0))
    Cn[np.isnan(Cn)] = 0
    cnm.estimates.plot_contours(img=Cn)
    plt.title('Contour plots of found components')

    # %% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier
    min_SNR = 2.5    # signal to noise ratio for accepting a component
    rval_thr = 0.8   # space correlation threshold for accepting a component
    cnn_thr = 0.8    # threshold for CNN based classifier
    cnm.params.set('quality', {'fr': fr,
                               'decay_time': decay_time,
                               'min_SNR': min_SNR,
                               'rval_thr': rval_thr,
                               'use_cnn': True,
                               'min_cnn_thr': cnn_thr})
    cnm.estimates.evaluate_components(images, cnm.params, dview=dview)

    # %% PLOT COMPONENTS
    cnm.estimates.plot_contours(img=Cn, idx=cnm.estimates.idx_components)

    # %% VIEW TRACES (accepted and rejected)
    if display_images:
        cnm.estimates.view_components(images, img=Cn,
                                      idx=cnm.estimates.idx_components)
        cnm.estimates.view_components(images, img=Cn,
                                      idx=cnm.estimates.idx_components_bad)

    # %% RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution
    cnm.dview = None
    cnm2 = deepcopy(cnm)
    cnm2.dview = dview
    cnm2.params.set('patch', {'rf': None})
    cnm2.params.set('temporal', {'p': p})
    cnm2 = cnm2.fit(images)

    # %% Extract DF/F values
    cnm2.estimates.detrend_df_f(quantileMin=8, frames_window=250)

    # %% Show final traces
    cnm2.estimates.view_components(Yr, img=Cn)

    # %% reconstruct denoised movie (press q to exit)
    if display_images:
        cnm2.estimates.play_movie(images, q_max=99.9, gain_res=2,
                                  magnification=2, bpx=border_to_0,
                                  include_bck=True)

    # %% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
# %% RUN ANALYSIS
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)
# %%
# movie must be mostly positive for this to work: subtract the minimum of the
# first 400 frames inside MotionCorrect.
min_mov = cm.load(fname, subindices=range(400)).min()
mc = MotionCorrect(fname, min_mov, dview=dview, max_shifts=max_shifts,
                   niter_rig=niter_rig, splits_rig=splits_rig,
                   num_splits_to_process_rig=num_splits_to_process_rig,
                   strides=strides, overlaps=overlaps, splits_els=splits_els,
                   num_splits_to_process_els=num_splits_to_process_els,
                   upsample_factor_grid=upsample_factor_grid,
                   max_deviation_rigid=max_deviation_rigid,
                   shifts_opencv=True, nonneg_movie=True)
# %%
mc.motion_correct_rigid(save_movie=True)
# load motion corrected movie
# %%
pl.imshow(mc.total_template_rig, cmap='gray')
# %% visualize templates
cm.movie(np.array(mc.templates_rig)).play(
    fr=10, gain=5, magnification=2, offset=offset_mov)
# %% plot rigid shifts
pl.close()
pl.plot(mc.shifts_rig)
pl.legend(['x shifts', 'y shifts'])
pl.xlabel('frames')
pl.ylabel('pixels')
# %% inspect movie
# Border width implied by the largest rigid shift.
# BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; astype(int)
# is the supported equivalent.
# NOTE(review): np.max is applied without np.abs, so large negative shifts do
# not widen the border — confirm whether np.abs was intended here.
bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(int)
# TODO : document
# setting timer to see how the change in functions makes the code react on a
# same computer.
min_mov = cm.load(fname[0], subindices=range(400)).min()
mc_list = []
new_templ = None
for each_file in fname:
    # TODO: needinfo how the classes works
    mc = MotionCorrect(each_file, min_mov, dview=dview, max_shifts=max_shifts,
                       niter_rig=niter_rig, splits_rig=splits_rig,
                       num_splits_to_process_rig=num_splits_to_process_rig,
                       strides=strides, overlaps=overlaps,
                       splits_els=splits_els,
                       num_splits_to_process_els=num_splits_to_process_els,
                       upsample_factor_grid=upsample_factor_grid,
                       max_deviation_rigid=max_deviation_rigid,
                       shifts_opencv=True, nonneg_movie=True)
    # Chain the template: each file is corrected against the previous result.
    mc.motion_correct_rigid(save_movie=True, template=new_templ)
    new_templ = mc.total_template_rig
    # NOTE(review): this in-loop load is overwritten after the loop and its
    # result is unused per-iteration — confirm whether it can be dropped.
    m_rig = cm.load(mc.fname_tot_rig)
    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; use
    # astype(int) instead.
    bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(int)
    # TODO : needinfo
    pl.imshow(new_templ, cmap='gray')
    pl.pause(.1)
    mc_list.append(mc)
# we are going to keep this part because it helps the user understand what we need.
# needhelp why it is not the same as in the notebooks ?
# TODO: show screenshot 2,3
# %%
# load motion corrected movie
m_rig = cm.load(mc.fname_tot_rig)
def _estimate(self, dataset):
    """Estimate per-frame displacements for each sequence in `dataset`.

    Runs CaImAn rigid motion correction plane-by-plane and converts the
    resulting shifts into the layout expected by sima.

    Parameters
    ----------
    dataset : iterable of sequences shaped (frames, planes, rows, cols, channels)

    Returns
    -------
    displacements : list of arrays
        One (num_frames, num_planes, 2) integer array per sequence with the
        estimated (x, y) shift of every frame.
    """
    # Use a quarter of the cores (at least one) for splitting the movie.
    ncpus = int(mp.cpu_count() / 4.0)
    if ncpus == 0:
        ncpus = 1
    verbose = self._params['verbose']
    if self._params['max_displacement'] is None:
        # effectively unbounded shifts
        max_displacement = (1e15, 1e15)
    else:
        max_displacement = self._params['max_displacement']
    displacements = []

    # No cluster is started here; CaImAn runs single-node with dview=None.
    # (A previous `if 'dview' in locals(): dview.terminate()` guard was dead
    # code: dview was never bound at that point, so the test was always False.)
    # c, dview, n_processes = cb.cluster.setup_cluster(
    #     backend='local', n_processes=ncpus, single_thread=False)
    dview = None

    num_iter = 3            # number of times the algorithm is run
    splits = ncpus * 2      # split the movie in chunks across time for parallelization
    shifts_opencv = True    # apply shifts fast way (but smoothing results)
    save_movie_rigid = False  # save the movies vs just get the template
    upsample_factor_grid = 4
    max_deviation_rigid = 3

    for sequence in dataset:
        t0 = time.time()
        num_frames = sequence.shape[0]
        num_planes = sequence.shape[1]
        num_channels = sequence.shape[4]
        # Patch geometry derived from the frame size.
        strides = (int(sequence.shape[2] / 10.0),
                   int(sequence.shape[3] / 10.0))
        overlaps = (int(sequence.shape[2] / 20.0),
                    int(sequence.shape[3] / 20.0))
        if num_channels > 1:
            raise NotImplementedError("Error: only one colour channel \
can be used for DFT motion correction. Using channel 1.")

        # Loop-invariant: the offset is computed from the whole sequence, so
        # hoist it out of the per-plane loop instead of recomputing it.
        min_mov = np.min(sequence[:100, :, :, :, :])

        # get results into a shape sima likes
        frame_shifts = np.zeros((num_frames, num_planes, 2))
        for plane_idx in range(num_planes):
            # NOTE(review): MotionCorrect is pointed at self._params['savedir']
            # for every plane, not at the plane data — confirm this is intended.
            mc = MotionCorrect(self._params['savedir'], min_mov,
                               dview=dview, max_shifts=max_displacement,
                               niter_rig=num_iter, splits_rig=splits,
                               strides=strides, overlaps=overlaps,
                               splits_els=splits,
                               upsample_factor_grid=upsample_factor_grid,
                               max_deviation_rigid=max_deviation_rigid,
                               shifts_opencv=shifts_opencv,
                               nonneg_movie=True)
            sys.stdout.write("Applying motion correction... ")
            sys.stdout.flush()
            mc.motion_correct_rigid(save_movie=save_movie_rigid)
            sys.stdout.write("done\n")
            frame_shifts[:, plane_idx] = mc.shifts_rig
        # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the supported equivalent.
        displacements.append(np.round(frame_shifts).astype(int))

        total_time = time.time() - t0
        if verbose:
            print('    Total time for plane ' + str(plane_idx + 1) +
                  ': ' + str(total_time) + ' s')

    cb.cluster.stop_server()
    return displacements