#%% visualize components
# Contours of the three index groups (accepted, blobs, rejected) side by side.
pl.subplot(1, 3, 1)
crd = plot_contours(A.tocsc()[:, idx_components], Cn, thr=0.9)
pl.subplot(1, 3, 2)
crd = plot_contours(A.tocsc()[:, idx_blobs], Cn, thr=0.9)
pl.subplot(1, 3, 3)
crd = plot_contours(A.tocsc()[:, idx_components_bad], Cn, thr=0.9)
#%% inspect traces of the blob-shaped components
idx_very_nice = idx_blobs
view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, idx_very_nice]),
                 C[idx_very_nice, :], b, f, dims[0], dims[1],
                 YrA=YrA[idx_very_nice, :], img=Cn)
#%% reconstructed movie: selected components plus full background term b.dot(f)
new_m = cm.movie(np.reshape(A.tocsc()[:, idx_blobs] * C[idx_blobs] + b.dot(f),
                            dims + (-1,), order='F').transpose([2, 0, 1]))
new_m.play(fr=30, backend='opencv', gain=7., magnification=3.)
#%% reconstructed movie: selected components plus a static (median) background
new_m = cm.movie(np.reshape(A.tocsc()[:, idx_blobs] * C[idx_blobs] + b * np.median(f),
                            dims + (-1,), order='F').transpose([2, 0, 1]))
new_m.play(fr=30, backend='opencv', gain=7., magnification=3.)
#%% reconstructed movie: selected components only, no background
new_m = cm.movie(np.reshape(A.tocsc()[:, idx_blobs] * C[idx_blobs],
                            dims + (-1,), order='F').transpose([2, 0, 1]))
new_m.play(fr=30, backend='opencv', gain=30., magnification=3.)
def main():
    pass  # For compatibility between running under Spyder and the CLI

#%% First setup some parameters
# dataset dependent parameters
display_images = False          # Set this to true to show movies and plots
fname = ['Sue_2x_3000_40_-46.tif']  # filename to be processed
fr = 30                         # imaging rate in frames per second
decay_time = 0.4                # length of a typical transient in seconds

# motion correction parameters
niter_rig = 1                   # number of iterations for rigid motion correction
max_shifts = (6, 6)             # maximum allow rigid shift
# for parallelization split the movies in num_splits chuncks across time
splits_rig = 56
# start a new patch for pw-rigid motion correction every x pixels
strides = (48, 48)
# overlap between pathes (size of patch strides+overlaps)
overlaps = (24, 24)
# for parallelization split the movies in num_splits chuncks across time
splits_els = 56
upsample_factor_grid = 4        # upsample factor to avoid smearing when merging patches
# maximum deviation allowed for patch with respect to rigid shifts
max_deviation_rigid = 3

# parameters for source extraction and deconvolution
p = 1                           # order of the autoregressive system
gnb = 2                         # number of global background components
merge_thresh = 0.8              # merging threshold, max correlation allowed
# half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
rf = 15
stride_cnmf = 6                 # amount of overlap between the patches in pixels
K = 4                           # number of components per patch
gSig = [4, 4]                   # expected half size of neurons
# initialization method (if analyzing dendritic data using 'sparse_nmf')
init_method = 'greedy_roi'
is_dendrites = False            # flag for analyzing dendritic data
# sparsity penalty for dendritic data analysis through sparse NMF
alpha_snmf = None

# parameters for component evaluation
min_SNR = 2.5                   # signal to noise ratio for accepting a component
rval_thr = 0.8                  # space correlation threshold for accepting a component
cnn_thr = 0.8                   # threshold for CNN based classifier

#%% download the dataset if it's not present in your folder
if fname[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']:
    fname = [download_demo(fname[0])]

#%% play the movie
# playing the movie using opencv. It requires loading the movie in memory. To
# close the video press q
m_orig = cm.load_movie_chain(fname[:1])
downsample_ratio = 0.2
offset_mov = -np.min(m_orig[:100])
moviehandle = m_orig.resize(1, 1, downsample_ratio)
if display_images:
    moviehandle.play(gain=10, offset=offset_mov, fr=30, magnification=2)

#%% start a cluster for parallel processing
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)

#%%% MOTION CORRECTION
# first we create a motion correction object with the parameters specified
min_mov = cm.load(fname[0], subindices=range(200)).min()
# this will be subtracted from the movie to make it non-negative

mc = MotionCorrect(fname[0], min_mov, dview=dview, max_shifts=max_shifts,
                   niter_rig=niter_rig, splits_rig=splits_rig,
                   strides=strides, overlaps=overlaps, splits_els=splits_els,
                   upsample_factor_grid=upsample_factor_grid,
                   max_deviation_rigid=max_deviation_rigid,
                   shifts_opencv=True, nonneg_movie=True)
# note that the file is not loaded in memory

#%% Run piecewise-rigid motion correction using NoRMCorre
mc.motion_correct_pwrigid(save_movie=True)
m_els = cm.load(mc.fname_tot_els)
# maximum shift to be used for trimming against NaNs
# FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# int is the documented replacement and behaves identically here.
bord_px_els = np.ceil(np.maximum(np.max(np.abs(mc.x_shifts_els)),
                                 np.max(np.abs(mc.y_shifts_els)))).astype(int)

#%% compare with original movie
moviehandle = cm.concatenate([
    m_orig.resize(1, 1, downsample_ratio) + offset_mov,
    m_els.resize(1, 1, downsample_ratio)
], axis=2)
display_images = False
if display_images:
    moviehandle.play(fr=60, q_max=99.5, magnification=2, offset=0)  # press q to exit

#%% MEMORY MAPPING
# memory map the file in order 'C'
fnames = mc.fname_tot_els   # name of the pw-rigidly corrected file.
border_to_0 = bord_px_els   # number of pixels to exclude
fname_new = cm.save_memmap(fnames, base_name='memmap_', order='C',
                           border_to_0=bord_px_els)  # exclude borders

# now load the file
Yr, dims, T = cm.load_memmap(fname_new)
d1, d2 = dims
# load frames in python format (T x X x Y)
images = np.reshape(Yr.T, [T] + list(dims), order='F')

#%% restart cluster to clean up memory
cm.stop_server(dview=dview)
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)

#%% RUN CNMF ON PATCHES
# First extract spatial and temporal components on patches and combine them
# for this step deconvolution is turned off (p=0)
t1 = time.time()
cnm = cnmf.CNMF(n_processes=1, k=K, gSig=gSig, merge_thresh=merge_thresh,
                p=0, dview=dview, rf=rf, stride=stride_cnmf, memory_fact=1,
                method_init=init_method, alpha_snmf=alpha_snmf,
                only_init_patch=False, gnb=gnb, border_pix=bord_px_els)
cnm = cnm.fit(images)

#%% plot contours of found components
Cn = cm.local_correlations(images.transpose(1, 2, 0))
Cn[np.isnan(Cn)] = 0
plt.figure()
crd = plot_contours(cnm.A, Cn, thr=0.9)
plt.title('Contour plots of found components')

#%% COMPONENT EVALUATION
# the components are evaluated in three ways:
#   a) the shape of each component must be correlated with the data
#   b) a minimum peak SNR is required over the length of a transient
#   c) each shape passes a CNN based classifier
idx_components, idx_components_bad, SNR_comp, r_values, cnn_preds = \
    estimate_components_quality_auto(images, cnm.A, cnm.C, cnm.b, cnm.f,
                                     cnm.YrA, fr, decay_time, gSig, dims,
                                     dview=dview, min_SNR=min_SNR,
                                     r_values_min=rval_thr, use_cnn=False,
                                     thresh_cnn_min=cnn_thr)

#%% PLOT COMPONENTS
if display_images:
    plt.figure()
    plt.subplot(121)
    crd_good = cm.utils.visualization.plot_contours(
        cnm.A[:, idx_components], Cn, thr=.8, vmax=0.75)
    plt.title('Contour plots of accepted components')
    plt.subplot(122)
    crd_bad = cm.utils.visualization.plot_contours(
        cnm.A[:, idx_components_bad], Cn, thr=.8, vmax=0.75)
    plt.title('Contour plots of rejected components')

#%% VIEW TRACES (accepted and rejected)
if display_images:
    view_patches_bar(Yr, cnm.A.tocsc()[:, idx_components],
                     cnm.C[idx_components], cnm.b, cnm.f, dims[0], dims[1],
                     YrA=cnm.YrA[idx_components], img=Cn)
    view_patches_bar(Yr, cnm.A.tocsc()[:, idx_components_bad],
                     cnm.C[idx_components_bad], cnm.b, cnm.f, dims[0],
                     dims[1], YrA=cnm.YrA[idx_components_bad], img=Cn)

#%% RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution
A_in, C_in, b_in, f_in = cnm.A[:, idx_components], cnm.C[idx_components], cnm.b, cnm.f
cnm2 = cnmf.CNMF(n_processes=1, k=A_in.shape[-1], gSig=gSig, p=p,
                 dview=dview, merge_thresh=merge_thresh, Ain=A_in, Cin=C_in,
                 b_in=b_in, f_in=f_in, rf=None, stride=None, gnb=gnb,
                 method_deconvolution='oasis', check_nan=True)
cnm2 = cnm2.fit(images)

#%% Extract DF/F values
F_dff = detrend_df_f(cnm2.A, cnm2.b, cnm2.C, cnm2.f, YrA=cnm2.YrA,
                     quantileMin=8, frames_window=250)

#%% Show final traces
cnm2.view_patches(Yr, dims=dims, img=Cn)

#%% STOP CLUSTER and clean up log files
cm.stop_server(dview=dview)
log_files = glob.glob('*_LOG_*')
for log_file in log_files:
    os.remove(log_file)

#%% reconstruct denoised movie
denoised = cm.movie(cnm2.A.dot(cnm2.C) + cnm2.b.dot(cnm2.f)).reshape(
    dims + (-1,), order='F').transpose([2, 0, 1])

#%% play along side original data
moviehandle = cm.concatenate([
    m_els.resize(1, 1, downsample_ratio),
    denoised.resize(1, 1, downsample_ratio)
], axis=2)
if display_images:
    moviehandle.play(fr=60, gain=15, magnification=2, offset=0)  # press q to exit
idx_components_r = np.where(r_values >= .5)[0] idx_components_raw = np.where(fitness_raw < -20)[0] idx_components_delta = np.where(fitness_delta < -20)[0] idx_components = np.union1d(idx_components_r, idx_components_raw) idx_components = np.union1d(idx_components, idx_components_delta) idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components) print(('Keeping ' + str(len(idx_components)) + ' and discarding ' + str(len(idx_components_bad)))) #%% # pl.figure() crd = plot_contours(A_tot.tocsc()[:, idx_components], Cn2, thr=0.9) #%% view_patches_bar(Yr, scipy.sparse.coo_matrix(A_tot.tocsc()[:, idx_components_bad]), C_tot[ idx_components_bad, :], b_tot, f_tot, dims[0], dims[1], YrA=YrA_tot[idx_components_bad, :], img=Cn2) #%% A_tot = A_tot.tocsc()[:, idx_components] C_tot = C_tot[idx_components] #%% #%% # cnm2 = cnmf.CNMF(n_processes, k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh, p=p, dview=dview, Ain=A_tot, Cin=C_tot, # f_in=f_tot, rf=None, stride=None, method_deconvolution='oasis') cnm_refine = cnmf.CNMF(n_processes, method_init='greedy_roi', k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh, rf=None, stride=None, p=p, dview=dview, Ain=A_tot, Cin=C_tot, f_in=f_tot, method_deconvolution='oasis', skip_refinement=True, normalize_init=False, options_local_NMF=None, minibatch_shape=100, minibatch_suff_stat=5, update_num_comps=True, rval_thr=rval_thr, thresh_fitness_delta=thresh_fitness_delta, thresh_fitness_raw=thresh_fitness_raw, batch_update_suff_stat=True,
idx_components = np.union1d(idx_components_r, idx_components_raw) idx_components = np.union1d(idx_components, idx_components_delta) idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components) print(('Keeping ' + str(len(idx_components)) + ' and discarding ' + str(len(idx_components_bad)))) #%% # pl.figure() crd = plot_contours(A_tot.tocsc()[:, idx_components], Cn2, thr=0.9) #%% view_patches_bar(Yr, scipy.sparse.coo_matrix(A_tot.tocsc()[:, idx_components_bad]), C_tot[idx_components_bad, :], b_tot, f_tot, dims[0], dims[1], YrA=YrA_tot[idx_components_bad, :], img=Cn2) #%% A_tot = A_tot.tocsc()[:, idx_components] C_tot = C_tot[idx_components] #%% #%% # cnm2 = cnmf.CNMF(n_processes, k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh, p=p, dview=dview, Ain=A_tot, Cin=C_tot, # f_in=f_tot, rf=None, stride=None, method_deconvolution='oasis') cnm_refine = cnmf.CNMF(n_processes, method_init='greedy_roi', k=A_tot.shape,
print(' ***** ')
print((len(r_values)))
print((len(idx_components)))
#%% keep only the accepted components
# A_off may be a scipy sparse matrix (has .toarray) or already a dense array.
# FIX: narrowed the bare `except:` (which also swallows KeyboardInterrupt and
# unrelated errors such as a bad idx_components) to AttributeError, the only
# exception the dense-array fallback is meant to handle.
try:
    A_off = A_off.toarray()[:, idx_components]
except AttributeError:
    A_off = A_off[:, idx_components]
C_off = C_off[idx_components]
# OASISinstances = OASISinstances[()]
#%% plot contours of the retained components
pl.figure()
crd = plot_contours(scipy.sparse.coo_matrix(A_off), Cn, thr=0.9)
#%% inspect the retained traces
view_patches_bar(None, scipy.sparse.coo_matrix(A_off[:, :]), C_off[:, :],
                 b_off, f_off, dims_off[0], dims_off[1], YrA=YrA[:, :], img=Cn)
#%% threshold the spatial footprints and filter components by pixel size
A_off_thr = cm.source_extraction.cnmf.spatial.threshold_components(
    A_off[:, :], dims_off, medw=None, thr_method='max', maxthr=0.2,
    nrgthr=0.99, extract_cc=True, se=None, ss=None, dview=dview)
A_off_thr = A_off_thr > 0
size_neurons = A_off_thr.sum(0)
A_off_thr = A_off_thr[:, (size_neurons > min_size_neuro) & (size_neurons < max_size_neuro)]
# NOTE(review): traces are hard-truncated at frame 116000 — presumably the
# common length of the recordings being compared; confirm against the dataset.
C_off_thr = C_off[(size_neurons > min_size_neuro) & (size_neurons < max_size_neuro), :116000]
print(A_off_thr.shape)
# temporal binning: max over each bin of n_frames_per_bin frames
C_off_thr = np.array([CC.reshape([-1, n_frames_per_bin]).max(1) for CC in C_off_thr])
def main():
    pass  # For compatibility between running under Spyder and the CLI

#%% load data
fname = os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')
Y = cm.load(fname).astype(np.float32)
# used as a background image
Cn = cm.local_correlations(Y.transpose(1, 2, 0))

#%% set up some parameters
fr = 10              # frame rate (Hz)
decay_time = 0.5     # approximate length of transient event in seconds
gSig = [6, 6]        # expected half size of neurons
p = 1                # order of AR indicator dynamics
min_SNR = 3.5        # minimum SNR for accepting new components
rval_thr = 0.90      # correlation threshold for new component inclusion
gnb = 3              # number of background components

# set up some additional supporting parameters needed for the algorithm
# (these are default values but change according to dataset characteristics)
# number of shapes to be updated each time (put this to a finite small value
# to increase speed)
max_comp_update_shape = np.inf
# maximum number of expected components used for memory pre-allocation
# (exaggerate here)
expected_comps = 50
# number of timesteps to consider when testing new neuron candidates
N_samples = np.ceil(fr * decay_time)
# exceptionality threshold
thresh_fitness_raw = log_ndtr(-min_SNR) * N_samples
# total length of file
T1 = Y.shape[0]

# set up CNMF initialization parameters
merge_thresh = 0.8   # merging threshold, max correlation allowed
initbatch = 400      # number of frames for initialization (from the first file)
patch_size = 32      # size of patch
stride = 3           # amount of overlap between patches
K = 4                # max number of components in each patch

#%% obtain initial batch file used for initialization
# memory map file (not needed)
fname_new = Y[:initbatch].save(
    os.path.join(caiman_datadir(), 'example_movies', 'demo.mmap'), order='C')
Yr, dims, T = cm.load_memmap(fname_new)
images = np.reshape(Yr.T, [T] + list(dims), order='F')
Cn_init = cm.local_correlations(np.reshape(Yr, dims + (T, ), order='F'))

#%% RUN (offline) CNMF algorithm on the initial batch
pl.close('all')
cnm_init = cnmf.CNMF(2, k=K, gSig=gSig, merge_thresh=merge_thresh, fr=fr,
                     p=p, rf=patch_size // 2, stride=stride,
                     skip_refinement=False, normalize_init=False,
                     options_local_NMF=None, minibatch_shape=100,
                     minibatch_suff_stat=5, update_num_comps=True,
                     rval_thr=rval_thr, thresh_fitness_delta=-50, gnb=gnb,
                     decay_time=decay_time,
                     thresh_fitness_raw=thresh_fitness_raw,
                     batch_update_suff_stat=False,
                     max_comp_update_shape=max_comp_update_shape,
                     expected_comps=expected_comps, dview=None,
                     min_SNR=min_SNR)
cnm_init = cnm_init.fit(images)

print(('Number of components:' + str(cnm_init.estimates.A.shape[-1])))
pl.figure()
crd = plot_contours(cnm_init.estimates.A.tocsc(), Cn_init, thr=0.9)

#%% run (online) OnACID algorithm
cnm = deepcopy(cnm_init)
cnm.params.data['dims'] = (60, 80)
cnm._prepare_object(np.asarray(Yr), T1)
t = initbatch
Y_ = cm.load(fname)[initbatch:].astype(np.float32)
for frame_count, frame in enumerate(Y_):
    cnm.fit_next(t, frame.copy().reshape(-1, order='F'))
    t += 1

#%% extract the results
C, f = cnm.estimates.C_on[gnb:cnm.M], cnm.estimates.C_on[:gnb]
A, b = cnm.estimates.Ab[:, gnb:cnm.M], cnm.estimates.Ab[:, :gnb]
print(('Number of components:' + str(A.shape[-1])))

#%% pass through the CNN classifier with a low threshold
# (keeps clearer neuron shapes and excludes processes)
use_CNN = True
if use_CNN:
    thresh_cnn = 0.1  # threshold for CNN classifier
    from caiman.components_evaluation import evaluate_components_CNN
    predictions, final_crops = evaluate_components_CNN(
        A, dims, gSig,
        model_name=os.path.join(caiman_datadir(), 'model', 'cnn_model'))
    discard = predictions[:, 1] < thresh_cnn
    keep = predictions[:, 1] >= thresh_cnn
    A_exclude, C_exclude = A[:, discard], C[discard]
    A, C = A[:, keep], C[keep]
    noisyC = cnm.estimates.noisyC[gnb:cnm.M]
    YrA = noisyC[keep] - C
else:
    YrA = cnm.estimates.noisyC[gnb:cnm.M] - C

#%% plot results
pl.figure()
crd = cm.utils.visualization.plot_contours(A, Cn, thr=0.9)
view_patches_bar(Yr, A, C, b, f, dims[0], dims[1], YrA, img=Cn)
try: A_off = A_off.toarray()[:, idx_components] except: A_off = A_off[:, idx_components] C_off = C_off[idx_components] # OASISinstances = OASISinstances[()] #%% pl.figure() crd = plot_contours(scipy.sparse.coo_matrix(A_off), Cn, thr=0.9) #%% view_patches_bar(None, scipy.sparse.coo_matrix(A_off[:, :]), C_off[:, :], b_off, f_off, dims_off[0], dims_off[1], YrA=YrA[:, :], img=Cn) #%% A_off_thr = cm.source_extraction.cnmf.spatial.threshold_components( A_off[:, :], dims_off, medw=None, thr_method='max', maxthr=0.2, nrgthr=0.99, extract_cc=True, se=None,
f = cnm.f snt = cnm.sn print(('Number of components:' + str(A.shape[-1]))) # %% pl.figure() # TODO: show screenshot 12` # TODO : change the way it is used crd = plot_contours(A, Cn, thr=params_display['thr_plot']) # %% # TODO: needinfo view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, :]), C[:, :], b, f, dims[0], dims[1], YrA=YrA[:, :], img=Cn) #%% c, dview, n_processes = cm.cluster.setup_cluster(backend='local', n_processes=None, single_thread=False) #%% thredshold components min_size_neuro = 3 * 2 * np.pi max_size_neuro = (2 * radius)**2 * np.pi A_thr = cm.source_extraction.cnmf.spatial.threshold_components( A.tocsc()[:, :].toarray(), dims,
# contours of the components found on the initial batch
crd = plot_contours(cnm_init.A.tocsc(), Cn_init, thr=0.9)

#%% RUN ALGORITHM ONLINE
cnm = deepcopy(cnm_init)
cnm._prepare_object(np.asarray(Yr), T1, expected_comps)
cnm.max_comp_update_shape = np.inf
cnm.update_num_comps = True
t = cnm.initbatch
# stream the remaining frames through the online algorithm one at a time
Y_ = cm.load(fname)[initbatch:].astype(np.float32)
for _, frame in enumerate(Y_):
    cnm.fit_next(t, frame.copy().reshape(-1, order='F'))
    t += 1

# split the online estimates into components (past the background block)
C = cnm.C_on[cnm.gnb:cnm.M]
A = cnm.Ab[:, cnm.gnb:cnm.M]
print(('Number of components:' + str(A.shape[-1])))

#%% plot contours of the online-detected components
pl.figure()
crd = cm.utils.visualization.plot_contours(A, Cn, thr=0.9)

#%% inspect traces; residuals come from the noisy traces minus the denoised C
view_patches_bar(Yr, A, C, cnm.b, cnm.f, dims[0], dims[1],
                 YrA=cnm.noisyC[cnm.gnb:cnm.M] - C, img=Cn)
max_comp_update_shape=max_comp_update_shape, deconv_flag=False, use_dense=False, simultaneously=False, n_refit=0) time_init = time() - t1 #%% Plot initialization results if ploton: crd = plot_contours(cnm_init.A.tocsc(), Cn_init, thr=0.9) A, C, b, f, YrA, sn = cnm_init.A, cnm_init.C, cnm_init.b, cnm_init.f, cnm_init.YrA, cnm_init.sn view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, :]), C[:, :], b, f, dims[0], dims[1], YrA=YrA[:, :], img=Cn_init) #%% create a function for plotting results in real time if needed def create_frame(cnm2, img_norm, captions): A, b = cnm2.Ab[:, cnm2.gnb:], cnm2.Ab[:, :cnm2.gnb].toarray() C, f = cnm2.C_on[cnm2.gnb:cnm2.M, :], cnm2.C_on[:cnm2.gnb, :] # inferred activity due to components (no background) comps_frame = A.dot(C[:, t - 1]).reshape( cnm2.dims, order='F') * img_norm / np.max(img_norm) comps_frame_ = A.dot(C[:, t - 1]).reshape( cnm2.dims, order='F') * img_norm / np.max(img_norm) / 3
# TODO: show screenshot 14
# Contours of accepted vs rejected components, side by side.
A_csc = A.tocsc()
pl.subplot(1, 2, 1)
crd = plot_contours(A_csc[:, idx_components], Cn, thr=params_display['thr_plot'])
pl.subplot(1, 2, 2)
crd = plot_contours(A_csc[:, idx_components_bad], Cn, thr=params_display['thr_plot'])

# %%
# TODO: needinfo
# traces of the accepted components
view_patches_bar(Yr, scipy.sparse.coo_matrix(A_csc[:, idx_components]),
                 C[idx_components, :], b, f, dims[0], dims[1],
                 YrA=YrA[idx_components, :], img=Cn)

# %% traces of the rejected components
view_patches_bar(Yr, scipy.sparse.coo_matrix(A_csc[:, idx_components_bad]),
                 C[idx_components_bad, :], b, f, dims[0], dims[1],
                 YrA=YrA[idx_components_bad, :], img=Cn)
A=A, C=C, b=b, f=f, YrA=YrA, sn=sn, d1=d1, d2=d2, idx_components=idx_components, idx_components_bad=idx_components_bad, fitness_raw=fitness_raw, fitness_delta=fitness_delta, r_values=r_values) # we save it # %% # TODO: show screenshot 14 pl.subplot(1, 2, 1) crd = plot_contours(A.tocsc()[:, idx_components], Cn, thr=params_display['thr_plot']) pl.subplot(1, 2, 2) crd = plot_contours(A.tocsc()[:, idx_components_bad], Cn, thr=params_display['thr_plot']) # %% # TODO: needinfo view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, idx_components]), C[idx_components, :], b, f, dims[0], dims[1], YrA=YrA[idx_components, :], img=Cn) # %% view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, idx_components_bad]), C[idx_components_bad, :], b, f, dims[0], dims[1], YrA=YrA[idx_components_bad, :], img=Cn) #%% LOAD DATA params_display = { 'downsample_ratio': .2, 'thr_plot': 0.8 } try: fname_new = fname_new[()] except: pass #analysis_file = '/mnt/ceph/neuro/jeremie_analysis/neurofinder.03.00.test/Yr_d1_498_d2_467_d3_1_order_C_frames_2250_._results_analysis.npz' with np.load(os.path.join(os.path.split(fname_new)[0], os.path.split(fname_new)[1][:-4] + 'results_analysis.npz')) as ld:
# pull the fitted quantities off the model object
C, YrA, b, f, snt = cnm.C, cnm.YrA, cnm.b, cnm.f, cnm.sn
print(f'Number of components:{A.shape[-1]}')

# %%
pl.figure()
# TODO: show screenshot 12`
# TODO : change the way it is used
crd = plot_contours(A, Cn, thr=params_display['thr_plot'])

# %%
# TODO: needinfo
view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, :]), C[:, :], b, f,
                 dims[0], dims[1], YrA=YrA[:, :], img=Cn)

#%% spin up a local cluster for the thresholding step
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)

#%% threshold components and keep only plausibly neuron-sized footprints
min_size_neuro = 3 * 2 * np.pi
max_size_neuro = (2 * radius)**2 * np.pi
A_thr = cm.source_extraction.cnmf.spatial.threshold_components(
    A.tocsc()[:, :].toarray(), dims, medw=None, thr_method='max', maxthr=0.2,
    nrgthr=0.99, extract_cc=True, se=None, ss=None, dview=dview)
A_thr = A_thr > 0
size_neurons = A_thr.sum(0)
idx_size_neuro = np.where((size_neurons > min_size_neuro)
                          & (size_neurons < max_size_neuro))[0]
A_thr = A_thr[:, idx_size_neuro]
#%% persist the analysis results next to the memmapped movie
save_results = True
if save_results:
    np.savez(os.path.join(os.path.split(fname_new)[0], 'results_analysis.npz'),
             Cn=Cn, A=A.todense(), C=C, b=b, f=f, YrA=YrA, sn=sn, d1=d1,
             d2=d2, idx_components=idx_components,
             idx_components_bad=idx_components_bad)

#%% visualize components: accepted on the left, rejected on the right
pl.subplot(1, 2, 1)
crd = plot_contours(A.tocsc()[:, idx_components], Cn, thr=0.9)
pl.subplot(1, 2, 2)
crd = plot_contours(A.tocsc()[:, idx_components_bad], Cn, thr=0.9)

#%% traces of accepted components
view_patches_bar(Yr, A.tocsc()[:, idx_components], C[idx_components, :],
                 b, f, dims[0], dims[1], YrA=YrA[idx_components, :], img=Cn)

#%% traces of rejected components
view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, idx_components_bad]),
                 C[idx_components_bad, :], b, f, dims[0], dims[1],
                 YrA=YrA[idx_components_bad, :], img=Cn)

#%% STOP CLUSTER and clean up log files
cm.stop_server()
for log_file in glob.glob('Yr*_LOG_*'):
    os.remove(log_file)
#
dview=dview) print(('Keeping ' + str(len(idx_components)) + ' and discarding ' + str(len(idx_components_bad)))) # %% # TODO: show screenshot 13 pl.figure() crd = plot_contours(A_tot.tocsc()[:, idx_components], Cn, thr=params_display['thr_plot']) #%% idds = idx_components view_patches_bar(Yr, scipy.sparse.coo_matrix(A_tot.tocsc()[:, idds]), C_tot[idds, :], b_tot, f_tot, dims[0], dims[1], YrA=YrA_tot[idds, :], img=Cn) # %% A_tot = A_tot.tocsc()[:, idx_components] C_tot = C_tot[idx_components] # %% rerun updating the components to refine t1 = time.time() cnm = cnmf.CNMF(n_processes=1, k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh, p=p, dview=dview,
# optionally screen components with the CNN classifier before saving
use_CNN = False
if use_CNN:
    print ("... using CNN classifier ...")
    thresh_cnn = 0.1  # threshold for CNN classifier
    from caiman.components_evaluation import evaluate_components_CNN
    predictions, final_crops = evaluate_components_CNN(
        A, dims, gSig,
        model_name='/home/cat/Downloads/CaImAn/use_cases/CaImAnpaper/cnn_model')
    below = predictions[:, 1] < thresh_cnn
    above = predictions[:, 1] >= thresh_cnn
    A_exclude, C_exclude = A[:, below], C[below]
    A, C = A[:, above], C[above]
    noisyC = cnm.noisyC[cnm.gnb:cnm.M]
    YrA = noisyC[above] - C
else:
    print ("...skipping CNN classifier...")
    YrA = cnm.noisyC[cnm.gnb:cnm.M] - C

#%% plot results (disabled)
if False:
    pl.figure()
    crd = cm.utils.visualization.plot_contours(A, Cn, thr=0.9)
    view_patches_bar(Yr, A, C, b, f, dims[0], dims[1], YrA, img=Cn)

# SAVE DATA
print ("\n\n...saving .npz file containing all processed data: ", fname[:-4]+"_processed.npz")
np.savez(fname[:-4]+"_processed.npz", C=C, A=A, Y_=Y_, Yr=Yr, b=cnm.b,
         f=cnm.f, YrA=cnm.noisyC[cnm.gnb:cnm.M] - C, Cn=Cn)
print ("\n... Clean exit!...\n\n\n")
# close the movie writer and any OpenCV windows
# NOTE(review): nesting reconstructed from whitespace-mangled text; confirm
# whether `out = None` belongs inside the `if save_movie:` branch.
if save_movie:
    out.release()
out = None
cv2.destroyAllWindows()

#%% save results (optional)
save_results = False
if save_results:
    np.savez('results_analysis_online_MOT_CORR.npz', Cn=Cn, Ab=cnm2.Ab,
             Cf=cnm2.C_on, b=cnm2.b, f=cnm2.f, dims=cnm2.dims,
             tottime=tottime, noisyC=cnm2.noisyC, shifts=shifts)

#%% create correlation image or raw data with applied shifts
Y_off = cm.load_movie_chain(fls)[200:].apply_shifts(shifts, interpolation='cubic')
Cn = Y_off.local_correlations(swap_dim=False)
pl.figure()
crd = cm.utils.visualization.plot_contours(cnm2.Ab[:, cnm2.gnb:], Cn,
                                           thr=0.95, display_numbers=False)

#%% extract results from the objects and do some plotting
# keep only the last pass over the data (t // epochs frames)
A, b = cnm2.Ab[:, cnm2.gnb:], cnm2.Ab[:, :cnm2.gnb].toarray()
C, f = cnm2.C_on[cnm2.gnb:cnm2.M, t - t // epochs:t], cnm2.C_on[:cnm2.gnb, t - t // epochs:t]
noisyC = cnm2.noisyC[:, t - t // epochs:t]
b_trace = [osi.b for osi in cnm2.OASISinstances] if hasattr(
    cnm2, 'OASISinstances') else [0] * C.shape[0]
pl.figure()
crd = cm.utils.visualization.plot_contours(A, Cn, thr=0.9)
view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, :]), C[:, :], b, f,
                 dims[0], dims[1], YrA=noisyC[cnm2.gnb:cnm2.M] - C, img=Cn)
def main(): pass  # For compatibility between running under Spyder and the CLI

#%% download and list all files to be processed
# folder inside ./example_movies where files will be saved
fld_name = 'Mesoscope'
download_demo('Tolias_mesoscope_1.hdf5', fld_name)
download_demo('Tolias_mesoscope_2.hdf5', fld_name)
download_demo('Tolias_mesoscope_3.hdf5', fld_name)
# folder where files are located
folder_name = os.path.join(caiman_datadir(), 'example_movies', fld_name)
extension = 'hdf5'  # extension of files
# read all files to be processed
fls = glob.glob(folder_name + '/*' + extension)
# your list of files should look something like this
print(fls)

#%% Set up some parameters
fr = 15  # frame rate (Hz)
decay_time = 0.5  # approximate length of transient event in seconds
gSig = (3, 3)  # expected half size of neurons
p = 1  # order of AR indicator dynamics
min_SNR = 2.5  # minimum SNR for accepting new components
rval_thr = 0.85  # correlation threshold for new component inclusion
ds_factor = 1  # spatial downsampling factor (increases speed but may lose some fine structure)
gnb = 2  # number of background components
# recompute gSig if downsampling is involved
gSig = tuple(np.ceil(np.array(gSig) / ds_factor).astype('int'))
mot_corr = True  # flag for online motion correction
# maximum allowed shift during motion correction
max_shift = np.ceil(10. / ds_factor).astype('int')

# set up some additional supporting parameters needed for the algorithm
# (these are default values but change according to dataset characteristics)
# number of shapes to be updated each time (put this to a finite small value to increase speed)
max_comp_update_shape = np.inf
init_files = 1  # number of files used for initialization
online_files = len(fls) - 1  # number of files used for online
initbatch = 200  # number of frames for initialization (presumably from the first file)
# maximum number of expected components used for memory pre-allocation (exaggerate here)
expected_comps = 300
K = 2  # initial number of components
# number of timesteps to consider when testing new neuron candidates
N_samples = np.ceil(fr * decay_time)
# exceptionality threshold: log-probability bound for accepting a transient
thresh_fitness_raw = scipy.special.log_ndtr(-min_SNR) * N_samples
epochs = 2  # number of passes over the data
len_file = 1000  # upper bound for number of frames in each file (used right below)
# total length of all files (if not known use a large number, then truncate at the end)
T1 = len(fls) * len_file * epochs

#%% Initialize movie
# load only the first initbatch frames and possibly downsample them
if ds_factor > 1:
    Y = cm.load(fls[0], subindices=slice(0, initbatch, None)).astype(
        np.float32).resize(1. / ds_factor, 1. / ds_factor)
else:
    Y = cm.load(fls[0], subindices=slice(0, initbatch, None)).astype(np.float32)

if mot_corr:  # perform motion correction on the first initbatch frames
    mc = Y.motion_correct(max_shift, max_shift)
    Y = mc[0].astype(np.float32)
    borders = np.max(mc[1])  # largest rigid shift observed during init
else:
    Y = Y.astype(np.float32)

# minimum value of movie.
# Subtract it to make the data non-negative
img_min = Y.min()
Y -= img_min
img_norm = np.std(Y, axis=0)  # normalizing factor to equalize the FOV
img_norm += np.median(img_norm)  # regularize so dark pixels don't blow up
Y = Y / img_norm[None, :, :]  # normalize data

_, d1, d2 = Y.shape
dims = (d1, d2)  # dimensions of FOV
Yr = Y.to_2D().T  # convert data into 2D array (pixels x time)

Cn_init = Y.local_correlations(swap_dim=False)  # compute correlation image
#pl.imshow(Cn_init)
#pl.title('Correlation Image on initial batch')
#pl.colorbar()
bnd_Y = np.percentile(Y, (0.001, 100 - 0.001))  # plotting boundaries for Y

#%% initialize OnACID with bare initialization
cnm_init = bare_initialization(Y[:initbatch].transpose(1, 2, 0), init_batch=initbatch, k=K, gnb=gnb,
                               gSig=gSig, p=p, minibatch_shape=100, minibatch_suff_stat=5,
                               update_num_comps=True, rval_thr=rval_thr,
                               thresh_fitness_raw=thresh_fitness_raw,
                               batch_update_suff_stat=True, max_comp_update_shape=max_comp_update_shape,
                               deconv_flag=False, use_dense=False,
                               simultaneously=False, n_refit=0)

#%% Plot initialization results
crd = plot_contours(cnm_init.A.tocsc(), Cn_init, thr=0.9)
A, C, b, f, YrA, sn = cnm_init.A, cnm_init.C, cnm_init.b, cnm_init.f, cnm_init.YrA, cnm_init.sn
view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, :]), C[:, :], b, f,
                 dims[0], dims[1], YrA=YrA[:, :], img=Cn_init)

# plotting boundaries for component activity and background
bnd_AC = np.percentile(A.dot(C), (0.001, 100 - 0.005))
bnd_BG = np.percentile(b.dot(f), (0.001, 100 - 0.001))

#%% create a function for plotting results in real time if needed
def create_frame(cnm2, img_norm, captions):
    """Compose a 2x2 visualization frame (raw | shapes-or-residual / components | denoised).

    NOTE(review): reads globals frame_cor, t, bnd_Y, bnd_AC, show_residuals,
    resize_fact set by the surrounding script — confirm before reusing elsewhere.
    Returns a uint8 BGR image suitable for cv2.imshow / VideoWriter.
    """
    A, b = cnm2.Ab[:, cnm2.gnb:], cnm2.Ab[:, :cnm2.gnb].toarray()
    C, f = cnm2.C_on[cnm2.gnb:cnm2.M, :], cnm2.C_on[:cnm2.gnb, :]
    # inferred activity due to components (no background)
    frame_plot = (frame_cor.copy() - bnd_Y[0]) / np.diff(bnd_Y)
    comps_frame = A.dot(C[:, t - 1]).reshape(cnm2.dims, order='F')
    bgkrnd_frame = b.dot(f[:, t - 1]).reshape(
        cnm2.dims, order='F')  # denoised frame (components + background)
    denoised_frame = comps_frame + bgkrnd_frame
    denoised_frame = (denoised_frame.copy() - bnd_Y[0]) / np.diff(bnd_Y)
    comps_frame = (comps_frame.copy() - bnd_AC[0]) / np.diff(bnd_AC)

    if show_residuals:
        #all_comps = np.reshape(cnm2.Yres_buf.mean(0), cnm2.dims, order='F')
        all_comps = np.reshape(cnm2.mean_buff, cnm2.dims, order='F')
        all_comps = np.minimum(np.maximum(all_comps, 0) * 2 + 0.25, 255)
    else:
        all_comps = np.array(A.sum(-1)).reshape(cnm2.dims, order='F')  # spatial shapes
    # top row: raw frame next to shapes/residual; bottom row: components next to denoised
    frame_comp_1 = cv2.resize(
        np.concatenate([frame_plot, all_comps * 1.], axis=-1),
        (2 * np.int(cnm2.dims[1] * resize_fact), np.int(cnm2.dims[0] * resize_fact)))
    frame_comp_2 = cv2.resize(
        np.concatenate([comps_frame, denoised_frame], axis=-1),
        (2 * np.int(cnm2.dims[1] * resize_fact), np.int(cnm2.dims[0] * resize_fact)))
    frame_pn = np.concatenate([frame_comp_1, frame_comp_2], axis=0).T
    vid_frame = np.repeat(frame_pn[:, :, None], 3, axis=-1)
    vid_frame = np.minimum((vid_frame * 255.), 255).astype('u1')

    if show_residuals and cnm2.ind_new:
        # draw magenta rectangles around newly added components (residual panel)
        add_v = np.int(cnm2.dims[1] * resize_fact)
        for ind_new in cnm2.ind_new:
            cv2.rectangle(vid_frame,
                          (int(ind_new[0][1] * resize_fact), int(ind_new[1][1] * resize_fact) + add_v),
                          (int(ind_new[0][0] * resize_fact), int(ind_new[1][0] * resize_fact) + add_v),
                          (255, 0, 255), 2)

    # caption each quadrant, plus a frame counter at the bottom
    cv2.putText(vid_frame, captions[0], (5, 20), fontFace=5, fontScale=0.8,
                color=(0, 255, 0), thickness=1)
    cv2.putText(vid_frame, captions[1], (np.int(cnm2.dims[0] * resize_fact) + 5, 20),
                fontFace=5, fontScale=0.8, color=(0, 255, 0), thickness=1)
    cv2.putText(vid_frame, captions[2], (5, np.int(cnm2.dims[1] * resize_fact) + 20),
                fontFace=5, fontScale=0.8, color=(0, 255, 0), thickness=1)
    cv2.putText(vid_frame, captions[3],
                (np.int(cnm2.dims[0] * resize_fact) + 5, np.int(cnm2.dims[1] * resize_fact) + 20),
                fontFace=5, fontScale=0.8, color=(0, 255, 0), thickness=1)
    cv2.putText(vid_frame, 'Frame = ' + str(t),
                (vid_frame.shape[1] // 2 - vid_frame.shape[1] // 10, vid_frame.shape[0] - 20),
                fontFace=5, fontScale=0.8, color=(0, 255, 255), thickness=1)
    return vid_frame
#%%
# Prepare object for OnACID: clone the initialization result and configure
# the online algorithm plus run-control flags for the processing loop below.
cnm2 = deepcopy(cnm_init)
save_init = False  # flag for saving initialization object. Useful if you want to check OnACID with different parameters but same initialization
if save_init:
    cnm_init.dview = None
    save_object(cnm_init, fls[0][:-4] + '_DS_' + str(ds_factor) + '.pkl')
    cnm_init = load_object(fls[0][:-4] + '_DS_' + str(ds_factor) + '.pkl')

path_to_cnn_residual = os.path.join(caiman_datadir(), 'model', 'cnn_model_online.h5')

cnm2._prepare_object(np.asarray(Yr), T1, expected_comps, idx_components=None,
                     min_num_trial=3, max_num_added=3,
                     path_to_model=path_to_cnn_residual,
                     sniper_mode=False, use_peak_max=False, q=0.5)
cnm2.thresh_CNN_noisy = 0.5  # CNN acceptance threshold for candidate components

#%% Run OnACID and optionally plot results in real time
epochs = 1  # NOTE(review): overrides the epochs value set during parameter setup
cnm2.Ab_epoch = []  # save the shapes at the end of each epoch
t = cnm2.initbatch  # current timestep
tottime = []
Cn = Cn_init.copy()

remove_flag = False  # flag for removing components with bad shapes
T_rm = 650  # remove bad components every T_rm frames
rm_thr = 0.1  # CNN classifier removal threshold
# flag for plotting contours of detected components at the end of each file
plot_contours_flag = False
# flag for showing results video online (turn off flags for improving speed)
play_reconstr = True
# flag for saving movie (file could be quite large..)
save_movie = False
movie_name = os.path.join(
    folder_name, 'sniper_meso_0.995_new.avi')  # name of movie to be saved
resize_fact = 1.2  # image resizing factor

if online_files == 0:  # check whether there are any additional files
    process_files = fls[:init_files]  # end processing at this file
    init_batc_iter = [initbatch]  # place where to start
    end_batch = T1
else:
    process_files = fls[:init_files + online_files]  # additional files
    # where to start reading at each file
    init_batc_iter = [initbatch] + [0] * online_files

shifts = []
show_residuals = True
if show_residuals:
    caption = 'Mean Residual Buffer'
else:
    caption = 'Identified Components'
captions = ['Raw Data', 'Inferred Activity', caption, 'Denoised Data']
if save_movie and play_reconstr:
    fourcc = cv2.VideoWriter_fourcc('8', 'B', 'P', 'S')
#    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(
        movie_name, fourcc, 30.0,
        tuple([int(2 * x * resize_fact) for x in cnm2.dims]))

# Main OnACID loop: iterate over epochs, files, and individual frames.
# Each frame is (optionally) motion corrected, normalized, and fed to fit_next.
for iter in range(epochs):
    if iter > 0:
        # if not on first epoch process all files from scratch
        process_files = fls[:init_files + online_files]
        init_batc_iter = [0] * (online_files + init_files)

    # np.array(fls)[np.array([1,2,3,4,5,-5,-4,-3,-2,-1])]:
    for file_count, ffll in enumerate(process_files):
        print('Now processing file ' + ffll)
        Y_ = cm.load(ffll, subindices=slice(init_batc_iter[file_count], T1, None))

        # update max-correlation (and perform offline motion correction) just for illustration purposes
        if plot_contours_flag:
            if ds_factor > 1:
                Y_1 = Y_.resize(1. / ds_factor, 1. / ds_factor, 1)
            else:
                Y_1 = Y_.copy()
            if mot_corr:
                # use the current background estimate as a motion-correction template
                templ = (cnm2.Ab.data[:cnm2.Ab.indptr[1]] *
                         cnm2.C_on[0, t - 1]).reshape(cnm2.dims, order='F') * img_norm
                newcn = (Y_1 - img_min).motion_correct(
                    max_shift, max_shift, template=templ)[0].local_correlations(swap_dim=False)
                Cn = np.maximum(Cn, newcn)
            else:
                Cn = np.maximum(Cn, Y_1.local_correlations(swap_dim=False))

        old_comps = cnm2.N  # number of existing components
        for frame_count, frame in enumerate(Y_):  # now process each file
            if np.isnan(np.sum(frame)):
                raise Exception('Frame ' + str(frame_count) + ' contains nan')
            if t % 100 == 0:
                print('Epoch: ' + str(iter + 1) + '. ' + str(t) +
                      ' frames have beeen processed in total. ' + str(cnm2.N - old_comps) +
                      ' new components were added. Total number of components is ' +
                      str(cnm2.Ab.shape[-1] - gnb))
                old_comps = cnm2.N

            t1 = time()  # count time only for the processing part
            frame_ = frame.copy().astype(np.float32)    #
            if ds_factor > 1:
                frame_ = cv2.resize(frame_, img_norm.shape[::-1])  # downsampling

            frame_ -= img_min  # make data non-negative

            if mot_corr:  # motion correct
                templ = cnm2.Ab.dot(cnm2.C_on[:cnm2.M, t - 1]).reshape(
                    cnm2.dims, order='F') * img_norm
                frame_cor, shift = motion_correct_iteration_fast(
                    frame_, templ, max_shift, max_shift)
                shifts.append(shift)
            else:
                templ = None
                frame_cor = frame_

            frame_cor = frame_cor / img_norm  # normalize data-frame
            cnm2.fit_next(t, frame_cor.reshape(-1, order='F'))  # run OnACID on this frame
            tottime.append(time() - t1)  # store time

            t += 1

            if t % T_rm == 0 and remove_flag:
                # periodically drop components the CNN classifier rejects
                prd, _ = evaluate_components_CNN(cnm2.Ab[:, gnb:], dims, gSig)
                ind_rem = np.where(prd[:, 1] < rm_thr)[0].tolist()
                cnm2.remove_components(ind_rem)
                print('Removing ' + str(len(ind_rem)) + ' components')

            if t % 1000 == 0 and plot_contours_flag:
                pl.cla()
                A = cnm2.Ab[:, cnm2.gnb:]
                # update the contour plot every 1000 frames
                crd = cm.utils.visualization.plot_contours(A, Cn, thr=0.9)
                pl.pause(1)

            if play_reconstr:  # generate movie with the results
                vid_frame = create_frame(cnm2, img_norm, captions)
                if save_movie:
                    out.write(vid_frame)
                    if t - initbatch < 100:
                        #for rp in np.int32(np.ceil(np.exp(-np.arange(1,100)/30)*20)):
                        # repeat early frames so new-component events are visible
                        for rp in range(len(cnm2.ind_new) * 2):
                            out.write(vid_frame)
                cv2.imshow('frame', vid_frame)
                if t - initbatch < 100:
                    for rp in range(len(cnm2.ind_new) * 2):
                        cv2.imshow('frame', vid_frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

        print('Cumulative processing speed is ' +
              str((t - initbatch) / np.sum(tottime))[:5] + ' frames per second.')
    # save the shapes at the end of each epoch
    cnm2.Ab_epoch.append(cnm2.Ab.copy())

if save_movie:
    out.release()
cv2.destroyAllWindows()

#%% save results (optional)
save_results = False
if save_results:
    np.savez('results_analysis_online_MOT_CORR.npz', Cn=Cn, Ab=cnm2.Ab,
             Cf=cnm2.C_on, b=cnm2.b, f=cnm2.f, dims=cnm2.dims,
             tottime=tottime, noisyC=cnm2.noisyC, shifts=shifts)

#%% extract results from the objects and do some plotting
# background columns come first in Ab; keep only the last epoch's timepoints
A, b = cnm2.Ab[:, cnm2.gnb:], cnm2.Ab[:, :cnm2.gnb].toarray()
C, f = cnm2.C_on[cnm2.gnb:cnm2.M, t - t // epochs:t], cnm2.C_on[:cnm2.gnb, t - t // epochs:t]
noisyC = cnm2.noisyC[:, t - t // epochs:t]
b_trace = [osi.b for osi in cnm2.OASISinstances] if hasattr(
    cnm2, 'OASISinstances') else [0] * C.shape[0]
pl.figure()
crd = cm.utils.visualization.plot_contours(A, Cn, thr=0.9)
view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, :]), C[:, :], b, f,
                 dims[0], dims[1], YrA=noisyC[cnm2.gnb:cnm2.M] - C, img=Cn)
                                 thresh_cnn_min=cnn_thr,
                                 # NOTE(review): tail of a component-evaluation call whose
                                 # head lies outside this chunk — confirm the full call site.
                                 gSig_range = [list(np.add(i*2,a)) for i,a in enumerate([gSig]*5)])

#%% PLOT COMPONENTS
plt.figure()
plt.subplot(121)
crd_good = cm.utils.visualization.plot_contours(
    cnm2.A[:, idx_components], Cn, thr=.95, vmax=0.25)
plt.title('Contour plots of accepted components')
plt.subplot(122)
crd_bad = cm.utils.visualization.plot_contours(
    cnm2.A[:, idx_components_bad], Cn, thr=.95, vmax=0.25)
plt.title('Contour plots of rejected components')
#%% VIEW TRACES (accepted and rejected)
view_patches_bar(Yr, cnm2.A.tocsc()[:, idx_components], cnm2.C[idx_components],
                 cnm2.b, cnm2.f, dims[0], dims[1], YrA=cnm2.YrA[idx_components], img=Cn)
view_patches_bar(Yr, cnm2.A.tocsc()[:, idx_components_bad], cnm2.C[idx_components_bad],
                 cnm2.b, cnm2.f, dims[0], dims[1], YrA=cnm2.YrA[idx_components_bad], img=Cn)
#%% Extract DF/F values
F_dff = detrend_df_f(cnm2.A, cnm2.b, cnm2.C, cnm2.f, YrA=cnm2.YrA,
                     quantileMin=8, frames_window=250)
#%% Show final traces
cnm2.view_patches(Yr, dims=dims, img=Cn)
#%% STOP CLUSTER and clean up log files
cm.stop_server(dview=dview)
# Evaluate component quality, save results, plot accepted/rejected components,
# shut down the cluster, and reconstruct denoised movies.
# NOTE(review): relies on globals C, YrA, Y, A, b, f, Cn, sn, d1, d2, dims,
# fname_new and evaluation thresholds set earlier in the original script.
traces = C + YrA  # denoised traces plus residuals, used for quality metrics
idx_components, idx_components_bad = cm.components_evaluation.estimate_components_quality(
    traces, Y, A, C, b, f, final_frate=final_frate, Npeaks=Npeaks,
    r_values_min=r_values_min, fitness_min=fitness_min,
    fitness_delta_min=fitness_delta_min)

print(' ***** ')
print((len(traces)))
print((len(idx_components)))
#%% save results
np.savez(os.path.join(os.path.split(fname_new)[0],
                      os.path.split(fname_new)[1][:-4] + 'results_analysis_ZEBRA.npz'),
         Cn=Cn, A=A.todense(), C=C, b=b, f=f, YrA=YrA, sn=sn, d1=d1, d2=d2,
         idx_components=idx_components, idx_components_bad=idx_components_bad)
#%%
pl.subplot(1, 2, 1)
crd = plot_contours(A.tocsc()[:, idx_components], Cn, thr=0.9)
pl.subplot(1, 2, 2)
crd = plot_contours(A.tocsc()[:, idx_components_bad], Cn, thr=0.9)
#%%
view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, idx_components]),
                 C[idx_components, :], b, f, dims[0], dims[1],
                 YrA=YrA[idx_components, :], img=Cn)
#%%
view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, idx_components_bad]),
                 C[idx_components_bad, :], b, f, dims[0], dims[1],
                 YrA=YrA[idx_components_bad, :], img=Cn)
#%% STOP CLUSTER and clean up log files
cm.stop_server()
log_files = glob.glob('*_LOG_*')
for log_file in log_files:
    os.remove(log_file)
#%% reconstruct denoised movie
denoised = cm.movie(A.dot(C) + b.dot(f)).reshape(dims + (-1,),
                                                 order='F').transpose([2, 0, 1])
#%%
denoised.play(gain=3, offset=-50, fr=50, magnification=3)
#%% reconstruct denoised movie without background
                                               # NOTE(review): tail of a plot_contours call
                                               # whose head lies outside this chunk.
                                               vmax=0.75)
plt.title('Contour plots of accepted components')
plt.subplot(122)
crd_bad = cm.utils.visualization.plot_contours(cnm.A[:, idx_components_bad], Cn,
                                               thr=.8, vmax=0.75)
plt.title('Contour plots of rejected components')
#%% VIEW TRACES (accepted and rejected)
view_patches_bar(Yr, cnm.A.tocsc()[:, idx_components], cnm.C[idx_components],
                 cnm.b, cnm.f, dims[0], dims[1], YrA=cnm.YrA[idx_components], img=Cn)
view_patches_bar(Yr, cnm.A.tocsc()[:, idx_components_bad], cnm.C[idx_components_bad],
                 cnm.b, cnm.f, dims[0], dims[1], YrA=cnm.YrA[idx_components_bad], img=Cn)
    # NOTE(review): tail of a bare_initialization call whose head lies outside this chunk.
    batch_update_suff_stat=True, max_comp_update_shape=max_comp_update_shape,
    deconv_flag=False, use_dense=True,
    simultaneously=False, n_refit=0)

#%% Plot initialization results
crd = plot_contours(cnm_init.A.tocsc(), Cn_init, thr=0.9)
A, C, b, f, YrA, sn = cnm_init.A, cnm_init.C, cnm_init.b, cnm_init.f, cnm_init.YrA, cnm_init.sn
view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, :]), C[:, :], b, f,
                 dims[0], dims[1], YrA=YrA[:, :], img=Cn_init)

#%% Prepare object for OnACID
save_init = False  # flag for saving initialization object. Useful if you want to check OnACID with different parameters but same initialization
if save_init:
    cnm_init.dview = None
    save_object(cnm_init, fls[0][:-4] + '_DS_' + str(ds_factor) + '.pkl')
    cnm_init = load_object(fls[0][:-4] + '_DS_' + str(ds_factor) + '.pkl')

# NOTE(review): this call continues past the end of this chunk.
cnm_init._prepare_object(np.asarray(Yr), T1,
# Plot accepted / blob-like / rejected component contours, inspect selected
# traces, and play reconstructed movies (with and without background).
# NOTE(review): relies on globals A, C, b, f, Cn, Yr, YrA, dims, idx_components,
# idx_blobs, idx_components_bad set earlier in the original script.
pl.subplot(1, 3, 1)
crd = plot_contours(A.tocsc()[:, idx_components], Cn, thr=0.9)
pl.subplot(1, 3, 2)
crd = plot_contours(A.tocsc()[:, idx_blobs], Cn, thr=0.9)
pl.subplot(1, 3, 3)
crd = plot_contours(A.tocsc()[:, idx_components_bad], Cn, thr=0.9)
#%%
#idx_very_nice=[2, 19, 23, 27,32,43,45,49,51,94,100]
# idx_very_nice=np.array(idx_very_nice)[np.array([3,4,8,10])]
# idx_very_nice=idx_blobs[idx_very_nice]
idx_very_nice = idx_blobs
view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, idx_very_nice]),
                 C[idx_very_nice, :], b, f, dims[0], dims[1],
                 YrA=YrA[idx_very_nice, :], img=Cn)
#%% reconstructed movie: blob components plus full background
new_m = cm.movie(
    np.reshape(A.tocsc()[:, idx_blobs] * C[idx_blobs] + b.dot(f),
               dims + (-1, ), order='F').transpose([2, 0, 1]))
new_m.play(fr=30, backend='opencv', gain=7., magnification=3.)
#%% reconstructed movie: blob components plus static (median) background
new_m = cm.movie(
    np.reshape(A.tocsc()[:, idx_blobs] * C[idx_blobs] + b * np.median(f),
               dims + (-1, ), order='F').transpose([2, 0, 1]))