def fit(self, images): """ This method uses the cnmf algorithm to find sources in data. it is calling everyfunction from the cnmf folder you can find out more at how the functions are called and how they are laid out at the ipython notebook Parameters: ---------- images : mapped np.ndarray of shape (t,x,y[,z]) containing the images that vary over time. Returns: -------- self: updated using the cnmf algorithm with C,A,S,b,f computed according to the given initial values Raise: ------ raise Exception('You need to provide a memory mapped file as input if you use patches!!') See Also: -------- ..image::docs/img/quickintro.png http://www.cell.com/neuron/fulltext/S0896-6273(15)01084-3 """ # Todo : to compartiment T = images.shape[0] self.initbatch = T dims = images.shape[1:] Y = np.transpose(images, list(range(1, len(dims) + 1)) + [0]) Yr = np.transpose(np.reshape(images, (T, -1), order='F')) print((T,) + dims) # Make sure filename is pointed correctly (numpy sets it to None sometimes) try: Y.filename = images.filename Yr.filename = images.filename except AttributeError: # if no memmapping cause working with small data pass # update/set all options that depend on data dimensions self.options['spatial_params']['dims'] = dims # number of rows, columns [and depths] self.options['spatial_params']['medw'] = (3,) * len(dims) # window of median filter # Morphological closing structuring element self.options['spatial_params']['se'] = np.ones((3,) * len(dims), dtype=np.uint8) # Binary element for determining connectivity self.options['spatial_params']['ss'] = np.ones((3,) * len(dims), dtype=np.uint8) print(('using ' + str(self.n_processes) + ' processes')) if self.n_pixels_per_process is None: avail_memory_per_process = psutil.virtual_memory()[1] / 2.**30 / self.n_processes mem_per_pix = 3.6977678498329843e-09 self.n_pixels_per_process = np.int(avail_memory_per_process / 8. / mem_per_pix / T) self.n_pixels_per_process = np.int(np.minimum( self.n_pixels_per_process, np.prod(dims) // self.n_processes)) self.options['preprocess_params']['n_pixels_per_process'] = self.n_pixels_per_process self.options['spatial_params']['n_pixels_per_process'] = self.n_pixels_per_process # if self.block_size is None: # self.block_size = self.n_pixels_per_process # # if self.num_blocks_per_run is None: # self.num_blocks_per_run = 20 # number of pixels to process at the same time for dot product. 
Make it # smaller if memory problems self.options['temporal_params']['block_size'] = self.block_size self.options['temporal_params']['num_blocks_per_run'] = self.num_blocks_per_run self.options['spatial_params']['block_size'] = self.block_size self.options['spatial_params']['num_blocks_per_run'] = self.num_blocks_per_run print(('using ' + str(self.n_pixels_per_process) + ' pixels per process')) print(('using ' + str(self.block_size) + ' block_size')) options = self.options if self.rf is None: # no patches print('preprocessing ...') Yr, sn, g, psx = preprocess_data(Yr, dview=self.dview, **options['preprocess_params']) if self.Ain is None: print('initializing ...') if self.alpha_snmf is not None: options['init_params']['alpha_snmf'] = self.alpha_snmf self.Ain, self.Cin, self.b_in, self.f_in, center = initialize_components( Y, sn = sn, options_total = options, **options['init_params']) if self.only_init: # only return values after initialization nA = np.squeeze(np.array(np.sum(np.square(self.Ain), axis=0))) nr = nA.size Cin = scipy.sparse.coo_matrix(self.Cin) YA = (self.Ain.T.dot(Yr).T) * scipy.sparse.spdiags(old_div(1., nA), 0, nr, nr) AA = ((self.Ain.T.dot(self.Ain)) * scipy.sparse.spdiags(old_div(1., nA), 0, nr, nr)) self.YrA = YA - Cin.T.dot(AA) self.A = self.Ain self.C = Cin.todense() if self.remove_very_bad_comps: print('removing bad components : ') final_frate = 10 r_values_min = 0.5 # threshold on space consistency fitness_min = -15 # threshold on time variability fitness_delta_min = -15 Npeaks = 10 traces = np.array(self.C) print('estimating the quality...') idx_components, idx_components_bad, fitness_raw,\ fitness_delta, r_values = components_evaluation.estimate_components_quality( traces, Y, self.A, np.array(self.C), self.b_in, self.f_in, final_frate=final_frate, Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min, fitness_delta_min=fitness_delta_min, return_all=True, N=5) print(('Keeping ' + str(len(idx_components)) + ' and discarding ' + str(len(idx_components_bad)))) self.C = self.C[idx_components] self.A = self.A[:, idx_components] self.YrA = self.YrA[:, idx_components] self.sn = sn self.b = self.b_in self.f = self.f_in self.g = g self.bl = None self.c1 = None self.neurons_sn = None return self print('update spatial ...') A, b, Cin, self.f_in = update_spatial_components(Yr, C=self.Cin, f=self.f_in, b_in=self.b_in, A_in=self.Ain, sn=sn, dview=self.dview, **options['spatial_params']) print('update temporal ...') if not self.skip_refinement: # set this to zero for fast updating without deconvolution options['temporal_params']['p'] = 0 else: options['temporal_params']['p'] = self.p print('deconvolution ...') options['temporal_params']['method'] = self.method_deconvolution C, A, b, f, S, bl, c1, neurons_sn, g, YrA, lam = update_temporal_components( Yr, A, b, Cin, self.f_in, dview=self.dview, **options['temporal_params']) if not self.skip_refinement: print('refinement...') if self.do_merge: print('merge components ...') A, C, nr, merged_ROIs, S, bl, c1, sn1, g1 = merge_components( Yr, A, b, C, f, S, sn, options[ 'temporal_params'], options['spatial_params'], dview=self.dview, bl=bl, c1=c1, sn=neurons_sn, g=g, thr=self.merge_thresh, mx=50, fast_merge=True) print((A.shape)) print('update spatial ...') A, b, C, f = update_spatial_components( Yr, C=C, f=f, A_in=A, sn=sn, b_in=b, dview=self.dview, **options['spatial_params']) # set it back to original value to perform full deconvolution options['temporal_params']['p'] = self.p print('update temporal ...') C, A, b, f, S, bl, 
c1, neurons_sn, g1, YrA, lam = update_temporal_components( Yr, A, b, C, f, dview=self.dview, bl=None, c1=None, sn=None, g=None, **options['temporal_params']) else: g1 = g # todo : ask for those.. C, f, S, bl, c1, neurons_sn, g1, YrA = C, f, S, bl, c1, neurons_sn, g, YrA else: # use patches if self.stride is None: self.stride = np.int(self.rf * 2 * .1) print(('**** Setting the stride to 10% of 2*rf automatically:' + str(self.stride))) if type(images) is np.ndarray: raise Exception( 'You need to provide a memory mapped file as input if you use patches!!') if self.only_init: options['patch_params']['only_init'] = True if self.alpha_snmf is not None: options['init_params']['alpha_snmf'] = self.alpha_snmf A, C, YrA, b, f, sn, optional_outputs = run_CNMF_patches(images.filename, dims + (T,), options, rf=self.rf, stride=self.stride, dview=self.dview, memory_fact=self.memory_fact, gnb=self.gnb, border_pix=self.border_pix, low_rank_background=self.low_rank_background) # options = CNMFSetParms(Y, self.n_processes, p=self.p, gSig=self.gSig, K=A.shape[ # -1], thr=self.merge_thresh, n_pixels_per_process=self.n_pixels_per_process, # block_size=self.block_size, check_nan=self.check_nan) # options['temporal_params']['method'] = self.method_deconvolution print("merging") merged_ROIs = [0] while len(merged_ROIs) > 0: A, C, nr, merged_ROIs, S, bl, c1, sn_n, g = merge_components(Yr, A, [], np.array(C), [], np.array( C), [], options['temporal_params'], options['spatial_params'], dview=self.dview, thr=self.merge_thresh, mx=np.Inf) # print('update spatial ...') # A, b, C, f = update_spatial_components( # Yr, C = C, f = f, A_in = A, sn=sn, b_in = b, dview=self.dview, **options['spatial_params']) print("update temporal") C, A, b, f, S, bl, c1, neurons_sn, g1, YrA, lam = update_temporal_components( Yr, A, b, C, f, dview=self.dview, bl=None, c1=None, sn=None, g=None, **options['temporal_params']) self.A = A self.C = C self.b = b self.f = f self.S = S self.YrA = YrA self.sn = sn self.g = g1 self.bl = bl self.c1 = c1 self.neurons_sn = neurons_sn self.lam = lam self.dims = dims return self
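# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the class above): fitting CNMF
# on a memory-mapped movie. The memmap file name and all parameter values are
# placeholders; patch mode (rf is not None) requires memory-mapped input.
import numpy as np
import caiman as cm
from caiman.source_extraction import cnmf

Yr, dims, T = cm.load_memmap('Yr_d1_512_d2_512_d3_1_order_C_frames_1000_.mmap')  # placeholder file
images = np.reshape(Yr.T, [T] + list(dims), order='F')
cnm = cnmf.CNMF(n_processes=1, k=4, gSig=[10, 10], merge_thresh=0.9, p=1,
                dview=None, rf=40, stride=8, gnb=1,
                method_init='greedy_roi', method_deconvolution='oasis')
cnm = cnm.fit(images)            # initialization -> spatial/temporal updates -> merging -> refinement
print(cnm.A.shape, cnm.C.shape)  # spatial footprints (pixels x K) and temporal traces (K x T)
# ---------------------------------------------------------------------------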
final_frate = params_movie['final_frate']
# threshold on space consistency
r_values_min = params_movie['r_values_min_patch']
# threshold on time variability
fitness_min = params_movie['fitness_delta_min_patch']
# threshold on time variability (if nonsparse activity)
fitness_delta_min = params_movie['fitness_delta_min_patch']
Npeaks = params_movie['Npeaks']
traces = C_tot + YrA_tot
# TODO: todocument
idx_components, idx_components_bad = estimate_components_quality(
    traces, Y, A_tot, C_tot, b_tot, f_tot, final_frate=final_frate, Npeaks=Npeaks,
    r_values_min=r_values_min, fitness_min=fitness_min, fitness_delta_min=fitness_delta_min)
print(('Keeping ' + str(len(idx_components)) +
       ' and discarding ' + str(len(idx_components_bad))))
print(time.time() - t1)
t_d = time.time() - t1
# %%
# TODO: show screenshot 13
if isscreen:
    pl.figure()
    crd = plot_contours(A_tot.tocsc()[:, idx_components],
                        Cn, thr=params_display['thr_plot'])
def test_general(): """ General Test of pipeline with comparison against ground truth A shorter version than the demo pipeline that calls comparison for the real test work Raises: --------- params_movie params_cnmf rig correction cnmf on patch cnmf full frame not able to read the file no groundtruth """ #\bug #\warning global params_movie global params_diplay fname = params_movie['fname'] niter_rig = params_movie['niter_rig'] max_shifts = params_movie['max_shifts'] splits_rig = params_movie['splits_rig'] num_splits_to_process_rig = params_movie['num_splits_to_process_rig'] cwd = os.getcwd() fname = download_demo(fname[0]) m_orig = cm.load(fname) min_mov = m_orig[:400].min() comp = comparison.Comparison() comp.dims = np.shape(m_orig)[1:] ################ RIG CORRECTION ################# t1 = time.time() mc = MotionCorrect(fname, min_mov, max_shifts=max_shifts, niter_rig=niter_rig, splits_rig=splits_rig, num_splits_to_process_rig=num_splits_to_process_rig, shifts_opencv=True, nonneg_movie=True) mc.motion_correct_rigid(save_movie=True) m_rig = cm.load(mc.fname_tot_rig) bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(np.int) comp.comparison['rig_shifts']['timer'] = time.time() - t1 comp.comparison['rig_shifts']['ourdata'] = mc.shifts_rig ########################################### if 'max_shifts' not in params_movie: fnames = params_movie['fname'] border_to_0 = 0 else: # elif not params_movie.has_key('overlaps'): fnames = mc.fname_tot_rig border_to_0 = bord_px_rig m_els = m_rig idx_xy = None add_to_movie = -np.nanmin(m_els) + 1 # movie must be positive remove_init = 0 downsample_factor = 1 base_name = fname[0].split('/')[-1][:-4] name_new = cm.save_memmap_each(fnames, base_name=base_name, resize_fact=(1, 1, downsample_factor), remove_init=remove_init, idx_xy=idx_xy, add_to_movie=add_to_movie, border_to_0=border_to_0) name_new.sort() if len(name_new) > 1: fname_new = cm.save_memmap_join(name_new, base_name='Yr', n_chunks=params_movie['n_chunks'], dview=None) else: logging.warning('One file only, not saving!') fname_new = name_new[0] Yr, dims, T = cm.load_memmap(fname_new) images = np.reshape(Yr.T, [T] + list(dims), order='F') Y = np.reshape(Yr, dims + (T, ), order='F') if np.min(images) < 0: # TODO: should do this in an automatic fashion with a while loop at the 367 line raise Exception('Movie too negative, add_to_movie should be larger') if np.sum(np.isnan(images)) > 0: # TODO: same here raise Exception( 'Movie contains nan! 
You did not remove enough borders') Cn = cm.local_correlations(Y) Cn[np.isnan(Cn)] = 0 p = params_movie['p'] merge_thresh = params_movie['merge_thresh'] rf = params_movie['rf'] stride_cnmf = params_movie['stride_cnmf'] K = params_movie['K'] init_method = params_movie['init_method'] gSig = params_movie['gSig'] alpha_snmf = params_movie['alpha_snmf'] if params_movie['is_dendrites'] == True: if params_movie['init_method'] is not 'sparse_nmf': raise Exception('dendritic requires sparse_nmf') if params_movie['alpha_snmf'] is None: raise Exception('need to set a value for alpha_snmf') ################ CNMF PART PATCH ################# t1 = time.time() cnm = cnmf.CNMF(n_processes=1, k=K, gSig=gSig, merge_thresh=params_movie['merge_thresh'], p=params_movie['p'], dview=None, rf=rf, stride=stride_cnmf, memory_fact=params_movie['memory_fact'], method_init=init_method, alpha_snmf=alpha_snmf, only_init_patch=params_movie['only_init_patch'], gnb=params_movie['gnb'], method_deconvolution='oasis') comp.cnmpatch = copy.copy(cnm) comp.cnmpatch.estimates = None cnm = cnm.fit(images) A_tot = cnm.estimates.A C_tot = cnm.estimates.C YrA_tot = cnm.estimates.YrA b_tot = cnm.estimates.b f_tot = cnm.estimates.f # DISCARDING logging.info(('Number of components:' + str(A_tot.shape[-1]))) final_frate = params_movie['final_frate'] # threshold on space consistency r_values_min = params_movie['r_values_min_patch'] # threshold on time variability fitness_min = params_movie['fitness_delta_min_patch'] fitness_delta_min = params_movie['fitness_delta_min_patch'] Npeaks = params_movie['Npeaks'] traces = C_tot + YrA_tot idx_components, idx_components_bad = estimate_components_quality( traces, Y, A_tot, C_tot, b_tot, f_tot, final_frate=final_frate, Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min, fitness_delta_min=fitness_delta_min) ####### A_tot = A_tot.tocsc()[:, idx_components] C_tot = C_tot[idx_components] comp.comparison['cnmf_on_patch']['timer'] = time.time() - t1 comp.comparison['cnmf_on_patch']['ourdata'] = [A_tot.copy(), C_tot.copy()] #################### ######################## ################ CNMF PART FULL ################# t1 = time.time() cnm = cnmf.CNMF(n_processes=1, k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh, p=p, Ain=A_tot, Cin=C_tot, f_in=f_tot, rf=None, stride=None, method_deconvolution='oasis') cnm = cnm.fit(images) # DISCARDING A, C, b, f, YrA, sn = cnm.estimates.A, cnm.estimates.C, cnm.estimates.b, cnm.estimates.f, cnm.estimates.YrA, cnm.estimates.sn final_frate = params_movie['final_frate'] # threshold on space consistency r_values_min = params_movie['r_values_min_full'] # threshold on time variability fitness_min = params_movie['fitness_delta_min_full'] fitness_delta_min = params_movie['fitness_delta_min_full'] Npeaks = params_movie['Npeaks'] traces = C + YrA idx_components, idx_components_bad, fitness_raw, fitness_delta, r_values = estimate_components_quality( traces, Y, A, C, b, f, final_frate=final_frate, Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min, fitness_delta_min=fitness_delta_min, return_all=True) ########## A_tot_full = A_tot.tocsc()[:, idx_components] C_tot_full = C_tot[idx_components] comp.comparison['cnmf_full_frame']['timer'] = time.time() - t1 comp.comparison['cnmf_full_frame']['ourdata'] = [ A_tot_full.copy(), C_tot_full.copy() ] #################### ######################## comp.save_with_compare(istruth=False, params=params_movie, Cn=Cn) log_files = glob.glob('*_LOG_*') try: for log_file in log_files: os.remove(log_file) except: 
logging.warning('Cannot remove log files') ############ assertions ################## pb = False if (comp.information['differences']['params_movie']): logging.error( "you need to set the same movie parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)" ) pb = True if (comp.information['differences']['params_cnm']): logging.warning( "you need to set the same cnmf parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)" ) # pb = True if (comp.information['diff']['rig']['isdifferent']): logging.error("the rigid shifts are different from the groundtruth ") pb = True if (comp.information['diff']['cnmpatch']['isdifferent']): logging.error( "the cnmf on patch produces different results than the groundtruth " ) pb = True if (comp.information['diff']['cnmfull']['isdifferent']): logging.error( "the cnmf full frame produces different results than the groundtruth " ) pb = True assert (not pb)
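# ---------------------------------------------------------------------------
# Refactor sketch (hypothetical, not how the test is currently written): the
# chain of if-blocks above could be driven by a table of (flag, message, fatal)
# entries, keeping the assertion logic in one place. Behavior is meant to match
# the checks above, including the non-fatal params_cnm warning.
checks = [
    (comp.information['differences']['params_movie'],
     "you need to set the same movie parameters as the ground truth "
     "(use the comp.see() function to explore it)", True),
    (comp.information['differences']['params_cnm'],
     "you need to set the same cnmf parameters as the ground truth "
     "(use the comp.see() function to explore it)", False),
    (comp.information['diff']['rig']['isdifferent'],
     "the rigid shifts are different from the groundtruth", True),
    (comp.information['diff']['cnmpatch']['isdifferent'],
     "the cnmf on patch produces different results than the groundtruth", True),
    (comp.information['diff']['cnmfull']['isdifferent'],
     "the cnmf full frame produces different results than the groundtruth", True),
]
pb = False
for is_different, message, fatal in checks:
    if is_different:
        (logging.error if fatal else logging.warning)(message)
        pb = pb or fatal
assert not pb
# ---------------------------------------------------------------------------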
def extract_masks(scan, mmap_scan, num_components=200, num_background_components=1, merge_threshold=0.8, init_on_patches=True, init_method='greedy_roi', soma_diameter=(14, 14), snmf_alpha=None, patch_size=(50, 50), proportion_patch_overlap=0.2, num_components_per_patch=5, num_processes=8, num_pixels_per_process=5000, fps=15): """ Extract masks from multi-photon scans using CNMF. Uses constrained non-negative matrix factorization to find spatial components (masks) and their fluorescence traces in a scan. Default values work well for somatic scans. Performed operations are: [Initialization on full image | Initialization on patches -> merge components] -> spatial update -> temporal update -> merge components -> spatial update -> temporal update :param np.array scan: 3-dimensional scan (image_height, image_width, num_frames). :param np.memmap mmap_scan: 2-d scan (image_height * image_width, num_frames) :param int num_components: An estimate of the number of spatial components in the scan :param int num_background_components: Number of components to model the background. :param int merge_threshold: Maximal temporal correlation allowed between the activity of overlapping components before merging them. :param bool init_on_patches: If True, run the initialization methods on small patches of the scan rather than on the whole image. :param string init_method: Initialization method for the components. 'greedy_roi': Look for a gaussian-shaped patch, apply rank-1 NMF, store components, calculate residual scan and repeat for num_components. 'sparse_nmf': Regularized non-negative matrix factorization (as impl. in sklearn) :param (float, float) soma_diameter: Estimated neuron size in y and x (pixels). Used in'greedy_roi' initialization to search for neurons of this size. :param int snmf_alpha: Regularization parameter (alpha) for sparse NMF (if used). :param (float, float) patch_size: Size of the patches in y and x (pixels). :param float proportion_patch_overlap: Patches are sampled in a sliding window. This controls how much overlap is between adjacent patches (0 for none, 0.9 for 90%). :param int num_components_per_patch: Number of components per patch (used if init_on_patches=True) :param int num_processes: Number of processes to run in parallel. None for as many processes as available cores. :param int num_pixels_per_process: Number of pixels that a process handles each iteration. :param fps: Frame rate. Used for temporal downsampling and to remove bad components. :returns: Weighted masks (image_height x image_width x num_components). Inferred location of each component. :returns: Denoised fluorescence traces (num_components x num_frames). :returns: Masks for background components (image_height x image_width x num_background_components). :returns: Traces for background components (image_height x image_width x num_background_components). :returns: Raw fluorescence traces (num_components x num_frames). Fluorescence of each component in the scan minus activity from other components and background. ..warning:: The produced number of components is not exactly what you ask for because some components will be merged or deleted. ..warning:: Better results if scans are nonnegative. 
""" # Get some params image_height, image_width, num_frames = scan.shape # Start processes log('Starting {} processes...'.format(num_processes)) pool = mp.Pool(processes=num_processes) # Initialize components log('Initializing components...') if init_on_patches: # TODO: Redo this (per-patch initialization) in a nicer/more efficient way # Make sure they are integers patch_size = np.array(patch_size) half_patch_size = np.int32(np.round(patch_size / 2)) num_components_per_patch = int(round(num_components_per_patch)) patch_overlap = np.int32(np.round(patch_size * proportion_patch_overlap)) # Create options dictionary (needed for run_CNMF_patches) options = {'patch_params': {'ssub': 'UNUSED.', 'tsub': 'UNUSED', 'nb': num_background_components, 'only_init': True, 'skip_refinement': 'UNUSED.', 'remove_very_bad_comps': False}, # remove_very_bads_comps unnecesary (same as default) 'preprocess_params': {'check_nan': False}, # check_nan is unnecessary (same as default value) 'spatial_params': {'nb': num_background_components}, # nb is unnecessary, it is pased to the function and in init_params 'temporal_params': {'p': 0, 'method': 'UNUSED.', 'block_size': 'UNUSED.'}, 'init_params': {'K': num_components_per_patch, 'gSig': np.array(soma_diameter)/2, 'gSiz': None, 'method': init_method, 'alpha_snmf': snmf_alpha, 'nb': num_background_components, 'ssub': 1, 'tsub': max(int(fps / 2), 1), 'options_local_NMF': 'UNUSED.', 'normalize_init': True, 'rolling_sum': True, 'rolling_length': 100, 'min_corr': 'UNUSED', 'min_pnr': 'UNUSED', 'deconvolve_options_init': 'UNUSED', 'ring_size_factor': 'UNUSED', 'center_psf': 'UNUSED'}, # gSiz, ssub, tsub, options_local_NMF, normalize_init, rolling_sum unnecessary (same as default values) 'merging' : {'thr': 'UNUSED.'}} # Initialize per patch res = map_reduce.run_CNMF_patches(mmap_scan.filename, (image_height, image_width, num_frames), options, rf=half_patch_size, stride=patch_overlap, gnb=num_background_components, dview=pool) initial_A, initial_C, YrA, initial_b, initial_f, pixels_noise, _ = res # Merge spatially overlapping components merged_masks = ['dummy'] while len(merged_masks) > 0: res = merging.merge_components(mmap_scan, initial_A, initial_b, initial_C, initial_f, initial_C, pixels_noise, {'p': 0, 'method': 'cvxpy'}, spatial_params='UNUSED', dview=pool, thr=merge_threshold, mx=np.Inf) initial_A, initial_C, num_components, merged_masks, S, bl, c1, neurons_noise, g = res # Delete log files (one per patch) log_files = glob.glob('caiman*_LOG_*') for log_file in log_files: os.remove(log_file) else: from scipy.sparse import csr_matrix if init_method == 'greedy_roi': res = _greedyROI(scan, num_components, soma_diameter, num_background_components) log('Refining initial components (HALS)...') res = initialization.hals(scan, res[0].reshape([image_height * image_width, -1], order='F'), res[1], res[2].reshape([image_height * image_width, -1], order='F'), res[3], maxIter=3) initial_A, initial_C, initial_b, initial_f = res else: print('Warning: Running sparse_nmf initialization on the entire field of view ' 'takes a lot of time.') res = initialization.initialize_components(scan, K=num_components, nb=num_background_components, method=init_method, alpha_snmf=snmf_alpha) initial_A, initial_C, initial_b, initial_f, _ = res initial_A = csr_matrix(initial_A) log(initial_A.shape[-1], 'components found...') # Remove bad components (based on spatial consistency and spiking activity) log('Removing bad components...') good_indices, _ = 
components_evaluation.estimate_components_quality(initial_C, scan, initial_A, initial_C, initial_b, initial_f, final_frate=fps, r_values_min=0.7, fitness_min=-20, fitness_delta_min=-20, dview=pool) initial_A = initial_A[:, good_indices] initial_C = initial_C[good_indices] log(initial_A.shape[-1], 'components remaining...') # Estimate noise per pixel log('Calculating noise per pixel...') pixels_noise, _ = pre_processing.get_noise_fft_parallel(mmap_scan, num_pixels_per_process, pool) # Update masks log('Updating masks...') A, b, C, f = spatial.update_spatial_components(mmap_scan, initial_C, initial_f, initial_A, b_in=initial_b, sn=pixels_noise, dims=(image_height, image_width), method='dilate', dview=pool, n_pixels_per_process=num_pixels_per_process, nb=num_background_components) # Update traces (no impulse response modelling p=0) log('Updating traces...') res = temporal.update_temporal_components(mmap_scan, A, b, C, f, nb=num_background_components, block_size=10000, p=0, method='cvxpy', dview=pool) C, A, b, f, S, bl, c1, neurons_noise, g, YrA, _ = res # Merge components log('Merging overlapping (and temporally correlated) masks...') merged_masks = ['dummy'] while len(merged_masks) > 0: res = merging.merge_components(mmap_scan, A, b, C, f, S, pixels_noise, {'p': 0, 'method': 'cvxpy'}, 'UNUSED', dview=pool, thr=merge_threshold, bl=bl, c1=c1, sn=neurons_noise, g=g) A, C, num_components, merged_masks, S, bl, c1, neurons_noise, g = res # Refine masks log('Refining masks...') A, b, C, f = spatial.update_spatial_components(mmap_scan, C, f, A, b_in=b, sn=pixels_noise, dims=(image_height, image_width), method='dilate', dview=pool, n_pixels_per_process=num_pixels_per_process, nb=num_background_components) # Refine traces log('Refining traces...') res = temporal.update_temporal_components(mmap_scan, A, b, C, f, nb=num_background_components, block_size=10000, p=0, method='cvxpy', dview=pool) C, A, b, f, S, bl, c1, neurons_noise, g, YrA, _ = res # Removing bad components (more stringent criteria) log('Removing bad components...') good_indices, _ = components_evaluation.estimate_components_quality(C + YrA, scan, A, C, b, f, final_frate=fps, r_values_min=0.8, fitness_min=-40, fitness_delta_min=-40, dview=pool) A = A.toarray()[:, good_indices] C = C[good_indices] YrA = YrA[good_indices] log(A.shape[-1], 'components remaining...') # Stop processes log('Done.') pool.close() # Get results masks = A.reshape((image_height, image_width, -1), order='F') # h x w x num_components traces = C # num_components x num_frames background_masks = b.reshape((image_height, image_width, -1), order='F') # h x w x num_components background_traces = f # num_background_components x num_frames raw_traces = C + YrA # num_components x num_frames # Rescale traces to match scan range scaling_factor = np.sum(masks**2, axis=(0, 1)) / np.sum(masks, axis=(0, 1)) traces = traces * np.expand_dims(scaling_factor, -1) raw_traces = raw_traces * np.expand_dims(scaling_factor, -1) masks = masks / scaling_factor background_scaling_factor = np.sum(background_masks**2, axis=(0, 1)) / np.sum(background_masks, axis=(0,1)) background_traces = background_traces * np.expand_dims(background_scaling_factor, -1) background_masks = background_masks / background_scaling_factor return masks, traces, background_masks, background_traces, raw_traces
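# ---------------------------------------------------------------------------
# Usage sketch (illustrative): calling extract_masks() above. It assumes `scan`
# is a nonnegative (height, width, frames) array and builds the matching
# pixels-by-frames memmap that the function expects; file names and parameter
# values are placeholders.
import numpy as np

scan = np.load('scan.npy')  # placeholder: motion-corrected scan, shape (h, w, t)
h, w, t = scan.shape
mmap_scan = np.memmap('scan_mmap.mmap', dtype=np.float32, mode='w+', shape=(h * w, t))
mmap_scan[:] = scan.reshape(h * w, t, order='F')
mmap_scan.flush()

masks, traces, bg_masks, bg_traces, raw_traces = extract_masks(
    scan, mmap_scan, num_components=200, soma_diameter=(14, 14), fps=15)
print(masks.shape, traces.shape)  # (h, w, K_kept), (K_kept, t)
# ---------------------------------------------------------------------------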
# %%
pl.figure()
# TODO: show screenshot 12
# TODO: change the way it is used
crd = plot_contours(A_tot, Cn, thr=params_display['thr_plot'])
# %% DISCARD LOW QUALITY COMPONENT
final_frate = params_movie['final_frate']
r_values_min = params_movie['r_values_min_patch']  # threshold on space consistency
fitness_min = params_movie['fitness_delta_min_patch']  # threshold on time variability
# threshold on time variability (if nonsparse activity)
fitness_delta_min = params_movie['fitness_delta_min_patch']
Npeaks = params_movie['Npeaks']
traces = C_tot + YrA_tot
# TODO: todocument
idx_components, idx_components_bad = estimate_components_quality(
    traces, Y, A_tot, C_tot, b_tot, f_tot, final_frate=final_frate, Npeaks=Npeaks,
    r_values_min=r_values_min, fitness_min=fitness_min, fitness_delta_min=fitness_delta_min)
print(('Keeping ' + str(len(idx_components)) +
       ' and discarding ' + str(len(idx_components_bad))))
# %%
# TODO: show screenshot 13
pl.figure()
crd = plot_contours(A_tot.tocsc()[:, idx_components], Cn, thr=params_display['thr_plot'])
# %%
A_tot = A_tot.tocsc()[:, idx_components]
C_tot = C_tot[idx_components]
# %% rerun updating the components to refine
t1 = time.time()
cnm = cnmf.CNMF(n_processes=1, k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh, p=p,
                dview=dview, Ain=A_tot, Cin=C_tot, b_in=b_tot, f_in=f_tot, rf=None,
                stride=None, method_deconvolution='oasis', gnb=params_movie['gnb'],
def test_general(): """ General Test of pipeline with comparison against ground truth A shorter version than the demo pipeline that calls comparison for the real test work Raises: --------- params_movie params_cnmf rig correction cnmf on patch cnmf full frame not able to read the file no groundtruth """ #\bug #\warning global params_movie global params_diplay fname = params_movie['fname'] niter_rig = params_movie['niter_rig'] max_shifts = params_movie['max_shifts'] splits_rig = params_movie['splits_rig'] num_splits_to_process_rig = params_movie['num_splits_to_process_rig'] cwd = os.getcwd() fname = download_demo(fname[0]) m_orig = cm.load(fname) min_mov = m_orig[:400].min() comp = comparison.Comparison() comp.dims = np.shape(m_orig)[1:] ################ RIG CORRECTION ################# t1 = time.time() mc = MotionCorrect(fname, min_mov, max_shifts=max_shifts, niter_rig=niter_rig, splits_rig=splits_rig, num_splits_to_process_rig=num_splits_to_process_rig, shifts_opencv=True, nonneg_movie=True) mc.motion_correct_rigid(save_movie=True) m_rig = cm.load(mc.fname_tot_rig) bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(np.int) comp.comparison['rig_shifts']['timer'] = time.time() - t1 comp.comparison['rig_shifts']['ourdata'] = mc.shifts_rig ########################################### if 'max_shifts' not in params_movie: fnames = params_movie['fname'] border_to_0 = 0 else: # elif not params_movie.has_key('overlaps'): fnames = mc.fname_tot_rig border_to_0 = bord_px_rig m_els = m_rig idx_xy = None add_to_movie = -np.nanmin(m_els) + 1 # movie must be positive remove_init = 0 downsample_factor = 1 base_name = fname[0].split('/')[-1][:-4] name_new = cm.save_memmap_each(fnames, base_name=base_name, resize_fact=( 1, 1, downsample_factor), remove_init=remove_init, idx_xy=idx_xy, add_to_movie=add_to_movie, border_to_0=border_to_0) name_new.sort() if len(name_new) > 1: fname_new = cm.save_memmap_join( name_new, base_name='Yr', n_chunks=params_movie['n_chunks'], dview=None) else: print('One file only, not saving!') fname_new = name_new[0] Yr, dims, T = cm.load_memmap(fname_new) images = np.reshape(Yr.T, [T] + list(dims), order='F') Y = np.reshape(Yr, dims + (T,), order='F') if np.min(images) < 0: # TODO: should do this in an automatic fashion with a while loop at the 367 line raise Exception('Movie too negative, add_to_movie should be larger') if np.sum(np.isnan(images)) > 0: # TODO: same here raise Exception( 'Movie contains nan! 
You did not remove enough borders') Cn = cm.local_correlations(Y) Cn[np.isnan(Cn)] = 0 p = params_movie['p'] merge_thresh = params_movie['merge_thresh'] rf = params_movie['rf'] stride_cnmf = params_movie['stride_cnmf'] K = params_movie['K'] init_method = params_movie['init_method'] gSig = params_movie['gSig'] alpha_snmf = params_movie['alpha_snmf'] if params_movie['is_dendrites'] == True: if params_movie['init_method'] is not 'sparse_nmf': raise Exception('dendritic requires sparse_nmf') if params_movie['alpha_snmf'] is None: raise Exception('need to set a value for alpha_snmf') ################ CNMF PART PATCH ################# t1 = time.time() cnm = cnmf.CNMF(n_processes=1, k=K, gSig=gSig, merge_thresh=params_movie['merge_thresh'], p=params_movie['p'], dview=None, rf=rf, stride=stride_cnmf, memory_fact=params_movie['memory_fact'], method_init=init_method, alpha_snmf=alpha_snmf, only_init_patch=params_movie[ 'only_init_patch'], gnb=params_movie['gnb'], method_deconvolution='oasis') comp.cnmpatch = copy.copy(cnm) cnm = cnm.fit(images) A_tot = cnm.A C_tot = cnm.C YrA_tot = cnm.YrA b_tot = cnm.b f_tot = cnm.f # DISCARDING print(('Number of components:' + str(A_tot.shape[-1]))) final_frate = params_movie['final_frate'] # threshold on space consistency r_values_min = params_movie['r_values_min_patch'] # threshold on time variability fitness_min = params_movie['fitness_delta_min_patch'] fitness_delta_min = params_movie['fitness_delta_min_patch'] Npeaks = params_movie['Npeaks'] traces = C_tot + YrA_tot idx_components, idx_components_bad = estimate_components_quality( traces, Y, A_tot, C_tot, b_tot, f_tot, final_frate=final_frate, Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min, fitness_delta_min=fitness_delta_min) ####### A_tot = A_tot.tocsc()[:, idx_components] C_tot = C_tot[idx_components] comp.comparison['cnmf_on_patch']['timer'] = time.time() - t1 comp.comparison['cnmf_on_patch']['ourdata'] = [A_tot.copy(), C_tot.copy()] #################### ######################## ################ CNMF PART FULL ################# t1 = time.time() cnm = cnmf.CNMF(n_processes=1, k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh, p=p, Ain=A_tot, Cin=C_tot, f_in=f_tot, rf=None, stride=None, method_deconvolution='oasis') cnm = cnm.fit(images) # DISCARDING A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn final_frate = params_movie['final_frate'] # threshold on space consistency r_values_min = params_movie['r_values_min_full'] # threshold on time variability fitness_min = params_movie['fitness_delta_min_full'] fitness_delta_min = params_movie['fitness_delta_min_full'] Npeaks = params_movie['Npeaks'] traces = C + YrA idx_components, idx_components_bad, fitness_raw, fitness_delta, r_values = estimate_components_quality( traces, Y, A, C, b, f, final_frate=final_frate, Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min, fitness_delta_min=fitness_delta_min, return_all=True) ########## A_tot_full = A_tot.tocsc()[:, idx_components] C_tot_full = C_tot[idx_components] comp.comparison['cnmf_full_frame']['timer'] = time.time() - t1 comp.comparison['cnmf_full_frame']['ourdata'] = [ A_tot_full.copy(), C_tot_full.copy()] #################### ######################## comp.save_with_compare(istruth=False, params=params_movie, Cn=Cn) log_files = glob.glob('*_LOG_*') try: for log_file in log_files: os.remove(log_file) except: print('Cannot remove log files') ############ assertions ################## pb = False if (comp.information['differences']['params_movie']): print("you need 
to set the same movie parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)") pb = True if (comp.information['differences']['params_cnm']): print("you need to set the same cnmf parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)") pb = True if (comp.information['diff']['rig']['isdifferent']): print("the rigid shifts are different from the groundtruth ") pb = True if (comp.information['diff']['cnmpatch']['isdifferent']): print("the cnmf on patch produces different results than the groundtruth ") pb = True if (comp.information['diff']['cnmfull']['isdifferent']): print("the cnmf full frame produces different results than the groundtruth ") pb = True assert (not pb)
def fit(self, images): """ This method uses the cnmf algorithm to find sources in data. Parameters ---------- images : mapped np.ndarray of shape (t,x,y[,z]) containing the images that vary over time. Returns -------- self """ T = images.shape[0] dims = images.shape[1:] Y = np.transpose(images, list(range(1, len(dims) + 1)) + [0]) Yr = np.transpose(np.reshape(images, (T, -1), order='F')) print((T, ) + dims) # Make sure filename is pointed correctly (numpy sets it to None sometimes) Y.filename = images.filename Yr.filename = images.filename options = CNMFSetParms( Y, self.n_processes, p=self.p, gSig=self.gSig, K=self.k, ssub=self.ssub, tsub=self.tsub, p_ssub=self.p_ssub, p_tsub=self.p_tsub, method_init=self.method_init, n_pixels_per_process=self.n_pixels_per_process, block_size=self.block_size, check_nan=self.check_nan, nb=self.gnb, normalize_init=self.normalize_init, options_local_NMF=self.options_local_NMF, remove_very_bad_comps=self.remove_very_bad_comps) self.options = options if self.rf is None: # no patches print('preprocessing ...') Yr, sn, g, psx = preprocess_data(Yr, dview=self.dview, **options['preprocess_params']) if self.Ain is None: print('initializing ...') if self.alpha_snmf is not None: options['init_params']['alpha_snmf'] = self.alpha_snmf self.Ain, self.Cin, self.b_in, self.f_in, center = initialize_components( Y, **options['init_params']) if self.only_init: # only return values after initialization nA = np.squeeze(np.array(np.sum(np.square(self.Ain), axis=0))) nr = nA.size Cin = scipy.sparse.coo_matrix(self.Cin) YA = (self.Ain.T.dot(Yr).T) * scipy.sparse.spdiags( old_div(1., nA), 0, nr, nr) AA = ((self.Ain.T.dot(self.Ain)) * scipy.sparse.spdiags(old_div(1., nA), 0, nr, nr)) self.YrA = YA - Cin.T.dot(AA) self.A = self.Ain self.C = Cin.todense() if self.remove_very_bad_comps: final_frate = 3 r_values_min = 0.5 # threshold on space consistency fitness_min = -15 # threshold on time variability fitness_delta_min = -15 Npeaks = 10 traces = np.array(self.C) # import pdb;pdb.set_trace() idx_components, idx_components_bad, fitness_raw, fitness_delta, r_values = components_evaluation.estimate_components_quality( traces, Y, self.A, np.array(self.C), self.b_in, self.f_in, final_frate=final_frate, Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min, fitness_delta_min=fitness_delta_min, return_all=True, N=5) print(('Keeping ' + str(len(idx_components)) + ' and discarding ' + str(len(idx_components_bad)))) self.C = self.C[idx_components] self.A = self.A[:, idx_components] self.YrA = self.YrA[:, idx_components] self.sn = sn self.b = self.b_in self.f = self.f_in self.g = g self.bl = None self.c1 = None self.neurons_sn = None return self print('update spatial ...') A, b, Cin, self.f_in = update_spatial_components( Yr, self.Cin, self.f_in, self.Ain, sn=sn, dview=self.dview, **options['spatial_params']) print('update temporal ...') if not self.skip_refinement: # set this to zero for fast updating without deconvolution options['temporal_params']['p'] = 0 else: options['temporal_params']['p'] = self.p options['temporal_params']['method'] = self.method_deconvolution C, A, b, f, S, bl, c1, neurons_sn, g, YrA = update_temporal_components( Yr, A, b, Cin, self.f_in, dview=self.dview, **options['temporal_params']) if not self.skip_refinement: if self.do_merge: print('merge components ...') A, C, nr, merged_ROIs, S, bl, c1, sn1, g1 = merge_components( Yr, A, b, C, f, S, sn, options['temporal_params'], options['spatial_params'], dview=self.dview, bl=bl, c1=c1, sn=neurons_sn, g=g, 
thr=self.merge_thresh, mx=50, fast_merge=True) print((A.shape)) print('update spatial ...') A, b, C, f = update_spatial_components( Yr, C, f, A, sn=sn, dview=self.dview, **options['spatial_params']) # set it back to original value to perform full deconvolution options['temporal_params']['p'] = self.p print('update temporal ...') C, A, b, f, S, bl, c1, neurons_sn, g1, YrA = update_temporal_components( Yr, A, b, C, f, dview=self.dview, bl=None, c1=None, sn=None, g=None, **options['temporal_params']) else: C, f, S, bl, c1, neurons_sn, g1, YrA = C, f, S, bl, c1, neurons_sn, g, YrA else: # use patches if self.stride is None: self.stride = np.int(self.rf * 2 * .1) print( ('**** Setting the stride to 10% of 2*rf automatically:' + str(self.stride))) if type(images) is np.ndarray: raise Exception( 'You need to provide a memory mapped file as input if you use patches!!' ) if self.only_init: options['patch_params']['only_init'] = True if self.alpha_snmf is not None: options['init_params']['alpha_snmf'] = self.alpha_snmf A, C, YrA, b, f, sn, optional_outputs = run_CNMF_patches( images.filename, dims + (T, ), options, rf=self.rf, stride=self.stride, dview=self.dview, memory_fact=self.memory_fact, gnb=self.gnb) options = CNMFSetParms( Y, self.n_processes, p=self.p, gSig=self.gSig, K=A.shape[-1], thr=self.merge_thresh, n_pixels_per_process=self.n_pixels_per_process, block_size=self.block_size, check_nan=self.check_nan) options['temporal_params']['method'] = self.method_deconvolution print("merging") merged_ROIs = [0] while len(merged_ROIs) > 0: A, C, nr, merged_ROIs, S, bl, c1, sn_n, g = merge_components( Yr, A, [], np.array(C), [], np.array(C), [], options['temporal_params'], options['spatial_params'], dview=self.dview, thr=self.merge_thresh, mx=np.Inf) print("update temporal") C, A, b, f, S, bl, c1, neurons_sn, g1, YrA = update_temporal_components( Yr, A, b, C, f, dview=self.dview, bl=None, c1=None, sn=None, g=None, **options['temporal_params']) # idx_components, fitness, erfc ,r_values, num_significant_samples = evaluate_components(Y,C+YrA,A,N=self.N_samples_fitness,robust_std=self.robust_std,thresh_finess=self.fitness_threshold) # sure_in_idx= idx_components[np.logical_and(np.array(num_significant_samples)>0 ,np.array(r_values)>=self.corr_threshold)] # # print ('Keeping ' + str(len(sure_in_idx)) + ' components out of ' + str(len(idx_components))) # # # A=A[:,sure_in_idx] # C=C[sure_in_idx,:] # YrA=YrA[sure_in_idx] self.A = A self.C = C self.b = b self.f = f self.S = S self.YrA = YrA self.sn = sn self.g = g1 self.bl = bl self.c1 = c1 self.neurons_sn = neurons_sn return self
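# ---------------------------------------------------------------------------
# Preparation sketch (illustrative): when rf is not None the fit() above raises
# if given a plain ndarray, so the movie must first be memory mapped. This uses
# the same helpers that appear in the test code in this file; file names and
# the add_to_movie/border values are placeholders.
import numpy as np
import caiman as cm

fnames = ['example_movie.tif']  # placeholder input movie(s)
name_new = cm.save_memmap_each(fnames, base_name='Yr', resize_fact=(1, 1, 1),
                               remove_init=0, idx_xy=None, add_to_movie=0, border_to_0=0)
name_new.sort()
if len(name_new) > 1:
    fname_new = cm.save_memmap_join(name_new, base_name='Yr', n_chunks=20, dview=None)
else:
    fname_new = name_new[0]
Yr, dims, T = cm.load_memmap(fname_new)
images = np.reshape(Yr.T, [T] + list(dims), order='F')  # now safe to pass to fit() with patches
# ---------------------------------------------------------------------------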
def extract_masks(scan, mmap_scan, num_components=200, num_background_components=1, merge_threshold=0.8, init_on_patches=True, init_method='greedy_roi', soma_diameter=(14, 14), snmf_alpha=None, patch_size=(50, 50), proportion_patch_overlap=0.2, num_components_per_patch=5, num_processes=8, num_pixels_per_process=5000, fps=15): """ Extract masks from multi-photon scans using CNMF. Uses constrained non-negative matrix factorization to find spatial components (masks) and their fluorescence traces in a scan. Default values work well for somatic scans. Performed operations are: [Initialization on full image | Initialization on patches -> merge components] -> spatial update -> temporal update -> merge components -> spatial update -> temporal update :param np.array scan: 3-dimensional scan (image_height, image_width, num_frames). :param np.memmap mmap_scan: 2-d scan (image_height * image_width, num_frames) :param int num_components: An estimate of the number of spatial components in the scan :param int num_background_components: Number of components to model the background. :param int merge_threshold: Maximal temporal correlation allowed between the activity of overlapping components before merging them. :param bool init_on_patches: If True, run the initialization methods on small patches of the scan rather than on the whole image. :param string init_method: Initialization method for the components. 'greedy_roi': Look for a gaussian-shaped patch, apply rank-1 NMF, store components, calculate residual scan and repeat for num_components. 'sparse_nmf': Regularized non-negative matrix factorization (as impl. in sklearn) :param (float, float) soma_diameter: Estimated neuron size in y and x (pixels). Used in'greedy_roi' initialization to search for neurons of this size. :param int snmf_alpha: Regularization parameter (alpha) for sparse NMF (if used). :param (float, float) patch_size: Size of the patches in y and x (pixels). :param float proportion_patch_overlap: Patches are sampled in a sliding window. This controls how much overlap is between adjacent patches (0 for none, 0.9 for 90%). :param int num_components_per_patch: Number of components per patch (used if init_on_patches=True) :param int num_processes: Number of processes to run in parallel. None for as many processes as available cores. :param int num_pixels_per_process: Number of pixels that a process handles each iteration. :param fps: Frame rate. Used for temporal downsampling and to remove bad components. :returns: Weighted masks (image_height x image_width x num_components). Inferred location of each component. :returns: Denoised fluorescence traces (num_components x num_frames). :returns: Masks for background components (image_height x image_width x num_background_components). :returns: Traces for background components (image_height x image_width x num_background_components). :returns: Raw fluorescence traces (num_components x num_frames). Fluorescence of each component in the scan minus activity from other components and background. ..warning:: The produced number of components is not exactly what you ask for because some components will be merged or deleted. ..warning:: Better results if scans are nonnegative. 
""" # Get some params image_height, image_width, num_frames = scan.shape # Start processes log('Starting {} processes...'.format(num_processes)) pool = mp.Pool(processes=num_processes) # Initialize components log('Initializing components...') if init_on_patches: # TODO: Redo this (per-patch initialization) in a nicer/more efficient way # Make sure they are integers patch_size = np.array(patch_size) half_patch_size = np.int32(np.round(patch_size / 2)) num_components_per_patch = int(round(num_components_per_patch)) patch_overlap = np.int32(np.round(patch_size * proportion_patch_overlap)) # Create options dictionary (needed for run_CNMF_patches) options = {'patch_params': {'ssub': 'UNUSED.', 'tsub': 'UNUSED', 'nb': num_background_components, 'only_init': True, 'skip_refinement': 'UNUSED.', 'remove_very_bad_comps': False}, # remove_very_bads_comps unnecesary (same as default) 'preprocess_params': {'check_nan': False}, # check_nan is unnecessary (same as default value) 'spatial_params': {'nb': num_background_components}, # nb is unnecessary, it is pased to the function and in init_params 'temporal_params': {'p': 0, 'method': 'UNUSED.', 'block_size': 'UNUSED.'}, 'init_params': {'K': num_components_per_patch, 'gSig': np.array(soma_diameter)/2, 'gSiz': None, 'method': init_method, 'alpha_snmf': snmf_alpha, 'nb': num_background_components, 'ssub': 1, 'tsub': max(int(fps / 2), 1), 'options_local_NMF': 'UNUSED.', 'normalize_init': True, 'rolling_sum': True, 'rolling_length': 100, 'min_corr': 'UNUSED', 'min_pnr': 'UNUSED', 'deconvolve_options_init': 'UNUSED', 'ring_size_factor': 'UNUSED', 'center_psf': 'UNUSED'}, # gSiz, ssub, tsub, options_local_NMF, normalize_init, rolling_sum unnecessary (same as default values) 'merging' : {'thr': 'UNUSED.'}} # Initialize per patch res = map_reduce.run_CNMF_patches(mmap_scan.filename, (image_height, image_width, num_frames), options, rf=half_patch_size, stride=patch_overlap, gnb=num_background_components, dview=pool) initial_A, initial_C, YrA, initial_b, initial_f, pixels_noise, _ = res # Merge spatially overlapping components merged_masks = ['dummy'] while len(merged_masks) > 0: res = merging.merge_components(mmap_scan, initial_A, initial_b, initial_C, initial_f, initial_C, pixels_noise, {'p': 0, 'method': 'cvxpy'}, spatial_params='UNUSED', dview=pool, thr=merge_threshold, mx=np.Inf) initial_A, initial_C, num_components, merged_masks, S, bl, c1, neurons_noise, g = res # Delete log files (one per patch) log_files = glob.glob('caiman*_LOG_*') for log_file in log_files: os.remove(log_file) else: from scipy.sparse import csr_matrix if init_method == 'greedy_roi': res = _greedyROI(scan, num_components, soma_diameter, num_background_components) log('Refining initial components (HALS)...') res = initialization.hals(scan, res[0].reshape([image_height * image_width, -1], order='F'), res[1], res[2].reshape([image_height * image_width, -1], order='F'), res[3], maxIter=3) initial_A, initial_C, initial_b, initial_f = res else: print('Warning: Running sparse_nmf initialization on the entire field of view ' 'takes a lot of time.') res = initialization.initialize_components(scan, K=num_components, nb=num_background_components, method=init_method, alpha_snmf=snmf_alpha) initial_A, initial_C, initial_b, initial_f, _ = res initial_A = csr_matrix(initial_A) log(initial_A.shape[-1], 'components found...') # Remove bad components (based on spatial consistency and spiking activity) log('Removing bad components...') good_indices, _ = 
components_evaluation.estimate_components_quality(initial_C, scan, initial_A, initial_C, initial_b, initial_f, final_frate=fps, r_values_min=0.7, fitness_min=-20, fitness_delta_min=-20, dview=pool) initial_A = initial_A[:, good_indices] initial_C = initial_C[good_indices] log(initial_A.shape[-1], 'components remaining...') # Estimate noise per pixel log('Calculating noise per pixel...') pixels_noise, _ = pre_processing.get_noise_fft_parallel(mmap_scan, num_pixels_per_process, pool) # Update masks log('Updating masks...') A, b, C, f = spatial.update_spatial_components(mmap_scan, initial_C, initial_f, initial_A, b_in=initial_b, sn=pixels_noise, dims=(image_height, image_width), method='dilate', dview=pool, n_pixels_per_process=num_pixels_per_process, nb=num_background_components) # Update traces (no impulse response modelling p=0) log('Updating traces...') res = temporal.update_temporal_components(mmap_scan, A, b, C, f, nb=num_background_components, block_size=10000, p=0, method='cvxpy', dview=pool) C, A, b, f, S, bl, c1, neurons_noise, g, YrA, _ = res # Merge components log('Merging overlapping (and temporally correlated) masks...') merged_masks = ['dummy'] while len(merged_masks) > 0: res = merging.merge_components(mmap_scan, A, b, C, f, S, pixels_noise, {'p': 0, 'method': 'cvxpy'}, 'UNUSED', dview=pool, thr=merge_threshold, bl=bl, c1=c1, sn=neurons_noise, g=g) A, C, num_components, merged_masks, S, bl, c1, neurons_noise, g = res # Refine masks log('Refining masks...') A, b, C, f = spatial.update_spatial_components(mmap_scan, C, f, A, b_in=b, sn=pixels_noise, dims=(image_height, image_width), method='dilate', dview=pool, n_pixels_per_process=num_pixels_per_process, nb=num_background_components) # Refine traces log('Refining traces...') res = temporal.update_temporal_components(mmap_scan, A, b, C, f, nb=num_background_components, block_size=10000, p=0, method='cvxpy', dview=pool) C, A, b, f, S, bl, c1, neurons_noise, g, YrA, _ = res # Removing bad components (more stringent criteria) log('Removing bad components...') good_indices, _ = components_evaluation.estimate_components_quality(C + YrA, scan, A, C, b, f, final_frate=fps, r_values_min=0.8, fitness_min=-40, fitness_delta_min=-40, dview=pool) A = A.toarray()[:, good_indices] C = C[good_indices] YrA = YrA[good_indices] log(A.shape[-1], 'components remaining...') # Stop processes log('Done.') pool.close() # Get results masks = A.reshape((image_height, image_width, -1), order='F') # h x w x num_components traces = C # num_components x num_frames background_masks = b.reshape((image_height, image_width, -1), order='F') # h x w x num_components background_traces = f # num_background_components x num_frames raw_traces = C + YrA # num_components x num_frames # Rescale traces to match scan range (~ np.average(trace*mask, weights=mask)) scaling_factor = np.sum(masks**2, axis=(0, 1)) / np.sum(masks, axis=(0, 1)) traces = traces * np.expand_dims(scaling_factor, -1) raw_traces = raw_traces * np.expand_dims(scaling_factor, -1) masks = masks / scaling_factor background_scaling_factor = np.sum(background_masks**2, axis=(0, 1)) / np.sum(background_masks, axis=(0,1)) background_traces = background_traces * np.expand_dims(background_scaling_factor, -1) background_masks = background_masks / background_scaling_factor return masks, traces, background_masks, background_traces, raw_traces
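# ---------------------------------------------------------------------------
# Numeric sketch (illustrative) of the trace rescaling used above: the scaling
# factor for each component is the mask-weighted mean of its own mask values,
# i.e. sum(mask**2) / sum(mask) == np.average(mask, weights=mask).
import numpy as np

mask = np.array([[0.0, 0.5],
                 [1.0, 0.5]])          # one 2x2 spatial component
trace = np.array([1.0, 2.0, 3.0])      # its temporal trace

scaling_factor = (mask ** 2).sum() / mask.sum()   # 1.5 / 2.0 = 0.75
scaled_trace = trace * scaling_factor             # trace rescaled toward scan units
rescaled_mask = mask / scaling_factor             # so mask (x) trace stays unchanged overall
assert np.isclose(scaling_factor, np.average(mask, weights=mask))
# ---------------------------------------------------------------------------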
final_frate = params_movie['final_frate']
r_values_min = params_movie['r_values_min_full']  # threshold on space consistency
fitness_min = params_movie['fitness_min_full']  # threshold on time variability
# threshold on time variability (if nonsparse activity)
fitness_delta_min = params_movie['fitness_delta_min_full']
Npeaks = params_movie['Npeaks']
traces = C + YrA
idx_components, idx_components_bad, fitness_raw, fitness_delta, r_values = estimate_components_quality(
    traces, Y, A, C, b, f, final_frate=final_frate, Npeaks=Npeaks, r_values_min=r_values_min,
    fitness_min=fitness_min, fitness_delta_min=fitness_delta_min, return_all=True)
print(' ***** ')
print((len(traces)))
print((len(idx_components)))
# %%
else:
    # %% some parameter settings
    # order of the autoregressive fit to calcium imaging: in general one (slow gcamps)
    # or two (fast gcamps, fast scanning)
    p = params_movie['p']
    # merging threshold, max correlation allowed
# TODO: show screenshot 12
# TODO: change the way it is used
crd = plot_contours(A_tot, Cn, thr=params_display['thr_plot'])
# %% DISCARD LOW QUALITY COMPONENT
final_frate = params_movie['final_frate']
r_values_min = params_movie['r_values_min_patch']  # threshold on space consistency
fitness_min = params_movie['fitness_delta_min_patch']  # threshold on time variability
# threshold on time variability (if nonsparse activity)
fitness_delta_min = params_movie['fitness_delta_min_patch']
Npeaks = params_movie['Npeaks']
traces = C_tot + YrA_tot
# TODO: todocument
idx_components, idx_components_bad = estimate_components_quality(
    traces, Y, A_tot, C_tot, b_tot, f_tot, final_frate=final_frate, Npeaks=Npeaks,
    r_values_min=r_values_min, fitness_min=fitness_min, fitness_delta_min=fitness_delta_min)
print(('Keeping ' + str(len(idx_components)) +
       ' and discarding ' + str(len(idx_components_bad))))
# %%
# TODO: show screenshot 13
pl.figure()
crd = plot_contours(
    A_tot.tocsc()[:, idx_components], Cn, thr=params_display['thr_plot'])
# %%
A_tot = A_tot.tocsc()[:, idx_components]
C_tot = C_tot[idx_components]
# %% rerun updating the components to refine
t1 = time.time()
cnm = cnmf.CNMF(n_processes=1, k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh,
                p=p, dview=dview, Ain=A_tot, Cin=C_tot,
def fit(self, images): """ This method uses the cnmf algorithm to find sources in data. it is calling everyfunction from the cnmf folder you can find out more at how the functions are called and how they are laid out at the ipython notebook Parameters: ---------- images : mapped np.ndarray of shape (t,x,y[,z]) containing the images that vary over time. Returns: -------- self: updated using the cnmf algorithm with C,A,S,b,f computed according to the given initial values Raise: ------ raise Exception('You need to provide a memory mapped file as input if you use patches!!') See Also: -------- ..image::docs/img/quickintro.png http://www.cell.com/neuron/fulltext/S0896-6273(15)01084-3 """ #Todo : to compartiment T = images.shape[0] dims = images.shape[1:] Y = np.transpose(images, list(range(1, len(dims) + 1)) + [0]) Yr = np.transpose(np.reshape(images, (T, -1), order='F')) print((T,) + dims) # Make sure filename is pointed correctly (numpy sets it to None sometimes) Y.filename = images.filename Yr.filename = images.filename options = CNMFSetParms(Y, self.n_processes, p=self.p, gSig=self.gSig, K=self.k, ssub=self.ssub, tsub=self.tsub, p_ssub=self.p_ssub, p_tsub=self.p_tsub, method_init=self.method_init, n_pixels_per_process=self.n_pixels_per_process, block_size=self.block_size, check_nan=self.check_nan, nb=self.gnb, normalize_init = self.normalize_init, options_local_NMF = self.options_local_NMF, remove_very_bad_comps = self.remove_very_bad_comps, low_rank_background = self.low_rank_background, update_background_components = self.update_background_components, rolling_sum = self.rolling_sum) self.options = options if self.rf is None: # no patches print('preprocessing ...') Yr, sn, g, psx = preprocess_data(Yr, dview=self.dview, **options['preprocess_params']) if self.Ain is None: print('initializing ...') if self.alpha_snmf is not None: options['init_params']['alpha_snmf'] = self.alpha_snmf self.Ain, self.Cin, self.b_in, self.f_in, center = initialize_components( Y, **options['init_params']) if self.only_init: # only return values after initialization nA = np.squeeze(np.array(np.sum(np.square(self.Ain),axis=0))) nr=nA.size Cin=scipy.sparse.coo_matrix(self.Cin) YA = (self.Ain.T.dot(Yr).T)*scipy.sparse.spdiags(old_div(1.,nA),0,nr,nr) AA = ((self.Ain.T.dot(self.Ain))*scipy.sparse.spdiags(old_div(1.,nA),0,nr,nr)) self.YrA = YA - Cin.T.dot(AA) self.A = self.Ain self.C = Cin.todense() if self.remove_very_bad_comps: print('removing bad components : ') final_frate = 10 r_values_min = 0.5 # threshold on space consistency fitness_min = -15 # threshold on time variability fitness_delta_min = -15 Npeaks = 10 traces = np.array(self.C) print('estimating the quality...') idx_components, idx_components_bad, fitness_raw,\ fitness_delta, r_values = components_evaluation.estimate_components_quality( traces, Y, self.A, np.array(self.C), self.b_in, self.f_in, final_frate = final_frate, Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min, fitness_delta_min=fitness_delta_min, return_all = True, N = 5) print(('Keeping ' + str(len(idx_components)) + ' and discarding ' + str(len(idx_components_bad)))) self.C = self.C[idx_components] self.A = self.A[:,idx_components] self.YrA = self.YrA[:,idx_components] self.sn = sn self.b = self.b_in self.f = self.f_in self.g = g self.bl = None self.c1 = None self.neurons_sn = None return self print('update spatial ...') A, b, Cin, self.f_in = update_spatial_components(Yr, C = self.Cin, f = self.f_in, b_in = self.b_in, A_in = self.Ain, sn=sn, dview=self.dview, 
**options['spatial_params']) print('update temporal ...') if not self.skip_refinement: # set this to zero for fast updating without deconvolution options['temporal_params']['p'] = 0 else: options['temporal_params']['p'] = self.p print('deconvolution ...') options['temporal_params']['method'] = self.method_deconvolution C, A, b, f, S, bl, c1, neurons_sn, g, YrA = update_temporal_components( Yr, A, b, Cin, self.f_in, dview=self.dview, **options['temporal_params']) if not self.skip_refinement: print('refinement...') if self.do_merge: print('merge components ...') A, C, nr, merged_ROIs, S, bl, c1, sn1, g1 = merge_components( Yr, A, b, C, f, S, sn, options['temporal_params'], options['spatial_params'], dview=self.dview, bl=bl, c1=c1, sn=neurons_sn, g=g, thr=self.merge_thresh, mx=50, fast_merge=True) print((A.shape)) print('update spatial ...') A, b, C, f = update_spatial_components( Yr, C = C, f = f, A_in = A, sn=sn, b_in = b, dview=self.dview, **options['spatial_params']) # set it back to original value to perform full deconvolution options['temporal_params']['p'] = self.p print('update temporal ...') C, A, b, f, S, bl, c1, neurons_sn, g1, YrA = update_temporal_components( Yr, A, b, C, f, dview=self.dview, bl=None, c1=None, sn=None, g=None, **options['temporal_params']) else: g1 = g # todo : ask for those.. C, f, S, bl, c1, neurons_sn, Yra = C, f, S, bl, c1, neurons_sn, YrA else: # use patches if self.stride is None: self.stride = np.int(self.rf * 2 * .1) print(('**** Setting the stride to 10% of 2*rf automatically:' + str(self.stride))) if type(images) is np.ndarray: raise Exception( 'You need to provide a memory mapped file as input if you use patches!!') if self.only_init: options['patch_params']['only_init'] = True if self.alpha_snmf is not None: options['init_params']['alpha_snmf'] = self.alpha_snmf A, C, YrA, b, f, sn, optional_outputs = run_CNMF_patches(images.filename, dims + (T,), options, rf=self.rf, stride=self.stride, dview=self.dview, memory_fact=self.memory_fact, gnb=self.gnb, border_pix = self.border_pix, low_rank_background = self.low_rank_background) options = CNMFSetParms(Y, self.n_processes, p=self.p, gSig=self.gSig, K=A.shape[ -1], thr=self.merge_thresh, n_pixels_per_process=self.n_pixels_per_process, block_size=self.block_size, check_nan=self.check_nan) options['temporal_params']['method'] = self.method_deconvolution print("merging") merged_ROIs = [0] while len(merged_ROIs) > 0: A, C, nr, merged_ROIs, S, bl, c1, sn_n, g = merge_components(Yr, A, [], np.array(C), [], np.array( C), [], options['temporal_params'], options['spatial_params'], dview=self.dview, thr=self.merge_thresh, mx=np.Inf) # print('update spatial ...') # A, b, C, f = update_spatial_components( # Yr, C = C, f = f, A_in = A, sn=sn, b_in = b, dview=self.dview, **options['spatial_params']) print("update temporal") C, A, b, f, S, bl, c1, neurons_sn, g1, YrA = update_temporal_components( Yr, A, b, C, f, dview=self.dview, bl=None, c1=None, sn=None, g=None, **options['temporal_params']) self.A=A self.C=C self.b=b self.f=f self.S = S self.YrA=YrA self.sn=sn self.g = g1 self.bl = bl self.c1 = c1 self.neurons_sn = neurons_sn return self
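# ---------------------------------------------------------------------------
# Cluster sketch (illustrative): the dview argument threaded through fit() is a
# parallel view such as the one returned by cm.cluster.setup_cluster (used in
# process_data below). Parameter values are placeholders; cm.stop_server is
# assumed to be the matching shutdown helper.
import caiman as cm
from caiman.source_extraction import cnmf

c, dview, n_processes = cm.cluster.setup_cluster(backend='local', n_processes=None,
                                                 single_thread=False)
try:
    cnm = cnmf.CNMF(n_processes=n_processes, k=4, gSig=[10, 10], merge_thresh=0.9, p=1,
                    dview=dview, rf=40, stride=8, gnb=1, method_deconvolution='oasis')
    cnm = cnm.fit(images)  # images: memory-mapped movie, as in the sketches above
finally:
    cm.stop_server(dview=dview)  # assumed cleanup helper; shuts the local cluster down
# ---------------------------------------------------------------------------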
# TODO : change the way it is used
crd = plot_contours(A_tot, Cn, thr=params_display['thr_plot'])
# %% DISCARD LOW QUALITY COMPONENT
t1 = time.time()
final_frate = params_movie['final_frate']
# threshold on space consistency
r_values_min = params_movie['r_values_min_patch']
# threshold on time variability
fitness_min = params_movie['fitness_delta_min_patch']
# threshold on time variability (if nonsparse activity)
fitness_delta_min = params_movie['fitness_delta_min_patch']
Npeaks = params_movie['Npeaks']
traces = C_tot + YrA_tot
# TODO: todocument
idx_components, idx_components_bad = estimate_components_quality(
    traces, Y, A_tot, C_tot, b_tot, f_tot, final_frate=final_frate,
    Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min,
    fitness_delta_min=fitness_delta_min)
print(('Keeping ' + str(len(idx_components)) +
       ' and discarding ' + str(len(idx_components_bad))))
print(time.time() - t1)
t_d = time.time() - t1
# %%
# TODO: show screenshot 13
if isscreen:
    pl.figure()
    crd = plot_contours(
        A_tot.tocsc()[:, idx_components], Cn, thr=params_display['thr_plot'])
# %%
A_tot = A_tot.tocsc()[:, idx_components]
C_tot = C_tot[idx_components]
def process_data(dpath, movpath, pltpath, roi):
    params_movie = {
        # maximum number of iterations for rigid motion correction; in general 1.
        # 0 will quickly initialize a template with the first frames
        'niter_rig': 1,
        'max_shifts': (20, 20),  # maximum allowed rigid shift
        # for parallelization split the movies in num_splits chunks across time
        'splits_rig': 28,
        # if None all the splits are processed and the movie is saved
        'num_splits_to_process_rig': None,
        # intervals at which patches are laid out for motion correction
        'strides': (48, 48),
        'overlaps': (24, 24),  # overlap between patches (patch size = strides + overlaps)
        # for parallelization split the movies in num_splits chunks across time
        'splits_els': 28,
        # if None all the splits are processed and the movie is saved
        'num_splits_to_process_els': [14, None],
        'upsample_factor_grid': 4,  # upsample factor to avoid smearing when merging patches
        # maximum deviation allowed for a patch with respect to the rigid shift
        'max_deviation_rigid': 3,
        'p': 1,  # order of the autoregressive system
        'merge_thresh': 0.9,  # merging threshold, max correlation allowed
        'rf': 40,  # half-size of the patches in pixels. rf=25 means patches are 50x50
        'stride_cnmf': 20,  # amount of overlap between the patches in pixels
        'K': 4,  # number of components per patch
        # if dendritic, in this case you need to set init_method to sparse_nmf
        'is_dendrites': False,
        'init_method': 'greedy_roi',
        'gSig': [10, 10],  # expected half size of neurons
        'alpha_snmf': None,  # this controls sparsity
        'final_frate': 30
    }
    if not dpath.endswith(os.sep):
        dpath = dpath + os.sep
    if not os.path.isfile(dpath + 'mc.npz'):
        # start parallel
        c, dview, n_processes = cm.cluster.setup_cluster(
            backend='local', n_processes=None, single_thread=False)
        dpattern = 'msCam*.avi'
        dlist = sorted(
            glob.glob(dpath + dpattern),
            key=lambda var: [
                int(x) if x.isdigit() else x
                for x in re.findall(r'[^0-9]|[0-9]+', var)
            ])
        if not dlist:
            print("No data found in the specified folder: " + dpath)
            return
        else:
            vdlist = list()
            for vname in dlist:
                vdlist.append(sio.vread(vname, as_grey=True))
            mov_orig = cm.movie(np.squeeze(np.concatenate(
                vdlist, axis=0))).astype(np.float32)
            # column correction
            meanrow = np.mean(np.mean(mov_orig, 0), 0)
            addframe = np.tile(meanrow, (mov_orig.shape[1], 1))
            mov_cc = mov_orig - np.tile(addframe, (mov_orig.shape[0], 1, 1))
            mov_cc = mov_cc - np.min(mov_cc)
            # filter
            mov_ft = mov_cc.copy()
            for fid, fm in enumerate(mov_cc):
                mov_ft[fid] = ndi.uniform_filter(fm, 2) - ndi.uniform_filter(fm, 40)
            mov_orig = (mov_orig - np.min(mov_orig)) / (np.max(mov_orig) - np.min(mov_orig))
            mov_ft = (mov_ft - np.min(mov_ft)) / (np.max(mov_ft) - np.min(mov_ft))
            np.save(dpath + 'mov_orig', mov_orig)
            np.save(dpath + 'mov_ft', mov_ft)
            del mov_orig, dlist, vdlist, mov_ft
            mc_data = motion_correction.MotionCorrect(
                dpath + 'mov_ft.npy', 0, dview=dview,
                max_shifts=params_movie['max_shifts'],
                niter_rig=params_movie['niter_rig'],
                splits_rig=params_movie['splits_rig'],
                num_splits_to_process_rig=params_movie['num_splits_to_process_rig'],
                strides=params_movie['strides'],
                overlaps=params_movie['overlaps'],
                splits_els=params_movie['splits_els'],
                num_splits_to_process_els=params_movie['num_splits_to_process_els'],
                upsample_factor_grid=params_movie['upsample_factor_grid'],
                max_deviation_rigid=params_movie['max_deviation_rigid'],
                shifts_opencv=True, nonneg_movie=False, roi=roi)
            mc_data.motion_correct_rigid(save_movie=True)
            mov_rig = cm.load(mc_data.fname_tot_rig)
            np.save(dpath + 'mov_rig', mov_rig)
            np.savez(dpath + 'mc',
                     fname_tot_rig=mc_data.fname_tot_rig,
                     templates_rig=mc_data.templates_rig,
                     shifts_rig=mc_data.shifts_rig,
                     total_templates_rig=mc_data.total_template_rig,
                     max_shifts=mc_data.max_shifts,
                     roi=mc_data.roi)
            del mov_rig
    else:
        print("motion correction data already exist. proceed")
    if not os.path.isfile(dpath + "cnm.npz"):
        # start parallel
        c, dview, n_processes = cm.cluster.setup_cluster(
            backend='local', n_processes=None, single_thread=False)
        fname_tot_rig = np.array_str(
            np.load(dpath + 'mc.npz')['fname_tot_rig'])
        mov, dims, T = cm.load_memmap(fname_tot_rig)
        mov = np.reshape(mov.T, [T] + list(dims), order='F')
        cnm = cnmf.CNMF(
            n_processes, k=params_movie['K'], gSig=params_movie['gSig'],
            merge_thresh=params_movie['merge_thresh'], p=params_movie['p'],
            dview=dview, Ain=None, rf=params_movie['rf'],
            stride=params_movie['stride_cnmf'], memory_fact=1,
            method_init=params_movie['init_method'],
            alpha_snmf=params_movie['alpha_snmf'],
            only_init_patch=True, gnb=1, method_deconvolution='oasis')
        cnm = cnm.fit(mov)
        # Cn = cm.local_correlations(mov_orig, swap_dim=False)
        idx_comp, idx_comp_bad = components_evaluation.estimate_components_quality(
            cnm.C + cnm.YrA, np.reshape(mov, dims + (T, ), order='F'),
            cnm.A, cnm.C, cnm.b, cnm.f, params_movie['final_frate'],
            Npeaks=10, r_values_min=.7, fitness_min=-40, fitness_delta_min=-40)
        # visualization.plot_contours(cnm.A.tocsc()[:, idx_comp], Cn)
        A2 = cnm.A.tocsc()[:, idx_comp]
        C2 = cnm.C[idx_comp]
        cnm = cnmf.CNMF(
            n_processes, k=A2.shape[-1], gSig=params_movie['gSig'],
            merge_thresh=params_movie['merge_thresh'], p=params_movie['p'],
            dview=dview, Ain=A2, Cin=C2, f_in=cnm.f, rf=None, stride=None,
            method_deconvolution='oasis')
        cnm = cnm.fit(mov)
        idx_comp, idx_comp_bad = components_evaluation.estimate_components_quality(
            cnm.C + cnm.YrA, np.reshape(mov, dims + (T, ), order='F'),
            cnm.A, cnm.C, cnm.b, cnm.f, params_movie['final_frate'],
            Npeaks=10, r_values_min=.75, fitness_min=-50, fitness_delta_min=-50)
        cnm.A = cnm.A.tocsc()[:, idx_comp]
        cnm.C = cnm.C[idx_comp]
        # visualization.plot_contours(cnm.A.tocsc()[:, idx_comp], Cn)
        cm.cluster.stop_server()
        cnm.A = (cnm.A - np.min(cnm.A)) / (np.max(cnm.A) - np.min(cnm.A))
        cnm.C = (cnm.C - np.min(cnm.C)) / (np.max(cnm.C) - np.min(cnm.C))
        cnm.b = (cnm.b - np.min(cnm.b)) / (np.max(cnm.b) - np.min(cnm.b))
        cnm.f = (cnm.f - np.min(cnm.f)) / (np.max(cnm.f) - np.min(cnm.f))
        np.savez(dpath + 'cnm', A=cnm.A.todense(), C=cnm.C, b=cnm.b, f=cnm.f,
                 YrA=cnm.YrA, sn=cnm.sn, dims=dims)
        # AC = (cnm.A.dot(cnm.C)).reshape(dims + (-1,), order='F').transpose([2, 0, 1])
        # ACmin = np.min(AC)
        # ACmax = np.max(AC)
        # AC = (AC - ACmin) / (ACmax - ACmin)
        # np.save(dpath + 'AC', AC)
        # del AC, ACmax, ACmin
        # ACbf = (cnm.A.dot(cnm.C) + cnm.b.dot(cnm.f)).reshape(dims + (-1,), order='F').transpose([2, 0, 1])
        # ACbfmin = np.min(ACbf)
        # ACbfmax = np.max(ACbf)
        # ACbf = (ACbf - ACbfmin) / (ACbfmax - ACbfmin)
        # np.save(dpath + 'ACbf', ACbf)
        # del ACbf, ACbfmax, ACbfmin
        # plist = dpath.split(os.sep)
        # vname = plist[-3] + '_' + plist[-2] + '_result.mp4'
        # save_video(
        #     movpath + vname, dpath + 'mov_orig.npy', dpath + 'mov_rig.npy', dpath + 'AC.npy',
        #     dpath + 'ACbf.npy', dsratio=3)
    else:
        print("cnm data already exist. proceed")
    # os.remove(dpath + 'mov_orig.npy')
    # os.remove(dpath + 'mov_ft.npy')
    # os.remove(mc_data.fname_tot_rig)
    # os.remove(dpath + 'mov_rig.npy')
    # os.remove(dpath + 'AC.npy')
    # os.remove(dpath + 'ACbf.npy')
    try:
        A = cnm.A
        C = cnm.C
        dims = dims
    except NameError:
        A = np.load(dpath + 'cnm.npz')['A']
        C = np.load(dpath + 'cnm.npz')['C']
        dims = np.load(dpath + 'cnm.npz')['dims']
    plot_components(A, C, dims, pltpath)