Example #1
import numpy as np
from caiman.components_evaluation import evaluate_components  # assumed import path (old CaImAn API)

# Event window in frames, scaled from a 30 Hz reference (at 30 Hz: tB = -5, tA = 25).
tB = np.minimum(-2, np.floor(-5. / 30 * final_frate))
tA = np.maximum(5, np.ceil(25. / 30 * final_frate))
Npeaks = 10
traces = C2 + YrA  # raw traces = denoised calcium + residuals
# traces_a = traces - scipy.ndimage.percentile_filter(traces, 8, size=[1, np.shape(traces)[-1] / 5])
# traces_b = np.diff(traces, axis=1)
fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples = \
    evaluate_components(Y, traces, A2, C2, b2, f2, remove_baseline=True, N=5,
                        robust_std=False, Athresh=0.1, Npeaks=Npeaks,
                        tB=tB, tA=tA, thresh_C=0.3)

idx_components_r = np.where(r_values >= .6)[0]           # spatial-correlation test
idx_components_raw = np.where(fitness_raw < -60)[0]      # significance of the raw traces
idx_components_delta = np.where(fitness_delta < -20)[0]  # significance of the trace derivative


from caiman.base.rois import extract_binary_masks_blob  # assumed import path (old CaImAn API)

min_radius = gSig[0] - 2  # smallest acceptable blob radius, from the expected neuron half-size
masks_ws, idx_blobs, idx_non_blobs = extract_binary_masks_blob(
    A2.tocsc(), min_radius, dims, num_std_threshold=1,
    minCircularity=0.6, minInertiaRatio=0.2, minConvexity=.8)

# Keep a component if it passes ANY of the three tests, then restrict blobs to kept components.
idx_components = np.union1d(idx_components_r, idx_components_raw)
idx_components = np.union1d(idx_components, idx_components_delta)
idx_blobs = np.intersect1d(idx_components, idx_blobs)
idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components)

print(' ***** ')
print(len(traces))
print(len(idx_components))
print(len(idx_blobs))
#%% visualize components
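One way to fill in the "visualize components" cell; a minimal sketch, assuming the old caiman.utils.visualization API and a local-correlations image Cn (computed as in Example #3):

import pylab as pl
from caiman.utils.visualization import plot_contours  # assumed old CaImAn API

# Overlay the contours of the accepted components on the correlation image.
pl.figure()
plot_contours(A2.tocsc()[:, idx_components], Cn, thr=0.9)
pl.title('accepted components')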
Example #2
idx_components = np.union1d(idx_components_r, idx_components_raw)
idx_components = np.union1d(idx_components, idx_components_delta)
# idx_blobs = np.intersect1d(idx_components, idx_blobs)  # deferred until blobs are extracted below
idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components)

print(' ***** ')
print(len(traces))
print(len(idx_components))
# print(len(idx_blobs))

min_radius = gSig[0] - 2
masks_ws, idx_blobs, idx_non_blobs = extract_binary_masks_blob(
    A2.tocsc(),
    min_radius,
    dims,
    num_std_threshold=1,
    minCircularity=0.6,
    minInertiaRatio=0.2,
    minConvexity=.8)

idx_components = np.union1d(idx_components_r, idx_components_raw)
idx_components = np.union1d(idx_components, idx_components_delta)
idx_blobs = np.intersect1d(idx_components, idx_blobs)
idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components)

print(' ***** ')
print(len(traces))
print(len(idx_components))
print(len(idx_blobs))
#%% visualize components
#pl.figure();
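The threshold-and-union bookkeeping repeated in Examples #1 and #2 can be collected into a small helper; a minimal NumPy-only sketch (select_components is an illustrative name, not part of CaImAn):

import numpy as np

def select_components(r_values, fitness_raw, fitness_delta, n_traces,
                      r_min=.6, raw_max=-60, delta_max=-20):
    """Return (good, bad) component indices: a component is kept if it
    passes ANY of the three tests, mirroring the np.union1d chains above."""
    idx_r = np.where(r_values >= r_min)[0]
    idx_raw = np.where(fitness_raw < raw_max)[0]
    idx_delta = np.where(fitness_delta < delta_max)[0]
    idx_components = np.union1d(np.union1d(idx_r, idx_raw), idx_delta)
    idx_bad = np.setdiff1d(np.arange(n_traces), idx_components)
    return idx_components, idx_bad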
Example #3
def process_data(haussio_data,
                 mask=None,
                 p=2,
                 nrois_init=400,
                 roi_iceberg=0.9):
    """Run the CaImAn CNMF pipeline on haussio_data and return
    (rois, C2, zproj, S2, Y, YrA); CNMF results are cached in a .mat file."""
    if mask is not None:
        raise RuntimeError("mask not supported in cnmf.process_data")

    fn_cnmf = haussio_data.dirname_comp + '_cnmf.mat'
    shapefn = os.path.join(haussio_data.dirname_comp,
                           haussio.THOR_RAW_FN[:-3] + "shape.npy")
    shape = np.load(shapefn)
    if len(shape) == 5:
        d1, d2 = shape[2], shape[3]
        fn_mmap = get_mmap_name('Yr', shape[2], shape[3], shape[0])
    else:
        d1, d2 = shape[1], shape[2]
        fn_mmap = get_mmap_name('Yr', shape[1], shape[2], shape[0])
    fn_mmap = os.path.join(haussio_data.dirname_comp, fn_mmap)
    print(fn_mmap, os.path.exists(fn_mmap), d1, d2)

    if not os.path.exists(fn_cnmf):
        # fn_raw = os.path.join(haussio_data.dirname_comp, haussio.THOR_RAW_FN)
        fn_sima = haussio_data.dirname_comp + '.sima'
        fnames = [
            fn_sima,
        ]
        fnames.sort()
        print(fnames)

        final_frate = 1.0 / haussio_data.dt
        downsample_factor = 1  # use .2 or .1 if file is large and you want a quick answer
        final_frate *= downsample_factor

        c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                         n_processes=None,
                                                         single_thread=False)

        idx_xy = None
        base_name = 'Yr'
        name_new = cm.save_memmap_each(fnames,
                                       dview=dview,
                                       base_name=base_name,
                                       resize_fact=(1, 1, downsample_factor),
                                       remove_init=0,
                                       idx_xy=idx_xy)
        name_new.sort()
        print(name_new)

        if len(name_new) > 1:
            fname_new = cm.save_memmap_join(name_new,
                                            base_name='Yr',
                                            n_chunks=12,
                                            dview=dview)
        else:
            sys.stdout.write('One file only, not saving\n')
            fname_new = name_new[0]

        print("fname_new: " + fname_new)

        Yr, dims, T = cm.load_memmap(fname_new)
        Y = np.reshape(Yr, dims + (T, ), order='F')
        Cn = cm.local_correlations(Y)

        K = nrois_init  # number of neurons expected per patch
        gSig = [15, 15]  # expected half size of neurons
        merge_thresh = 0.8  # merging threshold, max correlation allowed
        # p (order of the autoregressive system) comes from the function argument
        options = caiman_cnmf.utilities.CNMFSetParms(Y,
                                                     NCPUS,
                                                     p=p,
                                                     gSig=gSig,
                                                     K=K,
                                                     ssub=2,
                                                     tsub=2)

        Yr, sn, g, psx = caiman_cnmf.pre_processing.preprocess_data(
            Yr, dview=dview, **options['preprocess_params'])
        Atmp, Ctmp, b_in, f_in, center = caiman_cnmf.initialization.initialize_components(
            Y, **options['init_params'])

        Ain, Cin = Atmp, Ctmp
        A, b, Cin, f_in = caiman_cnmf.spatial.update_spatial_components(
            Yr,
            Cin,
            f_in,
            Ain,
            sn=sn,
            dview=dview,
            **options['spatial_params'])

        options['temporal_params']['p'] = 0  # zero for fast updating without deconvolution
        C, A, b, f, S, bl, c1, neurons_sn, g, YrA = caiman_cnmf.temporal.update_temporal_components(
            Yr,
            A,
            b,
            Cin,
            f_in,
            bl=None,
            c1=None,
            sn=None,
            g=None,
            **options['temporal_params'])

        A_m, C_m, nr_m, merged_ROIs, S_m, bl_m, c1_m, sn_m, g_m = caiman_cnmf.merging.merge_components(
            Yr,
            A,
            b,
            C,
            f,
            S,
            sn,
            options['temporal_params'],
            options['spatial_params'],
            dview=dview,
            bl=bl,
            c1=c1,
            sn=neurons_sn,
            g=g,
            thr=merge_thresh,
            mx=50,
            fast_merge=True)

        A2, b2, C2, f = caiman_cnmf.spatial.update_spatial_components(
            Yr, C_m, f, A_m, sn=sn, dview=dview, **options['spatial_params'])
        options['temporal_params']['p'] = p  # restore original value for full deconvolution
        C2, A2, b2, f2, S2, bl2, c12, neurons_sn2, g21, YrA = caiman_cnmf.temporal.update_temporal_components(
            Yr,
            A2,
            b2,
            C2,
            f,
            dview=dview,
            bl=None,
            c1=None,
            sn=None,
            g=None,
            **options['temporal_params'])

        tB = np.minimum(-2, np.floor(-5. / 30 * final_frate))
        tA = np.maximum(5, np.ceil(25. / 30 * final_frate))
        Npeaks = 10
        traces = C2 + YrA
        fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples = \
            evaluate_components(
                Y, traces, A2, C2, b2, f2, final_frate, remove_baseline=True, N=5,
                robust_std=False, Athresh=0.1, Npeaks=Npeaks, thresh_C=0.3)

        idx_components_r = np.where(r_values >= .6)[0]
        idx_components_raw = np.where(fitness_raw < -60)[0]
        idx_components_delta = np.where(fitness_delta < -20)[0]

        min_radius = gSig[0] - 2
        masks_ws, idx_blobs, idx_non_blobs = extract_binary_masks_blob(
            A2.tocsc(),
            min_radius,
            dims,
            num_std_threshold=1,
            minCircularity=0.6,
            minInertiaRatio=0.2,
            minConvexity=.8)

        idx_components = np.union1d(idx_components_r, idx_components_raw)
        idx_components = np.union1d(idx_components, idx_components_delta)
        idx_blobs = np.intersect1d(idx_components, idx_blobs)
        idx_components_bad = np.setdiff1d(range(len(traces)), idx_components)

        A2 = A2.tocsc()[:, idx_components]
        C2 = C2[idx_components, :]
        YrA = YrA[idx_components, :]
        S2 = S2[idx_components, :]

        # A: spatial components (ROIs)
        # C: denoised [Ca2+]
        # YrA: residuals ("noise", i.e. traces = C+YrA)
        # S: Spikes
        savemat(fn_cnmf, {"A": A2, "C": C2, "YrA": YrA, "S": S2, "bl": bl2})

    else:
        resdict = loadmat(fn_cnmf)
        A2 = resdict["A"]
        C2 = resdict["C"]
        YrA = resdict["YrA"]
        S2 = resdict["S"]
        bl2 = resdict["bl"]
        Yr, dims, T = cm.load_memmap(fn_mmap)
        dims = dims[1:]
        Y = np.reshape(Yr, dims + (T, ), order='F')

    proj_fn = haussio_data.dirname_comp + "_proj.npy"
    if not os.path.exists(proj_fn):
        zproj = utils.zproject(np.transpose(Y, (2, 0, 1)))
        np.save(proj_fn, zproj)
    else:
        zproj = np.load(proj_fn)

    # DF_F, DF = cse.extract_DF_F(Y.reshape(d1*d2, T), A2, C2)

    # t0 = time.time()
    # sys.stdout.write("Ordering components... ")
    # sys.stdout.flush()
    # A_or, C_or, srt = cse.order_components(A2, C2)
    # sys.stdout.write(' took {0:.2f} s\n'.format(time.time()-t0))

    cm.stop_server()

    polygons = contour(A2, Y.shape[0], Y.shape[1], thr=roi_iceberg)
    rois = ROIList([sima.ROI.ROI(polygons=poly) for poly in polygons])

    return rois, C2, zproj, S2, Y, YrA
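A minimal call sketch for Example #3; my_haussio_data is a hypothetical placeholder for the haussio dataset object the surrounding module provides (process_data only reads its .dirname_comp and .dt attributes):

# Hypothetical driver; the dataset object is a stand-in, not a real haussio call.
rois, C2, zproj, S2, Y, YrA = process_data(my_haussio_data, p=2,
                                           nrois_init=400, roi_iceberg=0.9)
print(len(rois), C2.shape)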