def run_subsampling():
    # `ops` and `db` are assumed to be module-level suite2p settings dicts.
    basepath = '/data.nst/jdehning/packer_data/calcium_subsampled/'
    # For each subsampling level: [hdf5 filename, save directory, cell diameter in pixels].
    dic_prop = {
        '2x1': ['2019-11-08_RL065_t-001_Cycle00001_Ch3_2x1.hdf5',
                '2019-11-08_RL065_t-001_2x1', 16],
        '2x2': ['2019-11-08_RL065_t-001_Cycle00001_Ch3_2x2.hdf5',
                '2019-11-08_RL065_t-001_2x2', 13],
        '3x3': ['2019-11-08_RL065_t-001_Cycle00001_Ch3_3x3.hdf5',
                '2019-11-08_RL065_t-001_3x3', 9],
        '1x2': ['2019-11-08_RL065_t-001_Cycle00001_Ch3_1x2.hdf5',
                '2019-11-08_RL065_t-001_1x2', 9],
        '4x4': ['2019-11-08_RL065_t-001_Cycle00001_Ch3_4x4.hdf5',
                '2019-11-08_RL065_t-001_4x4', 6],
        '5x5': ['2019-11-08_RL065_t-001_Cycle00001_Ch3_5x5.hdf5',
                '2019-11-08_RL065_t-001_5x5', 5],
    }
    for dataset in ['4x4', '5x5']:
        ops['diameter'] = dic_prop[dataset][2]
        db['h5py'] = basepath + dic_prop[dataset][0]
        db['save_path0'] = basepath + dic_prop[dataset][1]
        run_s2p(ops=ops, db=db)
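This snippet (and several below) calls run_s2p(ops=ops, db=db) with `ops` and `db` taken from module scope. A minimal sketch of that assumed setup, with illustrative values that are not from the original source:

# Hypothetical module-level setup assumed by the snippet above.
from suite2p import run_s2p, default_ops  # import location varies across suite2p versions

ops = default_ops()        # start from suite2p's built-in defaults
ops['nplanes'] = 1         # single imaging plane (assumption)
ops['fs'] = 30.            # frame rate in Hz (assumption)
db = {'h5py_key': 'data'}  # per-run overrides are filled in before each call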
Example #2
def run_s2p_on(path,
               ops_file=None,
               reprocess=False,
               infer_from_recording_classes=False,
               inferrer=None):
    db = {'data_path': [path]}
    if ops_file is None:
        ops_file = os.path.join(path, "suite2p", "plane0", "ops.npy")
    try:
        ops = np.load(ops_file, allow_pickle=True).item()
        # Index each key before assigning so that a missing key raises
        # KeyError: only ops files that already define these settings get patched.
        ops["keep_movie_raw"]
        ops["keep_movie_raw"] = True
        ops["connected"]
        ops["connected"] = False
        ops["max_overlap"]
        ops["max_overlap"] = 0.2
        ops["do_registration"]
        ops["do_registration"] = 2  # ALWAYS redo registration
        ops["look_one_level_down"]
        ops["look_one_level_down"] = True
    except FileNotFoundError:
        if all((any("tif" in file for file in os.listdir(path)),
                infer_from_recording_classes, inferrer is not None)):
            # How many planes?
            no_of_planes = inferrer(exp_id(path))
            if no_of_planes == 1:
                ops = default_ops()
                ops["nchannels"] = 2
                ops["look_one_level_down"] = False
                ops["do_registration"] = 2
                ops["keep_movie_raw"] = True
                ops["align_by_chan"] = 2
                ops["nonrigid"] = False
                ops["connected"] = False
                ops["max_overlap"] = 0.2
                ops["bidi_corrected"] = True
                ops["two_step_reigstration"] = True
                ops["sparse_mode"] = True
                try:
                    run_s2p(ops=ops, db=db)
                except Exception as e:
                    print(f"exception {e} raised at path {path} "
                          "in response to run_s2p call")
                    print("db file:")
                    for key, value in db.items():
                        print(f"{key}: {value}")
                    print("ops file:")
                    for key, value in ops.items():
                        print(f"{key}: {value}")
        else:
            print(f"No TIFFS at {path}" if infer_from_recording_classes else
                  f"{path} not yet processed.")
            return
    else:
        if not reprocess:
            print(f"{path} has already been processed by suite2p")
            return
        run_s2p(ops=ops, db=db)
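A hypothetical call of the function above; the path is illustrative and the inferrer is a stand-in that reports one plane for every experiment id:

run_s2p_on('/data/recordings/rec_001',
           reprocess=False,
           infer_from_recording_classes=True,
           inferrer=lambda experiment: 1)  # stand-in: every recording is single-plane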
Example #3
    def single_file(f):
        # Nested helper from its original module: relies on enclosing-scope
        # variables such as s2p_overwrite, h5_overwrite, h5_output, max_idx,
        # db_d and keep_binary.
        head, tail = os.path.split(f)

        s2pexists = check4s2p(f)
        if s2pexists:
            if s2p_overwrite:
                shutil.rmtree(os.path.join(head, 'suite2p'))
            else:
                print("%s suite2p already done" % f)
                return None

        h5exists = check4h5(f)
        if not h5exists or h5_overwrite:
            if h5_output is None:
                h5fname = pp.sbx2h5(f, output_name=f + ".h5", max_idx=max_idx)
            else:
                h5fname = pp.sbx2h5(f,
                                    output_name=os.path.join(
                                        h5_output, tail + ".h5"),
                                    max_idx=max_idx)
        else:
            if h5_output is None:
                h5fname = f + '.h5'
            else:
                h5fname = os.path.join(h5_output, tail + ".h5")

        # set ops
        outpath = f
        try:
            os.makedirs(f)
        except OSError:
            print("outpath not created")
        ops_d = {'save_path0': f}
        ops = pp.set_ops(ops_d)

        # set db
        db = pp.set_db(h5fname, d=db_d)

        # run suite2p
        bindir = os.path.join(db['fast_disk'], 'suite2p')
        try:
            shutil.rmtree(bindir)
        except FileNotFoundError:
            pass
        run_s2p(ops=ops, db=db)

        # move registered binary over to save data folders
        if keep_binary:
            shutil.move(os.path.join(bindir, "plane0", "data.bin"),
                        os.path.join(outpath, "suite2p", "data.bin"))
        # delete temporary files
        shutil.rmtree(bindir)

        return None
Example #4
def run_for_single_file(animal_path, file):
    expID = get_expID(file)
    hf_path = os.path.join(animal_path, file)
    savepath = os.path.join(animal_path, file[:-3])
    tmp_path = os.path.join(os.environ['SCRATCH'], 'register_jobs',
                            'bin_files', file[:-3])
    mkdir_p(tmp_path)

    db = default_db()
    db['h5py'] = hf_path
    db['fast_disk'] = tmp_path
    ops = default_ops()
    ops['save_path0'] = savepath
    ops['fast_disk'] = tmp_path
    # run one experiment
    if not os.path.isdir(ops['save_path0']):
        start = time.time()

        print('Running for: ' + db['h5py'] + '\n')
        print('Saving on: ' + ops['save_path0'])
        print('Tmp saving: ' + ops['fast_disk'])
        opsEnd = run_s2p(ops=ops, db=db)
        stop = time.time()
        with open(os.path.join(os.environ['L_SCRATCH'],
                               'suite2p_times.txt'), 'a') as timelog:
            timelog.write(db['h5py'] + ', ' + str(stop - start) + '\n')
    else:
        print(ops['save_path0'] + ' already exists, skipping.\n')
    return
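A hypothetical call, assuming the helpers above (default_db, default_ops, mkdir_p) are importable and the SCRATCH / L_SCRATCH environment variables are set; the animal path and file name are illustrative:

import os
os.environ.setdefault('SCRATCH', '/tmp/scratch')      # fast temporary storage
os.environ.setdefault('L_SCRATCH', '/tmp/l_scratch')  # for the timing log

# Registers/extracts into /data/animals/M001/2019-11-08_M001_t-001/,
# keeping the binary files on the scratch disk.
run_for_single_file('/data/animals/M001', '2019-11-08_M001_t-001.h5')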
Example #5
def run_suite2p(db, out_path, fs, nplanes):
    default = default_ops()
    default2 = {
        'save_path0': out_path,
        'delete_bin': False,
        'look_one_level_down': True,
        'data_path': db['in_dir'],
        'nplanes': nplanes,
        'fs': fs,
        'save_mat': True,
        'reg_tif': False,
        'expts': db['expts'],
        'raw': True,
    }

    if 'ops' in db:
        opts = db['ops']
    else:
        opts = {}

    # Later dicts win on key collisions: per-run settings override suite2p's
    # defaults, and any user-supplied db['ops'] overrides both.
    ops = {**default, **default2, **opts}

    db = {}  # everything is passed through ops, so suite2p's db stays empty

    print(ops)

    ops = run_s2p(ops=ops, db=db)
    return ops
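A hypothetical call; the db keys mirror what run_suite2p reads above, and because `opts` is unpacked last, anything in db['ops'] wins over both defaults:

db = {
    'in_dir': ['/data/session1'],  # becomes ops['data_path']
    'expts': ['expt1', 'expt2'],   # becomes ops['expts']
    'ops': {'tau': 1.25},          # optional overrides, applied last
}
ops_end = run_suite2p(db, out_path='/results/session1', fs=30.0, nplanes=1)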
Example #6
def run_suite2p(hdf5_list, dirname_output, mdata):
    z_planes = mdata['size']['z_planes']
    fs_param = 1. / (mdata['period'] * z_planes)

    # Load suite2p only right before use, as it has a long load time.
    from suite2p import run_s2p
    ops = run_s2p.default_ops()
    params = {
        'input_format': 'h5',
        'data_path': [str(f.parent) for f in hdf5_list],
        'save_path0': str(dirname_output),
        'nplanes': z_planes,
        'fs': fs_param,
        'save_mat': True,
        'bidi_corrected': True,
        'spatial_hp': 50,
        'sparse_mode': False,
        'threshold_scaling': 3,
        'diameter': 6,
    }
    logger.info('Running suite2p on files:\n%s\n%s', '\n'.join(str(f) for f in hdf5_list), params)
    with open(dirname_output / 'recording_order.json', 'w') as fout:
        json.dump([str(e) for e in hdf5_list], fout, indent=4)
    run_s2p.run_s2p(ops=ops, db=params)
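As a sanity check of the volume-rate computation above: with a hypothetical mdata of period = 0.033 s per frame and 3 z-planes, each plane is revisited every 0.099 s, so fs ≈ 10.1 Hz:

mdata = {'period': 0.033, 'size': {'z_planes': 3}}  # illustrative values
fs_param = 1. / (mdata['period'] * mdata['size']['z_planes'])
print(round(fs_param, 2))  # 10.1 (per-plane sampling rate in Hz)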
Example #7
def process_data(animalid,
                 date,
                 expt_ids,
                 raw_base='/home/mossing/data/suite2P/raw/',
                 result_base='/home/mossing/data/suite2P/results/',
                 fast_disk='/home/mossing/data_ssd/suite2P/bin',
                 nchannels=1,
                 delete_raw=False,
                 diameter=15):
    #    save_path0 = result_base+animalid+'/'+date+'/'+'_'.join(expt_ids)
    #    data_path = [raw_base+animalid+'/'+date+'/'+lbl for lbl in expt_ids]

    db = prepare_db(animalid,
                    date,
                    expt_ids,
                    raw_base=raw_base,
                    result_base=result_base,
                    fast_disk=fast_disk,
                    nchannels=nchannels,
                    diameter=diameter)

    # provide an h5 path in 'h5py' or a tiff path in 'data_path'
    # db overwrites any ops (allows for experiment specific settings)
    #db = {
    #      'h5py': [], # a single h5 file path
    #      'h5py_key': 'data',
    #      'look_one_level_down': False, # whether to look in ALL subfolders when searching for tiffs,
    #      'save_path0': save_path0,
    #      'data_path': data_path, # a list of folders with tiffs
    #                                             # (or folder of folders with tiffs if look_one_level_down is True, or subfolders is not empty)
    #      'subfolders': [], # choose subfolders of 'data_path' to look in (optional)
    #      'fast_disk': fast_disk, # string which specifies where the binary file will be stored (should be an SSD)
    #      'nchannels': nchannels,
    #      'diameter': diameter
    #    }

    try:
        shutil.rmtree(fast_disk + '/suite2p')
        print('fast disk contents deleted')
    except FileNotFoundError:
        print('fast disk location empty')

    opsEnd = run_s2p(ops=ops, db=db)  # `ops` is assumed to be defined at module level
    if delete_raw:
        for fold in db['data_path']:
            for old_file in glob.glob(fold + '/*.tif'):
                os.remove(old_file)
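A hypothetical call with illustrative animal/date/experiment ids; the raw, result and fast-disk locations fall back to the defaults in the signature:

process_data('M123', '2019-11-08', ['001', '002'], nchannels=1, diameter=15)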
Example #8
def process_data_1ch_2ch(animalid,
                         date,
                         expt_ids_1ch,
                         expt_ids_2ch,
                         raw_base='/home/mossing/data/suite2P/raw/',
                         result_base='/home/mossing/data/suite2P/results/',
                         fast_disk='/home/mossing/data_ssd/suite2P/bin',
                         delete_raw=False,
                         diameter=15):
    #    save_path0 = result_base+animalid+'/'+date+'/'+'_'.join(expt_ids)
    #    data_path = [raw_base+animalid+'/'+date+'/'+lbl for lbl in expt_ids]

    # process 1ch data first
    db = prepare_db(animalid,
                    date,
                    expt_ids_1ch,
                    raw_base=raw_base,
                    result_base=result_base,
                    fast_disk=fast_disk,
                    nchannels=1,
                    diameter=diameter,
                    combined=False)

    try:
        shutil.rmtree(fast_disk + '/suite2p')
        print('fast disk contents deleted')
    except FileNotFoundError:
        print('fast disk location empty')

    # run suite2p part
    opsEnd = run_s2p(ops=ops, db=db)  # `ops` is assumed to be defined at module level
    if delete_raw:
        for fold in db['data_path']:
            for old_file in glob.glob(fold + '/*.tif'):
                os.remove(old_file)

    add_2ch_data(animalid,
                 date,
                 expt_ids_1ch,
                 expt_ids_2ch,
                 raw_base=raw_base,
                 result_base=result_base,
                 fast_disk=fast_disk,
                 delete_raw=delete_raw,
                 diameter=diameter)
Example #9
    def s2pRun(self, user_batch_size):

        num_pixels = self.frame_x*self.frame_y
        sampling_rate = self.fps/self.n_planes
        # Target a ~13 (presumably um) cell diameter, converted to pixels
        # along each axis (pix_sz_x/pix_sz_y assumed to be um per pixel).
        diameter_x = 13/self.pix_sz_x
        diameter_y = 13/self.pix_sz_y
        diameter = int(diameter_x), int(diameter_y)
        # Larger frames are more RAM intensive: scale the user's batch size
        # by the pixel count relative to a 512x512 frame (262144 px).
        batch_size = user_batch_size * (262144 / num_pixels)

        db = {
            'data_path' : [self.tiff_path], 
            'fs' : float(sampling_rate),
            'diameter' : diameter, 
            'batch_size' : int(batch_size), 
            'nimg_init' : int(batch_size),
            'nplanes' : self.n_planes
        }

        print(db)

        opsEnd = run_s2p(ops=ops, db=db)  # `ops` is assumed to be defined at module level
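A quick check of the batch-size scaling above: hypothetical 1024x1024 frames have four times the pixels of a 512x512 frame, so a requested batch of 200 frames shrinks to 50:

user_batch_size = 200
num_pixels = 1024 * 1024                              # 4x a 512x512 frame
batch_size = user_batch_size * (262144 / num_pixels)  # 262144 = 512 * 512
print(int(batch_size))                                # 50 frames per batch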
Example #10
    'inner_neuropil_radius': 2,  # number of pixels to keep between ROI and neuropil donut
    'outer_neuropil_radius': np.inf,  # maximum neuropil radius
    'min_neuropil_pixels': 350,  # minimum number of pixels in the neuropil
    'high_pass': 100,  # running mean subtraction with window of size 'high_pass' (use low values for 1P)
    # deconvolution settings
    'baseline': 'maximin',  # baselining mode
    'win_baseline': 60.,  # window for maximin
    'sig_baseline': 10.,  # smoothing constant for gaussian filter
    'prctile_baseline': 8.,  # optional (whether to use a percentile baseline)
    'neucoeff': .7,  # neuropil coefficient
}

# provide an h5 path in 'h5py' or a tiff path in 'data_path'
# db overwrites any ops (allows for experiment-specific settings)
db = {
    'h5py': [],  # a single h5 file path
    'h5py_key': 'data',
    'look_one_level_down': False,  # whether to look in ALL subfolders when searching for tiffs
    'data_path': [raw_dir],  # a list of folders with tiffs
    # (or folder of folders with tiffs if look_one_level_down is True, or subfolders is not empty)
    'subfolders': [],  # choose subfolders of 'data_path' to look in (optional)
    'fast_disk': scratch_folder,  # where the binary file will be stored (should be an SSD)
    # 'tiff_list': ['fov1_retino_00001.tif'],  # list of tiffs in folder *data_path*
}

# run one experiment
opsEnd = run_s2p(ops=ops, db=db)
Example #11
def find_rois_suite2p(video, video_path, params, mc_borders=None, use_multiprocessing=True):
    if suite2p_enabled:
        full_video_path = video_path

        directory = os.path.dirname(full_video_path)
        filename  = os.path.basename(full_video_path)

        roi_spatial_footprints  = [ None for i in range(video.shape[1]) ]
        roi_temporal_footprints = [ None for i in range(video.shape[1]) ]
        roi_temporal_residuals  = [ None for i in range(video.shape[1]) ]
        bg_spatial_footprints   = [ None for i in range(video.shape[1]) ]
        bg_temporal_footprints  = [ None for i in range(video.shape[1]) ]

        if os.path.exists("suite2p"):
            shutil.rmtree("suite2p")

        for z in range(video.shape[1]):
            fname = os.path.splitext(filename)[0] + "_masked_z_{}.h5".format(z)

            video_path = os.path.join(directory, fname)

            h5f = h5py.File(video_path, 'w')
            h5f.create_dataset('data', data=video[:, z, :, :])
            h5f.close()

            ops = {
                'fast_disk': [], # used to store temporary binary file, defaults to save_path0
                'save_path0': '', # stores results, defaults to first item in data_path
                'delete_bin': False, # whether to delete binary file after processing
                # main settings
                'nplanes' : 1, # each tiff has these many planes in sequence
                'nchannels' : 1, # each tiff has these many channels per plane
                'functional_chan' : 1, # this channel is used to extract functional ROIs (1-based)
                'diameter':params['diameter'], # this is the main parameter for cell detection, 2-dimensional if Y and X are different (e.g. [6 12])
                'tau':  1., # this is the main parameter for deconvolution
                'fs': params['sampling_rate'],  # sampling rate (total across planes)
                # output settings
                'save_mat': False, # whether to save output as matlab files
                'combined': True, # combine multiple planes into a single result /single canvas for GUI
                # parallel settings
                'num_workers': 0, # 0 to select num_cores, -1 to disable parallelism, N to enforce value
                'num_workers_roi': 0, # 0 to select number of planes, -1 to disable parallelism, N to enforce value
                # registration settings
                'do_registration': False, # whether to register data
                'nimg_init': 200, # subsampled frames for finding reference image
                'batch_size': 200, # number of frames per batch
                'maxregshift': 0.1, # max allowed registration shift, as a fraction of frame max(width and height)
                'align_by_chan' : 1, # when multi-channel, you can align by non-functional channel (1-based)
                'reg_tif': False, # whether to save registered tiffs
                'subpixel' : 10, # precision of subpixel registration (1/subpixel steps)
                # cell detection settings
                'connected': params['connected'], # whether or not to keep ROIs fully connected (set to 0 for dendrites)
                'navg_frames_svd': 5000, # max number of binned frames for the SVD
                'nsvd_for_roi': 1000, # max number of SVD components to keep for ROI detection
                'max_iterations': 20, # maximum number of iterations to do cell detection
                'ratio_neuropil': params['neuropil_basis_ratio'], # ratio between neuropil basis size and cell radius
                'ratio_neuropil_to_cell': params['neuropil_radius_ratio'], # minimum ratio between neuropil radius and cell radius
                'tile_factor': 1., # use finer (>1) or coarser (<1) tiles for neuropil estimation during cell detection
                'threshold_scaling': 1., # adjust the automatically determined threshold by this scalar multiplier
                'max_overlap': 0.75, # cells with more overlap than this get removed during triage, before refinement
                'inner_neuropil_radius': params['inner_neuropil_radius'], # number of pixels to keep between ROI and neuropil donut
                'outer_neuropil_radius': np.inf, # maximum neuropil radius
                'min_neuropil_pixels': params['min_neuropil_pixels'], # minimum number of pixels in the neuropil
                # deconvolution settings
                'baseline': 'maximin', # baselining mode
                'win_baseline': 60., # window for maximin
                'sig_baseline': 10., # smoothing constant for gaussian filter
                'prctile_baseline': 8.,# optional (whether to use a percentile baseline)
                'neucoeff': .7,  # neuropil coefficient
            }

            db = {
                'h5py': video_path, # a single h5 file path
                'h5py_key': 'data',
                'look_one_level_down': False, # whether to look in ALL subfolders when searching for tiffs
                'data_path': [], # a list of folders with tiffs 
                                                     # (or folder of folders with tiffs if look_one_level_down is True, or subfolders is not empty)
                'subfolders': [] # choose subfolders of 'data_path' to look in (optional)
            }

            opsEnd = run_s2p(ops=ops, db=db)

            stat = np.load("suite2p/plane0/stat.npy", allow_pickle=True)
            F    = np.load("suite2p/plane0/F.npy")
            Fneu = np.load("suite2p/plane0/Fneu.npy")

            spatial_components = np.zeros((video.shape[2], video.shape[3], len(stat)))
            for i in range(len(stat)):
                spatial_components[stat[i]['xpix'], stat[i]['ypix'], i] = stat[i]['lam']

            roi_spatial_footprints[z]  = scipy.sparse.coo_matrix(spatial_components.reshape((video.shape[2]*video.shape[3], len(stat))))
            roi_temporal_footprints[z] = F - ops["neucoeff"]*Fneu
            roi_temporal_residuals[z]  = np.zeros(F.shape)
            bg_spatial_footprints[z]   = None
            bg_temporal_footprints[z]  = None

            os.remove(video_path)  # remove the temporary per-plane h5 by its full path
            shutil.rmtree("suite2p")

        return roi_spatial_footprints, roi_temporal_footprints, roi_temporal_residuals, bg_spatial_footprints, bg_temporal_footprints
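A hypothetical call, assuming a (T, Z, H, W) movie array, a params dict carrying the keys the function reads, and that the module-level suite2p_enabled flag is True; all values are illustrative:

import numpy as np

video = np.random.randint(0, 2000, size=(500, 1, 256, 256)).astype(np.int16)  # (T, Z, H, W)
params = {
    'diameter': 10, 'sampling_rate': 30.0, 'connected': True,
    'neuropil_basis_ratio': 6., 'neuropil_radius_ratio': 3.,
    'inner_neuropil_radius': 2, 'min_neuropil_pixels': 350,
}
rois, traces, residuals, bg_s, bg_t = find_rois_suite2p(video, '/tmp/movie.tif', params)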
Example #12
def run_suite2p(path_hdf5, diameter):
    # `ops` and `db` are assumed to be module-level suite2p settings dicts.
    save_path = os.path.dirname(path_hdf5)
    ops['diameter'] = diameter
    db['h5py'] = path_hdf5
    db['save_path0'] = save_path
    run_s2p(ops=ops, db=db)
Example #13
def main():
    # set your options for running
    # overwrites the run_s2p.default_ops
    ops = {
        'fast_disk':
        [],  # used to store temporary binary file, defaults to save_path0 (set as a string NOT a list)
        'save_path0':
        [],  # stores results, defaults to first item in data_path
        'delete_bin': False,  # whether to delete binary file after processing
        # main settings
        'nplanes': 1,  # each tiff has these many planes in sequence
        'nchannels': 1,  # each tiff has these many channels per plane
        'functional_chan':
        1,  # this channel is used to extract functional ROIs (1-based)
        'diameter':
        3,  # this is the main parameter for cell detection, 2-dimensional if Y and X are different (e.g. [6 12])
        'tau':
        2.,  # this is the main parameter for deconvolution, 2 for gcamp6s
        'fs': 10.,  # sampling rate (total across planes)
        # output settings
        'save_mat': False,  # whether to save output as matlab files
        'combined':
        True,  # combine multiple planes into a single result /single canvas for GUI
        # parallel settings
        'num_workers':
        0,  # 0 to select num_cores, -1 to disable parallelism, N to enforce value
        'num_workers_roi':
        -1,  # 0 to select number of planes, -1 to disable parallelism, N to enforce value
        # registration settings
        'do_registration': False,  # whether to register data
        'nimg_init': 200,  # subsampled frames for finding reference image
        'batch_size': 200,  # number of frames per batch
        'maxregshift':
        0.1,  # max allowed registration shift, as a fraction of frame max(width and height)
        'align_by_chan':
        1,  # when multi-channel, you can align by non-functional channel (1-based)
        'reg_tif': False,  # whether to save registered tiffs
        'subpixel':
        10,  # precision of subpixel registration (1/subpixel steps)
        # cell detection settings
        'connected':
        True,  # whether or not to keep ROIs fully connected (set to 0 for dendrites)
        'navg_frames_svd': 5000,  # max number of binned frames for the SVD
        'nsvd_for_roi':
        1000,  # max number of SVD components to keep for ROI detection
        'max_iterations':
        100,  # maximum number of iterations to do cell detection
        'ratio_neuropil':
        6.,  # ratio between neuropil basis size and cell radius
        'ratio_neuropil_to_cell':
        3,  # minimum ratio between neuropil radius and cell radius
        'tile_factor':
        1.,  # use finer (>1) or coarser (<1) tiles for neuropil estimation during cell detection
        # TODO: Try to lower the threshold to get more ROI
        'threshold_scaling':
        0.7,  # adjust the automatically determined threshold by this scalar multiplier
        'max_overlap':
        0.70,  # cells with more overlap than this get removed during triage, before refinement
        'inner_neuropil_radius':
        2,  # number of pixels to keep between ROI and neuropil donut
        'outer_neuropil_radius': np.inf,  # maximum neuropil radius
        'min_neuropil_pixels': 300,  # minimum number of pixels in the neuropil
        # deconvolution settings
        'baseline': 'maximin',  # baselining mode
        'win_baseline': 60.,  # window for maximin
        'sig_baseline': 10.,  # smoothing constant for gaussian filter
        'prctile_baseline':
        8.,  # optional (whether to use a percentile baseline)
        'neucoeff': .7,  # neuropil coefficient
    }

    for_axon_flex = True
    if for_axon_flex:
        # new ops (copied from https://github.com/MouseLand/suite2p on 1 December 2019)
        ops = {
            # file paths
            'look_one_level_down':
            False,  # whether to look in all subfolders when searching for tiffs
            'fast_disk':
            [],  # used to store temporary binary file, defaults to save_path0
            'delete_bin':
            False,  # whether to delete binary file after processing
            'mesoscan': False,  # for reading in scanimage mesoscope files
            'h5py': [],  # take h5py as input (deactivates data_path)
            'h5py_key': 'data',  # key in h5py where data array is stored
            'save_path0':
            [],  # stores results, defaults to first item in data_path
            'subfolders': [],
            # main settings
            'nplanes': 1,  # each tiff has these many planes in sequence
            'nchannels': 1,  # each tiff has these many channels per plane
            'functional_chan':
            1,  # this channel is used to extract functional ROIs (1-based)
            'tau':
            2.,  # this is the main parameter for deconvolution, 2 for gcamp6s
            'fs':
            10.,  # sampling rate (PER PLANE - e.g. if you have 12 planes then this should be around 2.5)
            'force_sktiff':
            False,  # whether or not to use scikit-image for tiff reading
            # output settings
            'preclassify':
            0,  # apply classifier before signal extraction with probability 0.5 (turn off with value 0)
            'save_mat': False,  # whether to save output as matlab files
            'combined':
            True,  # combine multiple planes into a single result /single canvas for GUI
            'aspect':
            1.0,  # um/pixels in X / um/pixels in Y (for correct aspect ratio in GUI)
            # bidirectional phase offset
            'do_bidiphase': False,
            'bidiphase': 0,
            # registration settings
            'do_registration':
            0,  # whether to register data (2 forces re-registration)
            'keep_movie_raw': False,
            'nimg_init': 300,  # subsampled frames for finding reference image
            'batch_size': 500,  # number of frames per batch
            'maxregshift':
            0.1,  # max allowed registration shift, as a fraction of frame max(width and height)
            'align_by_chan':
            1,  # when multi-channel, you can align by non-functional channel (1-based)
            'reg_tif': False,  # whether to save registered tiffs
            'reg_tif_chan2':
            False,  # whether to save channel 2 registered tiffs
            'subpixel':
            10,  # precision of subpixel registration (1/subpixel steps)
            'smooth_sigma':
            1.15,  # ~1 good for 2P recordings, recommend >5 for 1P recordings
            'th_badframes': 1.0,
            # this parameter determines which frames to exclude when determining cropping - set it smaller to exclude more frames
            'pad_fft': False,
            # non rigid registration settings
            'nonrigid': True,  # whether to use nonrigid registration
            'block_size': [
                128, 128
            ],  # block size to register (** keep this a multiple of 2 **)
            'snr_thresh': 1.2,
            # if any nonrigid block is below this threshold, it gets smoothed until above this threshold. 1.0 results in no smoothing
            'maxregshiftNR':
            5,  # maximum pixel shift allowed for nonrigid, relative to rigid
            # 1P settings
            '1Preg':
            False,  # whether to perform high-pass filtering and tapering
            'spatial_hp':
            50,  # window for spatial high-pass filtering before registration
            'pre_smooth':
            2,  # whether to smooth before high-pass filtering before registration
            'spatial_taper': 50,
            # how much to ignore on edges (important for vignetted windows, for FFT padding do not set BELOW 3*ops['smooth_sigma'])
            # cell detection settings
            'roidetect': True,  # whether or not to run ROI extraction
            'spatial_scale':
            0,  # 0: multi-scale; 1: 6 pixels, 2: 12 pixels, 3: 24 pixels, 4: 48 pixels
            'connected':
            0,  # whether or not to keep ROIs fully connected (set to 0 for dendrites)
            'nbinned': 5000,  # max number of binned frames for cell detection
            'max_iterations':
            100,  # maximum number of iterations to do cell detection
            'threshold_scaling': 1,
            # adjust the automatically determined threshold by this scalar multiplier used to be 5
            'max_overlap': 0.7,
            # cells with more overlap than this get removed during triage, before refinement default: 0.75
            'high_pass':
            100,  # running mean subtraction with window of size 'high_pass' (use low values for 1P)
            # ROI extraction parameters
            'inner_neuropil_radius':
            2,  # number of pixels to keep between ROI and neuropil donut
            'min_neuropil_pixels':
            300,  # minimum number of pixels in the neuropil
            'allow_overlap':
            True,  # pixels that are overlapping are thrown out (False) or added to both ROIs (True)
            # channel 2 detection settings (stat[n]['chan2'], stat[n]['not_chan2'])
            'chan2_thres':
            0.65,  # minimum for detection of brightness on channel 2
            # deconvolution settings
            'baseline':
            'maximin',  # baselining mode (can also choose 'prctile')
            'win_baseline': 60.,  # window for maximin
            'sig_baseline': 10.,  # smoothing constant for gaussian filter
            'prctile_baseline':
            8.,  # optional (whether to use a percentile baseline)
            'neucoeff': .7,  # neuropil coefficient
            'xrange': np.array([0, 0]),
            'yrange': np.array([0, 0])
        }
    else:
        # new ops (copied from https://github.com/MouseLand/suite2p on 1 December 2019)
        ops = {
            # file paths
            'look_one_level_down':
            False,  # whether to look in all subfolders when searching for tiffs
            'fast_disk':
            [],  # used to store temporary binary file, defaults to save_path0
            'delete_bin':
            False,  # whether to delete binary file after processing
            'mesoscan': False,  # for reading in scanimage mesoscope files
            'h5py': [],  # take h5py as input (deactivates data_path)
            'h5py_key': 'data',  # key in h5py where data array is stored
            'save_path0':
            [],  # stores results, defaults to first item in data_path
            'subfolders': [],
            # main settings
            'nplanes': 1,  # each tiff has these many planes in sequence
            'nchannels': 1,  # each tiff has these many channels per plane
            'functional_chan':
            1,  # this channel is used to extract functional ROIs (1-based)
            'tau':
            2.,  # this is the main parameter for deconvolution, 2 for gcamp6s
            'fs':
            10.,  # sampling rate (PER PLANE - e.g. if you have 12 planes then this should be around 2.5)
            'force_sktiff':
            False,  # whether or not to use scikit-image for tiff reading
            # output settings
            'preclassify':
            0,  # apply classifier before signal extraction with probability 0.5 (turn off with value 0)
            'save_mat': False,  # whether to save output as matlab files
            'combined':
            True,  # combine multiple planes into a single result /single canvas for GUI
            'aspect':
            1.0,  # um/pixels in X / um/pixels in Y (for correct aspect ratio in GUI)
            # bidirectional phase offset
            'do_bidiphase': False,
            'bidiphase': 0,
            # registration settings
            'do_registration':
            0,  # whether to register data (2 forces re-registration)
            'keep_movie_raw': False,
            'nimg_init': 300,  # subsampled frames for finding reference image
            'batch_size': 500,  # number of frames per batch
            'maxregshift':
            0.1,  # max allowed registration shift, as a fraction of frame max(width and height)
            'align_by_chan':
            1,  # when multi-channel, you can align by non-functional channel (1-based)
            'reg_tif': False,  # whether to save registered tiffs
            'reg_tif_chan2':
            False,  # whether to save channel 2 registered tiffs
            'subpixel':
            10,  # precision of subpixel registration (1/subpixel steps)
            'smooth_sigma':
            1.15,  # ~1 good for 2P recordings, recommend >5 for 1P recordings
            'th_badframes': 1.0,
            # this parameter determines which frames to exclude when determining cropping - set it smaller to exclude more frames
            'pad_fft': False,
            # non rigid registration settings
            'nonrigid': True,  # whether to use nonrigid registration
            'block_size': [
                128, 128
            ],  # block size to register (** keep this a multiple of 2 **)
            'snr_thresh': 1.2,
            # if any nonrigid block is below this threshold, it gets smoothed until above this threshold. 1.0 results in no smoothing
            'maxregshiftNR':
            5,  # maximum pixel shift allowed for nonrigid, relative to rigid
            # 1P settings
            '1Preg':
            False,  # whether to perform high-pass filtering and tapering
            'spatial_hp':
            50,  # window for spatial high-pass filtering before registration
            'pre_smooth':
            2,  # whether to smooth before high-pass filtering before registration
            'spatial_taper': 50,
            # how much to ignore on edges (important for vignetted windows, for FFT padding do not set BELOW 3*ops['smooth_sigma'])
            # cell detection settings
            'roidetect': True,  # whether or not to run ROI extraction
            'spatial_scale':
            0,  # 0: multi-scale; 1: 6 pixels, 2: 12 pixels, 3: 24 pixels, 4: 48 pixels
            'connected':
            True,  # whether or not to keep ROIs fully connected (set to 0 for dendrites)
            'nbinned': 5000,  # max number of binned frames for cell detection
            'max_iterations':
            100,  # maximum number of iterations to do cell detection
            'threshold_scaling':
            1,  # adjust the automatically determined threshold by this scalar multiplier used to be 5
            'max_overlap':
            0.7,  # cells with more overlap than this get removed during triage, before refinement default: 0.75
            'high_pass':
            100,  # running mean subtraction with window of size 'high_pass' (use low values for 1P)
            # ROI extraction parameters
            'inner_neuropil_radius':
            2,  # number of pixels to keep between ROI and neuropil donut
            'min_neuropil_pixels':
            300,  # minimum number of pixels in the neuropil
            'allow_overlap':
            True,  # pixels that are overlapping are thrown out (False) or added to both ROIs (True)
            # channel 2 detection settings (stat[n]['chan2'], stat[n]['not_chan2'])
            'chan2_thres':
            0.65,  # minimum for detection of brightness on channel 2
            # deconvolution settings
            'baseline':
            'maximin',  # baselining mode (can also choose 'prctile')
            'win_baseline': 60.,  # window for maximin
            'sig_baseline': 10.,  # smoothing constant for gaussian filter
            'prctile_baseline':
            8.,  # optional (whether to use a percentile baseline)
            'neucoeff': .7,  # neuropil coefficient
            'xrange': np.array([0, 0]),
            'yrange': np.array([0, 0])
        }
    """
    
    """
    """
    ops to modify defaults):
    'preclassify': 0.5
    'threshold_scaling': 5 # select many more cells if lowered
    'max_overlap': 0.75
     'inner_neuropil_radius': 2,  # number of pixels to keep between ROI and neuropil donut
    'min_neuropil_pixels': 350,  # minimum number of pixels in the neuropil
     'allow_overlap': False,  # pixels that are overlapping are thrown out (False) or added to both ROIs (True)
    'neucoeff': .7,  # neuropil coefficient
    'allow_overlap'
    
    To run the code:
    conda activate suite2p
    (First time only: To install it from sources: pip install -e .)
    (after pasting the .h5 file in suite2p_main.py)
    then: python suite2p_main.py
    And to open the GUI:
    python -m suite2p
    At the end:
    conda deactivate
    """

    # provide an h5 path in 'h5py' or a tiff path in 'data_path'
    # db overwrites any ops (allows for experiment specific settings)
    # p8_19_09_29_1_a001.h5
    db = {
        # 'h5py': '/Users/pappyhammer/Documents/academique/these_inmed/suite2p/suite2p_tiffs/MichelMotC_p8_19_09_29_1_a000.h5',  # a single h5 file path
        # 'h5py': '/home/julien/these_inmed/suite2p/suite2p_tiffs/181016_181029_a002_concat.h5',
        # 'h5py_key': 'data',
        'look_one_level_down': False,  # whether to look in ALL subfolders when searching for tiffs
        'data_path': ['/home/julien/these_inmed/suite2p/suite2p_tiffs'],  # a list of folders with tiffs
        # 'data_path': ['/Users/pappyhammer/Documents/academique/these_inmed/suite2p/suite2p_tiffs'],
        # (or folder of folders with tiffs if look_one_level_down is True, or subfolders is not empty)
        'subfolders': [],  # choose subfolders of 'data_path' to look in (optional)
        'fast_disk': '/home/julien/these_inmed/suite2p/suite2p_bin',  # where the binary file will be stored (should be an SSD)
        # 'fast_disk': '/Users/pappyhammer/Documents/academique/these_inmed/suite2p/suite2p_bin'
    }

    # run one experiment
    opsEnd = run_s2p(ops=ops, db=db)
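A minimal entry point, so the script can be launched as python suite2p_main.py per the notes in the docstring above:

if __name__ == '__main__':
    main()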
Example #14
def main(mouse_id,
         run_number,
         pkl_path,
         do_s2p=False,
         reprocess=True,
         reload=True,
         do_flu_preprocess=True):
    '''Cache a single session for one mouse to pkl.'''

    run = run_processor(mouse_id,
                        run_number,
                        pkl_path,
                        reprocess=reprocess,
                        reload=reload)

    if run is None:
        return

    # flatten out the tseries path lists if it is nested
    if any(isinstance(i, list) for i in run.tseries_paths):
        run.tseries_paths = [
            item for sublist in run.tseries_paths for item in sublist
        ]
    tseries_nframes = []
    tseries_dims = []

    print('\nfollowing tseries found:')
    tiff_list = []
    print(run.tseries_paths)
    for tseries in run.tseries_paths:
        tiffs = utils.get_tiffs(tseries)
        if not tiffs:
            raise FileNotFoundError('cannot find tiff in '
                                    'folder {}'.format(tseries))
        elif len(tiffs) == 1:
            assert tiffs[0][-7:] == 'Ch3.tif', 'channel not understood '\
                                               'for tiff {}'.format(tiffs[0])

            tiff_list.append(tiffs[0])
        elif len(tiffs) == 2:  # two channels recorded (red is too dim)
            assert tiffs[0][-7:] == 'Ch2.tif' and tiffs[1][-7:] == 'Ch3.tif',\
                                        'channel not understood '\
                                        'for tiffs {} and {}'.format(tiffs[0],
                                                                        tiffs[1])

            tiffs = [tiffs[1]]
            tiff_list.append(tiffs[0])

        elif len(tiffs) > 2:
            print('Single frame tiffs not deleted, checking for MPTs')

            # Check if multipage tiff conversion happened but single tiffs
            # were not deleted
            multipages = [tiff for tiff in tiffs if not '.ome' in tiff]
            multipages.sort()

            if not multipages:
                raise ValueError('Folder likely contains unconverted single '
                                 'page tiffs')

            # check that the number of tiffs in the multipage matches the
            # number of .ome
            num_ome = len([tiff for tiff in tiffs if '.ome' in tiff])
            for i, multipage in enumerate(multipages):
                _, n_frames = tiff_metadata(multipage)
                # ideally want equal but sometimes deletion starts but is
                # not completed
                assert n_frames >= num_ome or n_frames == num_ome/len(multipages), \
                '{} {} {}'.format(multipage, n_frames, num_ome)
            # check for the green channel
            if len(multipages) > 1:
                assert multipages[1][-7:] == 'Ch3.tif', 'channel not understood '\
                                             'for tiff {}'.format(multipages)
                tiff_list.append(multipages[1])
                tiffs = [multipages[1]]

            elif len(multipages) == 1:
                assert multipages[0][-7:] == 'Ch3.tif', 'channel not understood '\
                                             'for tiff {}'.format(multipages)

                tiff_list.append(multipages[0])
                tiffs = [multipages[0]]

        image_dims, n_frames = tiff_metadata(tiffs[0])
        tseries_dims.append(image_dims)
        tseries_nframes.append(n_frames)

    print('\n')

    run.num_frames = tseries_nframes

    print(tseries_dims)
    if tseries_dims[0][0] == 1024 and tseries_dims[0][1] == 1024:
        diameter = 11
        fs = 15
        print('Bafov detected')
        raise ValueError('path hack in db has ruined bafov')
    elif tseries_dims[0][0] == 1024 and tseries_dims[0][1] == 514:
        diameter = 11
        fs = 30
        assert run.num_planes == 1  # temporary
        print('obfov detected')
    else:
        raise NotImplementedError('Cacher is currently only set up for bafov '
                                  'and obfov images')

    data_path = str(Path(run.tseries_paths[0]).parent)
    save_folder = os.path.join(data_path, 'suite2p', run.mouse_id)

    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    db = {
        'data_path': [data_path],
        'look_one_level_down': True,
        'diameter': diameter,
        'tiff_list': tiff_list,
        'nplanes': run.num_planes,
        'fs': fs,
        'save_folder': save_folder,
        'roidetect': True,
        'spikedetect': False,
    }

    # TEMPORARY?
    run.s2p_path = os.path.join(Path(db['data_path'][0]), 'suite2p')
    # 2021 addition, think this is correct
    run.s2p_path = save_folder

    #    # check that suite2p hasn't alreay been run
    #    if os.path.exists(run.s2p_path):
    #        print('Already done s2p\n')
    #        do_s2p = False

    print('Data path is {}'.format(data_path))
    print('s2p path is {}'.format(run.s2p_path))

    if do_s2p:
        # Build bad_frames.npy (not sure which directory suite2p reads it
        # from, so write to all of them); also useful to keep a record of
        # bad frames in the mouse-specific suite2p output folder.
        rf.get_bad_frames(run, [data_path, run.s2p_path, save_folder])
        print('Running s2p on tseries printed above\n')
        with open('/home/jrowland/mnt/qnap/suite2p_report.txt', 'a') as f:
            dateTime = datetime.datetime.today()
            f.write('\n{} Beginning s2p for {} run {} path {}'.format(
                dateTime.isoformat("|", 'seconds'), run.mouse_id, run_number,
                save_folder))

        opsEnd = run_s2p(ops=ops, db=db)  # `ops` is assumed to be defined at module level

        with open('/home/jrowland/mnt/qnap/suite2p_report.txt', 'a') as f:
            dateTime = datetime.datetime.today()
            f.write('\n{} Completed s2p for {} run {} path {}\n\n'.format(
                dateTime.isoformat("|", 'seconds'), run.mouse_id, run_number,
                save_folder))

    if do_flu_preprocess:
        run = preprocess_flu(run)

    if not run.reloaded:
        with open(pkl_path, 'wb') as f:
            pickle.dump(run, f)
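A hypothetical invocation with illustrative mouse id, run number and pkl path:

main('J048', 1, '/home/jrowland/mnt/qnap/pkl_cache/J048_run1.pkl',
     do_s2p=True, reprocess=False, reload=True, do_flu_preprocess=True)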