Example #1
    def __init__(self, file_name, max_shifts, strides, overlaps,
                 upsample_factor_grid, max_deviation_rigid):
        self.name_orig = [file_name]
        self.data_orig = cm.load_movie_chain(self.name_orig)

        self.name_rig, self.name_pwrig, self.shifts_rig, self.x_shifts_pwrig, self.y_shifts_pwrig, self.template_shape = self._run_motion_correction(
            file_name, max_shifts, strides, overlaps, upsample_factor_grid,
            max_deviation_rigid)

        self.data_rig = cm.load(self.name_rig)
        self.data_pwrig = cm.load(self.name_pwrig)
Example #2
def local_correlations_movie_parallel(params: Tuple) -> np.ndarray:
    mv_name, idx, eight_neighbours, swap_dim, order_mean, ismulticolor = params
    mv = cm.load(mv_name, subindices=idx)
    if ismulticolor:
        return local_correlations_multicolor(mv, swap_dim=swap_dim)[None, :, :].astype(np.float32)
    else:
        return local_correlations(mv, eight_neighbours=eight_neighbours, swap_dim=swap_dim, order_mean=order_mean)[None, :, :].astype(np.float32)
def plot_movie_frame_cropped(cropped_file):
    """
    This function creates an image for visual inspection of a cropped frame
    """
    m = cm.load(cropped_file)
    pl.imshow(m[0, :, :], cmap='gray')
    return
def plot_movie_frame(decoded_file):
    """
    This function creates an image for visual inspection of cropping points.
    """
    m = cm.load(decoded_file)
    pl.imshow(m[0, :, :], cmap='gray')
    return
def get_metric_min_mean_max(index, row):
    # Define the metric directory
    metrics_trial_wise_min_mean_max_dir = f'data/interim/decoding/meta/metrics/trial_wise/min_mean_max/'
    # Define arrays in which to store min, mean, max
    a_min = np.array([])
    a_mean = np.array([])
    a_max = np.array([])
    # Get the pickle file name
    state_name = src.pipeline.create_file_name(0, index)
    pkl_path = metrics_trial_wise_min_mean_max_dir + state_name + '.pkl'
    # Get the decoding output
    output = eval(row.loc['decoding_output'])
    # Load the movie into memory
    m = cm.load(output['main'])
    # Iterate over the movie frames:
    for frame in m:
        a_min = np.append(a_min, frame.min())
        a_mean = np.append(a_mean, frame.mean())
        a_max = np.append(a_max, frame.max())
    # Save the metrics file path
    if 'metrics' not in output['meta']:
        output['meta']['metrics'] = {}
    output['meta']['metrics']['min_max_mean'] = pkl_path
    row.loc['decoding_output'] = str(output)
    # Save the metrics in the pkl file
    metrics = {'min': a_min, 'mean': a_mean, 'max': a_max}
    with open(pkl_path, 'wb') as f:
        pickle.dump(metrics, f)
    return index, row
Example #6
def mc_vids(vids_fpath, mc_rigid_template):
    start = time.time()
    # estimated minimum value of the movie to produce an output that is positive
    min_mov = np.array([
        cm.motion_correction.high_pass_filter_space(m_, gSig_filt)
        for m_ in cm.load(vids_fpath[0], subindices=range(400))
    ]).min()
    mc = MotionCorrect(vids_fpath,
                       min_mov,
                       dview=dview,
                       max_shifts=max_shifts,
                       niter_rig=1,
                       splits_rig=splits_rig,
                       num_splits_to_process_rig=None,
                       shifts_opencv=True,
                       nonneg_movie=True,
                       gSig_filt=gSig_filt,
                       border_nan=border_nan,
                       is3D=False)

    mc.motion_correct_rigid(save_movie=(not doPwRigid),
                            template=mc_rigid_template)

    shifts_rig = mc.shifts_rig
    template_rig = mc.total_template_rig

    if doPwRigid:
        mc.motion_correct_pwrigid(save_movie=True, template=template_rig)
        mc.total_template_rig = template_rig

    duration = time.time() - start
    logging.info('Motion correction done in %s', str(duration))
    return mc, duration, shifts_rig
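# Hypothetical usage sketch: run the correction on a list of raw 1p videos with
# the template estimated from scratch (the file names and the module-level
# parameters dview, max_shifts, splits_rig, gSig_filt, border_nan and doPwRigid
# are assumed to be defined).
mc, duration, shifts_rig = mc_vids(['vid_000.tif', 'vid_001.tif'],
                                   mc_rigid_template=None)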
Example #7
    def loadmovie(self, fname):
        ''' Load movie from disk using opencv
            TODO: may need to write .tif readers etc.
                  OR use caiman only to read imaging files

            Note: the movie is loaded and passed into its own container
                - i.e. each screen will have its own movie array in general
        '''

        # load movie
        print("  loading movie...")
        movie = cm.load(fname, in_memory=True)

        # Cat: TODO: fix the memmap issue here
        print("  TODO: fix memmap for motion correction files ...")
        temp_fname = '/home/cat/code/caiman_gui/temp.npy'
        np.save(temp_fname, movie)
        movie = np.load(temp_fname)

        self.data_min = float(movie.min())
        self.data_max = float(movie.max())

        print("  finished loading movie, size: ", movie.shape)

        return movie
Example #8
def main():
    fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')]

    movie = cm.load(fnames)
    movie = movie.astype(float)

    # makes estimation numerically better:
    movie -= movie.mean()

    # use one every 200 frames
    temporal_stride = 200
    # use one every 8 patches (patches are 8x8 by default)
    spatial_stride = 8

    movie_train = movie[::temporal_stride]

    t = timeit.default_timer()
    estimation_res = est.estimate_vst_movie(movie_train, stride=spatial_stride)
    print('\tTime', timeit.default_timer() - t)

    alpha = estimation_res.alpha
    sigma_sq = estimation_res.sigma_sq

    movie_gat = compute_gat(movie, sigma_sq, alpha=alpha)
    # save movie_gat here
    movie_gat_inv = compute_inverse_gat(movie_gat, sigma_sq, alpha=alpha,
                                        method='asym')
    # save movie_gat_inv here
    return movie, movie_gat_inv
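# Hypothetical usage sketch: run the demo above and write the inverse-
# transformed movie back to disk (the frame rate and output file name are assumed).
if __name__ == "__main__":
    movie, movie_gat_inv = main()
    cm.movie(movie_gat_inv, fr=30).save('demoMovie_gat_inv.tif')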
Example #9
def map_corr(scan) -> Tuple[Any, Any, Any, int]:
    '''This part of the code is in a mapping function that's run over different
    movies in parallel
    '''
    # TODO: Tighten prototype above
    if type(scan) is str:
        scan = cm.load(scan)

    # h x w x num_frames
    chunk = np.array(scan).transpose([1, 2, 0])
    # Subtract overall brightness per frame
    chunk -= chunk.mean(axis=(0, 1))

    # Compute sum_x and sum_x^2
    chunk_sum = np.sum(chunk, axis=-1, dtype=float)
    chunk_sqsum = np.sum(chunk**2, axis=-1, dtype=float)

    # Compute sum_xy: Multiply each pixel by its eight neighbors
    chunk_xysum = np.zeros((chunk.shape[0], chunk.shape[1], 8))
    # amount of 90 degree rotations
    for k in [0, 1, 2, 3]:
        rotated_chunk = np.rot90(chunk, k=k)
        rotated_xysum = np.rot90(chunk_xysum, k=k)

        # Multiply each pixel by one above and by one above to the left
        rotated_xysum[1:, :, k] = np.sum(rotated_chunk[1:] * rotated_chunk[:-1], axis=-1, dtype=float)
        rotated_xysum[1:, 1:, 4 + k] = np.sum(rotated_chunk[1:, 1:] * rotated_chunk[:-1, :-1], axis=-1, dtype=float)

        # Return back to original orientation
        chunk = np.rot90(rotated_chunk, k=4 - k)
        chunk_xysum = np.rot90(rotated_xysum, k=4 - k)

    num_frames = chunk.shape[-1]

    return chunk_sum, chunk_sqsum, chunk_xysum, num_frames
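# Hypothetical reducer sketch: because map_corr returns per-chunk sufficient
# statistics, the outputs for several chunks of the same field of view can be
# accumulated before assembling a correlation image (chunk file names assumed).
chunk_files = ['scan_chunk_000.tif', 'scan_chunk_001.tif']
sums, sqsums, xysums, frame_counts = zip(*map(map_corr, chunk_files))
total_sum = np.sum(sums, axis=0)        # per-pixel sum_x
total_sqsum = np.sum(sqsums, axis=0)    # per-pixel sum_x^2
total_xysum = np.sum(xysums, axis=0)    # per-pixel sum_xy with the 8 neighbours
total_frames = int(np.sum(frame_counts))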
Example #10
def main():
    fnames = [
        os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')
    ]

    movie = cm.load(fnames)
    movie = movie.astype(float)

    # makes estimation numerically better:
    movie -= movie.mean()

    # use one every 200 frames
    temporal_stride = 200
    # use one every 8 patches (patches are 8x8 by default)
    spatial_stride = 8

    movie_train = movie[::temporal_stride]

    t = timeit.default_timer()
    estimation_res = est.estimate_vst_movie(movie_train, stride=spatial_stride)
    print('\tTime', timeit.default_timer() - t)

    alpha = estimation_res.alpha
    sigma_sq = estimation_res.sigma_sq

    movie_gat = compute_gat(movie, sigma_sq, alpha=alpha)
    # save movie_gat here
    movie_gat_inv = compute_inverse_gat(movie_gat,
                                        sigma_sq,
                                        alpha=alpha,
                                        method='asym')
    # save movie_gat_inv here
    return movie, movie_gat_inv
Example #11
    def motion_correct_rigid(self, fname):
        dview = None
        try:

            c, dview, n_processes = cm.cluster.setup_cluster(
                backend='local', n_processes=None, single_thread=False)

            niter_rig = 1  # number of iterations for rigid motion correction
            max_shifts = self.get_dict_param('max_shifts_rigid', 'tuple_int')
            # for parallelization split the movies into num_splits chunks across time
            splits_rig = self.get_dict_param('splits_rig', 'single_int')
            # first we create a motion correction object with the parameters specified
            min_mov = cm.load(fname[0], subindices=range(200)).min()
            # this will be subtracted from the movie to make it non-negative

            mc = MotionCorrect(fname,
                               min_mov,
                               dview=dview,
                               max_shifts=max_shifts,
                               niter_rig=niter_rig,
                               splits_rig=splits_rig,
                               border_nan='copy',
                               shifts_opencv=True,
                               nonneg_movie=True)

            mc.motion_correct_rigid(save_movie=True)

            self.motion_correct = mc
        except Exception as e:
            raise e
        finally:
            cm.cluster.stop_server(dview=dview)
def run_cropper(input_path, parameters):
    """
    This function takes in a decoded analysis state and crops it according to
    specified cropping points.

    Args:
        input_path the path of the decoding file

    """

    # Determine output .tif file path
    sql = "SELECT mouse,session,trial,is_rest,decoding_v,cropping_v,input,home_path FROM Analysis WHERE decoding_main=?"
    val = [input_path, ]
    mycursor.execute(sql, val)
    myresult = mycursor.fetchall()
    data = []
    aux = []
    for x in myresult:
        aux = x
    for y in aux:
        data.append(y)

    # update the database
    if data[5] == 0:
        data[5] = 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}"
        output_tif_file_path = f"data/interim/cropping/main/{file_name}.tif"
        sql1 = "UPDATE Analysis SET cropping_main=?,cropping_v=? WHERE decoding_main=? "
        val1 = [output_tif_file_path, data[5], input_path]
        mycursor.execute(sql1, val1)

    else:
        data[5] += 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}"
        output_tif_file_path = f"data/interim/cropping/main/{file_name}.tif"
        sql2 = "INSERT INTO Analysis (cropping_main,cropping_v) VALUES (?,?)"
        val2 = [output_tif_file_path, data[5]]
        mycursor.execute(sql2, val2)
        database.commit()
        sql3 = "UPDATE Analysis SET decoding_main=?,decoding_v=?,mouse=?,session=?,trial=?,is_rest=?,input=?,home_path=? WHERE cropping_main=? AND cropping_v=?"
        val3 = [input_path, data[4], data[0], data[1], data[2], data[3], data[6], data[7], output_tif_file_path,
                data[5]]
        mycursor.execute(sql3, val3)

    # Spatial cropping
    input_path = os.path.join(os.environ['DATA_DIR_LOCAL'], input_path)
    logging.info('Loading movie')
    m = cm.load(input_path)
    logging.info('Loaded movie')

    [x_, _x, y_, _y] = parameters['cropping_points_spatial']

    logging.info('Performing spatial cropping')
    m = m[:, x_:_x, y_:_y]
    logging.info(' Spatial cropping finished')
    output_tif_file_path_full = os.path.join(os.environ['DATA_DIR_LOCAL'], output_tif_file_path)
    # Save the movie
    m.save(output_tif_file_path_full)

    return output_tif_file_path, data[5]
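# Hypothetical usage sketch: crop one decoded movie with fixed spatial cropping
# points (the input path and coordinates are assumed, and the Analysis database
# connection used inside the function must already be open).
parameters = {'cropping_points_spatial': [80, 450, 20, 400]}
output_tif_file_path, cropping_v = run_cropper(
    'data/interim/decoding/main/mouse_1_session_1_trial_1.1.v1.tif', parameters)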
Example #13
def local_correlations_movie_offline(file_name,
                                     Tot_frames=None,
                                     fr: int = 10,
                                     window: int = 30,
                                     stride: int = 3,
                                     swap_dim: bool = True,
                                     eight_neighbours: bool = True,
                                     order_mean: int = 1,
                                     ismulticolor: bool = False,
                                     dview=None):

    if Tot_frames is None:
        Tot_frames = cm.load(file_name).shape[0]

    params: List = [[
        file_name,
        range(j, j + window), eight_neighbours, swap_dim, order_mean,
        ismulticolor
    ] for j in range(0, Tot_frames - window, stride)]
    if dview is None:
        parallel_result = list(map(local_correlations_movie_parallel, params))
    else:
        if 'multiprocessing' in str(type(dview)):
            parallel_result = dview.map_async(
                local_correlations_movie_parallel, params).get(4294967)
        else:
            parallel_result = dview.map_sync(local_correlations_movie_parallel,
                                             params)
            dview.results.clear()

    mm = cm.movie(np.concatenate(parallel_result, axis=0), fr=fr)
    return mm
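# Hypothetical usage sketch: compute a sliding-window local-correlation movie
# for one motion-corrected file and save it next to the input (the file name
# and window settings are assumed).
corr_mov = local_correlations_movie_offline('msCam_rig.tif', fr=10, window=30,
                                            stride=3, swap_dim=False, dview=None)
corr_mov.save('msCam_rig_local_corr.tif')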
Example #14
def main():
    pass  # For compatibility between running under Spyder and the CLI

# %% load data

    fname = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')]

# %% set up some parameters

    fr = 10  # frame rate (Hz)
    decay_time = .75  # approximate length of transient event in seconds
    gSig = [6, 6]  # expected half size of neurons
    p = 1  # order of AR indicator dynamics
    min_SNR = 1  # minimum SNR for accepting candidate components
    thresh_CNN_noisy = 0.65  # CNN threshold for candidate components
    gnb = 2  # number of background components
    init_method = 'cnmf'  # initialization method

    # set up CNMF initialization parameters

    init_batch = 400  # number of frames for initialization
    patch_size = 32  # size of patch
    stride = 3  # amount of overlap between patches
    K = 4  # max number of components in each patch

    params_dict = {'fr': fr,
                   'fnames': fname,
                   'decay_time': decay_time,
                   'gSig': gSig,
                   'p': p,
                   'min_SNR': min_SNR,
                   'nb': gnb,
                   'init_batch': init_batch,
                   'init_method': init_method,
                   'rf': patch_size//2,
                   'stride': stride,
                   'sniper_mode': True,
                   'thresh_CNN_noisy': thresh_CNN_noisy,
                   'K': K}
    opts = cnmf.params.CNMFParams(params_dict=params_dict)
# %% fit with online object
    cnm = cnmf.online_cnmf.OnACID(params=opts)
    cnm.fit_online()

# %% plot contours

    logging.info('Number of components:' + str(cnm.estimates.A.shape[-1]))
    Cn = cm.load(fname[0], subindices=slice(0,500)).local_correlations(swap_dim=False)
    cnm.estimates.plot_contours(img=Cn)

# %% pass through the CNN classifier with a low threshold (keeps clearer neuron shapes and excludes processes)
    use_CNN = True
    if use_CNN:
        # threshold for CNN classifier
        opts.set('quality', {'min_cnn_thr': 0.05})
        cnm.estimates.evaluate_components_CNN(opts)
        cnm.estimates.plot_contours(img=Cn, idx=cnm.estimates.idx_components)
# %% plot results
    cnm.estimates.view_components(img=Cn, idx=cnm.estimates.idx_components)
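# %% save results (hypothetical follow-up sketch; the output file name is assumed)
    cnm.save('demo_OnACID_results.hdf5')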
Example #15
    def on_cnmfButtonFilesCorrImage_clicked(self):
        file = self.cnmfListFilesInput.item(0).text()

        pl.imshow(cm.load(file).local_correlations(eight_neighbours=True,
                                                   swap_dim=False,
                                                   frames_per_chunk=1500,
                                                   order_mean=1),
                  vmax=float(self.cnmfMaxCorrImage.text()))
Example #16
def merge_denoised_tiff_files(movie, loaddir, savedir):
    #%%
    cpu_num = 2
    #cpu_num_spikepursuit = 1

    filenames = os.listdir(loaddir)
    counter = 0
    filenames_final = list()
    residualnames = list()
    while 'denoised_{}.tif'.format(counter) in filenames:
        m_new_denoised = cm.load(
            os.path.join(loaddir,
                         'denoised_{}.tif'.format(counter))).transpose(
                             2, 0, 1)
        i_new_sn = imio.imread(
            os.path.join(loaddir, 'Sn_image_{}.tif'.format(counter)))[:, :, 0]
        m_new_trend = cm.load(
            os.path.join(loaddir,
                         'trend_{}.tif'.format(counter))).transpose(2, 0, 1)
        movief = m_new_denoised * i_new_sn + m_new_trend
        movief.save(os.path.join(loaddir, 'movie{}.tif'.format(counter)))
        filenames_final.append(
            os.path.join(loaddir, 'movie{}.tif'.format(counter)))
        residualnames.append(
            os.path.join(loaddir, 'PMD_residual_{}.tif'.format(counter)))
        counter += 1
        print(counter)

    #%%
    residuals_movie = cm.load_movie_chain(residualnames)
    residuals_movie.save(os.path.join(savedir, 'PMD_residuals.tif'))
    #movie_big = cm.load_movie_chain(filenames_final)
    # %% Memory Mapping
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=cpu_num,
                                                     single_thread=False)
    fname_new = cm.save_memmap(filenames_final,
                               base_name=movie['movie_name'],
                               dview=dview,
                               n_chunks=10,
                               order='C')
    dview.terminate()
    fname = pathlib.Path(fname_new).name
    shutil.move(fname_new, os.path.join(savedir, fname))
    print('done')
Example #17
def write_hdf5_movie(
    movie_name: str,
    memmap_files: Union[str, List[str]],
    frame_shape: Tuple[int, int],
    dataset_name: str = "mov",
    compression: Optional[str] = None,
):
    """Function to write an hdf5 formatted movie

    Parameters
    ----------
    movie_name : str
        name of the movie to be saved
    memmap_files : str or list
        the memmap files to be saved as an hdf5 movie.
        If a list of files is provided, they will all
        be concatenated into a single file
    frame_shape : tuple
        The shape of a single frame in the movie.
        The hdf5 file will be chunked by frame
    dataset_name : str
        The name of the dataset in the hdf5 file.
        The default value is 'mov'
    compression : Optional[str]
        The compression to be applied to the movie.
        Valid options are: 'gzip', 'lzf'
        See the h5py docs for the more information:
        https://docs.h5py.org/en/stable/high/dataset

    """
    from caiman import load

    with h5py.File(movie_name, mode="w") as h5f:
        _ = h5f.create_dataset(
            dataset_name,
            shape=(0, ) + frame_shape,
            maxshape=(None, ) + frame_shape,
            dtype=np.uint16,
            compression=compression,
            chunks=(1, ) + frame_shape,
        )

    with h5py.File(movie_name, mode="a") as h5f:
        for f in memmap_files:
            mov = load(f)
            mov_array = mov.astype(np.uint16)
            dset = h5f[dataset_name]

            curr_length = dset.shape[0]
            mov_length = mov.shape[0]
            new_length = curr_length + mov_length
            new_shape = (new_length, ) + frame_shape

            dset.resize(new_shape)
            dset[curr_length::, :, :] = mov_array

            h5f.flush()
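# Hypothetical usage sketch: concatenate two motion-corrected memmap files into
# a single gzip-compressed HDF5 movie chunked frame by frame (file names and
# frame shape are assumed).
write_hdf5_movie(movie_name='session_movie.hdf5',
                 memmap_files=['trial_1_els.mmap', 'trial_2_els.mmap'],
                 frame_shape=(512, 512),
                 dataset_name='mov',
                 compression='gzip')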
Example #18
def load():
    global summary_images, dims, cur_img
    fpath = F.getOpenFileName(caption='Load Summary Images',
                              filter='HDF5 (*.h5 *.hdf5)')[0]
    summary_images = cm.load(fpath)
    summary_images = summary_images.transpose([0, 2, 1])
    summary_images = np.flip(summary_images, axis=2)
    cur_img = summary_images[0]
    img.setImage(cur_img)
    dims = summary_images[0].shape
Example #19
    def motion_correct_pwrigid(self, fname):
        dview = None
        try:
            c, dview, n_processes = cm.cluster.setup_cluster(
                backend='local', n_processes=None, single_thread=False)

            niter_rig = 1  # number of iterations for rigid motion correction
            max_shifts = self.get_dict_param(
                'max_shifts_pwrigid', 'tuple_int')  # maximum allowed rigid shift
            # for parallelization split the movies into num_splits chunks across time
            splits_rig = self.get_dict_param('splits_rig', 'single_int')
            # start a new patch for pw-rigid motion correction every x pixels
            strides = self.get_dict_param('strides', 'tuple_int')
            # overlap between patches (size of patch strides+overlaps)
            overlaps = self.get_dict_param('overlaps', 'tuple_int')
            # for parallelization split the movies into num_splits chunks across time
            splits_els = self.get_dict_param('splits_els', 'single_int')

            upsample_factor_grid = self.get_dict_param(
                'upsample_factor_grid', 'single_int'
            )  # upsample factor to avoid smearing when merging patches
            # maximum deviation allowed for patch with respect to rigid shifts
            max_deviation_rigid = self.get_dict_param('max_deviation_rigid',
                                                      'single_int')
            # first we create a motion correction object with the parameters specified
            min_mov = cm.load(fname[0], subindices=range(200)).min()
            # this will be subtracted from the movie to make it non-negative

            print(
                str([
                    max_shifts, splits_rig, strides, overlaps, splits_els,
                    upsample_factor_grid, max_deviation_rigid, min_mov
                ]))
            mc = MotionCorrect(fname,
                               min_mov,
                               dview=dview,
                               max_shifts=max_shifts,
                               niter_rig=niter_rig,
                               splits_rig=splits_rig,
                               strides=strides,
                               overlaps=overlaps,
                               splits_els=splits_els,
                               border_nan='copy',
                               upsample_factor_grid=upsample_factor_grid,
                               max_deviation_rigid=max_deviation_rigid,
                               shifts_opencv=True,
                               nonneg_movie=True)

            mc.motion_correct_pwrigid(save_movie=True)
            self.motion_correct = mc

        except Exception as e:
            raise e
        finally:
            cm.cluster.stop_server(dview=dview)
Example #20
def plot_movie_frame_cropped(row):
    '''
    This function creates an image for visual inspection of a cropped frame
    :param row: dictionary with all relevant information about the state of analysis
    :return: none
    '''
    output = row['cropping_output']
    cropped_file = eval(output)['main']
    m = cm.load(cropped_file)
    #print(m.shape)
    pl.imshow(m[0,:,:],cmap='gray')
    return
Example #21
def load():
    global summary_images, dims, cur_img, p1
    fpath = F.getOpenFileName(caption='Load Summary Images',
                              filter='TIFF (*.tif);;HDF5 (*.h5 *.hdf5)')[0]
    summary_images = cm.load(fpath)
    summary_images = summary_images.transpose([0, 2, 1])
    summary_images = np.flip(summary_images, axis=2)
    cur_img = summary_images[0]
    dims = summary_images[0].shape
    #p1.resize(dims[0], dims[1])
    img.setImage(cur_img)
    p1.setAspectLocked()
Example #22
def local_correlations_movie_parallel(params: Tuple) -> np.ndarray:
    mv_name, idx, eight_neighbours, swap_dim, order_mean, ismulticolor, remove_baseline, winSize_baseline, quantil_min_baseline, gaussian_blur = params
    mv = cm.load(mv_name, subindices=idx, in_memory=True)
    if gaussian_blur:
        mv = mv.gaussian_blur_2D()

    if remove_baseline:
        mv.removeBL(quantilMin=quantil_min_baseline, windowSize=winSize_baseline, in_place=True)

    if ismulticolor:
        return local_correlations_multicolor(mv, swap_dim=swap_dim)[None, :, :].astype(np.float32)
    else:
        return local_correlations(mv, eight_neighbours=eight_neighbours, swap_dim=swap_dim,
                                  order_mean=order_mean)[None, :, :].astype(np.float32)
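# Hypothetical usage sketch: the 10-element params tuple expected above, used to
# compute the local-correlation image of the first 1000 frames of one file
# (the file name and the baseline-removal settings are assumed).
params = ('msCam_concat.tif',   # mv_name
          range(0, 1000),       # idx: frames to load
          True,                 # eight_neighbours
          False,                # swap_dim
          1,                    # order_mean
          False,                # ismulticolor
          True,                 # remove_baseline
          400,                  # winSize_baseline
          8,                    # quantil_min_baseline
          False)                # gaussian_blur
corr_img = local_correlations_movie_parallel(params)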
Example #23
    def pre_process_handle(args):
        # todo: todocument

        from scipy.ndimage import filters as ft
        import logging

        fil, resize_factors, diameter_bilateral_blur, median_filter_size = args

        name_log = fil[:-4] + '_LOG'
        logger = logging.getLogger(name_log)
        hdlr = logging.FileHandler(name_log)

        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)

        logger.addHandler(hdlr)
        logger.setLevel(logging.INFO)

        logger.info('START')
        logger.info(fil)

        mov = cm.load(fil, fr=30)
        logger.info('Read file')

        mov = mov.resize(1, 1, resize_factors[0])
        logger.info('Resize')

        mov = mov.bilateral_blur_2D(diameter=diameter_bilateral_blur)
        logger.info('Bilateral')

        mov1 = cm.movie(ft.median_filter(mov, median_filter_size), fr=30)
        logger.info('Median filter')

        mov1 = mov1.resize(1, 1, resize_factors[1])
        logger.info('Resize 2')

        mov1 = mov1 - cm.utils.stats.mode_robust(mov1, 0)
        logger.info('Mode')

        mov = mov.resize(1, 1, resize_factors[1])
        logger.info('Resize')

        mov.save(fil[:-4] + '_compress_.tif')
        logger.info('Save 1')

        mov1.save(fil[:-4] + '_BL_compress_.tif')
        logger.info('Save 2')
        return 1
Example #24
    def pre_process_handle(args):
        # todo: todocument

        from scipy.ndimage import filters as ft
        import logging

        fil, resize_factors, diameter_bilateral_blur, median_filter_size = args

        name_log = fil[:-4] + '_LOG'
        logger = logging.getLogger(name_log)
        hdlr = logging.FileHandler(name_log)

        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)

        logger.addHandler(hdlr)
        logger.setLevel(logging.INFO)

        logger.info('START')
        logger.info(fil)

        mov = cm.load(fil, fr=30)
        logger.info('Read file')

        mov = mov.resize(1, 1, resize_factors[0])
        logger.info('Resize')

        mov = mov.bilateral_blur_2D(diameter=diameter_bilateral_blur)
        logger.info('Bilateral')

        mov1 = cm.movie(ft.median_filter(mov, median_filter_size), fr=30)
        logger.info('Median filter')

        mov1 = mov1.resize(1, 1, resize_factors[1])
        logger.info('Resize 2')

        mov1 = mov1 - cm.utils.stats.mode_robust(mov1, 0)
        logger.info('Mode')

        mov = mov.resize(1, 1, resize_factors[1])
        logger.info('Resize')

        mov.save(fil[:-4] + '_compress_.tif')
        logger.info('Save 1')

        mov1.save(fil[:-4] + '_BL_compress_.tif')
        logger.info('Save 2')
        return 1
Example #25
def get_fig_gSig_filt_vals(cropped_file, gSig_filt_vals):
    """

    Plot the original cropped frame and several spatially filtered versions for comparison
    :param cropped_file: path to the cropped movie file
    :param gSig_filt_vals: array containing the sizes of the spatial filters that will be applied
    :return: figure

    """

    m = cm.load(cropped_file)
    temp = cm.motion_correction.bin_median(m)
    N = len(gSig_filt_vals)
    fig, axes = plt.subplots(int(math.ceil((N + 1) / 2)), 2)
    axes[0, 0].imshow(temp, cmap='gray')
    axes[0, 0].set_title('unfiltered')
    axes[0, 0].axis('off')
    for i in range(0, N):
        gSig_filt = gSig_filt_vals[i]
        m_filt = [
            high_pass_filter_space(m_, (gSig_filt, gSig_filt)) for m_ in m
        ]
        temp_filt = cm.motion_correction.bin_median(m_filt)
        axes.flatten()[i + 1].imshow(temp_filt, cmap='gray')
        axes.flatten()[i + 1].set_title(f'gSig_filt = {gSig_filt}')
        axes.flatten()[i + 1].axis('off')
    if N + 1 != axes.size:
        for i in range(N + 1, axes.size):
            axes.flatten()[i].axis('off')

    # Get output file paths
    sql = "SELECT mouse,session,trial,is_rest,cropping_v,decoding_v,motion_correction_v FROM Analysis WHERE cropping_main=%s "
    val = [
        cropped_file,
    ]
    mycursor.execute(sql, val)
    myresult = mycursor.fetchall()
    data = []
    for x in myresult:
        data += x

    file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[5]}.{data[4]}.{data[6]}"
    data_dir = 'data/interim/motion_correction/'
    output_meta_gSig_filt = data_dir + f'meta/figures/frame_gSig_filt/{file_name}.png'

    fig.savefig(output_meta_gSig_filt)

    return fig
Example #26
def reconstructed_movie(estimates, fnames, idx, scope, flip_signal):
    """ Create reconstructed movie in VolPy. The movie has three panels: 
    motion corrected movie on the left panel, movie removed from the baseline
    on the mid panel and reconstructed movie on the right panel.
    Args: 
        estimates: dict
            estimates dictionary contain results of VolPy
            
        fnames: list
            motion corrected movie in F-order memory mapping format
            
        idx: list
            index of selected neurons
            
        scope: list
            scope of number of frames in reconstructed movie
            
        flip_signal: boolean
            if True the signal will be flipped (for voltron) 
    
    Return:
        mv_all: 3-D array
            motion corrected movie, movie removed from baseline, reconstructed movie
            concatenated into one matrix
    """
    # motion corrected movie and movie removed from baseline
    mv = cm.load(fnames, fr=400)[scope[0]:scope[1]]
    dims = (mv.shape[1], mv.shape[2])
    mv_bl = mv.computeDFF(secsWindow=0.1)[0]
    mv = (mv - mv.min()) / (mv.max() - mv.min())
    if flip_signal:
        mv_bl = -mv_bl
    mv_bl[mv_bl < np.percentile(mv_bl, 3)] = np.percentile(mv_bl, 3)
    mv_bl[mv_bl > np.percentile(mv_bl, 98)] = np.percentile(mv_bl, 98)
    mv_bl = (mv_bl - mv_bl.min()) / (mv_bl.max() - mv_bl.min())

    # reconstructed movie
    estimates['weights'][estimates['weights'] < 0] = 0
    A = estimates['weights'][idx].transpose([1, 2, 0]).reshape((-1, len(idx)))
    C = estimates['t_rec'][idx, scope[0]:scope[1]]
    mv_rec = np.dot(A, C).reshape(
        (dims[0], dims[1], scope[1] - scope[0])).transpose((2, 0, 1))
    mv_rec = cm.movie(mv_rec, fr=400)
    mv_rec = (mv_rec - mv_rec.min()) / (mv_rec.max() - mv_rec.min())
    mv_all = cm.concatenate((mv, mv_bl, mv_rec), axis=2)
    return mv_all
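# Hypothetical usage sketch: reconstruct the first 1000 frames for two selected
# neurons and play the three-panel comparison (the estimates dictionary and the
# memory-mapped file name are assumed).
mv_all = reconstructed_movie(estimates, fnames='memmap_F_order.mmap',
                             idx=[0, 1], scope=[0, 1000], flip_signal=True)
mv_all.play(fr=40, magnification=2)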
Example #27
def compute_metrics(selected_rows):
    '''
    Metrics for cropping.
    
    Metrics
        Total movie
            min
            mean
            max
        Frame wise
            min
            mean 
            max
    '''

    for index, row in selected_rows.iterrows():
        logging.info(index)
        output = eval(row.loc['cropping_output'])
        m = cm.load(output['main'])
        min_array, mean_array, max_array = [], [], []
        for m_ in m:
            min_array.append(m_.min())
            mean_array.append(m_.mean())
            max_array.append(m_.max())
        total_min = m.min()
        total_mean = m.mean()
        total_max = m.max()

    file_name = src.pipeline.create_file_name(1, index)
    metrics_file_path = f'data/interim/cropping/meta/metrics/{file_name}.pkl'
    # Load any previously stored metrics, update them and write the file back
    try:
        with open(metrics_file_path, 'rb') as f:
            metrics = pickle.load(f)
    except (OSError, EOFError, pickle.UnpicklingError):
        metrics = {}
    metrics['min_array'] = min_array
    metrics['mean_array'] = mean_array
    metrics['max_array'] = max_array
    metrics['total_min'] = total_min
    metrics['total_mean'] = total_mean
    metrics['total_max'] = total_max
    with open(metrics_file_path, 'wb') as f:
        pickle.dump(metrics, f)

    return
Example #28
def run_pca_ica(fnames):
    m = cm.load(fnames)
    
    # run pca-ica
    output, _ = m.IPCA_stICA(componentsICA=15, mu=0.05)
    masks = output.copy()
    masks = np.array(extractROIsFromPCAICA(masks)[0])
    masks = masks / np.linalg.norm(masks, ord='fro', axis=(1,2))[:, np.newaxis, np.newaxis]
    spatial = masks.copy()
    
    plt.imshow(spatial.sum(0));plt.show()

    # from masks recover signal
    temporal = m.extract_traces_from_masks(masks)
    temporal = -signal_filter(temporal.T, freq=15, fr=400).T
    
    result = {'spatial':spatial, 'temporal':temporal}
    save_path = os.path.join(os.path.split(fnames)[0], 'pca-ica', f'pca-ica_{os.path.split(fnames)[1][:-5]}')
    np.save(save_path, result)
Example #29
def local_correlations_movie_offline(file_name, Tot_frames=None, fr=10, window=30, stride=3, swap_dim=True, eight_neighbours=True, order_mean=1, ismulticolor=False, dview=None):
        import caiman as cm

        if Tot_frames is None:
            Tot_frames = cm.load(file_name).shape[0]

        params = [[file_name,range(j,j + window), eight_neighbours, swap_dim, order_mean, ismulticolor] for j in range(0,Tot_frames - window,stride)]
        if dview is None:
#            parallel_result = [self[j:j + window, :, :].local_correlations(
#                    eight_neighbours=True,swap_dim=swap_dim, order_mean=order_mean)[np.newaxis, :, :] for j in range(T - window)]
            parallel_result = list(map(local_correlations_movie_parallel,params))

        else:
            if 'multiprocessing' in str(type(dview)):
                parallel_result = dview.map_async(
                        local_correlations_movie_parallel, params).get(4294967)
            else:
                parallel_result = dview.map_sync(
                    local_correlations_movie_parallel, params)
                dview.results.clear()

        mm = cm.movie(np.concatenate(parallel_result, axis=0),fr=fr)
        return mm
def map_corr(scan):
    '''This part of the code is in a mapping function that's run over different
    movies in parallel
    '''
    import caiman as cm
    if type(scan) is str:
        scan = cm.load(scan)

    # h x w x num_frames
    chunk = np.array(scan).transpose([1, 2, 0])
    # Subtract overall brightness per frame
    chunk -= chunk.mean(axis=(0, 1))

    # Compute sum_x and sum_x^2
    chunk_sum = np.sum(chunk, axis=-1, dtype=float)
    chunk_sqsum = np.sum(chunk**2, axis=-1, dtype=float)

    # Compute sum_xy: Multiply each pixel by its eight neighbors
    chunk_xysum = np.zeros((chunk.shape[0], chunk.shape[1], 8))
    # amount of 90 degree rotations
    for k in [0, 1, 2, 3]:
        rotated_chunk = np.rot90(chunk, k=k)
        rotated_xysum = np.rot90(chunk_xysum, k=k)

        # Multiply each pixel by one above and by one above to the left
        rotated_xysum[1:, :, k] = np.sum(rotated_chunk[1:] * rotated_chunk[:-1],
                                         axis=-1, dtype=float)
        rotated_xysum[1:, 1:, 4 + k] = np.sum(rotated_chunk[1:, 1:] *
                                              rotated_chunk[:-1, :-1], axis=-1, dtype=float)

        # Return back to original orientation
        chunk = np.rot90(rotated_chunk, k=4 - k)
        chunk_xysum = np.rot90(rotated_xysum, k=4 - k)

    num_frames = chunk.shape[-1]

    return chunk_sum, chunk_sqsum, chunk_xysum, num_frames
Example #31
def get_fig_gSig_filt_vals(row, gSig_filt_vals):
    '''
    Plot the original cropped frame and several spatially filtered versions for comparison
    :param row: analysis state row for which the filtering is computed
    :param gSig_filt_vals: array containing the sizes of the spatial filters that will be applied
    :return: figure
    '''
    output = row['cropping_output']
    cropped_file = eval(output)['main']
    m = cm.load(cropped_file)
    temp = cm.motion_correction.bin_median(m)
    N = len(gSig_filt_vals)
    fig, axes = plt.subplots(int(math.ceil((N + 1) / 2)), 2)
    axes[0, 0].imshow(temp, cmap='gray')
    axes[0, 0].set_title('unfiltered')
    axes[0, 0].axis('off')
    for i in range(0, N):
        gSig_filt = gSig_filt_vals[i]
        m_filt = [high_pass_filter_space(m_, (gSig_filt, gSig_filt)) for m_ in m]
        temp_filt = cm.motion_correction.bin_median(m_filt)
        axes.flatten()[i + 1].imshow(temp_filt, cmap='gray')
        axes.flatten()[i + 1].set_title(f'gSig_filt = {gSig_filt}')
        axes.flatten()[i + 1].axis('off')
    if N + 1 != axes.size:
        for i in range(N + 1, axes.size):
            axes.flatten()[i].axis('off')

    # Get output file paths
    index = row.name
    data_dir = 'data/interim/motion_correction/'
    step_index = db.get_step_index('motion_correction')
    file_name = db.create_file_name(step_index, index)
    output_meta_gSig_filt = data_dir + f'meta/figures/frame_gSig_filt/{file_name}.png'

    fig.savefig(output_meta_gSig_filt)

    return fig
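# Hypothetical usage sketch: compare a few spatial high-pass filter sizes for
# one analysis-state row (the row object is assumed to come from the states
# database queried elsewhere in this pipeline).
fig = get_fig_gSig_filt_vals(row, gSig_filt_vals=[5, 7, 9, 11])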
Example #32
def motion_corr(fnames, dview, opts, disp_movie, is_3d=False):
    """Perform motion correction"""
    # Create a motion correction object with the parameters specified. Note
    # that the file is not loaded in memory
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))

    # Run piecewise-rigid motion correction using NoRMCorre
    mc.motion_correct(save_movie=True)

    # Determine maximum shift to be used for trimming against NaNs
    border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0

    # Compare with original movie
    if disp_movie and not is_3d:
        m_els = cm.load(mc.fname_tot_els)
        m_orig = cm.load_movie_chain(fnames)
        ds_ratio = 0.2
        cm.concatenate([
            m_orig.resize(1, 1, ds_ratio) - mc.min_mov * mc.nonneg_movie,
            m_els.resize(1, 1, ds_ratio)
        ],
                       axis=2).play(fr=60, gain=15, magnification=2,
                                    offset=0)  # press q to exit
    return mc, border_to_0
def parameters_test_gSig(path, figname, gSig_filt_list=None):
    m_orig = cm.load(path)

    if gSig_filt_list is None:
        gSig_filt_list = [(2, 2), (4, 4), (6, 6), (8, 8), (10, 10), (20, 20),
                          (30, 30)]
    m_filt_list = []
    for i, gSig_filt in enumerate(gSig_filt_list):
        m_filt_list.append(
            cm.movie(
                np.array(
                    [high_pass_filter_space(m_, gSig_filt) for m_ in m_orig])))

    import matplotlib.pyplot as plt
    for i, mov in enumerate(m_filt_list):
        plt.figure()
        plt.imshow(mov[0], cmap='gray')
        gSig_size = gSig_filt_list[i]
        plt.title(f'{figname} \n gSig_size = {gSig_size}')
        plt.savefig(
            f'data/motion_correction/png/{figname}_gSig_experiment_{gSig_size}.png'
        )

    return
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)
#%%
if params_movie['fname'] is None:
    all_files = [os.path.abspath(flfl) for flfl in glob.glob('*.tif')]
    all_files.sort()

else:
    all_files = [params_movie['fname']]

print(all_files)
#%% RIGID MOTION CORRECTION
total_template_rig = None
total_shifts = []
templates_all = []
add_to_movie = np.min(cm.load(all_files[0]))
max_shifts = params_movie['max_shifts']  # maximum allowed shifts
num_iter = 1  # number of times the algorithm is run
# for parallelization split the movies into num_splits chunks across time
splits = params_movie['splits_rig']
# if none all the splits are processed and the movie is saved
num_splits_to_process = params_movie['num_splits_to_process_rig']
shifts_opencv = True  # apply shifts the fast way (but this smooths the results)
save_movie_rigid = False  # save the movies vs just get the template
for file_to_process in all_files:
    t1 = time.time()
    fname = file_to_process

    t1 = time.time()
    fname_tot_rig, total_template_rig, templates_rig, shifts_rig = cm.motion_correction.motion_correct_batch_rigid(fname,
                                                                                                                   max_shifts, dview=dview, splits=splits, num_splits_to_process=num_splits_to_process,
# overlap between patches (size of patch strides+overlaps)
overlaps = params_movie['overlaps']
# for parallelization split the movies into num_splits chunks across time
splits_els = params_movie['splits_els']
# if none all the splits are processed and the movie is saved
num_splits_to_process_els = params_movie['num_splits_to_process_els']
# upsample factor to avoid smearing when merging patches
upsample_factor_grid = params_movie['upsample_factor_grid']
# maximum deviation allowed for patch with respect to rigid
# shift
max_deviation_rigid = params_movie['max_deviation_rigid']
#%% download movie if not there
if fname == 'example_movies/demoSue2x.tif':
    download_demo()
#%%
m_orig = cm.load(fname)
#%% play movie
downsample_ratio = .2
offset_mov = -np.min(m_orig[:100])
m_orig.resize(1, 1, downsample_ratio).play(
    gain=2, offset=offset_mov, fr=30, magnification=1)
#%% RUN ANALYSIS
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)
#%%
# movie must be mostly positive for this to work
min_mov = cm.load(fname, subindices=range(400)).min()

mc = MotionCorrect(fname, min_mov,
                   dview=dview, max_shifts=max_shifts, niter_rig=niter_rig, splits_rig=splits_rig,
                   num_splits_to_process_rig=num_splits_to_process_rig,
def process_movie_parallel(arg_in):


    fname, fr, margins_out, template, max_shift_w, max_shift_h, remove_blanks, apply_smooth, save_hdf5 = arg_in

    if template is not None:
        if isinstance(template, str):
            if os.path.exists(template):
                template = cm.load(template, fr=1)
            else:
                raise Exception('Path to template does not exist:'+template)                
#    with open(fname[:-4]+'.stout', "a") as log:
#        print fname
#        sys.stdout = log

    #    import pdb
    #    pdb.set_trace()
    type_input = str(type(fname)) 
    if 'movie' in type_input:        
        print((type(fname)))
        Yr=fname

    elif 'ndarray' in type_input:
        Yr = cm.movie(np.array(fname, dtype=np.float32), fr=fr)
    elif isinstance(fname, str):
        Yr = cm.load(fname, fr=fr)
    else:
        raise Exception('Unknown input type: ' + type_input)

    if Yr.ndim>1:

        print('loaded')    

        if apply_smooth:

            print('applying smoothing')

            Yr=Yr.bilateral_blur_2D(diameter=10,sigmaColor=10000,sigmaSpace=0)

#        bl_yr=np.float32(np.percentile(Yr,8))    

 #       Yr=Yr-bl_yr     # needed to remove baseline

        print('Remove BL')

        if margins_out!=0:

            Yr=Yr[:,margins_out:-margins_out,margins_out:-margins_out] # borders create troubles

        print('motion correcting')

        Yr,shifts,xcorrs,template=Yr.motion_correct(max_shift_w=max_shift_w, max_shift_h=max_shift_h,  method='opencv',template=template,remove_blanks=remove_blanks) 

  #      Yr = Yr + bl_yr           

        if ('movie' in type_input) or ('ndarray' in type_input):
            print('Returning Values')
            return Yr, shifts, xcorrs, template

        else:     

            print('median computing')        

            template=Yr.bin_median()

            print('saving')  

            idx_dot=len(fname.split('.')[-1])

            if save_hdf5:

                Yr.save(fname[:-idx_dot]+'hdf5')        

            print('saving 2')                 

            np.savez(fname[:-idx_dot]+'npz',shifts=shifts,xcorrs=xcorrs,template=template)

            print('deleting')        

            del Yr

            print('done!')

            return fname[:-idx_dot] 
        #sys.stdout = sys.__stdout__ 
    else:
        return None
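# Hypothetical usage sketch: the 9-element argument tuple expected above, run on
# a single file with a 10-pixel shift search range and no starting template
# (the file name and parameter values are assumed).
arg_in = ('trial_raw.tif', 30., 0, None, 10, 10, False, True, True)
base_name = process_movie_parallel(arg_in)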
Example #37
def save_memmap(filenames, base_name='Yr', resize_fact=(1, 1, 1), remove_init=0, idx_xy=None,
                order='F', xy_shifts=None, is_3D=False, add_to_movie=0, border_to_0=0, dview = None,
                n_chunks=100):

    """ Efficiently write data from a list of tif files into a memory mappable file

    Parameters:
    ----------
        filenames: list
            list of tif files or list of numpy arrays

        base_name: str
            the base used to build the file name. IT MUST NOT CONTAIN "_"

        resize_fact: tuple
            x,y, and z downsampling factors (0.5 means downsampled by a factor 2)

        remove_init: int
            number of frames to remove at the beginning of each tif file
            (used for resonant scanning images if the laser is turned on trial by trial)

        idx_xy: tuple size 2 [or 3 for 3D data]
            for selecting slices of the original FOV, for instance
            idx_xy = (slice(150,350,None), slice(150,350,None))

        order: string
            whether to save the file in 'C' or 'F' order

        xy_shifts: list
            x and y shifts computed by a motion correction algorithm to be applied before memory mapping

        is_3D: boolean
            whether it is 3D data
        add_to_movie: floating-point
            value to add to each image point, typically to keep negative values out.
        border_to_0: (undocumented)
        dview:       (undocumented)
        n_chunks:    (undocumented)
    Returns:
    -------
        fname_new: the name of the mapped file, the format is such that
            the name will contain the frame dimensions and the number of frames

    """
    if type(filenames) is not list:
        raise Exception('input should be a list of filenames')

    if len(filenames) > 1:
        is_inconsistent_order = False
        for file__ in filenames:
            if ('order_' + order not in file__) or ('.mmap' not in file__):
                is_inconsistent_order = True


        if is_inconsistent_order: # Here we make a bunch of memmap files in the right order. Same parameters
            fname_new = cm.save_memmap_each(filenames,
                                        base_name    = base_name,
                                        order        = order,
                                        border_to_0  = border_to_0,
                                        dview        = dview,
                                        resize_fact  = resize_fact,
                                        remove_init  = remove_init,
                                        idx_xy       = idx_xy,
                                        xy_shifts    = xy_shifts,
                                        add_to_movie = add_to_movie)
        else:
            fname_new = filenames

        # The goal is to make a single large memmap file, which we do here
        if order == 'F':
            raise Exception('You cannot merge files in F order, they must be in C order')


        fname_new = cm.save_memmap_join(fname_new, base_name=base_name, dview=dview, n_chunks=n_chunks, add_to_mov = add_to_movie)

    else:
    # TODO: can be done online
        Ttot = 0
        for idx, f in enumerate(filenames):
            if isinstance(f, str): # Might not always be filenames.
                print(f)

            if is_3D:
                Yr = f if not isinstance(f, str) else tifffile.imread(f)
                if idx_xy is None:
                    Yr = Yr[remove_init:]
                elif len(idx_xy) == 2:
                    Yr = Yr[remove_init:, idx_xy[0], idx_xy[1]]
                else:
                    Yr = Yr[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]

            else:
                Yr = cm.load(f, fr=1, in_memory=True) if isinstance(f, (str, list)) else cm.movie(f) # TODO: Rewrite more legibly
                if xy_shifts is not None:
                    Yr = Yr.apply_shifts(xy_shifts, interpolation='cubic', remove_blanks=False)
                if idx_xy is None:
                    if remove_init > 0:
                        Yr = Yr[remove_init:]
                elif len(idx_xy) == 2:
                    Yr = Yr[remove_init:, idx_xy[0], idx_xy[1]]
                else:
                    raise Exception('You need to set is_3D=True for 3D data)')
                    Yr = np.array(Yr)[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]

            if border_to_0 > 0:
                min_mov = Yr.calc_min()
                Yr[:, :border_to_0, :] = min_mov
                Yr[:, :, :border_to_0] = min_mov
                Yr[:, :, -border_to_0:] = min_mov
                Yr[:, -border_to_0:, :] = min_mov

            fx, fy, fz = resize_fact
            if fx != 1 or fy != 1 or fz != 1:
                if 'movie' not in str(type(Yr)):
                    Yr = cm.movie(Yr, fr=1)
                Yr = Yr.resize(fx=fx, fy=fy, fz=fz)

            T, dims = Yr.shape[0], Yr.shape[1:]
            Yr = np.transpose(Yr, list(range(1, len(dims) + 1)) + [0])
            Yr = np.reshape(Yr, (np.prod(dims), T), order='F')
            Yr = np.ascontiguousarray(Yr, dtype=np.float32) + 0.0001 + add_to_movie

            if idx == 0:
                fname_tot = base_name + '_d1_' + str(dims[0]) + '_d2_' + str(dims[1]) + '_d3_' + str(
                    1 if len(dims) == 2 else dims[2]) + '_order_' + str(order) # TODO: Rewrite more legibly
                if isinstance(f, str):
                    fname_tot = os.path.join(os.path.split(f)[0], fname_tot)
                if len(filenames) > 1:
                    big_mov = np.memmap(fname_tot, mode='w+', dtype=np.float32,
                                    shape=prepare_shape((np.prod(dims), T)), order=order)
                    big_mov[:, Ttot:Ttot + T] = Yr
                    del big_mov
                else:
                    print('SAVING WITH numpy.tofile()')
                    Yr.tofile(fname_tot)
            else:
                big_mov = np.memmap(fname_tot, dtype=np.float32, mode='r+',
                                    shape=prepare_shape((np.prod(dims), Ttot + T)), order=order)

                big_mov[:, Ttot:Ttot + T] = Yr
                del big_mov

            sys.stdout.flush()
            Ttot = Ttot + T

        fname_new = fname_tot + '_frames_' + str(Ttot) + '_.mmap'
        try:
            # need to explicitly remove destination on windows
            os.unlink(fname_new)
        except OSError:
            pass
        os.rename(fname_tot, fname_new)

    return fname_new
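
# Hypothetical usage sketch: map two motion-corrected tif files into a single
# C-order memmap that CNMF can address as one (pixels x time) array (the file
# names and the offset used to keep the data positive are assumed).
fname_new = save_memmap(['trial_1_rig.tif', 'trial_2_rig.tif'], base_name='Yr',
                        order='C', add_to_movie=300, border_to_0=2, dview=None)
Yr, dims, T = cm.load_memmap(fname_new)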

#%%
pl.close('all')
import itertools
# 0: with gt, 1: among themselves, 2: with all consensus, 3: with at least 2 consensus
compare_code = 0
label_name = ['regions/natalia_active_regions_nd.zip', 'regions/lindsey_active_regions_nd.zip',
              'regions/sonia_active_regions_nd.zip', 'regions/ben_active_regions_nd.zip']
#label_name = ['regions/intermediate_regions/natalia_all_regions.zip','regions/intermediate_regions/lindsey_all_regions.zip','regions/intermediate_regions/sonia_all_regions.zip','regions/intermediate_regions/ben_all_regions.zip']
consensus_name = 'regions/joined_consensus_active_regions.zip'
results = dict()
for count, par in enumerate(params):
    print(os.path.join('/mnt/ceph/neuro/labeling/',
                       par[0], 'projections/correlation_image.tif'))
    c_img = cm.load(os.path.join('/mnt/ceph/neuro/labeling/',
                                 par[0], 'projections/correlation_image.tif'))
    result = dict()
    if compare_code == 0:
        iterlabels = label_name
    elif compare_code == 1:
        iterlabels = itertools.combinations(label_name, 2)
    else:
        raise Exception('Not defined')

    for region_pairs in iterlabels:
        print(region_pairs)
        try:
            if compare_code == 0:
                roi_nat = nf_read_roi_zip(os.path.join(
                    '/mnt/ceph/neuro/labeling/', par[0], consensus_name), c_img.shape)
                roi_lin = nf_read_roi_zip(os.path.join(
import glob
import pylab as pl
import caiman as cm
from caiman.components_evaluation import evaluate_components
from caiman.source_extraction.cnmf import cnmf as cnmf
from caiman.source_extraction.cnmf.online_cnmf import seeded_initialization
import os
from copy import deepcopy
from caiman.summary_images import max_correlation_image

#%% construct the seeding matrix using the structural channel (note that some components are missed - thresholding can be improved)


filename = 'example_movies/gmc_960_30mw_00001_red.tif'
Ain, mR = cm.base.rois.extract_binary_masks_from_structural_channel(
    cm.load(filename), expand_method='dilation', selem=np.ones((1, 1)))
pl.figure()
crd = cm.utils.visualization.plot_contours(
    Ain.astype('float32'), mR, thr=0.99, display_numbers=False)
pl.title('Contour plots of detected ROIs in the structural channel')

#%% choose whether to use online algorithm (OnACID) or offline (CNMF)
use_online = True

#%% some common parameters
K = 5  # number of neurons expected per patch (nuisance parameter in this case)
gSig = [7, 7]  # expected half size of neurons
merge_thresh = 0.8  # merging threshold, max correlation allowed
p = 1  # order of the autoregressive system
#%%
if use_online:
# fname_new='Yr_d1_501_d2_398_d3_1_order_F_frames_369_.mmap'
Yr, dims, T = cm.load_memmap(fname_new)
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')
# TODO: needinfo
Y = np.reshape(Yr, dims + (T,), order='F')
m_images = cm.movie(images)

# TODO: show screenshot 10
# %% correlation image
if m_images.shape[0] < 10000:
    Cn = m_images.local_correlations(
        swap_dim=params_movie['swap_dim'], frames_per_chunk=1500)
    Cn[np.isnan(Cn)] = 0
else:
    Cn = np.array(cm.load(('/'.join(fname_new.split('/')
                                    [:-3] + ['projections', 'correlation_image_better.tif'])))).squeeze()
pl.imshow(Cn, cmap='gray', vmax=.95)

# %% some parameter settings
# order of the autoregressive fit to calcium imaging in general one (slow gcamps) or two (fast gcamps fast scanning)
p = params_movie['p']
# merging threshold, max correlation allowed
merge_thresh = params_movie['merge_thresh']
# half-size of the patches in pixels. rf=25, patches are 50x50
rf = params_movie['rf']
# amount of overlap between the patches in pixels
stride_cnmf = params_movie['stride_cnmf']
# number of components per patch
K = params_movie['K']
# if dendritic. In this case you need to set init_method to sparse_nmf
is_dendrites = params_movie['is_dendrites']
Example #41
        cm.start_server(slurm_script=slurm_script)
        pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
        c = Client(ipython_dir=pdir, profile=profile)
    else:
        cm.stop_server()
        cm.start_server()
        c = Client()

    print(('Using ' + str(len(c)) + ' processes'))
    dview = c[:len(c)]
#%% set parameters and create template by rigid motion correction
t1 = time.time()
fname = '20_12__002_cr.tif'
max_shifts = (12, 12)
splits = 28  # for parallelization split the movies into num_splits chunks across time
m = cm.load(fname, subindices=slice(None, None, None))
#%%
m.play(gain=1, magnification=1, fr=60)
#%% initial template
template = cm.motion_correction.bin_median(m[100:500].copy().motion_correct(
    max_shifts[0], max_shifts[1], template=None)[0])
pl.imshow(template)
#%%
new_templ = template
add_to_movie = -np.min(template)
save_movie = False
num_iter = 2
for iter_ in range(num_iter):
    print(iter_)
    old_templ = new_templ.copy()
    if iter_ == num_iter - 1:
Example #42
def test_general():
    """  General Test of pipeline with comparison against ground truth
    A shorter version than the demo pipeline that calls comparison for the real test work



        Raises:
      ---------
        params_movie

        params_cnmf

        rig correction

        cnmf on patch

        cnmf full frame

        not able to read the file

        no groundtruth


    """
#\bug
#\warning

    global params_movie
    global params_diplay
    fname = params_movie['fname']
    niter_rig = params_movie['niter_rig']
    max_shifts = params_movie['max_shifts']
    splits_rig = params_movie['splits_rig']
    num_splits_to_process_rig = params_movie['num_splits_to_process_rig']

    cwd = os.getcwd()
    fname = download_demo(fname[0])
    m_orig = cm.load(fname)
    min_mov = m_orig[:400].min()
    comp = comparison.Comparison()
    comp.dims = np.shape(m_orig)[1:]


################ RIG CORRECTION #################
    t1 = time.time()
    mc = MotionCorrect(fname, min_mov,
                       max_shifts=max_shifts, niter_rig=niter_rig, splits_rig=splits_rig,
                       num_splits_to_process_rig=num_splits_to_process_rig,
                       shifts_opencv=True, nonneg_movie=True)
    mc.motion_correct_rigid(save_movie=True)
    m_rig = cm.load(mc.fname_tot_rig)
    bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(int)
    comp.comparison['rig_shifts']['timer'] = time.time() - t1
    comp.comparison['rig_shifts']['ourdata'] = mc.shifts_rig
###########################################

    if 'max_shifts' not in params_movie:
        fnames = params_movie['fname']
        border_to_0 = 0
    else:  # elif not params_movie.has_key('overlaps'):
        fnames = mc.fname_tot_rig
        border_to_0 = bord_px_rig
        m_els = m_rig

    idx_xy = None
    add_to_movie = -np.nanmin(m_els) + 1  # movie must be positive
    remove_init = 0
    downsample_factor = 1
    base_name = fname.split('/')[-1][:-4]  # fname is a single path string after download_demo
    name_new = cm.save_memmap_each(fnames, base_name=base_name, resize_fact=(
        1, 1, downsample_factor), remove_init=remove_init,
        idx_xy=idx_xy, add_to_movie=add_to_movie, border_to_0=border_to_0)
    name_new.sort()

    if len(name_new) > 1:
        fname_new = cm.save_memmap_join(
            name_new, base_name='Yr', n_chunks=params_movie['n_chunks'], dview=None)
    else:
        print('One file only, not saving!')
        fname_new = name_new[0]

    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    Y = np.reshape(Yr, dims + (T,), order='F')

    if np.min(images) < 0:
        # TODO: should do this in an automatic fashion with a while loop at line 367
        raise Exception('Movie too negative, add_to_movie should be larger')
    if np.sum(np.isnan(images)) > 0:
        # TODO: same here
        raise Exception(
            'Movie contains nan! You did not remove enough borders')

    Cn = cm.local_correlations(Y)
    Cn[np.isnan(Cn)] = 0
    p = params_movie['p']
    merge_thresh = params_movie['merge_thresh']
    rf = params_movie['rf']
    stride_cnmf = params_movie['stride_cnmf']
    K = params_movie['K']
    init_method = params_movie['init_method']
    gSig = params_movie['gSig']
    alpha_snmf = params_movie['alpha_snmf']

    if params_movie['is_dendrites']:
        if params_movie['init_method'] != 'sparse_nmf':
            raise Exception('dendritic requires sparse_nmf')
        if params_movie['alpha_snmf'] is None:
            raise Exception('need to set a value for alpha_snmf')


################ CNMF PART PATCH #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1, k=K, gSig=gSig, merge_thresh=params_movie['merge_thresh'], p=params_movie['p'],
                    dview=None, rf=rf, stride=stride_cnmf, memory_fact=params_movie['memory_fact'],
                    method_init=init_method, alpha_snmf=alpha_snmf, only_init_patch=params_movie[
                        'only_init_patch'],
                    gnb=params_movie['gnb'], method_deconvolution='oasis')
    comp.cnmpatch = copy.copy(cnm)
    cnm = cnm.fit(images)
    A_tot = cnm.A
    C_tot = cnm.C
    YrA_tot = cnm.YrA
    b_tot = cnm.b
    f_tot = cnm.f
    # DISCARDING
    print('Number of components: ' + str(A_tot.shape[-1]))
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_patch']
    # threshold on time variability
    fitness_min = params_movie['fitness_delta_min_patch']
    fitness_delta_min = params_movie['fitness_delta_min_patch']
    Npeaks = params_movie['Npeaks']
    traces = C_tot + YrA_tot
    idx_components, idx_components_bad = estimate_components_quality(
        traces, Y, A_tot, C_tot, b_tot, f_tot, final_frate=final_frate,
        Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min)
    #######
    A_tot = A_tot.tocsc()[:, idx_components]
    C_tot = C_tot[idx_components]
    comp.comparison['cnmf_on_patch']['timer'] = time.time() - t1
    comp.comparison['cnmf_on_patch']['ourdata'] = [A_tot.copy(), C_tot.copy()]
#################### ########################


################ CNMF PART FULL #################
    t1 = time.time()
    cnm = cnmf.CNMF(n_processes=1, k=A_tot.shape[-1], gSig=gSig, merge_thresh=merge_thresh, p=p, Ain=A_tot, Cin=C_tot,
                    f_in=f_tot, rf=None, stride=None, method_deconvolution='oasis')
    cnm = cnm.fit(images)
    # DISCARDING
    A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn
    final_frate = params_movie['final_frate']
    # threshold on space consistency
    r_values_min = params_movie['r_values_min_full']
    # threshold on time variability
    fitness_min = params_movie['fitness_delta_min_full']
    fitness_delta_min = params_movie['fitness_delta_min_full']
    Npeaks = params_movie['Npeaks']
    traces = C + YrA
    idx_components, idx_components_bad, fitness_raw, fitness_delta, r_values = estimate_components_quality(
        traces, Y, A, C, b, f, final_frate=final_frate, Npeaks=Npeaks, r_values_min=r_values_min,
        fitness_min=fitness_min,
        fitness_delta_min=fitness_delta_min, return_all=True)
    ##########
    A_tot_full = A_tot.tocsc()[:, idx_components]
    C_tot_full = C_tot[idx_components]
    comp.comparison['cnmf_full_frame']['timer'] = time.time() - t1
    comp.comparison['cnmf_full_frame']['ourdata'] = [
        A_tot_full.copy(), C_tot_full.copy()]
#################### ########################
    comp.save_with_compare(istruth=False, params=params_movie, Cn=Cn)
    log_files = glob.glob('*_LOG_*')
    try:
        for log_file in log_files:
            os.remove(log_file)
    except:
        print('Cannot remove log files')
############ assertions ##################
    pb = False
    if (comp.information['differences']['params_movie']):
        print("you need to set the same movie parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)")
        pb = True
    if (comp.information['differences']['params_cnm']):
        print("you need to set the same cnmf parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)")
        pb = True
    if (comp.information['diff']['rig']['isdifferent']):
        print("the rigid shifts are different from the groundtruth ")
        pb = True
    if (comp.information['diff']['cnmpatch']['isdifferent']):
        print("the cnmf on patch produces different results than the groundtruth ")
        pb = True
    if (comp.information['diff']['cnmfull']['isdifferent']):
        print("the cnmf full frame produces different  results than the groundtruth ")
        pb = True

    assert (not pb)
    cnm_refine.r_values = r_values
    cnm_refine.fitness_raw = fitness_raw
    cnm_refine.fitness_delta = fitness_delta
    cnm_refine.Cn2 = Cn2

    #%

#    cnm_init.dview = None
#    save_object(cnm_init,fls[0][:-4]+ '_DS_' + str(ds)+ '_init.pkl')

    return cnm_refine, Cn2, fname_new


#%%
#m = cm.load('/mnt/xfs1/home/agiovann/SOFTWARE/CaImAn/example_movies/demoMovie.tif')
m = cm.load('/opt/local/Data/Johannes/Ahrens/TM_layer20_crop_3000_5150_orig.hdf5')
#%%
initbatch = 100
K = 8
gSig = [2, 2]
rf = 15
stride = 6
#expected_comps =120
base_name_total = 'demo_'
max_shifts = (8, 8)
expected_comps = 400
mot_corr = True
img_min = m[:initbatch].min()
overlaps = (32, 32)
strides = (192, 192)
T1 = m.shape[0] + initbatch
                                       save_base_name='test_mmap',  init_frames_template=100, show_movie=False, remove_blanks=True, n_iter=2, show_template=False)
#%%
[pl.plot(np.array(r[0][-2])) for r in res]
[pl.plot(np.array(r[0][-1])) for r in res]

#%%
[pl.plot(np.array(r[1][-2])) for r in res]
[pl.plot(np.array(r[1][-1])) for r in res]
#%%
pl.imshow(res[0][2], cmap='gray', vmin=300, vmax=400)
#%%
# mr,dim_r,T=cm.load_memmap('test_mmap_d1_48_d2_114_d3_1_order_C_frames_6764_.mmap')
#m = cm.load('test_mmap_d1_48_d2_114_d3_1_order_C_frames_6764_.mmap')
mr, dim_r, T = cm.load_memmap(
    'test_mmap_d1_509_d2_504_d3_1_order_C_frames_1076_.mmap')
m = cm.load('test_mmap_d1_509_d2_504_d3_1_order_C_frames_1076_.mmap')
#%%
res_p, idfl, shape_grid = apply_to_patch(mr, (T,) + dim_r, dview, 24, 8, motion_correct_online, 0,
                                         max_shift_w=4, max_shift_h=4, save_base_name=None,  init_frames_template=100,
                                         show_movie=False, remove_blanks=False, n_iter=2, return_mov=True, use_median_as_template=True)
#%% video
res_p = apply_to_patch(mr, (T,) + dim_r, None, 48, 8, motion_correct_online, 0, show_template=True,
                       max_shift_w=3, max_shift_h=3, save_base_name=None,  init_frames_template=100,
                       show_movie=False, remove_blanks=False, n_iter=2, return_mov=True, use_median_as_template=True)

#%%

for idx, r in enumerate(res_p):
    pl.subplot(shape_grid[0], shape_grid[1], idx + 1)
#    pl.plot(np.reshape(np.ar4ray(r[0][0]).T,(4,-1)).T)
#    pl.plot(np.array(r[0][-2]))
max_comp_update_shape = np.inf                                      # number of shapes to be updated each time (put this to a finite small value to increase speed)
init_files = 1                                                      # number of files used for initialization
online_files = len(fls) - 1                                         # number of files used for online
initbatch = 200                                                     # number of frames for initialization (presumably from the first file)
expected_comps = 300                                                # maximum number of expected components used for memory pre-allocation (exaggerate here)
K = 2                                                               # initial number of components
N_samples = np.ceil(fr*decay_time)                                  # number of timesteps to consider when testing new neuron candidates
thresh_fitness_raw = scipy.special.log_ndtr(-min_SNR)*N_samples     # exceptionality threshold
epochs = 1                                                          # number of passes over the data
len_file = 1000                                                     # upper bound for number of frames in each file (used right below)
T1 = len(fls)*len_file*epochs                                       # total length of all files (if not known use a large number, then truncate at the end)
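
#%% worked example: exceptionality threshold
# A small worked example (values assumed for illustration only) of the exceptionality threshold
# defined above: with a 30 Hz movie, a 0.5 s indicator decay and min_SNR = 2.5, a candidate
# transient is accepted only if the joint log-probability of its N_samples consecutive samples
# under the noise model falls below roughly -76.
import numpy as np
import scipy.special

fr_demo, decay_time_demo, min_SNR_demo = 30, 0.5, 2.5     # assumed frame rate, decay time, SNR
N_samples_demo = np.ceil(fr_demo * decay_time_demo)       # 15 timesteps per transient
thresh_demo = scipy.special.log_ndtr(-min_SNR_demo) * N_samples_demo
print(N_samples_demo, thresh_demo)                        # 15.0, approx -76.2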

#%%    Initialize movie

if ds_factor > 1:                                   # load only the first initbatch frames and possibly downsample them
    Y = cm.load(fls[0], subindices = slice(0,initbatch,None)).astype(np.float32).resize(1. / ds_factor, 1. / ds_factor)
else:
    Y =  cm.load(fls[0], subindices = slice(0,initbatch,None)).astype(np.float32)
    
if mot_corr:                                        # perform motion correction on the first initbatch frames
    mc = Y.motion_correct(max_shift, max_shift)
    Y = mc[0].astype(np.float32)
    borders = np.max(mc[1])
else:
    Y = Y.astype(np.float32)
      
img_min = Y.min()                                   # minimum value of movie. Subtract it to make the data non-negative
Y -= img_min
img_norm = np.std(Y, axis=0)                        
img_norm += np.median(img_norm)                     # normalizing factor to equalize the FOV
Y = Y / img_norm[None, :, :]                        # normalize data
#    for file_count, ffll in enumerate(fls):
#        file_name = '/'.join(ffll.split('/')[:-2]+['mmap_tifs']+[ffll.split('/')[-1][:-4]+'tif'])
#        if not os.path.isfile(file_name):
#            fl_temp = cm.movie(np.array(cm.load(ffll)))
#            fl_temp.save(file_name)
#        print(file_name)
#    print(ind_dataset)
#%%  download and list all files to be processed
for ind_dataset in ID[:]:
    mot_corr = global_params['mot_corr']
    use_VST = False
    use_mmap = True

    if mot_corr:
        fls = glob.glob('/'.join( params_movie[ind_dataset]['fname'].split('/')[:-3]+['images','tifs','*.tif']))
        template = cm.load( '/'.join( params_movie[ind_dataset]['fname'].split('/')[:-3]+['projections','median_projection.tif']))
    else:
        if use_mmap:
            fls = glob.glob('/'.join( params_movie[ind_dataset]['fname'].split('/')[:-3]+['images','mmap','*.mmap']))
        elif not use_VST:
            fls = glob.glob('/'.join( params_movie[ind_dataset]['fname'].split('/')[:-3]+['images','mmap_tifs','*.tif']))
        else:
            fls = glob.glob('/'.join( params_movie[ind_dataset]['fname'].split('/')[:-3]+['images','tiff_VST','*.tif']))

    fls.sort()
    print(fls)

    #%% Set up some parameters
    ds_factor = params_movie[ind_dataset]['ds_factor']                            # spatial downsampling factor (increases speed but may lose some fine structure)
    gSig = tuple(np.ceil(np.array(params_movie[ind_dataset]['gSig'])/ds_factor).astype(int))  # expected half size of neurons
    init_files = 1                                                       # number of files used for initialization
            Y2 = np.clip(Y2, 0, PictureShape[1])
            # add all the lines to the mask
#         mask = cv2.line(mask, (Y1,X1),(Y2,X2), [0, 0, 100], 2)
            mask = cv2.arrowedLine(mask, (Y1, X1), (Y2, X2), [100, 0, 0], 1)

    # superpose lines onto image
    img = cv2.add(Image / np.max(Image) * 2, mask)
    # return the composite image (original frame with the flow arrows overlaid)
    return img


#%%
if False:
    #%%
    import caiman as cm
    m = cm.load(
        '/mnt/ceph/neuro/Sue_2000_els_opencv__d1_512_d2_512_d3_1_order_F_frames_2000_.hdf5').resize(1, 1, .1)
    templ = np.nanmean(m, 0)
    new_templ = templ * 1. / np.max(np.nanmean(templ, 0)) / 2
    new_templ[np.isnan(new_templ)] = np.nanmean(new_templ)
#    min_val = np.min(new_templ)
    new_templ = new_templ + 0.1

    #%%
    a, b = 256, 256
    n = 512
    r = 280

    y, x = np.ogrid[-a:n - a, -b:n - b]
    mask = x * x + y * y <= r * r

    array = np.zeros((n, n), dtype=np.float32)
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)
# %% LOAD MEMMAP FILE
# fname_new='Yr_d1_501_d2_398_d3_1_order_F_frames_369_.mmap'
Yr, dims, T = cm.load_memmap(fname_new)
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')
Y = np.reshape(Yr, dims + (T,), order='F')
m_images = cm.movie(images)
# %% correlation image
if m_images.shape[0] < 10000:
    Cn = m_images.local_correlations(
        swap_dim=params_movie['swap_dim'], frames_per_chunk=1500)
    Cn[np.isnan(Cn)] = 0
else:
    Cn = np.array(cm.load(('/'.join(params_movie['gtname'][0].split('/')[:-2] + [
                  'projections', 'correlation_image_better.tif'])))).squeeze()
pl.imshow(Cn, cmap='gray', vmax=.95)
# TODO: show screenshot 11
#%%
import cv2
if '.mat' not in params_movie['seed_name'][0]:
    roi_cons = np.load(params_movie['seed_name'][0])
else:
    roi_cons = scipy.io.loadmat(params_movie['seed_name'][0])['comps'].reshape(
        (dims[1], dims[0], -1), order='F').transpose([2, 1, 0]) * 1.

radius = int(np.median(np.sqrt(np.sum(roi_cons, (1, 2)) / np.pi)))

print(radius)
#roi_cons = caiman.base.rois.nf_read_roi_zip('/mnt/ceph/neuro/labeling/neurofinder.03.00.test/regions/ben_active_regions_nd_sonia_active_regions_nd__lindsey_active_regions_nd_matches.zip',dims)
#roi_cons = np.concatenate([roi_cons, caiman.base.rois.nf_read_roi_zip('/mnt/ceph/neuro/labeling/neurofinder.03.00.test/regions/intermediate_regions/ben_active_regions_nd_sonia_active_regions_nd__lindsey_active_regions_nd_1_mismatches.zip',dims)],0)
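#%% sanity check of the radius estimate above
# A minimal check on synthetic data (sizes assumed for illustration): the pixel area of a binary
# disk mask, divided by pi and square-rooted, recovers the disk radius.
import numpy as np

yy, xx = np.ogrid[:64, :64]
disk = ((yy - 32) ** 2 + (xx - 32) ** 2 <= 10 ** 2).astype(float)   # one ROI of radius 10
rois_demo = disk[None, :, :]                                        # stack of ROI masks (n_rois, d1, d2)
radius_demo = int(np.median(np.sqrt(np.sum(rois_demo, (1, 2)) / np.pi)))
print(radius_demo)   # approx 10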
for fl in new_fls:
    if os.path.exists(fl[:-3] + 'npz'):
        print((fl[:-3] + 'npz'))
        with np.load(fl[:-3] + 'npz') as ld:
            xy_shifts.append(ld['shifts'])
    else:
        raise Exception('*********************** ERROR, FILE NOT EXISTING!!!')
#%%
resize_facts = (1, 1, .2)
name_new = cm.save_memmap_each(
    new_fls, dview=c[:], base_name=None, resize_fact=resize_facts, remove_init=0, xy_shifts=xy_shifts)
#%%
fname_new = cm.save_memmap_join(
    name_new, base_name='TOTAL_', n_chunks=6, dview=c[:])
#%%
m = cm.load('TOTAL__d1_512_d2_512_d3_1_order_C_frames_2300_.mmap', fr=6)
#%%
tmp = np.median(m, 0)
#%%
Cn = m.local_correlations(eight_neighbours=True, swap_dim=False)
pl.imshow(Cn, cmap='gray')
#%%
lq, hq = np.percentile(tmp, [10, 98])
pl.imshow(tmp, cmap='gray', vmin=lq, vmax=hq)
#%%
pl.imshow(tmp[10:160, 120:450], cmap='gray', vmin=lq, vmax=hq)
#%%
m1 = m[:, 10:160, 120:450]
m1.save('MOV_EXAMPLE_20160706154257.tif')
#%%
name_new = cm.save_memmap_each(
#%% play the movie
# Playing the movie uses OpenCV and requires loading the movie into memory.
# To close the video window press q.

m_orig = cm.load_movie_chain(fname[:1])
downsample_ratio = 0.2
offset_mov = -np.min(m_orig[:100])
m_orig.resize(1, 1, downsample_ratio).play(
    gain=10, offset=offset_mov, fr=30, magnification=1)

#%% start a cluster for parallel processing
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)
#%%% MOTION CORRECTION
# first we create a motion correction object with the parameters specified
min_mov = cm.load(fname[0], subindices=range(200)).min()
# this will be subtracted from the movie to make it non-negative

mc = MotionCorrect(fname[0], min_mov,
                   dview=dview, max_shifts=max_shifts, niter_rig=niter_rig,
                   splits_rig=splits_rig,
                   strides=strides, overlaps=overlaps, splits_els=splits_els,
                   upsample_factor_grid=upsample_factor_grid,
                   max_deviation_rigid=max_deviation_rigid,
                   shifts_opencv=True, nonneg_movie=True)
# note that the file is not loaded in memory

#%% Run piecewise-rigid motion correction using NoRMCorre
mc.motion_correct_rigid(save_movie=True)
#%%
m_els = cm.load(mc.fname_tot_rig)
#%
from caiman.source_extraction.cnmf import cnmf as cnmf
from caiman.components_evaluation import evaluate_components
from caiman.utils.visualization import plot_contours, view_patches_bar
from caiman.base.rois import extract_binary_masks_blob
from caiman.behavior import behavior
from scipy.sparse import coo_matrix
from caiman.utils.utils import download_demo

#%%
fname = [u'demo_behavior.h5']
if fname[0] in ['demo_behavior.h5']:
    # TODO: todocument
    fname = [download_demo(fname[0])]
# TODO: todocument
m = cm.load(fname[0], is_behavior=True)

#%% load, rotate and eliminate useless pixels
m = m.transpose([0, 2, 1])
m = m[:, 150:, :]
#%% visualize movie
m.play()
#%% select interesting portion of the FOV (draw a polygon on the figure that pops up, when done press enter)
mask = np.array(behavior.select_roi(np.median(m[::100], 0), 1)[0], np.float32)
#%%
n_components = 4  # number of movements to look for
resize_fact = 0.5  # for computational efficiency movies are downsampled
# number of standard deviations above the mean magnitude required to trust the angle measured in polar coordinates
num_std_mag_for_angle = .6
only_magnitude = False  # if only interested in factorizing over the magnitude
method_factorization = 'dict_learn'  # could also use nmf
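#%% sketch: how num_std_mag_for_angle is meant to act
# A minimal sketch on synthetic magnitudes (not the CaImAn behavior API): angles are only
# measured where the optical-flow magnitude exceeds the mean by num_std_mag_for_angle
# standard deviations. All values below are assumptions for illustration.
import numpy as np

rng = np.random.default_rng(0)
magnitude = rng.exponential(1.0, size=1000)              # assumed flow magnitudes per frame
num_std_mag_for_angle_demo = 0.6
thr = magnitude.mean() + num_std_mag_for_angle_demo * magnitude.std()
reliable = magnitude > thr                               # frames where the angle is trusted
print(thr, reliable.mean())                              # threshold and fraction of reliable frames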
font = {'family': 'Myriad Pro',
        'weight': 'regular',
        'size': 10}
pl.rc('font', **font)

for folder_out in folders_out[:]:
    projection_img_median = folder_out + '/projections/median_projection.tif'
    projection_img_correlation = folder_out + '/projections/correlation_image.tif'
    folder_in = folder_out + '/regions'
    performance_all = dict()
    fls = list(glob.glob(folder_in + '/*_nd.zip'))
    consensus_counter = dict()
    fl1 = os.path.join(folder_in, 'joined_consensus_active_regions.zip')
    for fl2 in fls:
        print([fl1, fl2])
        Cn = cm.load(projection_img_correlation)
        shape = Cn.shape

        roi_1, names_1 = nf_read_roi_zip(fl1, shape, return_names=True)
        roi_2, names_2 = nf_read_roi_zip(fl2, shape, return_names=True)
    #    pl.figure()
    #    pl.imshow(np.sum(roi_1,0),cmap = 'gray',vmax=2,alpha=.5)
    #    pl.imshow(np.sum(roi_2,0),cmap = 'hot',vmax=2,alpha=.5)
        lab1, lab2 = fl1.split('/')[-1][:-4], fl2.split('/')[-1][:-4]
    #    pl.figure(figsize=(15,10))
        tp_gt, tp_comp, fn_gt, fp_comp, performance = cm.base.rois.nf_match_neurons_in_binary_masks(roi_1, roi_2, thresh_cost=.7, min_dist=10,
                                                                                                    print_assignment=False, plot_results=False, Cn=Cn, labels=[lab1, lab2])

        performance['tp_gt'] = tp_gt
        performance['tp_comp'] = tp_comp
        performance['fn_gt'] = fn_gt
import time
import pylab as pl
import psutil
import sys
from ipyparallel import Client
from tifffile import TiffFile
import scipy
#%%
from caiman.motion_correction import tile_and_correct, motion_correction_piecewise
from caiman.source_extraction.cnmf import cnmf as cnmf
from caiman.components_evaluation import evaluate_components
from caiman.utils.visualization import plot_contours, view_patches_bar
from caiman.base.rois import extract_binary_masks_blob

#%%
m = cm.load('example_movies/demoMovie.tif')

cm.concatenate([m.resize(1, 1, .2), m.resize(1, 1, .2)],
               axis=1).play(fr=20, gain=3., magnification=3)
#%% set parameters and create template by RIGID MOTION CORRECTION
# params_movie = {'fname':'example_movies/Sue_2x_3000_40_-46.tif',
#                'max_shifts':(6,6), # maximum allow rigid shift
#                'splits_rig':56, # for parallelization, split the movies into num_splits chunks across time
#                'num_splits_to_process_rig':None, # if None all the splits are processed and the movie is saved
#                'strides': (48,48), # intervals at which patches are laid out for motion correction
#                'overlaps': (24,24), # overlap between patches (patch size = strides + overlaps)
#                'splits_els':56, # for parallelization, split the movies into num_splits chunks across time
#                'num_splits_to_process_els':[28,None], # if none all the splits are processed and the movie is saved
#                'upsample_factor_grid':4, # upsample factor to avoid smearing when merging patches
#                'max_deviation_rigid':3, #maximum deviation allowed for patch with respect to rigid shift
#                'p': 1, # order of the autoregressive system
Example #54
0
def save_memmap(filenames, base_name='Yr', resize_fact=(1, 1, 1), remove_init=0, idx_xy=None, order='F',xy_shifts=None,is_3D=False,add_to_movie=0,border_to_0=0):

    """ Saves efficiently a list of tif files into a memory mappable file
    Parameters
    ----------
        filenames: list
            list of tif files
        base_name: str
            the base used to build the file name. IT MUST NOT CONTAIN "_"    
        resize_fact: tuple
            x, y, and z downsampling factors (0.5 means downsampled by a factor of 2)
        remove_init: int
            number of frames to remove at the beginning of each tif file (used for resonant scanning images if the laser is turned on trial by trial)
        idx_xy: tuple size 2 [or 3 for 3D data]
            for selecting slices of the original FOV, for instance idx_xy=(slice(150,350,None),slice(150,350,None))
        order: string
            whether to save the file in 'C' or 'F' order     
        xy_shifts: list 
            x and y shifts computed by a motion correction algorithm to be applied before memory mapping    

        is_3D: boolean
            whether it is 3D data
    Returns
    -------
        fname_new: the name of the mapped file; the name encodes the frame dimensions and the number of frames

    """


    #TODO: can be done online    
    Ttot = 0
    for idx, f in enumerate(filenames):
        print(f)

        if is_3D:
            import tifffile                       
            print("Using tifffile library instead of skimage because of  3D")

            if idx_xy is None:
                Yr = tifffile.imread(f)[remove_init:]
            elif len(idx_xy) == 2:
                Yr = tifffile.imread(f)[remove_init:, idx_xy[0], idx_xy[1]]
            else:
                Yr = tifffile.imread(f)[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]     

#        elif :
#            
#            if xy_shifts is not None:
#                raise Exception('Calblitz not installed, you cannot motion correct')
#                
#            if idx_xy is None:
#                Yr = imread(f)[remove_init:]
#            elif len(idx_xy) == 2:
#                Yr = imread(f)[remove_init:, idx_xy[0], idx_xy[1]]
#            else:
#                raise Exception('You need to set is_3D=True for 3D data)')                  

        else:

            Yr=cm.load(f,fr=1)            
            if xy_shifts is not None:
                Yr=Yr.apply_shifts(xy_shifts,interpolation='cubic',remove_blanks=False)

            if idx_xy is None:
                Yr = np.array(Yr)[remove_init:]
            elif len(idx_xy) == 2:
                Yr = np.array(Yr)[remove_init:, idx_xy[0], idx_xy[1]]
            else:
                raise Exception('You need to set is_3D=True for 3D data)')
                Yr = np.array(Yr)[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]

        if border_to_0>0:
            min_mov=np.min(Yr)
            Yr[:,:border_to_0,:]=min_mov
            Yr[:,:,:border_to_0]=min_mov
            Yr[:,:,-border_to_0:]=min_mov
            Yr[:,-border_to_0:,:]=min_mov

        fx, fy, fz = resize_fact
        if fx != 1 or fy != 1 or fz != 1:

            Yr = cm.movie(Yr, fr=1)
            Yr = Yr.resize(fx=fx, fy=fy, fz=fz)


        T, dims = Yr.shape[0], Yr.shape[1:]
        Yr = np.transpose(Yr, list(range(1, len(dims) + 1)) + [0])
        Yr = np.reshape(Yr, (np.prod(dims), T), order='F')

        if idx == 0:
            fname_tot = base_name + '_d1_' + str(dims[0]) + '_d2_' + str(dims[1]) + '_d3_' + str(
                1 if len(dims) == 2 else dims[2]) + '_order_' + str(order)
            fname_tot = os.path.join(os.path.split(f)[0],fname_tot)         
            big_mov = np.memmap(fname_tot, mode='w+', dtype=np.float32,
                                shape=(np.prod(dims), T), order=order)
        else:
            big_mov = np.memmap(fname_tot, dtype=np.float32, mode='r+',
                                shape=(np.prod(dims), Ttot + T), order=order)
        #    np.save(fname[:-3]+'npy',np.asarray(Yr))

        big_mov[:, Ttot:Ttot + T] = np.asarray(Yr, dtype=np.float32) + 1e-10 + add_to_movie
        big_mov.flush()
        del big_mov
        Ttot = Ttot + T

    fname_new = fname_tot + '_frames_' + str(Ttot) + '_.mmap'
    os.rename(fname_tot, fname_new)

    return fname_new
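
# A minimal usage sketch for the save_memmap helper above. The file names and factor values are
# assumptions for illustration only; they are not files referenced elsewhere in this document.
demo_files = ['movie_part_a.tif', 'movie_part_b.tif']       # hypothetical single-plane movies
fname_new_demo = save_memmap(demo_files, base_name='Yr',
                             resize_fact=(1, 1, 0.5),        # downsample the third axis by a factor of 2
                             remove_init=10,                 # drop the first 10 frames of each file
                             order='F', border_to_0=2)       # set a 2-pixel border to the movie minimum
print(fname_new_demo)                                        # e.g. Yr_d1_..._d2_..._frames_..._.mmap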
        cm.start_server(slurm_script=slurm_script)
        pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
        c = Client(ipython_dir=pdir, profile=profile)
    else:
        cm.stop_server()
        cm.start_server()
        c = Client()

    print(('Using ' + str(len(c)) + ' processes'))
    dview = c[:len(c)]
#%% set parameters and create template by rigid motion correction
fname = 'k56_20160608_RSM_125um_41mW_zoom2p2_00001_00034.tif'
#fname = 'M_FLUO_t.tif'
#fname = 'M_FLUO_4.tif'
max_shifts = (12,12)
m = cm.load(fname,subindices=slice(None,None,None))
template = cm.motion_correction.bin_median( m[100:500].copy().motion_correct(max_shifts[0],max_shifts[1],template=None)[0])
pl.imshow(template)
#%%
splits = 28  # for parallelization, split the movies into num_splits chunks across time
new_templ = template
add_to_movie=-np.min(template)
save_movie = False
num_iter = 3 
for iter_ in range(num_iter):
    print(iter_)
    old_templ = new_templ.copy()
    if iter_ == num_iter-1:
        save_movie = True
        print('saving!')
#        templ_to_save = old_templ
#%%
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=True)
#%%
is_patches = True
is_dendrites = True

if is_dendrites == True:
    # THIS METHOD CAN POSSIBLY GIVE INCONSISTENT RESULTS ON SOMAS WHEN NOT USED WITH PATCHES
    init_method = 'local_nmf'
    alpha_snmf = None  # this controls sparsity
else:
    init_method = 'greedy_roi'
    alpha_snmf = None  # 10e2  # this controls sparsity
#%%
m = cm.load('quietBlock.h5_at')
(m - np.min(m, 0)).save('example_movies/quiet_block_1000.tif')
#%% FOR LOADING ALL TIFF FILES IN A FOLDER AND SAVING THEM TO A SINGLE MEMORY-MAPPABLE FILE
fnames = []
base_folder = './example_movies/'  # folder containing the demo files
for file in glob.glob(os.path.join(base_folder, '*.tif')):
    if file.endswith("1000.tif"):
        fnames.append(os.path.abspath(file))
fnames.sort()
if len(fnames) == 0:
    raise Exception("Could not find any file")

print(fnames)
fnames = fnames
#%%
# idx_x=slice(12,500,None)
Example #57
0
overlaps = (32, 32)         # overlap between patches (patch size = strides + overlaps)
splits_els = 50             # for parallelization, split the movies into num_splits chunks across time
upsample_factor_grid = 50    # upsample factor to avoid smearing when merging patches
max_deviation_rigid = 6     # maximum deviation allowed for patch with respect to rigid shifts
dview = Client  # placeholder; replaced by the ipyparallel view returned by setup_cluster below
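
# A small arithmetic sketch of the piecewise-rigid patch geometry described above: patches of
# size strides + overlaps are laid out every `strides` pixels, so neighbouring patches share
# `overlaps` pixels. The FOV size and the strides value are assumptions for illustration.
d1_demo, d2_demo = 512, 512
strides_demo, overlaps_demo = (96, 96), (32, 32)
patch_shape = (strides_demo[0] + overlaps_demo[0], strides_demo[1] + overlaps_demo[1])  # (128, 128)
n_patches = (d1_demo // strides_demo[0]) * (d2_demo // strides_demo[1])                 # 5 * 5 = 25
print(patch_shape, n_patches)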

#%% select video 
fname = os.path.join(os.getcwd(),'Substackmini.tif')  # filename to be processed

#%% start a cluster for parallel processing
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)

#%%% MOTION CORRECTION
# first we create a motion correction object with the parameters specified
min_mov = cm.load(fname, subindices=range(200)).min()  # fname is a single path string here
# this will be subtracted from the movie to make it non-negative

mc = MotionCorrect(fname, min_mov,
                   dview=dview, max_shifts=max_shifts, niter_rig=niter_rig,
                   splits_rig=splits_rig, 
                   strides= strides, overlaps= overlaps, splits_els=splits_els,
                   upsample_factor_grid=upsample_factor_grid,
                   max_deviation_rigid=max_deviation_rigid, 
                   shifts_opencv = True, nonneg_movie = True)
# note that the file is not loaded in memory




Example #58
0
#%%
# params_movie = {'fname':'/Users/agiovann/Dropbox (Simons Foundation)/CaImWorkshop2017/E.Schut/ES_CA2_crop_1p.tif',
#                'p': 1, # order of the autoregressive system
#                'merge_thresh' : 0.8,  # merging threshold, max correlation allowed
#                'rf' : 30,  # half-size of the patches in pixels. rf=25, patches are 50x50
#                'stride_cnmf' : 15,  # amount of overlap between the patches in pixels
#                'K' : 4,  #  number of components per patch
#                'is_dendrites': False,  # if dendritic. In this case you need to set init_method to sparse_nmf
#                'init_method' : 'greedy_roi',
#                'gSig' : [10, 10],  # expected half size of neurons
#                'alpha_snmf' : None,  # this controls sparsity
#                'final_frate' : 30
#                }
#%%
m_orig = cm.load(params_movie['fname'])
#%% start local cluster
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)
#%% RIGID MOTION CORRECTION
t1 = time.time()
fname = params_movie['fname']
max_shifts = params_movie['max_shifts']  # maximum allowed shifts
num_iter = params_movie['niter_rig']  # number of times the algorithm is run
# for parallelization, split the movies into num_splits chunks across time
splits = params_movie['splits_rig']
# if none all the splits are processed and the movie is saved
num_splits_to_process = params_movie['num_splits_to_process_rig']
shifts_opencv = True  # apply shifts the fast way (but this smooths the results)
save_movie_rigid = True  # save the movies vs just get the template
t1 = time.time()
downsample_ratio = params_display['downsample_ratio']
offset_mov = -np.min(m_orig[:100])
m_orig.resize(1, 1, downsample_ratio).play(
    gain=10, offset=offset_mov, fr=30, magnification=2)

# %% RUN ANALYSIS
c, dview, n_processes = cm.cluster.setup_cluster(
    backend='local', n_processes=None, single_thread=False)

# %% INITIALIZING
t1 = time.time()
# movie must be mostly positive for this to work
# TODO : document
# set a timer to see how changes in the functions affect the runtime on the same computer

min_mov = cm.load(fname[0], subindices=range(400)).min()
mc_list = []
new_templ = None
for each_file in fname:
    # TODO: needinfo how the classes work
    mc = MotionCorrect(each_file, min_mov,
                       dview=dview, max_shifts=max_shifts, niter_rig=niter_rig, splits_rig=splits_rig,
                       num_splits_to_process_rig=num_splits_to_process_rig,
                       strides=strides, overlaps=overlaps, splits_els=splits_els,
                       num_splits_to_process_els=num_splits_to_process_els,
                       upsample_factor_grid=upsample_factor_grid, max_deviation_rigid=max_deviation_rigid,
                       shifts_opencv=True, nonneg_movie=True)
    mc.motion_correct_rigid(save_movie=True, template=new_templ)
    new_templ = mc.total_template_rig
    m_rig = cm.load(mc.fname_tot_rig)
    bord_px_rig = np.ceil(np.max(mc.shifts_rig)).astype(int)
               '/mnt/ceph/neuro/labeling/neurofinder.04.00.test']

foldernames = ['/mnt/ceph/neuro/labeling/FINAL_NO_USED_FOR_CONSENSUS/neurofinder.01.01',
               '/mnt/ceph/neuro/labeling/FINAL_NO_USED_FOR_CONSENSUS/packer.001',
               '/mnt/ceph/neuro/labeling/FINAL_NO_USED_FOR_CONSENSUS/Yi.data.001',
               '/mnt/ceph/neuro/labeling/FINAL_NO_USED_FOR_CONSENSUS/yuste.Single_150u',
               '/mnt/ceph/neuro/labeling/FINAL_NO_USED_FOR_CONSENSUS/Jan-AMG1_exp2_new_001']
#%%

import glob
from caiman.base.rois import detect_duplicates, nf_merge_roi_zip, nf_read_roi_zip
from shutil import copyfile
#%%
for fldname in foldernames:
    current_folder = os.path.join(
        '/mnt/ceph/neuro/labeling', fldname, 'regions')
    img_shape = cm.load(os.path.join('/mnt/ceph/neuro/labeling',
                                     fldname, 'projections/correlation_image.tif')).shape
    filenames = glob.glob(os.path.join(current_folder, '*active*regions.zip'))
    for flname in filenames:
        ind_dup, ind_keep = detect_duplicates(flname, 0.25, FOV=img_shape)
        rois = nf_read_roi_zip(flname, img_shape)
        new_fname = flname[:-4] + '_nd.zip'
        print(flname)
        if not ind_dup:
            copyfile(flname, new_fname)
        else:
            nf_merge_roi_zip([flname], [ind_dup], flname[:-4] + '_copy')
            nf_merge_roi_zip([flname], [ind_keep], new_fname[:-4])
            print('FOUND!!')