Example #1
def see(filename=None):
    """shows you the important data about a certain test file ( just give the number or name)

        if you give nothing it will give you back the groundtruth infos

        Parameters:
        -----------
        self:  dictionnary
           the object of this class tha tcontains every value
        filename:
            ( just give the number or name)

        See Also:
        ---------
        @image html caiman/tests/comparison/data.pdf

            """

    if filename is None:
        dr = os.path.join(caiman_datadir(), "testdata", "groundtruth.npz")
    else:
        dr = os.path.join(caiman_datadir(), "testdata", filename,
                          filename + ".npz")
        print(dr)
    with np.load(dr) as dt:
        print('here is the info :\n')
        see_it(dt)
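A minimal usage sketch (assuming caiman_datadir()/testdata has been populated, e.g. by the test suite, and that see_it is the printing helper defined alongside this function; the filename '1' below is illustrative):

# print the stored groundtruth info, then the info for a named test file
see()                  # reads testdata/groundtruth.npz
see(filename='1')      # reads testdata/1/1.npz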
Example #2
def evaluate_components_CNN(A, dims, gSig, model_name=os.path.join(caiman_datadir(), 'model', 'cnn_model'), patch_size=50, loaded_model=None, isGPU=False):
    """ evaluate component quality using a CNN network

    """

    import os
    if not isGPU:

        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

#    try:
    os.environ["KERAS_BACKEND"] = "tensorflow"
    from keras.models import model_from_json
#    except:
#        print('PROBLEM LOADING KERAS: cannot use classifier')


    if loaded_model is None:
        if os.path.isfile(os.path.join(caiman_datadir(), model_name + ".json")):
            model_file    = os.path.join(caiman_datadir(), model_name + ".json")
            model_weights = os.path.join(caiman_datadir(), model_name + ".h5")
        elif os.path.isfile(model_name + ".json"):
            model_file    = model_name + ".json"
            model_weights = model_name + ".h5"
        else:
            raise FileNotFoundError("File for requested model {} not found".format(model_name))
        with open(model_file, 'r') as json_file:
            loaded_model_json = json_file.read()

        loaded_model = model_from_json(loaded_model_json)
        loaded_model.load_weights(model_weights)   # use the weights path resolved above
        loaded_model.compile('sgd', 'mse')
        print("Loaded model from disk")
    half_crop = np.minimum(
        gSig[0] * 4 + 1, patch_size), np.minimum(gSig[1] * 4 + 1, patch_size)
    dims = np.array(dims)
    coms = [scipy.ndimage.center_of_mass(
        mm.toarray().reshape(dims, order='F')) for mm in A.tocsc().T]
    coms = np.maximum(coms, half_crop)
    coms = np.array([np.minimum(cms, dims - half_crop)
                     for cms in coms]).astype(int)   # np.int was removed in NumPy 1.24
    crop_imgs = [mm.toarray().reshape(dims, order='F')[com[0] - half_crop[0]:com[0] + half_crop[0],
                                                       com[1] - half_crop[1]:com[1] + half_crop[1]] for mm, com in zip(A.tocsc().T, coms)]
    final_crops = np.array([cv2.resize(
        im / np.linalg.norm(im), (patch_size, patch_size)) for im in crop_imgs])
    predictions = loaded_model.predict(
        final_crops[:, :, :, np.newaxis], batch_size=32, verbose=1)

    return predictions, final_crops
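A hedged usage sketch: A is the scipy sparse matrix of spatial footprints produced by CNMF (cnm.estimates.A in recent caiman versions), dims is the FOV shape, and gSig the expected neuron half-size; the 0.75 threshold is illustrative, not a recommended value.

# classify spatial components with the bundled CNN and keep the confident ones
predictions, crops = evaluate_components_CNN(cnm.estimates.A, dims=(60, 80), gSig=[4, 4])
keep = np.where(predictions[:, 0] > 0.75)[0]   # column 0 is treated here as the "is a neuron" score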
Example #4
def handle_args():
    global sourcedir_base
    parser = argparse.ArgumentParser(
        description="Tool to manage Caiman data directory")
    parser.add_argument("command",
                        help="Subcommand to run. install/check/test/demotest")
    parser.add_argument(
        "--inplace",
        action='store_true',
        help=
        "Use only if you did an inplace install of caiman rather than a pure one"
    )
    parser.add_argument(
        "--force",
        action='store_true',
        help=
        "In installs, overwrite parts of an old caiman dir that changed upstream"
    )
    cfg = parser.parse_args()
    if cfg.inplace:
        # In this configuration, the user did a "pip install -e ." and so the share directory was not made.
        # We assume the user is running caimanmanager right out of the source tree, and still want to try to
        # copy the correct files out, which is a little tricky because we never kept track of that before.
        sourcedir_base = os.getcwd()
    cfg.userdir = caiman_datadir()
    return cfg
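A sketch of how the parsed config might be consumed; the dispatch below is a simplified assumption for illustration, not the real caimanmanager main():

cfg = handle_args()
if cfg.command == 'check':
    print('Checking caiman data directory at ' + cfg.userdir)
else:
    print('Unhandled subcommand: ' + cfg.command)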
Example #5
def test_tf():
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    try:
        model_name = os.path.join(caiman_datadir(), 'model', 'cnn_model')
        if use_keras:
            model_file = model_name + ".json"
            with open(model_file, 'r') as json_file:
                print('USING MODEL:' + model_file)
                loaded_model_json = json_file.read()

            loaded_model = model_from_json(loaded_model_json)
            loaded_model.load_weights(model_name + '.h5')
            loaded_model.compile('sgd', 'mse')
        else:
            model_file = model_name + ".h5.pb"
            loaded_model = load_graph(model_file)
    except Exception:
        raise Exception('NN model could not be loaded. use_keras = ' +
                        str(use_keras))

    A = np.random.randn(10, 50, 50, 1)
    try:
        if use_keras:
            predictions = loaded_model.predict(A, batch_size=32)
        else:
            tf_in = loaded_model.get_tensor_by_name('prefix/conv2d_20_input:0')
            tf_out = loaded_model.get_tensor_by_name('prefix/output_node0:0')
            with tf.Session(graph=loaded_model) as sess:
                predictions = sess.run(tf_out, feed_dict={tf_in: A})
    except Exception:
        raise Exception('NN model could not be deployed. use_keras = ' +
                        str(use_keras))
Example #6
def main():
    fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')]

    movie = cm.load(fnames)
    movie = movie.astype(float)   # np.float was removed in NumPy 1.24

    # makes estimation numerically better:
    movie -= movie.mean()

    # use one every 200 frames
    temporal_stride = 200
    # use one every 8 patches (patches are 8x8 by default)
    spatial_stride = 8

    movie_train = movie[::temporal_stride]

    t = timeit.default_timer()
    estimation_res = est.estimate_vst_movie(movie_train, stride=spatial_stride)
    print('\tTime', timeit.default_timer() - t)

    alpha = estimation_res.alpha
    sigma_sq = estimation_res.sigma_sq

    movie_gat = compute_gat(movie, sigma_sq, alpha=alpha)
    # save movie_gat here
    movie_gat_inv = compute_inverse_gat(movie_gat, sigma_sq, alpha=alpha,
                                        method='asym')
    # save movie_gat_inv here
    return movie, movie_gat_inv
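A quick sanity check one might run on the result, assuming compute_gat and compute_inverse_gat are the generalized Anscombe transform and its inverse imported above: the inverse should approximately reconstruct the zero-mean movie.

movie, movie_gat_inv = main()
print('max abs round-trip error:', np.max(np.abs(movie - movie_gat_inv)))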
Example #7
def download_demo(name='Sue_2x_3000_40_-46.tif', save_folder=''):
    """download a file from the file list with the url of its location


    using urllib, you can add you own name and location in this global parameter

        Parameters:
        -----------

        name: str
            the path of the file correspondong to a file in the filelist (''Sue_2x_3000_40_-46.tif' or 'demoMovieJ.tif')

        save_folder: str
            folder inside ./example_movies to which the files will be saved. Will be created if it doesn't exist

    Raise:
    ---------
        WrongFolder Exception


    """

    #\bug
    #\warning

    file_dict = {
        'Sue_2x_3000_40_-46.tif':
        'https://www.dropbox.com/s/09z974vkeg3t5gn/Sue_2x_3000_40_-46.tif?dl=1',
        'demoMovieJ.tif':
        'https://www.dropbox.com/s/8j1cnqubye3asmu/demoMovieJ.tif?dl=1',
        'demo_behavior.h5':
        'https://www.dropbox.com/s/53jmhc9sok35o82/movie_behavior.h5?dl=1',
        'Tolias_mesoscope_1.hdf5':
        'https://www.dropbox.com/s/t1yt35u0x72py6r/Tolias_mesoscope_1.hdf5?dl=1',
        'Tolias_mesoscope_2.hdf5':
        'https://www.dropbox.com/s/i233b485uxq8wn6/Tolias_mesoscope_2.hdf5?dl=1',
        'Tolias_mesoscope_3.hdf5':
        'https://www.dropbox.com/s/4fxiqnbg8fovnzt/Tolias_mesoscope_3.hdf5?dl=1',
        'data_endoscope.tif':
        'https://www.dropbox.com/s/dcwgwqiwpaz4qgc/data_endoscope.tif?dl=1'
    }
    #          ,['./example_movies/demoMovie.tif','https://www.dropbox.com/s/obmtq7305ug4dh7/demoMovie.tif?dl=1']]
    base_folder = os.path.join(caiman_datadir(), 'example_movies')
    if os.path.exists(base_folder):
        if not os.path.isdir(os.path.join(base_folder, save_folder)):
            os.makedirs(os.path.join(base_folder, save_folder))
        path_movie = os.path.join(base_folder, save_folder, name)
        if not os.path.exists(path_movie):
            url = file_dict[name]
            print("downloading " + name + "with urllib")
            f = urlopen(url)
            data = f.read()
            with open(path_movie, "wb") as code:
                code.write(data)
        else:
            print("File already downloaded")
    else:
        raise Exception('You are in ' + os.getcwd() +
                        ' and must be in caiman folder')
    return path_movie
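Typical usage, as a sketch (assumes caiman is imported as cm and that caimanmanager has created the data directory):

path = download_demo('demoMovieJ.tif')   # fetched into example_movies/ on first call
movie = cm.load(path)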
Example #8
def demo(parallel=False):

    p = 2  # order of the AR model (in general 1 or 2)
    if parallel:
        c, dview, n_processes = cm.cluster.setup_cluster(
            backend='local', n_processes=None, single_thread=False)
    else:
        n_processes, dview = 2, None

    # LOAD MOVIE AND MEMORYMAP
    fname_new = cm.save_memmap([os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')],
                               base_name='Yr',
                               order='C')
    Yr, dims, T = cm.load_memmap(fname_new)
    # INIT
    cnm = cnmf.CNMF(n_processes, method_init='greedy_roi', k=30, gSig=[4, 4], merge_thresh=.8,
                    p=p, dview=dview, Ain=None, method_deconvolution='oasis')
    # FIT
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    cnm = cnm.fit(images)
    if parallel:
        cm.cluster.stop_server(dview=dview)

    # verifying the spatial components
    npt.assert_allclose(cnm.A.sum(), 281.1, 1e-2)
    # verifying the temporal components
    npt.assert_allclose(cnm.C.sum(), 66271668, 1e-2)
    try:
        dview.terminate()
    except Exception:   # dview is None in the serial case
        pass
Example #9
def main():
    fnames = [
        os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')
    ]

    movie = cm.load(fnames)
    movie = movie.astype(float)   # np.float was removed in NumPy 1.24

    # makes estimation numerically better:
    movie -= movie.mean()

    # use one every 200 frames
    temporal_stride = 200
    # use one every 8 patches (patches are 8x8 by default)
    spatial_stride = 8

    movie_train = movie[::temporal_stride]

    t = timeit.default_timer()
    estimation_res = est.estimate_vst_movie(movie_train, stride=spatial_stride)
    print('\tTime', timeit.default_timer() - t)

    alpha = estimation_res.alpha
    sigma_sq = estimation_res.sigma_sq

    movie_gat = compute_gat(movie, sigma_sq, alpha=alpha)
    # save movie_gat here
    movie_gat_inv = compute_inverse_gat(movie_gat,
                                        sigma_sq,
                                        alpha=alpha,
                                        method='asym')
    # save movie_gat_inv here
    return movie, movie_gat_inv
Example #10
def download_demo(name:str='Sue_2x_3000_40_-46.tif', save_folder:str='') -> str:
    """download a file from the file list with the url of its location


    using urllib, you can add your own name and location in this global parameter

        Args:
            name: str
                the path of the file corresponding to a file in the filelist ('Sue_2x_3000_40_-46.tif' or 'demoMovieJ.tif')
    
            save_folder: str
                folder inside ./example_movies to which the files will be saved. Will be created if it doesn't exist
        Returns:
            Path of the saved file
    Raise:
        WrongFolder Exception
    """

    #\bug
    #\warning

    file_dict = {'Sue_2x_3000_40_-46.tif': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/Sue_2x_3000_40_-46.tif',
                 'demoMovieJ.tif': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/demoMovieJ.tif',
                 'demo_behavior.h5': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/demo_behavior.h5',
                 'Tolias_mesoscope_1.hdf5': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/Tolias_mesoscope_1.hdf5',
                 'Tolias_mesoscope_2.hdf5': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/Tolias_mesoscope_2.hdf5',
                 'Tolias_mesoscope_3.hdf5': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/Tolias_mesoscope_3.hdf5',
                 'data_endoscope.tif': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/data_endoscope.tif',
                 'gmc_960_30mw_00001_red.tif': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/gmc_960_30mw_00001_red.tif',
                 'gmc_960_30mw_00001_green.tif': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/gmc_960_30mw_00001_green.tif',
                 'msCam13.avi': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/msCam13.avi',
                 'alignment.pickle': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/alignment.pickle',
                 'data_dendritic.tif': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/2014-04-05-003.tif'}
    #          ,['./example_movies/demoMovie.tif','https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/demoMovie.tif']]
    base_folder = os.path.join(caiman_datadir(), 'example_movies')
    if os.path.exists(base_folder):
        if not os.path.isdir(os.path.join(base_folder, save_folder)):
            os.makedirs(os.path.join(base_folder, save_folder))
        path_movie = os.path.join(base_folder, save_folder, name)
        if not os.path.exists(path_movie):
            url = file_dict[name]
            logging.info(f"downloading {name} with urllib")
            logging.info(f"GET {url} HTTP/1.1")
            try:
                f = urlopen(url)
            except Exception:
                logging.info("Trying to set user agent to download demo")
                from urllib.request import Request
                req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
                f = urlopen(req)
            data = f.read()
            with open(path_movie, "wb") as code:
                code.write(data)
        else:
            logging.info("File " + str(name) + " already downloaded")
    else:
        raise Exception('Cannot find the example_movies folder in your caiman_datadir - did you make one with caimanmanager.py?')
    return path_movie
Example #11
def do_run_demotests(targdir: str) -> None:
    out, err, ret = runcmd([os.path.join(caiman_datadir(), "test_demos.sh")])
    if ret != 0:
        print("Demos failed with return code " + str(ret))
        sys.exit(ret)
    else:
        print("Demos success!")
Example #12
    def mrcnn():
        summary_image = np.load(
            '/home/nel/data/voltage_data/volpy_paper/memory/summary.npz'
        )['arr_0']
        config = neurons.NeuronsConfig()

        class InferenceConfig(config.__class__):
            # Run detection on one image at a time
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
            DETECTION_MIN_CONFIDENCE = 0.7
            IMAGE_RESIZE_MODE = "pad64"
            IMAGE_MAX_DIM = 512
            RPN_NMS_THRESHOLD = 0.7
            POST_NMS_ROIS_INFERENCE = 1000

        config = InferenceConfig()
        config.display()
        model_dir = os.path.join(caiman_datadir(), 'model')
        DEVICE = "/cpu:0"  # /cpu:0 or /gpu:0

        with tf.device(DEVICE):
            model = modellib.MaskRCNN(mode="inference",
                                      model_dir=model_dir,
                                      config=config)

        weights_path = download_model('mask_rcnn')
        model.load_weights(weights_path, by_name=True)

        results = model.detect([summary_image], verbose=1)
        r = results[0]
        ROIs = r['masks'].transpose([2, 0, 1])
        return ROIs
Example #13
def main():
    pass  # For compatibility between running under Spyder and the CLI

# %% load data

    fname = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')]

# %% set up some parameters

    fr = 10  # frame rate (Hz)
    decay_time = .75  # approximate length of transient event in seconds
    gSig = [6, 6]  # expected half size of neurons
    p = 1  # order of AR indicator dynamics
    min_SNR = 1  # minimum SNR for accepting candidate components
    thresh_CNN_noisy = 0.65  # CNN threshold for candidate components
    gnb = 2  # number of background components
    init_method = 'cnmf'  # initialization method

    # set up CNMF initialization parameters

    init_batch = 400  # number of frames for initialization
    patch_size = 32  # size of patch
    stride = 3  # amount of overlap between patches
    K = 4  # max number of components in each patch

    params_dict = {'fr': fr,
                   'fnames': fname,
                   'decay_time': decay_time,
                   'gSig': gSig,
                   'p': p,
                   'min_SNR': min_SNR,
                   'nb': gnb,
                   'init_batch': init_batch,
                   'init_method': init_method,
                   'rf': patch_size//2,
                   'stride': stride,
                   'sniper_mode': True,
                   'thresh_CNN_noisy': thresh_CNN_noisy,
                   'K': K}
    opts = cnmf.params.CNMFParams(params_dict=params_dict)
# %% fit with online object
    cnm = cnmf.online_cnmf.OnACID(params=opts)
    cnm.fit_online()

# %% plot contours

    logging.info('Number of components:' + str(cnm.estimates.A.shape[-1]))
    Cn = cm.load(fname[0], subindices=slice(0,500)).local_correlations(swap_dim=False)
    cnm.estimates.plot_contours(img=Cn)

# %% pass through the CNN classifier with a low threshold (keeps clearer neuron shapes and excludes processes)
    use_CNN = True
    if use_CNN:
        # threshold for CNN classifier
        opts.set('quality', {'min_cnn_thr': 0.05})
        cnm.estimates.evaluate_components_CNN(opts)
        cnm.estimates.plot_contours(img=Cn, idx=cnm.estimates.idx_components)
# %% plot results
    cnm.estimates.view_components(img=Cn, idx=cnm.estimates.idx_components)
Example #14
def get_run_logdir():
    """ Returns the path to the directory where the model will be saved.
    The directory will be located inside caiman_data/my_logs.
    """
    root_logdir = os.path.join(caiman_datadir(), "my_logs")
    if not os.path.exists(root_logdir):
        os.mkdir(root_logdir)
    run_id = time.strftime("run_%Y_%m_%d-%H_%M_%S")
    return os.path.join(root_logdir, run_id)
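A hedged example of how such per-run directories are usually consumed, e.g. by a Keras TensorBoard callback (the model and training data are placeholders, not part of this module):

import tensorflow as tf
run_logdir = get_run_logdir()
tensorboard_cb = tf.keras.callbacks.TensorBoard(log_dir=run_logdir)
# model.fit(x_train, y_train, callbacks=[tensorboard_cb])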
Example #15
def download_demo(name='Sue_2x_3000_40_-46.tif', save_folder=''):
    """download a file from the file list with the url of its location


    using urllib, you can add your own name and location in this global parameter

        Args:
            name: str
                the path of the file corresponding to a file in the filelist ('Sue_2x_3000_40_-46.tif' or 'demoMovieJ.tif')
    
            save_folder: str
                folder inside ./example_movies to which the files will be saved. Will be created if it doesn't exist

    Raise:
        WrongFolder Exception
    """

    #\bug
    #\warning

    file_dict = {
        'Sue_2x_3000_40_-46.tif':
        'https://users.flatironinstitute.org/~neuro/caiman_downloadables/Sue_2x_3000_40_-46.tif',
        'demoMovieJ.tif':
        'https://users.flatironinstitute.org/~neuro/caiman_downloadables/demoMovieJ.tif',
        'demo_behavior.h5':
        'https://users.flatironinstitute.org/~neuro/caiman_downloadables/demo_behavior.h5',
        'Tolias_mesoscope_1.hdf5':
        'https://users.flatironinstitute.org/~neuro/caiman_downloadables/Tolias_mesoscope_1.hdf5',
        'Tolias_mesoscope_2.hdf5':
        'https://users.flatironinstitute.org/~neuro/caiman_downloadables/Tolias_mesoscope_2.hdf5',
        'Tolias_mesoscope_3.hdf5':
        'https://users.flatironinstitute.org/~neuro/caiman_downloadables/Tolias_mesoscope_3.hdf5',
        'data_endoscope.tif':
        'https://users.flatironinstitute.org/~neuro/caiman_downloadables/data_endoscope.tif'
    }
    #          ,['./example_movies/demoMovie.tif','https://users.flatironinstitute.org/~neuro/caiman_downloadables/demoMovie.tif']]
    base_folder = os.path.join(caiman_datadir(), 'example_movies')
    if os.path.exists(base_folder):
        if not os.path.isdir(os.path.join(base_folder, save_folder)):
            os.makedirs(os.path.join(base_folder, save_folder))
        path_movie = os.path.join(base_folder, save_folder, name)
        if not os.path.exists(path_movie):
            url = file_dict[name]
            logging.info("downloading " + str(name) + " with urllib")
            f = urlopen(url)
            data = f.read()
            with open(path_movie, "wb") as code:
                code.write(data)
        else:
            logging.info("File " + str(name) + " already downloaded")
    else:
        raise Exception(
            'Cannot find the example_movies folder in your caiman_datadir - did you make one with caimanmanager.py?'
        )
    return path_movie
Example #16
def mrcnn_inference(img, weights_path, display_result=True):
    """ Mask R-CNN inference in VolPy
    Args: 
        img: 2-D array
            summary images for detection
            
        weights_path: str
            path for Mask R-CNN weight
            
        display_result: boolean
            if True, the function will plot the result of inference
        
    Return:
        ROIs: 3-D array
            region of interests 
            (# of components * # of pixels in x dim * # of pixels in y dim)
    """
    from caiman.source_extraction.volpy.mrcnn import visualize, neurons
    import caiman.source_extraction.volpy.mrcnn.model as modellib
    config = neurons.NeuronsConfig()

    class InferenceConfig(config.__class__):
        # Run detection on one img at a time
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        DETECTION_MIN_CONFIDENCE = 0.7
        IMAGE_RESIZE_MODE = "pad64"
        IMAGE_MAX_DIM = 512
        RPN_NMS_THRESHOLD = 0.7
        POST_NMS_ROIS_INFERENCE = 1000

    config = InferenceConfig()
    config.display()
    model_dir = os.path.join(caiman_datadir(), 'model')
    DEVICE = "/cpu:0"  # /cpu:0 or /gpu:0
    with tf.device(DEVICE):
        model = modellib.MaskRCNN(mode="inference",
                                  model_dir=model_dir,
                                  config=config)
    model.load_weights(weights_path, by_name=True)
    results = model.detect([img], verbose=1)
    r = results[0]
    ROIs = r['masks'].transpose([2, 0, 1])

    if display_result:
        _, ax = plt.subplots(1, 1, figsize=(16, 16))
        visualize.display_instances(img,
                                    r['rois'],
                                    r['masks'],
                                    r['class_ids'], ['BG', 'neurons'],
                                    r['scores'],
                                    ax=ax,
                                    title="Predictions")
    return ROIs
Example #17
def download_model(name: str = 'mask_rcnn', save_folder: str = '') -> str:
    """download a NN model from the file list with the url of its location


    using urllib, you can add your own name and location in this global parameter

        Args:
            name: str
                the path of the file corresponding to a file in the filelist
    
            save_folder: str
                folder inside caiman_data/model to which the files will be saved. Will be created if it doesn't exist
        Returns:
            Path of the saved file
    Raise:
        WrongFolder Exception
    """

    #\bug
    #\warning

    file_dict = {
        'mask_rcnn':
        'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/model/mask_rcnn_neurons_0040.h5'
    }
    base_folder = os.path.join(caiman_datadir(), 'model')
    if os.path.exists(base_folder):
        if not os.path.isdir(os.path.join(base_folder, save_folder)):
            os.makedirs(os.path.join(base_folder, save_folder))
        path_movie = os.path.join(base_folder, save_folder, name)
        if not os.path.exists(path_movie):
            url = file_dict[name]
            logging.info(f"downloading {name} with urllib")
            logging.info(f"GET {url} HTTP/1.1")
            try:
                f = urlopen(url)
            except Exception:
                logging.info("Trying to set user agent to download demo")
                from urllib.request import Request
                req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
                f = urlopen(req)

            data = f.read()
            with open(path_movie, "wb") as code:
                code.write(data)
        else:
            logging.info("File " + str(name) + " already downloaded")
    else:
        raise Exception(
            'Cannot find the model folder in your caiman_datadir - did you make one with caimanmanager.py?'
        )
    return path_movie
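A sketch combining this with mrcnn_inference from Example #16 (summary_img is an assumed 2-D summary image from the VolPy pipeline):

weights_path = download_model('mask_rcnn')
ROIs = mrcnn_inference(summary_img, weights_path, display_result=False)
print('detected', ROIs.shape[0], 'candidate neurons')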
Example #18
def download_demo(name='Sue_2x_3000_40_-46.tif', save_folder=''):
    """download a file from the file list with the url of its location


    using urllib, you can add your own name and location in this global parameter

        Parameters:
        -----------

        name: str
            the path of the file corresponding to a file in the filelist ('Sue_2x_3000_40_-46.tif' or 'demoMovieJ.tif')

        save_folder: str
            folder inside ./example_movies to which the files will be saved. Will be created if it doesn't exist

    Raise:
    ---------
        WrongFolder Exception


    """

    #\bug
    #\warning

    file_dict = {'Sue_2x_3000_40_-46.tif': 'https://www.dropbox.com/s/09z974vkeg3t5gn/Sue_2x_3000_40_-46.tif?dl=1',
                 'demoMovieJ.tif': 'https://www.dropbox.com/s/8j1cnqubye3asmu/demoMovieJ.tif?dl=1',
                 'demo_behavior.h5': 'https://www.dropbox.com/s/53jmhc9sok35o82/movie_behavior.h5?dl=1',
                 'Tolias_mesoscope_1.hdf5': 'https://www.dropbox.com/s/t1yt35u0x72py6r/Tolias_mesoscope_1.hdf5?dl=1',
                 'Tolias_mesoscope_2.hdf5': 'https://www.dropbox.com/s/i233b485uxq8wn6/Tolias_mesoscope_2.hdf5?dl=1',
                 'Tolias_mesoscope_3.hdf5': 'https://www.dropbox.com/s/4fxiqnbg8fovnzt/Tolias_mesoscope_3.hdf5?dl=1',
                 'data_endoscope.tif': 'https://www.dropbox.com/s/dcwgwqiwpaz4qgc/data_endoscope.tif?dl=1'}
    #          ,['./example_movies/demoMovie.tif','https://www.dropbox.com/s/obmtq7305ug4dh7/demoMovie.tif?dl=1']]
    base_folder = os.path.join(caiman_datadir(), 'example_movies')
    if os.path.exists(base_folder):
        if not os.path.isdir(os.path.join(base_folder, save_folder)):
            os.makedirs(os.path.join(base_folder, save_folder))
        path_movie = os.path.join(base_folder, save_folder, name)
        if not os.path.exists(path_movie):
            url = file_dict[name]
            print("downloading " + name + "with urllib")
            f = urlopen(url)
            data = f.read()
            with open(path_movie, "wb") as code:
                code.write(data)
        else:
            print("File already downloaded")
    else:
        raise Exception('Cannot find the example_movies folder in your caiman_datadir - did you make one with caimanmanager.py?')
    return path_movie
Example #19
def get_caiman_version() -> Tuple[str, str]:
    """ Get the version of CaImAn, as best we can determine"""
    # This does its best to determine the version of CaImAn. This uses the first successful
    # from these methods:
    # 'GITW' ) git rev-parse if caiman is built from "pip install -e ." and we are working
    #    out of the checkout directory (the user may have since updated without reinstall)
    # 'RELF') A release file left in the process to cut a release. Should have a single line
    #    in it which looks like "Version:1.4"
    # 'FILE') The date of some frequently changing files, which act as a very rough
    #    approximation when no other methods are possible
    #
    # Data is returned as a tuple of method and version, with method being the 4-letter string above
    # and version being a format-dependent string

    # Attempt 'GITW'.
    # TODO:
    # A) Find a place to do it that's better than cwd
    # B) Hide the output from the terminal
    try:
        rev = subprocess.check_output(
            ["git", "rev-parse", "HEAD"],
            stderr=subprocess.DEVNULL).decode("utf-8").split("\n")[0]
    except Exception:
        rev = None
    if rev is not None:
        return 'GITW', rev

    # Attempt: 'RELF'
    relfile = os.path.join(caiman_datadir(), 'RELEASE')
    if os.path.isfile(relfile):
        with open(relfile, 'r') as sfh:
            for line in sfh:
                if ':' in line:  # expect a line like "Version:1.3"
                    _, version = line.rstrip().split(':')
                    return 'RELF', version

    # Attempt: 'FILE'
    # Right now this samples the utils directory
    modpath = os.path.dirname(
        inspect.getfile(caiman.utils)
    )  # Probably something like /mnt/home/pgunn/miniconda3/envs/caiman/lib/python3.7/site-packages/caiman
    newest = 0
    for fn in os.listdir(modpath):
        last_modified = os.stat(os.path.join(modpath, fn)).st_mtime
        if last_modified > newest:
            newest = last_modified
    return 'FILE', str(int(newest))
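Usage is straightforward; the method tag tells you how trustworthy the version string is:

method, version = get_caiman_version()
print('CaImAn version ({}): {}'.format(method, version))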
Example #20
def demo():

    fname = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')]
    fr = 10                    # frame rate (Hz)
    decay_time = .75           # approximate length of transient event in seconds
    gSig = [6, 6]              # expected half size of neurons
    p = 1                      # order of AR indicator dynamics
    min_SNR = 1                # minimum SNR for accepting candidate components
    thresh_CNN_noisy = 0.65    # CNN threshold for candidate components
    gnb = 2                    # number of background components
    init_method = 'cnmf'       # initialization method

    # set up CNMF initialization parameters

    init_batch = 400   # number of frames for initialization
    patch_size = 32    # size of patch
    stride = 3         # amount of overlap between patches
    K = 4              # max number of components in each patch

    params_dict = {
        'fr': fr,
        'fnames': fname,
        'decay_time': decay_time,
        'gSig': gSig,
        'p': p,
        'motion_correct': False,
        'min_SNR': min_SNR,
        'nb': gnb,
        'init_batch': init_batch,
        'init_method': init_method,
        'rf': patch_size // 2,
        'stride': stride,
        'sniper_mode': True,
        'thresh_CNN_noisy': thresh_CNN_noisy,
        'K': K
    }
    opts = cnmf.params.CNMFParams(params_dict=params_dict)
    cnm = cnmf.online_cnmf.OnACID(params=opts)
    cnm.fit_online()
    cnm.save('test_online.hdf5')
    cnm2 = cnmf.online_cnmf.load_OnlineCNMF('test_online.hdf5')
    npt.assert_allclose(cnm.estimates.A.sum(), cnm2.estimates.A.sum())
    npt.assert_allclose(cnm.estimates.C, cnm2.estimates.C)
Example #21
def demo(parallel=False):

    p = 2  # order of the AR model (in general 1 or 2)
    if parallel:
        c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                         n_processes=None,
                                                         single_thread=False)
    else:
        n_processes, dview = 2, None

    # LOAD MOVIE AND MEMORYMAP
    fname_new = cm.save_memmap(
        [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')],
        base_name='Yr',
        order='C')
    Yr, dims, T = cm.load_memmap(fname_new)
    # INIT
    cnm = cnmf.CNMF(n_processes,
                    method_init='greedy_roi',
                    k=30,
                    gSig=[4, 4],
                    merge_thresh=.8,
                    p=p,
                    dview=dview,
                    Ain=None,
                    method_deconvolution='oasis')
    # FIT
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    cnm = cnm.fit(images)
    if parallel:
        cm.cluster.stop_server(dview=dview)

    # verifying the spatial components
    npt.assert_allclose(cnm.estimates.A.sum(), 281.1, 1e-2)
    # verifying the temporal components
    npt.assert_allclose(cnm.estimates.C.sum(), 66271668, 1e-2)
    try:
        dview.terminate()
    except Exception:   # dview is None in the serial case
        pass
Example #22
import os

import numpy as np

import caiman as cm
from caiman.components_evaluation import estimate_components_quality_auto
from caiman.source_extraction.cnmf import cnmf as cnmf
from caiman.paths import caiman_datadir

#%% start a cluster

c, dview, n_processes =\
    cm.cluster.setup_cluster(backend='local', n_processes=None,
                             single_thread=False)

#%% save files to be processed

# This datafile is distributed with Caiman
fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')]
# location of dataset (can actually be a list of files to be concatenated)
add_to_movie = -np.min(cm.load(fnames[0], subindices=range(200))).astype(float)
# determine minimum value on a small chunk of data
add_to_movie = np.maximum(add_to_movie, 0)
# if minimum is negative subtract to make the data non-negative
base_name = 'Yr'
name_new = cm.save_memmap_each(fnames, dview=dview, base_name=base_name,
                               add_to_movie=add_to_movie)
name_new.sort()
fname_new = cm.save_memmap_join(name_new, base_name='Yr', dview=dview)
#%% LOAD MEMORY MAPPABLE FILE
Yr, dims, T = cm.load_memmap(fname_new)
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')
Example #23
def main():
    pass  # For compatibility between running under Spyder and the CLI

    # %% start a cluster

    c, dview, n_processes =\
        cm.cluster.setup_cluster(backend='local', n_processes=None,
                                 single_thread=False)

    # %% set up some parameters
    fnames = [
        os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')
    ]
    # file(s) to be analyzed
    is_patches = True  # flag for processing in patches or not
    fr = 10  # approximate frame rate of data
    decay_time = 5.0  # length of transient

    if is_patches:  # PROCESS IN PATCHES AND THEN COMBINE
        rf = 10  # half size of each patch
        stride = 4  # overlap between patches
        K = 4  # number of components in each patch
    else:  # PROCESS THE WHOLE FOV AT ONCE
        rf = None  # setting these parameters to None
        stride = None  # will run CNMF on the whole FOV
        K = 30  # number of neurons expected (in the whole FOV)

    gSig = [6, 6]  # expected half size of neurons
    merge_thresh = 0.80  # merging threshold, max correlation allowed
    p = 2  # order of the autoregressive system
    gnb = 2  # global background order

    params_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'rf': rf,
        'stride': stride,
        'K': K,
        'gSig': gSig,
        'merge_thr': merge_thresh,
        'p': p,
        'nb': gnb
    }

    opts = params.CNMFParams(params_dict=params_dict)
    # %% Now RUN CaImAn Batch (CNMF)
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm = cnm.fit_file()

    # %% plot contour plots of components
    Cns = local_correlations_movie_offline(fnames[0],
                                           remove_baseline=True,
                                           swap_dim=False,
                                           window=1000,
                                           stride=1000,
                                           winSize_baseline=100,
                                           quantil_min_baseline=10,
                                           dview=dview)
    Cn = Cns.max(axis=0)
    cnm.estimates.plot_contours(img=Cn)

    # %% load memory mapped file
    Yr, dims, T = cm.load_memmap(cnm.mmap_file)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')

    # %% refit
    cnm2 = cnm.refit(images, dview=dview)

    # %% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier (this will pick up only neurons
    #           and filter out active processes)

    min_SNR = 2  # peak SNR for accepted components (if above this, accept)
    rval_thr = 0.85  # space correlation threshold (if above this, accept)
    use_cnn = True  # use the CNN classifier
    min_cnn_thr = 0.99  # if cnn classifier predicts below this value, reject
    cnn_lowest = 0.1  # neurons with cnn probability lower than this value are rejected

    cnm2.params.set(
        'quality', {
            'min_SNR': min_SNR,
            'rval_thr': rval_thr,
            'use_cnn': use_cnn,
            'min_cnn_thr': min_cnn_thr,
            'cnn_lowest': cnn_lowest
        })

    cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview)

    # %% visualize selected and rejected components
    cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components)
    # %% visualize selected components
    cnm2.estimates.view_components(images,
                                   idx=cnm2.estimates.idx_components,
                                   img=Cn)
    #%% only select high quality components (destructive)
    # cnm2.estimates.select_components(use_object=True)
    # cnm2.estimates.plot_contours(img=Cn)
    #%% save results
    cnm2.estimates.Cn = Cn
    cnm2.save(cnm2.mmap_file[:-4] + 'hdf5')

    # %% play movie with results (original, reconstructed, amplified residual)
    cnm2.estimates.play_movie(images, magnification=4)

    # %% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)

    log_files = glob.glob('Yr*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
Example #24
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%% Select file(s) to be processed (download if not present)
    fnames = [
        os.path.join(caiman_datadir(), 'example_movies/Sue_2x_3000_40_-46.nwb')
    ]
    # estimates save path can be same or different from raw data path
    save_path = os.path.join(
        caiman_datadir(),
        'example_movies/Sue_2x_3000_40_-46_CNMF_estimates.nwb')
    # filename to be created or processed
    # dataset dependent parameters
    fr = 15.  # imaging rate in frames per second
    decay_time = 0.4  # length of a typical transient in seconds

    #%% load the file and save it in the NWB format (if it doesn't exist already)
    if not os.path.exists(fnames[0]):
        fnames_orig = 'Sue_2x_3000_40_-46.tif'  # filename to be processed
        if fnames_orig in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']:
            fnames_orig = [download_demo(fnames_orig)]
        orig_movie = cm.load(fnames_orig, fr=fr)

        # save file in NWB format with various additional info
        orig_movie.save(fnames[0],
                        sess_desc='test',
                        identifier='demo 1',
                        exp_desc='demo movie',
                        imaging_plane_description='single plane',
                        emission_lambda=520.0,
                        indicator='GCAMP6f',
                        location='parietal cortex',
                        experimenter='Sue Ann Koay',
                        lab_name='Tank Lab',
                        institution='Princeton U',
                        experiment_description='Experiment Description',
                        session_id='Session 1',
                        var_name_hdf5='TwoPhotonSeries')
#%% First setup some parameters for data and motion correction

# motion correction parameters
    dxy = (2., 2.)  # spatial resolution in x and y in (um per pixel)
    # note the lower than usual spatial resolution here
    max_shift_um = (12., 12.)  # maximum shift in um
    patch_motion_um = (100., 100.)  # patch size for non-rigid correction in um
    pw_rigid = True  # flag to select rigid vs pw_rigid motion correction
    # maximum allowed rigid shift in pixels
    max_shifts = [int(a / b) for a, b in zip(max_shift_um, dxy)]
    # start a new patch for pw-rigid motion correction every x pixels
    strides = tuple([int(a / b) for a, b in zip(patch_motion_um, dxy)])
    # overlap between patches (size of patch in pixels: strides+overlaps)
    overlaps = (24, 24)
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3

    mc_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'dxy': dxy,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': 'copy',
        'var_name_hdf5': 'acquisition/TwoPhotonSeries'
    }

    opts = params.CNMFParams(params_dict=mc_dict)

    # %% play the movie (optional)
    # playing the movie using opencv. It requires loading the movie in memory.
    # To close the video press q
    display_images = False
    if display_images:
        m_orig = cm.load_movie_chain(fnames,
                                     var_name_hdf5=opts.data['var_name_hdf5'])
        ds_ratio = 0.2
        moviehandle = m_orig.resize(1, 1, ds_ratio)
        moviehandle.play(q_max=99.5, fr=60, magnification=2)

# %% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)

    # %%% MOTION CORRECTION
    # first we create a motion correction object with the specified parameters
    mc = MotionCorrect(fnames,
                       dview=dview,
                       var_name_hdf5=opts.data['var_name_hdf5'],
                       **opts.get_group('motion'))
    # note that the file is not loaded in memory

    # %% Run (piecewise-rigid motion) correction using NoRMCorre
    mc.motion_correct(save_movie=True)

    # %% compare with original movie
    if display_images:
        m_orig = cm.load_movie_chain(fnames,
                                     var_name_hdf5=opts.data['var_name_hdf5'])
        m_els = cm.load(mc.mmap_file)
        ds_ratio = 0.2
        moviehandle = cm.concatenate([
            m_orig.resize(1, 1, ds_ratio) - mc.min_mov * mc.nonneg_movie,
            m_els.resize(1, 1, ds_ratio)
        ],
                                     axis=2)
        moviehandle.play(fr=60, q_max=99.5, magnification=2)  # press q to exit

# %% MEMORY MAPPING
    border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
    # you can include the boundaries of the FOV if you used the 'copy' option
    # during motion correction, although be careful about the components near
    # the boundaries

    # memory map the file in order 'C'
    fname_new = cm.save_memmap(mc.mmap_file,
                               base_name='memmap_',
                               order='C',
                               border_to_0=border_to_0)  # exclude borders

    # now load the file
    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    # load frames in python format (T x X x Y)

    # %% restart cluster to clean up memory
    cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                     n_processes=None,
                                                     single_thread=False)

    # %%  parameters for source extraction and deconvolution
    p = 1  # order of the autoregressive system
    gnb = 2  # number of global background components
    merge_thr = 0.85  # merging threshold, max correlation allowed
    rf = 15
    # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
    stride_cnmf = 6  # amount of overlap between the patches in pixels
    K = 4  # number of components per patch
    gSig = [4, 4]  # expected half size of neurons in pixels
    # initialization method (if analyzing dendritic data using 'sparse_nmf')
    method_init = 'greedy_roi'
    ssub = 2  # spatial subsampling during initialization
    tsub = 2  # temporal subsampling during initialization

    # parameters for component evaluation
    opts_dict = {
        'fnames': fnames,
        'fr': fr,
        'nb': gnb,
        'rf': rf,
        'K': K,
        'gSig': gSig,
        'stride': stride_cnmf,
        'method_init': method_init,
        'rolling_sum': True,
        'merge_thr': merge_thr,
        'n_processes': n_processes,
        'only_init': True,
        'ssub': ssub,
        'tsub': tsub
    }

    opts.change_params(params_dict=opts_dict)
    # %% RUN CNMF ON PATCHES
    # First extract spatial and temporal components on patches and combine them
    # for this step deconvolution is turned off (p=0)

    opts.change_params({'p': 0})
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm = cnm.fit(images)

    # %% ALTERNATE WAY TO RUN THE PIPELINE AT ONCE

    #   you can also perform the motion correction plus cnmf fitting steps
    #   simultaneously after defining your parameters object using
    #  cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview)
    #  cnm1.fit_file(motion_correct=True)

    # %% plot contours of found components
    Cn = cm.local_correlations(images, swap_dim=False)
    Cn[np.isnan(Cn)] = 0
    cnm.estimates.plot_contours(img=Cn)
    plt.title('Contour plots of found components')

    #%% save results in a separate file (just for demonstration purposes)
    cnm.estimates.Cn = Cn
    cnm.save(fname_new[:-4] + 'hdf5')
    #cm.movie(Cn).save(fname_new[:-5]+'_Cn.tif')

    # %% RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution
    cnm.params.change_params({'p': p})
    cnm2 = cnm.refit(images, dview=dview)
    # %% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier
    min_SNR = 2  # signal to noise ratio for accepting a component
    rval_thr = 0.85  # space correlation threshold for accepting a component
    cnn_thr = 0.99  # threshold for CNN based classifier
    cnn_lowest = 0.1  # neurons with cnn probability lower than this value are rejected

    cnm2.params.set(
        'quality', {
            'decay_time': decay_time,
            'min_SNR': min_SNR,
            'rval_thr': rval_thr,
            'use_cnn': True,
            'min_cnn_thr': cnn_thr,
            'cnn_lowest': cnn_lowest
        })
    cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview)
    #%%
    cnm2.estimates.Cn = Cn
    cnm2.save(fname_new[:-4] + 'hdf5')
    # %% PLOT COMPONENTS
    cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components)

    # %% VIEW TRACES (accepted and rejected)

    if display_images:
        cnm2.estimates.view_components(images,
                                       img=Cn,
                                       idx=cnm2.estimates.idx_components)
        cnm2.estimates.view_components(images,
                                       img=Cn,
                                       idx=cnm2.estimates.idx_components_bad)
    #%% update object with selected components
    # cnm2.estimates.select_components(use_object=True)
    #%% Extract DF/F values

    cnm2.estimates.detrend_df_f(quantileMin=8, frames_window=250)

    #%% Show final traces
    cnm2.estimates.view_components(img=Cn)

    #%% reconstruct denoised movie (press q to exit)
    if display_images:
        cnm2.estimates.play_movie(images,
                                  q_max=99.9,
                                  gain_res=2,
                                  magnification=2,
                                  bpx=border_to_0,
                                  include_bck=False)  # background not shown

    #%% STOP CLUSTER and clean up log files

    cm.stop_server(dview=dview)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
    #%% save the results in the original NWB file

    cnm2.estimates.save_NWB(save_path,
                            imaging_rate=fr,
                            session_start_time=datetime.now(tzlocal()),
                            raw_data_file=fnames[0])
Example #25
    def save_with_compare(self,
                          istruth=False,
                          params=None,
                          dview=None,
                          Cn=None):
        """save the comparison as well as the images of the precision recall calculations


            depending on whether we say this file will be ground truth or not, it will be saved in either the tests or the ground truth folder
            if saved in test, a comparison to groundtruth will be added to the object
            this comparison will be on
                data : a normalized difference of the normalized values of the arrays
                time : difference
            in order for this function to work, you must
                have previously given it the cnm objects after initializing them (on patch and full frame)
                give the values of the time and data
                have a groundtruth


            Args:
                self:  dictionary
                   the object of this class that contains every value

                istruth: Boolean
                    if we want it to be the ground truth

                params:
                    movie parameters

                dview :
                    your dview object

                n_frames_per_bin:
                    you need to know those data before
                    they have been given to the base/rois functions

                dims_test:
                    you need to know those data before
                    they have been given to the base/rois functions

                Cn:
                    your correlation image

                Cmap:
                    a particular colormap for your Cn

            See Also:
                Example of usage in the Demo Pipeline
\image caiman/tests/comparison/data.pdf

             Raises:
                 ('we now have ground truth\n')

                 ('we were not able to read the file to compare it\n')

                """
        # getting the DATA FOR COMPARISONS
        assert params is not None and self.cnmpatch is not None, \
            'we need the parameters in order to save anything\n'
        # actions on the sparse matrix
        cnm = self.cnmpatch.__dict__
        cnmpatch = deletesparse(cnm)

        # initialization
        dims_test = [self.dims[0], self.dims[1]]
        dims_gt = dims_test
        dt = datetime.datetime.today()
        dt = str(dt)
        plat = str(plt.platform())     # plt here is assumed to be the platform module, not matplotlib
        pro = str(plt.processor())
        # we store a big file which contains everything (INFORMATION)
        information = {
            'platform': plat,
            'time': dt,
            'processor': pro,
            'params': params,
            'cnmpatch': cnmpatch,
            'timer': {
                'cnmf_on_patch': self.comparison['cnmf_on_patch']['timer'],
                'cnmf_full_frame': self.comparison['cnmf_full_frame']['timer'],
                'rig_shifts': self.comparison['rig_shifts']['timer']
            }
        }

        rootdir = os.path.abspath(cm.__path__[0])[:-7]
        file_path = os.path.join(caiman_datadir(), "testdata",
                                 "groundtruth.npz")

        # OPENINGS
        # if we want to set this data as truth
        if istruth:
            # we just save it
            if os.path.exists(file_path):
                os.remove(file_path)
            else:
                logging.debug("nothing to remove\n")
            np.savez_compressed(
                file_path,
                information=information,
                A_full=self.comparison['cnmf_full_frame']['ourdata'][0],
                C_full=self.comparison['cnmf_full_frame']['ourdata'][1],
                A_patch=self.comparison['cnmf_on_patch']['ourdata'][0],
                C_patch=self.comparison['cnmf_on_patch']['ourdata'][1],
                rig_shifts=self.comparison['rig_shifts']['ourdata'])
            logging.info('we now have ground truth\n')
            return

        else:  # if not we create a comparison first
            try:
                with np.load(file_path, encoding='latin1', allow_pickle=True) as dt:
                    rig_shifts = dt['rig_shifts'][()]
                    A_patch = dt['A_patch'][()]
                    A_full = dt['A_full'][()]
                    C_full = dt['C_full'][()]
                    C_patch = dt['C_patch'][()]
                    data = dt['information'][()]
            # if we cannot manage to open it or it doesnt exist:
            except (IOError, OSError):
                # we save but we explain why there were a problem
                logging.warning('we were not able to read the file ' +
                                str(file_path) + ' to compare it\n')
                file_path = os.path.join(caiman_datadir(), "testdata",
                                         "NC" + dt + ".npz")
                np.savez_compressed(
                    file_path,
                    information=information,
                    A_full=self.comparison['cnmf_full_frame']['ourdata'][0],
                    C_full=self.comparison['cnmf_full_frame']['ourdata'][1],
                    A_patch=self.comparison['cnmf_on_patch']['ourdata'][0],
                    C_patch=self.comparison['cnmf_on_patch']['ourdata'][1],
                    rig_shifts=self.comparison['rig_shifts']['ourdata'])
                return
        # creating the FOLDER to store our data
        # XXX Is this still hooked up to anything?
        dr = os.path.join(caiman_datadir(), "testdata")
        i = str(len(os.listdir(dr)))   # index the new folder by the current number of entries
        if not os.path.exists(dr + i):
            os.makedirs(dr + i)
        information.update({'diff': {}})
        information.update({
            'differences': {
                'proc': False,
                'params_movie': False,
                'params_cnm': False
            }
        })
        # INFORMATION FOR THE USER
        if data['processor'] != information['processor']:
            logging.info(
                "you don't have the same processor as the groundtruth; the time"
                " difference can vary because of that.\nTry recreating your own"
                " groundtruth before testing. Compare: "
                + str(data['processor']) + " to " +
                str(information['processor']) + "\n")
            information['differences']['proc'] = True
        if data['params'] != information['params']:
            logging.warning(
                "you are not using the same movie parameters; things can go wrong"
            )
            logging.warning(
                'you must use the same parameters to compare your version of the code with '
                'the groundtruth one. Look up the groundtruth parameters with the see() method\n'
            )
            information['differences']['params_movie'] = True
        # We must cleanup some fields to permit an accurate comparison
        if not normalised_compare_cnmpatches(data['cnmpatch'], cnmpatch):
            if data['cnmpatch'].keys() != cnmpatch.keys():
                logging.error(
                    'DIFFERENCES IN THE FIELDS OF CNMF'
                )  # TODO: Now that we have deeply nested data structures, find a module that gives you tight differences.
            diffkeys = [
                k for k in data['cnmpatch']
                if data['cnmpatch'][k] != cnmpatch[k]
            ]
            for k in diffkeys:
                logging.info("{}:{}->{}".format(k, data['cnmpatch'][k],
                                                cnmpatch[k]))

            logging.warning(
                'you are not using the same parameters in your CNMF-on-patches initialization\n'
            )
            information['differences']['params_cnm'] = True
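        # Illustrative sketch for the TODO above (hypothetical helper, not
        # part of CaImAn): a recursive dict-diff that reports the key trail
        # of every mismatch in nested parameter structures, e.g.
        # _deep_diff(data['cnmpatch'], cnmpatch).
        def _deep_diff(a, b, path=''):
            diffs = []
            for k in set(a) | set(b):
                here = path + '/' + str(k)
                if k not in a or k not in b:
                    diffs.append(here + ' (missing on one side)')
                elif isinstance(a[k], dict) and isinstance(b[k], dict):
                    diffs.extend(_deep_diff(a[k], b[k], here))
                elif a[k] != b[k]:
                    diffs.append('{}: {} -> {}'.format(here, a[k], b[k]))
            return diffs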

        # for rigid motion correction: compute differences and plot

        information['diff'].update({
            'rig':
            plotrig(init=rig_shifts,
                    curr=self.comparison['rig_shifts']['ourdata'],
                    timer=self.comparison['rig_shifts']['timer'] -
                    data['timer']['rig_shifts'],
                    sensitivity=self.comparison['rig_shifts']['sensitivity'])
        })
        try:
            pl.gcf().savefig(os.path.join(target_dir, 'rigidcorrection.pdf'))
            pl.close()
        except Exception:
            pass

        # for cnmf on patch
        information['diff'].update({
            'cnmpatch':
            cnmf(Cn=Cn,
                 A_gt=A_patch,
                 A_test=self.comparison['cnmf_on_patch']['ourdata'][0],
                 C_gt=C_patch,
                 C_test=self.comparison['cnmf_on_patch']['ourdata'][1],
                 dview=dview,
                 sensitivity=self.comparison['cnmf_on_patch']['sensitivity'],
                 dims_test=dims_test,
                 dims_gt=dims_gt,
                 timer=self.comparison['cnmf_on_patch']['timer'] -
                 data['timer']['cnmf_on_patch'])
        })
        try:
            pl.gcf().savefig(os.path.join(target_dir, 'onpatch.pdf'))
            pl.close()
        except Exception:
            pass

# CNMF FULL FRAME
        information['diff'].update({
            'cnmfull':
            cnmf(Cn=Cn,
                 A_gt=A_full,
                 A_test=self.comparison['cnmf_full_frame']['ourdata'][0],
                 C_gt=C_full,
                 C_test=self.comparison['cnmf_full_frame']['ourdata'][1],
                 dview=dview,
                 sensitivity=self.comparison['cnmf_full_frame']['sensitivity'],
                 dims_test=dims_test,
                 dims_gt=dims_gt,
                 timer=self.comparison['cnmf_full_frame']['timer'] -
                 data['timer']['cnmf_full_frame'])
        })
        try:
            pl.gcf().savefig(os.path.join(target_dir, 'cnmfull.pdf'))
            pl.close()
        except Exception:
            pass


# Saving of everything
        file_path = os.path.join(target_dir, i + ".npz")
        np.savez_compressed(
            file_path,
            information=information,
            A_full=self.comparison['cnmf_full_frame']['ourdata'][0],
            C_full=self.comparison['cnmf_full_frame']['ourdata'][1],
            A_patch=self.comparison['cnmf_on_patch']['ourdata'][0],
            C_patch=self.comparison['cnmf_on_patch']['ourdata'][1],
            rig_shifts=self.comparison['rig_shifts']['ourdata'])

        self.information = information
Example #26
0
                    fontScale=1.2,
                    color=(0, 255, 0),
                    thickness=1)
        cv2.putText(vid_frame,
                    'Frame = ' + str(t),
                    (vid_frame.shape[1] // 2 - vid_frame.shape[1] // 10,
                     vid_frame.shape[0] - 20),
                    fontFace=5,
                    fontScale=1.2,
                    color=(0, 255, 255),
                    thickness=1)
        return vid_frame

    #%% Prepare object for OnACID
    cnm2 = deepcopy(cnm_init)
    path_to_model = os.path.join(caiman_datadir(), 'model',
                                 'cnn_model_online.h5')

    if save_init:
        cnm_init.dview = None
        save_object(cnm_init,
                    os.path.abspath(fls[0]) + '_DS_' + str(ds_factor) + '.pkl')
        cnm_init = load_object(
            os.path.abspath(fls[0]) + '_DS_' + str(ds_factor) + '.pkl')

    t1 = time()
    cnm2._prepare_object(np.asarray(Yr),
                         T1,
                         expected_comps,
                         idx_components=None,
                         min_num_trial=min_num_trial,
Example #27
0
def main():
    pass  # For compatibility between running under Spyder and the CLI

    # %%  download and list all files to be processed

    # folder inside ./example_movies where files will be saved
    fld_name = 'Mesoscope'
    download_demo('Tolias_mesoscope_1.hdf5', fld_name)
    download_demo('Tolias_mesoscope_2.hdf5', fld_name)
    download_demo('Tolias_mesoscope_3.hdf5', fld_name)

    # folder where files are located
    folder_name = os.path.join(caiman_datadir(), 'example_movies', fld_name)
    extension = 'hdf5'  # extension of files
    # read all files to be processed
    fnames = glob.glob(folder_name + '/*' + extension)

    # your list of files should look something like this
    logging.info(fnames)

    # %%   Set up some parameters

    fr = 10  # frame rate (Hz)
    decay_time = 2  # approximate length of transient event in seconds
    gSig = (3, 3)  # expected half size of neurons
    p = 1  # order of AR indicator dynamics
    min_SNR = 1  # minimum SNR for accepting new components
    ds_factor = 1  # spatial downsampling factor (increases speed but may lose some fine structure)
    gnb = 2  # number of background components
    gSig = tuple(np.ceil(
        np.array(gSig) /
        ds_factor).astype('int'))  # recompute gSig if downsampling is involved
    mot_corr = False  # flag for online motion correction
    pw_rigid = True  # flag for pw-rigid motion correction (slower but potentially more accurate)
    max_shifts_online = 6  # maximum allowed shift during motion correction
    sniper_mode = True  # use a CNN to detect new neurons (otherwise use space correlation)
    rval_thr = 0.9  # space correlation threshold for candidate components
    # set up some additional supporting parameters needed for the algorithm
    # (these are default values but can change depending on dataset properties)
    init_batch = 200  # number of frames for initialization (presumably from the first file)
    K = 2  # initial number of components
    epochs = 2  # number of passes over the data
    show_movie = False  # show the movie as the data gets processed

    params_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'gSig': gSig,
        'p': p,
        'min_SNR': min_SNR,
        'rval_thr': rval_thr,
        'ds_factor': ds_factor,
        'nb': gnb,
        'motion_correct': mot_corr,
        'init_batch': init_batch,
        'init_method': 'bare',
        'normalize': True,
        'sniper_mode': sniper_mode,
        'K': K,
        'epochs': epochs,
        'max_shifts_online': max_shifts_online,
        'pw_rigid': pw_rigid,
        'dist_shape_update': True,
        'min_num_trial': 10,
        'show_movie': show_movie
    }
    opts = cnmf.params.CNMFParams(params_dict=params_dict)
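    # Illustrative check (assumption: 'epochs' belongs to the params object's
    # online group, as in recent CaImAn versions; cf. the opts.online[...] and
    # opts.get_group(...) access patterns used further below):
    assert opts.online['epochs'] == epochs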

    # %% fit online

    cnm = cnmf.online_cnmf.OnACID(params=opts)
    cnm.fit_online()

    # %% plot contours (this may take time)
    logging.info('Number of components: ' + str(cnm.estimates.A.shape[-1]))
    images = cm.load(fnames)
    Cn = images.local_correlations(swap_dim=False, frames_per_chunk=500)
    cnm.estimates.plot_contours(img=Cn, display_numbers=False)

    # %% view components
    cnm.estimates.view_components(img=Cn)

    # %% plot timing performance (if a movie is generated during processing, timing
    # will be severely over-estimated)

    T_motion = 1e3 * np.array(cnm.t_motion)
    T_detect = 1e3 * np.array(cnm.t_detect)
    T_shapes = 1e3 * np.array(cnm.t_shapes)
    T_track = 1e3 * np.array(cnm.t_online) - T_motion - T_detect - T_shapes
    plt.figure()
    plt.stackplot(np.arange(len(T_motion)), T_motion, T_track, T_detect,
                  T_shapes)
    plt.legend(labels=['motion', 'tracking', 'detect', 'shapes'], loc=2)
    plt.title('Processing time allocation')
    plt.xlabel('Frame #')
    plt.ylabel('Processing time [ms]')
    #%% RUN IF YOU WANT TO VISUALIZE THE RESULTS (might take time)
    c, dview, n_processes = \
        cm.cluster.setup_cluster(backend='local', n_processes=None,
                                 single_thread=False)
    if opts.online['motion_correct']:
        shifts = cnm.estimates.shifts[-cnm.estimates.C.shape[-1]:]
        if not opts.motion['pw_rigid']:
            memmap_file = cm.motion_correction.apply_shift_online(
                images, shifts, save_base_name='MC')
        else:
            mc = cm.motion_correction.MotionCorrect(fnames,
                                                    dview=dview,
                                                    **opts.get_group('motion'))

            mc.y_shifts_els = [[sx[0] for sx in sh] for sh in shifts]
            mc.x_shifts_els = [[sx[1] for sx in sh] for sh in shifts]
            memmap_file = mc.apply_shifts_movie(fnames,
                                                rigid_shifts=False,
                                                save_memmap=True,
                                                save_base_name='MC')
    else:  # To do: apply non-rigid shifts on the fly
        memmap_file = images.save(fnames[0][:-4] + 'mmap')
    cnm.mmap_file = memmap_file
    Yr, dims, T = cm.load_memmap(memmap_file)

    #    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    #    min_SNR = 2  # peak SNR for accepted components (if above this, accept)
    #    rval_thr = 0.85  # space correlation threshold (if above this, accept)
    #    use_cnn = True  # use the CNN classifier
    #    min_cnn_thr = 0.99  # if cnn classifier predicts below this value, reject
    #    cnn_lowest = 0.1  # neurons with cnn probability lower than this value are rejected
    #
    #    cnm.params.set('quality',   {'min_SNR': min_SNR,
    #                                'rval_thr': rval_thr,
    #                                'use_cnn': use_cnn,
    #                                'min_cnn_thr': min_cnn_thr,
    #                                'cnn_lowest': cnn_lowest})
    #
    #    cnm.estimates.evaluate_components(images, cnm.params, dview=dview)
    cnm.estimates.Cn = Cn
    cnm.save(os.path.splitext(fnames[0])[0] + '_results.hdf5')

    dview.terminate()
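    # Hedged sketch: the results file saved above can be reloaded later with
    # caiman's load_CNMF helper (assumes a recent CaImAn version):
    from caiman.source_extraction.cnmf.cnmf import load_CNMF
    cnm_loaded = load_CNMF(os.path.splitext(fnames[0])[0] + '_results.hdf5')
    logging.info('Reloaded %d components' % cnm_loaded.estimates.A.shape[-1])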
def main():
    pass # For compatibility between running under Spyder and the CLI

    c, dview, n_processes =\
        cm.cluster.setup_cluster(backend='local', n_processes=None,
                                 single_thread=False)

# %% set up some parameters
    fnames = [os.path.join(caiman_datadir(), 'split', 'first3000-ch1.tif'),
              os.path.join(caiman_datadir(), 'split', 'second3000-ch1.tif')]

    is_patches = True       # flag for processing in patches or not
    fr = 1.5                  # approximate frame rate of data
    decay_time = 5.0        # length of transient

    if is_patches:          # PROCESS IN PATCHES AND THEN COMBINE
        rf = 20             # half size of each patch
        stride = 4         # overlap between patches
        K = 2               # number of components in each patch
    else:                   # PROCESS THE WHOLE FOV AT ONCE
        rf = None           # setting these parameters to None
        stride = None       # will run CNMF on the whole FOV
        K = 10              # number of neurons expected (in the whole FOV)

    gSig = [6, 6]           # expected half size of neurons
    merge_thresh = 0.80     # merging threshold, max correlation allowed
    p = 2                   # order of the autoregressive system
    gnb = 2                 # global background order

    params_dict = {'fnames': fnames,
                   'fr': fr,
                   'decay_time': decay_time,
                   'rf': rf,
                   'stride': stride,
                   'K': K,
                   'gSig': gSig,
                   'merge_thr': merge_thresh,
                   'p': p,
                   'nb': gnb}

    opts = params.CNMFParams(params_dict=params_dict)
    # %% Now RUN CaImAn Batch (CNMF)
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    #cnm.estimates.normalize_components()
    cnm = cnm.fit_file()

    # %% plot contour plots of components
    Cn = cm.load(fnames[0], subindices=slice(1000)).local_correlations(swap_dim=False)
    cnm.estimates.plot_contours(img=Cn)

    # %% load memory mapped file
    Yr, dims, T = cm.load_memmap(cnm.mmap_file)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
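    # For intuition: each column of Yr holds one frame flattened in Fortran
    # (column-major) order, so the reshape above exactly inverts that
    # flattening. Tiny self-contained round-trip check with toy shapes:
    _toy = np.arange(24).reshape(2, 3, 4)            # (T, d1, d2)
    _yr = np.reshape(_toy, (2, -1), order='F').T     # (pixels, T), like Yr
    assert np.array_equal(np.reshape(_yr.T, [2, 3, 4], order='F'), _toy)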

    # %% refit
    cnm2 = cnm.refit(images, dview=dview)

# %% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier (this will pick up only neurons
    #           and filter out active processes)

    min_SNR = 2         # peak SNR for accepted components (if above this, accept)
    rval_thr = 0.85     # space correlation threshold (if above this, accept)
    use_cnn = False     # use the CNN classifier
    min_cnn_thr = 0.99  # if cnn classifier predicts below this value, reject
    cnn_lowest = 0.1    # neurons with cnn probability lower than this value are rejected

    cnm2.params.set('quality', {'min_SNR': min_SNR,
                                'rval_thr': rval_thr,
                                'use_cnn': use_cnn,
                                'min_cnn_thr': min_cnn_thr,
                                'cnn_lowest': cnn_lowest})

    cnm2.estimates.detrend_df_f()
    cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview)

    # %% visualize selected and rejected components
    cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components)
    # %% visualize selected components
    cnm2.estimates.nb_view_components(images, idx=cnm2.estimates.idx_components, img=Cn)
    cnm2.estimates.view_components(images, idx=cnm2.estimates.idx_components_bad, img=Cn)
    #%% only select high quality components
    cnm2.estimates.select_components(use_object=True)
    #%%
    cnm2.estimates.plot_contours(img=Cn)

    cnm2.estimates.detrend_df_f()
    import pickle
    with open("/home/david/zebraHorse/df_f_day55.pkl", "wb") as f:
        pickle.dump(cnm2.estimates.F_dff, f)
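    # Hedged sketch: the pickled traces can be read back the same way
    # (the path above is specific to the original author's machine):
    with open("/home/david/zebraHorse/df_f_day55.pkl", "rb") as f:
        F_dff_loaded = pickle.load(f)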
    
    # %% play movie with results (original, reconstructed, amplified residual)
    for j in range(10):
        cnm2.estimates.play_movie(images, magnification=4.0, frame_range=range(100 * j, 100 * (j + 1)))

    #import time
    #time.sleep(1000)
    
# %% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)

    log_files = glob.glob('Yr*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
Example #29
0
def evaluate_components_CNN(A,
                            dims,
                            gSig,
                            model_name: str = os.path.join(
                                caiman_datadir(), 'model', 'cnn_model'),
                            patch_size: int = 50,
                            loaded_model=None,
                            isGPU: bool = False) -> Tuple[Any, np.ndarray]:
    """ evaluate component quality using a CNN network

    """

    import os
    if not isGPU:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    try:
        os.environ["KERAS_BACKEND"] = "tensorflow"
        from tensorflow.keras.models import model_from_json
        use_keras = True
        logging.info('Using Keras')
    except ModuleNotFoundError:
        use_keras = False
        logging.info('Using Tensorflow')

    if loaded_model is None:
        if use_keras:
            if os.path.isfile(
                    os.path.join(caiman_datadir(), model_name + ".json")):
                model_file = os.path.join(caiman_datadir(),
                                          model_name + ".json")
                model_weights = os.path.join(caiman_datadir(),
                                             model_name + ".h5")
            elif os.path.isfile(model_name + ".json"):
                model_file = model_name + ".json"
                model_weights = model_name + ".h5"
            else:
                raise FileNotFoundError(
                    "File for requested model {} not found".format(model_name))
            with open(model_file, 'r') as json_file:
                print('USING MODEL: ' + model_file)
                loaded_model_json = json_file.read()

            loaded_model = model_from_json(loaded_model_json)
            loaded_model.load_weights(model_name + '.h5')
            #loaded_model.compile('sgd', 'mse')
        else:
            if os.path.isfile(
                    os.path.join(caiman_datadir(), model_name + ".h5.pb")):
                model_file = os.path.join(caiman_datadir(),
                                          model_name + ".h5.pb")
            elif os.path.isfile(model_name + ".h5.pb"):
                model_file = model_name + ".h5.pb"
            else:
                raise FileNotFoundError(
                    "File for requested model {} not found".format(model_name))
            loaded_model = load_graph(model_file)

        logging.debug("Loaded model from disk")

    half_crop = np.minimum(gSig[0] * 4 + 1,
                           patch_size), np.minimum(gSig[1] * 4 + 1, patch_size)
    dims = np.array(dims)
    coms = [
        scipy.ndimage.center_of_mass(mm.toarray().reshape(dims, order='F'))
        for mm in A.tocsc().T
    ]
    coms = np.maximum(coms, half_crop)
    coms = np.array([np.minimum(cms, dims - half_crop)
                     for cms in coms]).astype(int)
    crop_imgs = [
        mm.toarray().reshape(
            dims, order='F')[com[0] - half_crop[0]:com[0] + half_crop[0],
                             com[1] - half_crop[1]:com[1] + half_crop[1]]
        for mm, com in zip(A.tocsc().T, coms)
    ]
    final_crops = np.array([
        cv2.resize(im / np.linalg.norm(im), (patch_size, patch_size))
        for im in crop_imgs
    ])
    if use_keras:
        predictions = loaded_model.predict(final_crops[:, :, :, np.newaxis],
                                           batch_size=32,
                                           verbose=1)
    else:
        tf_in = loaded_model.get_tensor_by_name('prefix/conv2d_20_input:0')
        tf_out = loaded_model.get_tensor_by_name('prefix/output_node0:0')
        with tf.Session(graph=loaded_model) as sess:
            predictions = sess.run(
                tf_out, feed_dict={tf_in: final_crops[:, :, :, np.newaxis]})

    return predictions, final_crops
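# Hedged usage sketch for evaluate_components_CNN (the synthetic footprints
# are illustrative only; running it requires the bundled cnn_model files and
# a working tensorflow/keras install):
if __name__ == '__main__':
    import scipy.sparse
    dims_demo = (60, 80)                        # illustrative FOV shape
    A_demo = scipy.sparse.random(60 * 80, 5, density=0.01, format='csc')
    preds_demo, crops_demo = evaluate_components_CNN(A_demo, dims_demo, (3, 3))
    keep = preds_demo[:, 1] >= 0.95             # column 1: P(component is a neuron)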
def main():
    pass  # For compatibility between running under Spyder and the CLI

    # %%  Load demo movie and ROIs
    fnames = download_demo('demo_voltage_imaging.hdf5', 'volpy')  # file path to movie file (will download if not present)
    path_ROIs = download_demo('demo_voltage_imaging_ROIs.hdf5', 'volpy')  # file path to ROIs file (will download if not present)

    # %% Setup some parameters for data and motion correction
    # dataset parameters
    fr = 400                                        # sample rate of the movie
    ROIs = None                                     # Region of interests
    index = None                                    # index of neurons
    weights = None                                  # reuse spatial weights by 
                                                    # opts.change_params(params_dict={'weights':vpy.estimates['weights']})
    # motion correction parameters
    pw_rigid = False                                # flag for pw-rigid motion correction
    gSig_filt = (3, 3)                              # size of filter, in general gSig (see below),
                                                    # change this one if algorithm does not work
    max_shifts = (5, 5)                             # maximum allowed rigid shift
    strides = (48, 48)                              # start a new patch for pw-rigid motion correction every x pixels
    overlaps = (24, 24)                             # overlap between patches (patch size = strides + overlaps)
    max_deviation_rigid = 3                         # maximum deviation allowed for patch with respect to rigid shifts
    border_nan = 'copy'

    opts_dict = {
        'fnames': fnames,
        'fr': fr,
        'index': index,
        'ROIs': ROIs,
        'weights': weights,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'gSig_filt': gSig_filt,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': border_nan
    }

    opts = volparams(params_dict=opts_dict)

    # %% play the movie (optional)
    # playing the movie using opencv. It requires loading the movie in memory.
    # To close the video press q
    display_images = False

    if display_images:
        m_orig = cm.load(fnames)
        ds_ratio = 0.2
        moviehandle = m_orig.resize(1, 1, ds_ratio)
        moviehandle.play(q_max=99.5, fr=60, magnification=2)

    # %% start a cluster for parallel processing

    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)

    # %%% MOTION CORRECTION
    # Create a motion correction object with the specified parameters
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    # Run piecewise rigid motion correction
    mc.motion_correct(save_movie=True)
    dview.terminate()

    # %% motion correction compared with original movie
    display_images = False

    if display_images:
        m_orig = cm.load(fnames)
        m_rig = cm.load(mc.mmap_file)
        ds_ratio = 0.2
        moviehandle = cm.concatenate([m_orig.resize(1, 1, ds_ratio) - mc.min_mov * mc.nonneg_movie,
                                      m_rig.resize(1, 1, ds_ratio)], axis=2)
        moviehandle.play(fr=60, q_max=99.5, magnification=2)  # press q to exit

    # %% movie subtracted from the mean
        m_orig2 = (m_orig - np.mean(m_orig, axis=0))
        m_rig2 = (m_rig - np.mean(m_rig, axis=0))
        moviehandle1 = cm.concatenate([m_orig2.resize(1, 1, ds_ratio),
                                       m_rig2.resize(1, 1, ds_ratio)], axis=2)
        moviehandle1.play(fr=60, q_max=99.5, magnification=2)

    # %% Memory Mapping
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)

    border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
    fname_new = cm.save_memmap_join(mc.mmap_file, base_name='memmap_',
                                    add_to_mov=border_to_0, dview=dview,
                                    n_chunks=10)

    dview.terminate()

    # %% change fnames to the new motion corrected one
    opts.change_params(params_dict={'fnames': fname_new})

    # %% SEGMENTATION
    # Create mean and correlation image
    use_maskrcnn = True  # set to True to predict the ROIs using the mask R-CNN
    if not use_maskrcnn:                 # use manual annotations
        with h5py.File(path_ROIs, 'r') as fl:
            ROIs = fl['mov'][()]  # load ROIs
        opts.change_params(params_dict={'ROIs': ROIs,
                                        'index': list(range(ROIs.shape[0])),
                                        'method': 'SpikePursuit'})
    else:
        m = cm.load(mc.mmap_file[0], subindices=slice(0, 20000))
        m.fr = fr
        img = m.mean(axis=0)
        img = (img-np.mean(img))/np.std(img)
        m1 = m.computeDFF(secsWindow=1, in_place=True)[0]
        m = m - m1
        Cn = m.local_correlations(swap_dim=False, eight_neighbours=True)
        img_corr = (Cn-np.mean(Cn))/np.std(Cn)
        summary_image = np.stack([img, img, img_corr], axis=2).astype(np.float32)
        del m
        del m1

        # %%
        # Mask R-CNN
        config = neurons.NeuronsConfig()

        class InferenceConfig(config.__class__):
            # Run detection on one image at a time
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
            DETECTION_MIN_CONFIDENCE = 0.7
            IMAGE_RESIZE_MODE = "pad64"
            IMAGE_MAX_DIM = 512
            RPN_NMS_THRESHOLD = 0.7
            POST_NMS_ROIS_INFERENCE = 1000

        config = InferenceConfig()
        config.display()
        model_dir = os.path.join(caiman_datadir(), 'model')
        DEVICE = "/cpu:0"  # /cpu:0 or /gpu:0
        with tf.device(DEVICE):
            model = modellib.MaskRCNN(mode="inference", model_dir=model_dir,
                                      config=config)
        weights_path = download_model('mask_rcnn')
        model.load_weights(weights_path, by_name=True)
        results = model.detect([summary_image], verbose=1)
        r = results[0]
        ROIs_mrcnn = r['masks'].transpose([2, 0, 1])

    # %% visualize the result
        display_result = False

        if display_result:
            _, ax = plt.subplots(1, 1, figsize=(16, 16))
            visualize.display_instances(summary_image, r['rois'], r['masks'],
                                        r['class_ids'], ['BG', 'neurons'],
                                        r['scores'], ax=ax, title="Predictions")

    # %% set rois
        opts.change_params(params_dict={'ROIs': ROIs_mrcnn,
                                        'index': list(range(ROIs_mrcnn.shape[0])),
                                        'method': 'SpikePursuit'})

    # %% Trace Denoising and Spike Extraction
    c, dview, n_processes = cm.cluster.setup_cluster(
            backend='local', n_processes=None, single_thread=False, maxtasksperchild=1)
    vpy = VOLPY(n_processes=n_processes, dview=dview, params=opts)
    vpy.fit(n_processes=n_processes, dview=dview)

    # %% some visualization
    print(np.where(vpy.estimates['passedLocalityTest'])[0])    # neurons that pass locality test
    n = 0
    
    # Processed signal and spikes of neurons
    plt.figure()
    plt.plot(vpy.estimates['trace'][n])
    plt.plot(vpy.estimates['spikeTimes'][n],
             np.max(vpy.estimates['trace'][n]) * np.ones(vpy.estimates['spikeTimes'][n].shape),
             color='g', marker='o', fillstyle='none', linestyle='none')
    plt.title('signal and spike times')
    plt.show()

    # Location of neurons by Mask R-CNN or manual annotation
    plt.figure()
    if use_maskrcnn:
        plt.imshow(ROIs_mrcnn[n])
    else:
        plt.imshow(ROIs[n])
    mv = cm.load(fname_new)
    plt.imshow(mv.mean(axis=0), alpha=0.5)
    
    # Spatial filter created by algorithm
    plt.figure()
    plt.imshow(vpy.estimates['spatialFilter'][n])
    plt.colorbar()
    plt.title('spatial filter')
    plt.show()
    
    

    # %% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
Example #31
0
def test_computational_performance(fnames, path_ROIs, n_processes):
    import os
    import cv2
    import glob
    import logging
    import matplotlib.pyplot as plt
    import numpy as np
    import tensorflow as tf
    import h5py
    from time import time

    try:
        cv2.setNumThreads(0)
    except:
        pass

    try:
        if __IPYTHON__:
            # this is used for debugging purposes only. allows to reload classes
            # when changed
            get_ipython().magic('load_ext autoreload')
            get_ipython().magic('autoreload 2')
    except NameError:
        pass

    import caiman as cm
    from caiman.motion_correction import MotionCorrect
    from caiman.utils.utils import download_demo, download_model
    from caiman.source_extraction.volpy.volparams import volparams
    from caiman.source_extraction.volpy.volpy import VOLPY
    from caiman.source_extraction.volpy.mrcnn import visualize, neurons
    import caiman.source_extraction.volpy.mrcnn.model as modellib
    from caiman.paths import caiman_datadir
    from caiman.summary_images import local_correlations_movie_offline
    from caiman.summary_images import mean_image
    from caiman.source_extraction.volpy.utils import quick_annotation
    from multiprocessing import Pool

    time_start = time()
    print('Start MOTION CORRECTION')

    # %%  Load demo movie and ROIs
    # fnames and path_ROIs are passed in as arguments

    #%% dataset dependent parameters
    fr = 400  # sample rate of the movie

    # motion correction parameters
    pw_rigid = False  # flag for pw-rigid motion correction
    gSig_filt = (3, 3)  # size of filter, in general gSig (see below),
    # change this one if algorithm does not work
    max_shifts = (5, 5)  # maximum allowed rigid shift
    strides = (48, 48)  # start a new patch for pw-rigid motion correction every x pixels
    overlaps = (24, 24)  # overlap between patches (patch size = strides + overlaps)
    max_deviation_rigid = 3  # maximum deviation allowed for patch with respect to rigid shifts
    border_nan = 'copy'

    opts_dict = {
        'fnames': fnames,
        'fr': fr,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'gSig_filt': gSig_filt,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': border_nan
    }

    opts = volparams(params_dict=opts_dict)

    # %% start a cluster for parallel processing
    dview = Pool(n_processes)
    #dview = None
    # %%% MOTION CORRECTION
    # first we create a motion correction object with the specified parameters
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    # Run correction
    mc.motion_correct(save_movie=True)

    time_mc = time() - time_start
    print(time_mc)
    print('START MEMORY MAPPING')

    # %% restart cluster to clean up memory
    dview.terminate()
    dview = Pool(n_processes)

    # %% MEMORY MAPPING
    border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
    # you can include the boundaries of the FOV if you used the 'copy' option
    # during motion correction, although be careful about the components near
    # the boundaries

    # memory map the file in order 'C'
    fname_new = cm.save_memmap_join(mc.mmap_file,
                                    base_name='memmap_',
                                    add_to_mov=border_to_0,
                                    dview=dview,
                                    n_chunks=1000)  # exclude border

    time_mmap = time() - time_start - time_mc
    print('Start Segmentation')
    # %% SEGMENTATION
    # create summary images
    img = mean_image(mc.mmap_file[0], window=1000, dview=dview)
    img = (img - np.mean(img)) / np.std(img)
    Cn = local_correlations_movie_offline(mc.mmap_file[0],
                                          fr=fr,
                                          window=1500,
                                          stride=1500,
                                          winSize_baseline=400,
                                          remove_baseline=True,
                                          dview=dview).max(axis=0)
    img_corr = (Cn - np.mean(Cn)) / np.std(Cn)
    summary_image = np.stack([img, img, img_corr], axis=2).astype(np.float32)

    #%% three methods for segmentation
    methods_list = [
        'manual_annotation',  # requires annotated datasets in the same format as the demo ROIs
        'quick_annotation',  # annotate the data with a simple interface in python
        'maskrcnn'  # a convolutional network trained to find neurons in summary images
    ]
    method = methods_list[0]
    if method == 'manual_annotation':
        with h5py.File(path_ROIs, 'r') as fl:
            ROIs = fl['mov'][()]  # load ROIs

    elif method == 'quick_annotation':
        ROIs = quick_annotation(img_corr, min_radius=4, max_radius=10)

    elif method == 'maskrcnn':
        config = neurons.NeuronsConfig()

        class InferenceConfig(config.__class__):
            # Run detection on one image at a time
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
            DETECTION_MIN_CONFIDENCE = 0.7
            IMAGE_RESIZE_MODE = "pad64"
            IMAGE_MAX_DIM = 512
            RPN_NMS_THRESHOLD = 0.7
            POST_NMS_ROIS_INFERENCE = 1000

        config = InferenceConfig()
        config.display()
        model_dir = os.path.join(caiman_datadir(), 'model')
        DEVICE = "/cpu:0"  # /cpu:0 or /gpu:0
        with tf.device(DEVICE):
            model = modellib.MaskRCNN(mode="inference",
                                      model_dir=model_dir,
                                      config=config)
        weights_path = download_model('mask_rcnn')
        model.load_weights(weights_path, by_name=True)
        results = model.detect([summary_image], verbose=1)
        r = results[0]
        ROIs = r['masks'].transpose([2, 0, 1])

        display_result = False
        if display_result:
            _, ax = plt.subplots(1, 1, figsize=(16, 16))
            visualize.display_instances(summary_image,
                                        r['rois'],
                                        r['masks'],
                                        r['class_ids'], ['BG', 'neurons'],
                                        r['scores'],
                                        ax=ax,
                                        title="Predictions")

    time_seg = time() - time_mmap - time_mc - time_start
    print('Start SPIKE EXTRACTION')

    # %% restart cluster to clean up memory
    dview.terminate()
    dview = Pool(n_processes, maxtasksperchild=1)

    # %% parameters for trace denoising and spike extraction
    fnames = fname_new  # switch to the motion-corrected file
    # ROIs were computed above (one region of interest per neuron)
    index = list(range(len(ROIs)))  # index of neurons
    weights = None  # reuse spatial weights

    tau_lp = 5  # parameter for high-pass filter to remove photobleaching
    threshold = 4  # threshold for finding spikes; increase it to find fewer spikes
    contextSize = 35  # number of pixels surrounding the ROI to censor from the background PCA
    flip_signal = True  # Important! Flip signal or not, True for Voltron indicator, False for others

    opts_dict = {
        'fnames': fnames,
        'ROIs': ROIs,
        'index': index,
        'weights': weights,
        'tau_lp': tau_lp,
        'threshold': threshold,
        'contextSize': contextSize,
        'flip_signal': flip_signal
    }

    opts.change_params(params_dict=opts_dict)

    #%% Trace Denoising and Spike Extraction
    vpy = VOLPY(n_processes=n_processes, dview=dview, params=opts)
    vpy.fit(n_processes=n_processes, dview=dview)

    # %% STOP CLUSTER and clean up log files
    #dview.terminate()
    log_files = glob.glob('*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)

    time_ext = time() - time_mmap - time_mc - time_start - time_seg

    #%%
    print('file: ' + fnames)
    print('number of processes: ' + str(n_processes))
    print(time_mc)
    print(time_mmap)
    print(time_seg)
    print(time_ext)
    time_list = [time_mc, time_mmap, time_seg, time_ext]

    return time_list
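# Hedged usage sketch for the benchmark above, using the demo voltage data
# fetched elsewhere in this file (download_demo downloads it if missing);
# the four returned values are per-stage wall-clock times in seconds:
#
#     from caiman.utils.utils import download_demo
#     fnames_demo = download_demo('demo_voltage_imaging.hdf5', 'volpy')
#     rois_demo = download_demo('demo_voltage_imaging_ROIs.hdf5', 'volpy')
#     t_mc, t_mmap, t_seg, t_ext = test_computational_performance(
#         fnames_demo, rois_demo, n_processes=4)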
import cv2
from caiman.utils.visualization import plot_contours
from caiman.source_extraction.cnmf.online_cnmf import bare_initialization
from caiman.source_extraction.cnmf.utilities import detrend_df_f_auto
from caiman.paths import caiman_datadir

#%%  download and list all files to be processed

# folder inside ./example_movies where files will be saved
fld_name = 'Mesoscope'
download_demo('Tolias_mesoscope_1.hdf5', fld_name)
download_demo('Tolias_mesoscope_2.hdf5', fld_name)
download_demo('Tolias_mesoscope_3.hdf5', fld_name)

# folder where files are located
folder_name = os.path.join(caiman_datadir(), 'example_movies', fld_name)
extension = 'hdf5'                                  # extension of files
# read all files to be processed
fls = glob.glob(folder_name + '/*' + extension)

# your list of files should look something like this
print(fls)

#%%   Set up some parameters

# frame rate (Hz)
fr = 15
# approximate length of transient event in seconds
decay_time = 0.5
# expected half size of neurons
gSig = (3, 3)
Example #33
0
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%% load data

    fname = os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')
    Y = cm.load(fname).astype(np.float32)  #
    # used as a background image
    Cn = cm.local_correlations(Y.transpose(1, 2, 0))
    #%% set up some parameters

    # frame rate (Hz)
    fr = 10
    # approximate length of transient event in seconds
    decay_time = 0.5
    # expected half size of neurons
    gSig = [6, 6]
    # order of AR indicator dynamics
    p = 1
    # minimum SNR for accepting new components
    min_SNR = 3.5
    # correlation threshold for new component inclusion
    rval_thr = 0.90
    # number of background components
    gnb = 3

    # set up some additional supporting parameters needed for the algorithm (these are default values but change according to dataset characteristics)

    # number of shapes to be updated each time (put this to a finite small value to increase speed)
    max_comp_update_shape = np.inf
    # maximum number of expected components used for memory pre-allocation (exaggerate here)
    expected_comps = 50
    # number of timesteps to consider when testing new neuron candidates
    N_samples = np.ceil(fr * decay_time)
    # exceptionality threshold
    thresh_fitness_raw = log_ndtr(-min_SNR) * N_samples
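    # Worked example of the line above: with fr = 10 Hz and decay_time = 0.5 s,
    # N_samples = 5 and log_ndtr(-3.5) = log P(z < -3.5) ~ -8.37, so
    # thresh_fitness_raw ~ -41.8. A candidate is accepted only if the joint
    # log-probability of N_samples consecutive samples under the noise model
    # falls below this (very negative) threshold.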
    # total length of file
    T1 = Y.shape[0]

    # set up CNMF initialization parameters

    # merging threshold, max correlation allowed
    merge_thresh = 0.8
    # number of frames for initialization (presumably from the first file)
    initbatch = 400
    # size of patch
    patch_size = 32
    # amount of overlap between patches
    stride = 3
    # max number of components in each patch
    K = 4

    #%% obtain initial batch file used for initialization
    # memory map file (not needed)
    fname_new = Y[:initbatch].save(os.path.join(caiman_datadir(),
                                                'example_movies', 'demo.mmap'),
                                   order='C')
    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    Cn_init = cm.local_correlations(np.reshape(Yr, dims + (T, ), order='F'))

    #%% RUN (offline) CNMF algorithm on the initial batch
    pl.close('all')
    cnm_init = cnmf.CNMF(2,
                         k=K,
                         gSig=gSig,
                         merge_thresh=merge_thresh,
                         fr=fr,
                         p=p,
                         rf=patch_size // 2,
                         stride=stride,
                         skip_refinement=False,
                         normalize_init=False,
                         options_local_NMF=None,
                         minibatch_shape=100,
                         minibatch_suff_stat=5,
                         update_num_comps=True,
                         rval_thr=rval_thr,
                         thresh_fitness_delta=-50,
                         gnb=gnb,
                         decay_time=decay_time,
                         thresh_fitness_raw=thresh_fitness_raw,
                         batch_update_suff_stat=False,
                         max_comp_update_shape=max_comp_update_shape,
                         expected_comps=expected_comps,
                         dview=None,
                         min_SNR=min_SNR)

    cnm_init = cnm_init.fit(images)

    print(('Number of components:' + str(cnm_init.estimates.A.shape[-1])))

    pl.figure()
    crd = plot_contours(cnm_init.estimates.A.tocsc(), Cn_init, thr=0.9)

    #%% run (online) OnACID algorithm

    cnm = deepcopy(cnm_init)
    cnm.params.data['dims'] = (60, 80)
    cnm._prepare_object(np.asarray(Yr), T1)

    t = initbatch

    Y_ = cm.load(fname)[initbatch:].astype(np.float32)
    for frame_count, frame in enumerate(Y_):
        cnm.fit_next(t, frame.copy().reshape(-1, order='F'))
        t += 1

#%% extract the results

    C, f = cnm.estimates.C_on[gnb:cnm.M], cnm.estimates.C_on[:gnb]
    A, b = cnm.estimates.Ab[:, gnb:cnm.M], cnm.estimates.Ab[:, :gnb]
    print(('Number of components:' + str(A.shape[-1])))

    #%% pass through the CNN classifier with a low threshold (keeps clearer neuron shapes and excludes processes)
    use_CNN = True
    if use_CNN:
        # threshold for CNN classifier
        thresh_cnn = 0.1
        from caiman.components_evaluation import evaluate_components_CNN
        predictions, final_crops = evaluate_components_CNN(
            A,
            dims,
            gSig,
            model_name=os.path.join(caiman_datadir(), 'model', 'cnn_model'))
        A_exclude, C_exclude = (A[:, predictions[:, 1] < thresh_cnn],
                                C[predictions[:, 1] < thresh_cnn])
        A, C = (A[:, predictions[:, 1] >= thresh_cnn],
                C[predictions[:, 1] >= thresh_cnn])
        noisyC = cnm.estimates.noisyC[gnb:cnm.M]
        YrA = noisyC[predictions[:, 1] >= thresh_cnn] - C
    else:
        YrA = cnm.estimates.noisyC[gnb:cnm.M] - C

#%% plot results
    pl.figure()
    crd = cm.utils.visualization.plot_contours(A, Cn, thr=0.9)

    view_patches_bar(Yr, A, C, b, f, dims[0], dims[1], YrA, img=Cn)
Example #34
0
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%% start a cluster

    c, dview, n_processes =\
        cm.cluster.setup_cluster(backend='local', n_processes=None,
                                 single_thread=False)

    #%% save files to be processed

    # This datafile is distributed with Caiman
    fnames = [
        os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')
    ]
    # location of dataset (can actually be a list of files to be concatenated)
    add_to_movie = -np.min(cm.load(fnames[0],
                                   subindices=range(200))).astype(float)
    # (the minimum is determined on a small chunk of data; if it is negative,
    # it is subtracted so that the data become non-negative)
    add_to_movie = np.maximum(add_to_movie, 0)
    base_name = 'Yr'
    fname_new = cm.save_memmap(fnames,
                               dview=dview,
                               base_name=base_name,
                               order='C',
                               add_to_movie=add_to_movie)

    #%% LOAD MEMORY MAPPABLE FILE
    Yr, dims, T = cm.load_memmap(fname_new)
    d1, d2 = dims
    images = np.reshape(Yr.T, [T] + list(dims), order='F')

    #%% play movie, press q to quit
    play_movie = False
    if play_movie:
        cm.movie(images[1400:]).play(fr=50, magnification=4, gain=3.)

#%% correlation image. From here infer neuron size and density
    Cn = cm.movie(images).local_correlations(swap_dim=False)
    plt.imshow(Cn, cmap='gray')
    plt.title('Correlation Image')

    #%% set up some parameters

    is_patches = True  # flag for processing in patches or not

    if is_patches:  # PROCESS IN PATCHES AND THEN COMBINE
        rf = 10  # half size of each patch
        stride = 4  # overlap between patches
        K = 4  # number of components in each patch
    else:  # PROCESS THE WHOLE FOV AT ONCE
        rf = None  # setting these parameters to None
        stride = None  # will run CNMF on the whole FOV
        K = 30  # number of neurons expected (in the whole FOV)

    gSig = [6, 6]  # expected half size of neurons
    merge_thresh = 0.80  # merging threshold, max correlation allowed
    p = 2  # order of the autoregressive system
    gnb = 2  # global background order

    #%% Now RUN CNMF
    cnm = cnmf.CNMF(n_processes,
                    method_init='greedy_roi',
                    k=K,
                    gSig=gSig,
                    merge_thresh=merge_thresh,
                    p=p,
                    dview=dview,
                    gnb=gnb,
                    rf=rf,
                    stride=stride,
                    rolling_sum=False)
    cnm = cnm.fit(images)

    #%% plot contour plots of components

    plt.figure()
    crd = cm.utils.visualization.plot_contours(cnm.A, Cn, thr=0.9)
    plt.title('Contour plots of components')

    #%%
    A_in, C_in, b_in, f_in = cnm.A[:, :], cnm.C[:], cnm.b, cnm.f
    cnm2 = cnmf.CNMF(n_processes=1,
                     k=A_in.shape[-1],
                     gSig=gSig,
                     p=p,
                     dview=dview,
                     merge_thresh=merge_thresh,
                     Ain=A_in,
                     Cin=C_in,
                     b_in=b_in,
                     f_in=f_in,
                     rf=None,
                     stride=None,
                     gnb=gnb,
                     method_deconvolution='oasis',
                     check_nan=True)

    cnm2 = cnm2.fit(images)
    #%% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier (this will pick up only neurons
    #           and filter out active processes)
    fr = 10  # approximate frame rate of data
    decay_time = 5.0  # length of transient
    min_SNR = 2.5  # peak SNR for accepted components (if above this, accept)
    rval_thr = 0.90  # space correlation threshold (if above this, accept)
    use_cnn = True  # use the CNN classifier
    min_cnn_thr = 0.95  # if cnn classifier predicts below this value, reject

    idx_components, idx_components_bad, SNR_comp, r_values, cnn_preds = \
        estimate_components_quality_auto(images, cnm.A, cnm.C, cnm.b, cnm.f,
                                         cnm.YrA, fr, decay_time, gSig, dims,
                                         dview=dview, min_SNR=min_SNR,
                                         r_values_min=rval_thr, use_cnn=use_cnn,
                                         thresh_cnn_min=min_cnn_thr)
    #%% visualize selected and rejected components
    plt.figure()
    plt.subplot(1, 2, 1)
    cm.utils.visualization.plot_contours(cnm2.A[:, idx_components],
                                         Cn,
                                         thr=0.9)
    plt.title('Selected components')
    plt.subplot(1, 2, 2)
    plt.title('Discarded components')
    cm.utils.visualization.plot_contours(cnm2.A[:, idx_components_bad],
                                         Cn,
                                         thr=0.9)

    #%%
    plt.figure()
    crd = cm.utils.visualization.plot_contours(cnm2.A.tocsc()[:,
                                                              idx_components],
                                               Cn,
                                               thr=0.9)
    plt.title('Contour plots of components')
    #%% visualize selected components
    cm.utils.visualization.view_patches_bar(Yr,
                                            cnm2.A.tocsc()[:, idx_components],
                                            cnm2.C[idx_components, :],
                                            cnm2.b,
                                            cnm2.f,
                                            dims[0],
                                            dims[1],
                                            YrA=cnm2.YrA[idx_components, :],
                                            img=Cn)
    #%% STOP CLUSTER and clean up log files
    cm.stop_server()

    log_files = glob.glob('Yr*_LOG_*')
    for log_file in log_files:
        os.remove(log_file)
Example #35
0
import logging
from pytictoc import TicToc
from caiman.source_extraction.cnmf import params as params
from caiman.source_extraction import cnmf as cnmf
import os
from caiman.paths import caiman_datadir

# %% ********* Creating named pipes for communication with MicroManager: *********
timer = TicToc()
timer.tic()  # start measuring time

sendPipeName = "/tmp/getPipeMMCaImAn.ser"  # FOR SENDING MESSAGES --> TO MicroManager
receivePipeName = "/tmp/sendPipeMMCaImAn.ser"  # FOR READING MESSAGES --> FROM MicroManager

MMfileDirectory = '/Applications/MicroManager 2.0 gamma/uMresults'
CaimanFileDirectory = caiman_datadir()  # specify where the file is saved

if os.path.exists(sendPipeName):
    os.remove(sendPipeName)
    os.mkfifo(sendPipeName)
    print("Removed old write-pipe, created new write-pipe.")
else:
    os.mkfifo(sendPipeName)
    print("Write-pipe created sucessfully!")

if os.path.exists(receivePipeName):
    os.remove(receivePipeName)
    os.mkfifo(receivePipeName)
    print("Removed old read-pipe, created new read-pipe.")
else:
    os.mkfifo(receivePipeName)
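# Hedged sketch: once both FIFOs exist, messages are exchanged with ordinary
# file I/O; note that opening a FIFO blocks until the other end connects, and
# the message below is an assumption, not MicroManager's actual protocol:
#
#     with open(sendPipeName, 'w') as pipe_out:
#         pipe_out.write('start_acquisition\n')
#     with open(receivePipeName, 'r') as pipe_in:
#         reply = pipe_in.readline()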
Example #36
0
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%%  download and list all files to be processed

    # folder inside ./example_movies where files will be saved
    fld_name = 'Mesoscope'
    download_demo('Tolias_mesoscope_1.hdf5', fld_name)
    download_demo('Tolias_mesoscope_2.hdf5', fld_name)
    download_demo('Tolias_mesoscope_3.hdf5', fld_name)

    # folder where files are located
    folder_name = os.path.join(caiman_datadir(), 'example_movies', fld_name)
    extension = 'hdf5'  # extension of files
    # read all files to be processed
    fls = glob.glob(folder_name + '/*' + extension)

    # your list of files should look something like this
    print(fls)

    #%%   Set up some parameters

    # frame rate (Hz)
    fr = 15
    # approximate length of transient event in seconds
    decay_time = 0.5
    # expected half size of neurons
    gSig = (3, 3)
    # order of AR indicator dynamics
    p = 1
    # minimum SNR for accepting new components
    min_SNR = 2.5
    # correlation threshold for new component inclusion
    rval_thr = 0.85
    # spatial downsampling factor (increases speed but may lose some fine structure)
    ds_factor = 1
    # number of background components
    gnb = 2
    # recompute gSig if downsampling is involved
    gSig = tuple(np.ceil(np.array(gSig) / ds_factor).astype('int'))
    # flag for online motion correction
    mot_corr = True
    # maximum allowed shift during motion correction
    max_shift = np.ceil(10. / ds_factor).astype('int')

    # set up some additional supporting parameters needed for the algorithm (these are default values but change according to dataset characteristics)

    # number of shapes to be updated each time (put this to a finite small value to increase speed)
    max_comp_update_shape = np.inf
    # number of files used for initialization
    init_files = 1
    # number of files used for online
    online_files = len(fls) - 1
    # number of frames for initialization (presumably from the first file)
    initbatch = 200
    # maximum number of expected components used for memory pre-allocation (exaggerate here)
    expected_comps = 300
    # initial number of components
    K = 2
    # number of timesteps to consider when testing new neuron candidates
    N_samples = np.ceil(fr * decay_time)
    # exceptionality threshold
    thresh_fitness_raw = scipy.special.log_ndtr(-min_SNR) * N_samples
    # number of passes over the data
    epochs = 2
    # upper bound for number of frames in each file (used right below)
    len_file = 1000
    # total length of all files (if not known use a large number, then truncate at the end)
    T1 = len(fls) * len_file * epochs

    #%%    Initialize movie

    # load only the first initbatch frames and possibly downsample them
    if ds_factor > 1:
        Y = cm.load(fls[0], subindices=slice(0, initbatch, None)).astype(
            np.float32).resize(1. / ds_factor, 1. / ds_factor)
    else:
        Y = cm.load(fls[0], subindices=slice(0, initbatch,
                                             None)).astype(np.float32)

    if mot_corr:  # perform motion correction on the first initbatch frames
        mc = Y.motion_correct(max_shift, max_shift)
        Y = mc[0].astype(np.float32)
        borders = np.max(mc[1])
    else:
        Y = Y.astype(np.float32)

    # minimum value of movie. Subtract it to make the data non-negative
    img_min = Y.min()
    Y -= img_min
    img_norm = np.std(Y, axis=0)
    # normalizing factor to equalize the FOV
    img_norm += np.median(img_norm)
    Y = Y / img_norm[None, :, :]  # normalize data

    _, d1, d2 = Y.shape
    dims = (d1, d2)  # dimensions of FOV
    Yr = Y.to_2D().T  # convert data into 2D array

    Cn_init = Y.local_correlations(swap_dim=False)  # compute correlation image
    #pl.imshow(Cn_init)
    #pl.title('Correlation Image on initial batch')
    #pl.colorbar()

    bnd_Y = np.percentile(Y, (0.001, 100 - 0.001))  # plotting boundaries for Y

    #%% initialize OnACID with bare initialization

    cnm_init = bare_initialization(Y[:initbatch].transpose(1, 2, 0),
                                   init_batch=initbatch,
                                   k=K,
                                   gnb=gnb,
                                   gSig=gSig,
                                   p=p,
                                   minibatch_shape=100,
                                   minibatch_suff_stat=5,
                                   update_num_comps=True,
                                   rval_thr=rval_thr,
                                   thresh_fitness_raw=thresh_fitness_raw,
                                   batch_update_suff_stat=True,
                                   max_comp_update_shape=max_comp_update_shape,
                                   deconv_flag=False,
                                   use_dense=False,
                                   simultaneously=False,
                                   n_refit=0)

    #%% Plot initialization results

    crd = plot_contours(cnm_init.A.tocsc(), Cn_init, thr=0.9)
    A, C, b, f, YrA, sn = cnm_init.A, cnm_init.C, cnm_init.b, cnm_init.f, cnm_init.YrA, cnm_init.sn
    view_patches_bar(Yr,
                     scipy.sparse.coo_matrix(A.tocsc()[:, :]),
                     C[:, :],
                     b,
                     f,
                     dims[0],
                     dims[1],
                     YrA=YrA[:, :],
                     img=Cn_init)

    bnd_AC = np.percentile(A.dot(C), (0.001, 100 - 0.005))
    bnd_BG = np.percentile(b.dot(f), (0.001, 100 - 0.001))

    #%% create a function for plotting results in real time if needed

    def create_frame(cnm2, img_norm, captions):
        A, b = cnm2.Ab[:, cnm2.gnb:], cnm2.Ab[:, :cnm2.gnb].toarray()
        C, f = cnm2.C_on[cnm2.gnb:cnm2.M, :], cnm2.C_on[:cnm2.gnb, :]
        # inferred activity due to components (no background)
        frame_plot = (frame_cor.copy() - bnd_Y[0]) / np.diff(bnd_Y)
        comps_frame = A.dot(C[:, t - 1]).reshape(cnm2.dims, order='F')
        bgkrnd_frame = b.dot(f[:, t - 1]).reshape(
            cnm2.dims, order='F')  # denoised frame (components + background)
        denoised_frame = comps_frame + bgkrnd_frame
        denoised_frame = (denoised_frame.copy() - bnd_Y[0]) / np.diff(bnd_Y)
        comps_frame = (comps_frame.copy() - bnd_AC[0]) / np.diff(bnd_AC)

        if show_residuals:
            #all_comps = np.reshape(cnm2.Yres_buf.mean(0), cnm2.dims, order='F')
            all_comps = np.reshape(cnm2.mean_buff, cnm2.dims, order='F')
            all_comps = np.minimum(np.maximum(all_comps, 0) * 2 + 0.25, 255)
        else:
            # sum of the spatial footprints of all components
            all_comps = np.array(A.sum(-1)).reshape(cnm2.dims, order='F')
        frame_comp_1 = cv2.resize(
            np.concatenate([frame_plot, all_comps * 1.], axis=-1),
            (2 * int(cnm2.dims[1] * resize_fact),
             int(cnm2.dims[0] * resize_fact)))
        frame_comp_2 = cv2.resize(
            np.concatenate([comps_frame, denoised_frame], axis=-1),
            (2 * int(cnm2.dims[1] * resize_fact),
             int(cnm2.dims[0] * resize_fact)))
        frame_pn = np.concatenate([frame_comp_1, frame_comp_2], axis=0).T
        vid_frame = np.repeat(frame_pn[:, :, None], 3, axis=-1)
        vid_frame = np.minimum((vid_frame * 255.), 255).astype('u1')

        if show_residuals and cnm2.ind_new:
            add_v = int(cnm2.dims[1] * resize_fact)
            for ind_new in cnm2.ind_new:
                cv2.rectangle(vid_frame,
                              (int(ind_new[0][1] * resize_fact),
                               int(ind_new[1][1] * resize_fact) + add_v),
                              (int(ind_new[0][0] * resize_fact),
                               int(ind_new[1][0] * resize_fact) + add_v),
                              (255, 0, 255), 2)

        cv2.putText(vid_frame,
                    captions[0], (5, 20),
                    fontFace=5,
                    fontScale=0.8,
                    color=(0, 255, 0),
                    thickness=1)
        cv2.putText(vid_frame,
                    captions[1], (int(cnm2.dims[0] * resize_fact) + 5, 20),
                    fontFace=5,
                    fontScale=0.8,
                    color=(0, 255, 0),
                    thickness=1)
        cv2.putText(vid_frame,
                    captions[2], (5, int(cnm2.dims[1] * resize_fact) + 20),
                    fontFace=5,
                    fontScale=0.8,
                    color=(0, 255, 0),
                    thickness=1)
        cv2.putText(vid_frame,
                    captions[3], (int(cnm2.dims[0] * resize_fact) + 5,
                                  int(cnm2.dims[1] * resize_fact) + 20),
                    fontFace=5,
                    fontScale=0.8,
                    color=(0, 255, 0),
                    thickness=1)
        cv2.putText(vid_frame,
                    'Frame = ' + str(t),
                    (vid_frame.shape[1] // 2 - vid_frame.shape[1] // 10,
                     vid_frame.shape[0] - 20),
                    fontFace=5,
                    fontScale=0.8,
                    color=(0, 255, 255),
                    thickness=1)
        return vid_frame
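    # create_frame is invoked once per processed frame in the loop below; the
    # returned mosaic is shown with cv2.imshow and, if save_movie is set,
    # written to the output video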

    #%% Prepare object for OnACID

    cnm2 = deepcopy(cnm_init)

    # flag for saving the initialization object; useful if you want to test
    # OnACID with different parameters but the same initialization
    save_init = False
    if save_init:
        cnm_init.dview = None
        save_object(cnm_init, fls[0][:-4] + '_DS_' + str(ds_factor) + '.pkl')
        cnm_init = load_object(fls[0][:-4] + '_DS_' + str(ds_factor) + '.pkl')

    path_to_cnn_residual = os.path.join(caiman_datadir(), 'model',
                                        'cnn_model_online.h5')

    cnm2._prepare_object(np.asarray(Yr),
                         T1,
                         expected_comps,
                         idx_components=None,
                         min_num_trial=3,
                         max_num_added=3,
                         path_to_model=path_to_cnn_residual,
                         sniper_mode=False,
                         use_peak_max=False,
                         q=0.5)
    cnm2.thresh_CNN_noisy = 0.5
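    # _prepare_object sizes the internal buffers for up to T1 frames and
    # expected_comps components; min_num_trial / max_num_added bound how many
    # candidate components are tested and accepted per frame. With
    # sniper_mode=False the CNN is not used to screen new candidates, so
    # thresh_CNN_noisy should not come into play here.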

    #%% Run OnACID and optionally plot results in real time
    epochs = 1
    cnm2.Ab_epoch = []  # save the shapes at the end of each epoch
    t = cnm2.initbatch  # current timestep
    tottime = []
    Cn = Cn_init.copy()

    # flag for removing components with bad shapes
    remove_flag = False
    T_rm = 650  # remove bad components every T_rm frames
    rm_thr = 0.1  # CNN classifier removal threshold
    # flag for plotting contours of detected components at the end of each file
    plot_contours_flag = False
    # flag for showing the results video online (turn off to improve speed)
    play_reconstr = True
    # flag for saving the movie (the file can be quite large)
    save_movie = False
    movie_name = os.path.join(
        folder_name, 'sniper_meso_0.995_new.avi')  # name of movie to be saved
    resize_fact = 1.2  # image resizing factor

    if online_files == 0:  # no additional files beyond the initialization ones
        process_files = fls[:init_files]  # process only the initialization files
        init_batc_iter = [initbatch]  # frame at which to start reading each file
        end_batch = T1
    else:
        process_files = fls[:init_files + online_files]  # include the extra files
        # frame at which to start reading each file
        init_batc_iter = [initbatch] + [0] * online_files
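        # e.g. with init_files = 1 and online_files = 2 this gives
        #     process_files  = [fls[0], fls[1], fls[2]]
        #     init_batc_iter = [initbatch, 0, 0]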

    shifts = []
    show_residuals = True
    if show_residuals:
        caption = 'Mean Residual Buffer'
    else:
        caption = 'Identified Components'
    captions = ['Raw Data', 'Inferred Activity', caption, 'Denoised Data']
    if save_movie and play_reconstr:
        fourcc = cv2.VideoWriter_fourcc('8', 'B', 'P', 'S')
        #    fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(
            movie_name, fourcc, 30.0,
            tuple([int(2 * x * resize_fact) for x in cnm2.dims]))
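        # NOTE: availability of the '8BPS' codec depends on the local OpenCV /
        # ffmpeg build; the XVID fourcc above is a common fallback. The size
        # passed to VideoWriter is (width, height) and must match the mosaic
        # produced by create_frame.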

    for epoch in range(epochs):
        if epoch > 0:
            # after the first epoch, process all files from scratch
            process_files = fls[:init_files + online_files]
            init_batc_iter = [0] * (online_files + init_files)

        for file_count, ffll in enumerate(process_files):
            print('Now processing file ' + ffll)
            Y_ = cm.load(ffll,
                         subindices=slice(init_batc_iter[file_count], T1,
                                          None))

            # update max-correlation (and perform offline motion correction) just for illustration purposes
            if plot_contours_flag:
                if ds_factor > 1:
                    Y_1 = Y_.resize(1. / ds_factor, 1. / ds_factor, 1)
                else:
                    Y_1 = Y_.copy()
                if mot_corr:
                    templ = (cnm2.Ab.data[:cnm2.Ab.indptr[1]] *
                             cnm2.C_on[0, t - 1]).reshape(cnm2.dims,
                                                          order='F') * img_norm
                    newcn = (Y_1 - img_min).motion_correct(
                        max_shift, max_shift,
                        template=templ)[0].local_correlations(swap_dim=False)
                    Cn = np.maximum(Cn, newcn)
                else:
                    Cn = np.maximum(Cn, Y_1.local_correlations(swap_dim=False))

            old_comps = cnm2.N  # number of existing components
            for frame_count, frame in enumerate(Y_):  # now process each file
                if np.isnan(np.sum(frame)):
                    raise ValueError('Frame ' + str(frame_count) +
                                     ' contains NaN')
                if t % 100 == 0:
                    print(
                        'Epoch: ' + str(epoch + 1) + '. ' + str(t) +
                        ' frames have been processed in total. ' +
                        str(cnm2.N - old_comps) +
                        ' new components were added. Total number of components is '
                        + str(cnm2.Ab.shape[-1] - gnb))
                    old_comps = cnm2.N

                t1 = time()  # count time only for the processing part
                frame_ = frame.copy().astype(np.float32)
                if ds_factor > 1:
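                    # cv2.resize takes dsize as (width, height), the reverse
                    # of numpy's (rows, cols) convention, hence shape[::-1]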
                    frame_ = cv2.resize(frame_,
                                        img_norm.shape[::-1])  # downsampling

                frame_ -= img_min  # make data non-negative

                if mot_corr:  # motion correct
                    templ = cnm2.Ab.dot(cnm2.C_on[:cnm2.M, t - 1]).reshape(
                        cnm2.dims, order='F') * img_norm
                    frame_cor, shift = motion_correct_iteration_fast(
                        frame_, templ, max_shift, max_shift)
                    shifts.append(shift)
                else:
                    templ = None
                    frame_cor = frame_

                frame_cor = frame_cor / img_norm  # normalize data-frame
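                # fit_next expects the frame flattened in column-major
                # (Fortran) order, matching the order='F' layout of the
                # spatial components stored in cnm2.Ab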
                cnm2.fit_next(t, frame_cor.reshape(
                    -1, order='F'))  # run OnACID on this frame
                # store time
                tottime.append(time() - t1)

                t += 1

                if t % T_rm == 0 and remove_flag:
                    prd, _ = evaluate_components_CNN(cnm2.Ab[:, gnb:], dims,
                                                     gSig)
                    ind_rem = np.where(prd[:, 1] < rm_thr)[0].tolist()
                    print('Removing ' + str(len(ind_rem)) + ' components')
                    cnm2.remove_components(ind_rem)
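                    # note: the classifier returns per-component class
                    # probabilities; column 1 is taken as the probability of
                    # being a real component, so anything below rm_thr is cut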

                if t % 1000 == 0 and plot_contours_flag:
                    pl.cla()
                    A = cnm2.Ab[:, cnm2.gnb:]
                    # update the contour plot every 1000 frames
                    crd = cm.utils.visualization.plot_contours(A, Cn, thr=0.9)
                    pl.pause(1)

                if play_reconstr:  # generate movie with the results
                    vid_frame = create_frame(cnm2, img_norm, captions)
                    if save_movie:
                        out.write(vid_frame)
                        if t - initbatch < 100:
                            #for rp in np.int32(np.ceil(np.exp(-np.arange(1,100)/30)*20)):
                            for rp in range(len(cnm2.ind_new) * 2):
                                out.write(vid_frame)
                    cv2.imshow('frame', vid_frame)
                    if t - initbatch < 100:
                        for rp in range(len(cnm2.ind_new) * 2):
                            cv2.imshow('frame', vid_frame)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break

            print('Cumulative processing speed is %.2f frames per second.' %
                  ((t - initbatch) / np.sum(tottime)))
        # save the shapes at the end of each epoch
        cnm2.Ab_epoch.append(cnm2.Ab.copy())

    if save_movie:
        out.release()
    cv2.destroyAllWindows()

    #%%  save results (optional)
    save_results = False

    if save_results:
        np.savez('results_analysis_online_MOT_CORR.npz',
                 Cn=Cn,
                 Ab=cnm2.Ab,
                 Cf=cnm2.C_on,
                 b=cnm2.b,
                 f=cnm2.f,
                 dims=cnm2.dims,
                 tottime=tottime,
                 noisyC=cnm2.noisyC,
                 shifts=shifts)
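        # the archive can be inspected later with, e.g.
        #     with np.load('results_analysis_online_MOT_CORR.npz',
        #                  allow_pickle=True) as ld:
        #         print(list(ld.keys()))
        # (allow_pickle=True is needed because Ab is a scipy sparse matrix
        # stored as an object array)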

    #%% extract results from the objects and do some plotting
    A, b = cnm2.Ab[:, cnm2.gnb:], cnm2.Ab[:, :cnm2.gnb].toarray()
    C, f = cnm2.C_on[cnm2.gnb:cnm2.M,
                     t - t // epochs:t], cnm2.C_on[:cnm2.gnb,
                                                   t - t // epochs:t]
    noisyC = cnm2.noisyC[:, t - t // epochs:t]
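    # the slice t - t // epochs : t keeps the traces from the final pass over
    # the data (with epochs == 1 this is simply all t frames processed)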
    b_trace = [osi.b for osi in cnm2.OASISinstances] if hasattr(
        cnm2, 'OASISinstances') else [0] * C.shape[0]

    pl.figure()
    crd = cm.utils.visualization.plot_contours(A, Cn, thr=0.9)
    view_patches_bar(Yr,
                     scipy.sparse.coo_matrix(A.tocsc()[:, :]),
                     C[:, :],
                     b,
                     f,
                     dims[0],
                     dims[1],
                     YrA=noisyC[cnm2.gnb:cnm2.M] - C,
                     img=Cn)
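    # YrA here holds the residual traces: the difference between the raw
    # (noisyC) and denoised (C) activity of each component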