Example 1
#%% normalize to sum 1
dims = 512, 512

A = [csc_matrix(A1 / A1.sum(0)) for A1 in data]
masks = [
    np.reshape(A_.toarray(), dims + (-1, ), order='F').transpose(2, 0, 1)
    for A_ in A
]

#%% register components across multiple days

max_thr = 0.1

A_forw, assign_forw, match_forw = register_multisession(A,
                                                        dims,
                                                        Cns,
                                                        max_thr=max_thr)
A_back, assign_back, match_back = register_multisession(A[::-1],
                                                        dims,
                                                        Cns[::-1],
                                                        max_thr=max_thr)

#%% align all pairs separately
N = len(A)

pairs = list(itertools.combinations(range(N), 2))

pairs_matches = []

for pair in pairs:
    match_1, match_2, non_1, non_, perf_, _ = register_ROIs(A[pair[0]],
                                                            A[pair[1]],
                                                            dims,
                                                            template1=Cns[pair[0]],
                                                            template2=Cns[pair[1]],
                                                            plot_results=False,
                                                            max_thr=max_thr)

Example 2


def norm_nrg(a_):
    # normalized cumulative energy of a spatial footprint; its 0.95 level set
    # outlines the component
    a = a_.copy()
    dims = a.shape
    a = a.reshape(-1, order='F')
    indx = np.argsort(a, axis=None)[::-1]
    cumEn = np.cumsum(a.flatten()[indx]**2)
    cumEn /= cumEn[-1]
    a = np.zeros(np.prod(dims))
    a[indx] = cumEn
    return a.reshape(dims, order='F')

#%% normalize to sum 1 
dims = 512, 512

A = [csc_matrix(A1/A1.sum(0)) for A1 in data]
masks = [np.reshape(A_.toarray(), dims + (-1,),
         order='F').transpose(2, 0, 1) for A_ in A]

#%% register components across multiple days

max_thr = 0.1

A_forw, assign_forw, match_forw = register_multisession(A, dims, Cns, max_thr=max_thr)
A_back, assign_back, match_back = register_multisession(A[::-1], dims, Cns[::-1], max_thr=max_thr)
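# assign_forw has shape (n_union, n_sessions): entry [k, s] is the index of
# union component k within session s, or NaN where it was not detected
# (register_multisession's documented output). For example:
stable = np.where(~np.isnan(assign_forw).any(axis=1))[0]  # found in every session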

#%% align all pairs separately
N = len(A)

pairs = list(itertools.combinations(range(N), 2))

pairs_matches = []

for pair in pairs:
    match_1, match_2, non_1, non_, perf_, _ = register_ROIs(A[pair[0]], A[pair[1]], dims, 
                                                            template1=Cns[pair[0]], 
                                                            template2=Cns[pair[1]],
                                                            plot_results=False,
                                                            max_thr=max_thr)
Example 3
plt.rc('font', **font)
lp, hp = np.nanpercentile(CI[0], [5, 98])

for i in range(N):
    plt.subplot(3, 2, 1 + 2 * i)
    plt.imshow(CI[i], vmin=lp, vmax=hp, cmap='gray')
    [
        plt.contour(norm_nrg(mm), levels=[0.95], colors='y', linewidths=1)
        for mm in masks[i]
    ]
    plt.title('Session #' + str(i + 1))

# %% register components across multiple days

max_thr = 0.1
A_reg, assign, match = register_multisession(A, dims, CI, max_thr=max_thr)
masks_reg = np.reshape(A_reg, dims + (-1, ), order='F').transpose(2, 0, 1)
# %% first compare results from sessions 1 and 2 (Fig. 14b)
# If you just have two sessions you can use the register_ROIs function
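# e.g., a two-session sketch (register_ROIs returns the matched and
# non-matched index lists, a performance dict, and the aligned A2):
# m1, m2, nm1, nm2, perf, A2_reg = register_ROIs(A[0], A[1], dims,
#                                                template1=CI[0],
#                                                template2=CI[1],
#                                                plot_results=False)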

match_1 = extract_active_components(assign, [0], only=False)
match_2 = extract_active_components(assign, [1], only=False)
match_12 = extract_active_components(assign, [0, 1], only=False)

cl = ['y', 'g', 'r']
labels = ['Both Sessions', 'Session 1 (only)', 'Session 2 (only)']
plt.subplot(3, 2, 2)
plt.imshow(CI[N], vmin=lp, vmax=hp, cmap='gray')
plt.title('Session #1 and #2 union')
for c, comps in zip(cl, [match_12, match_1, match_2]):
    [
        plt.contour(norm_nrg(mm), levels=[0.95], colors=c, linewidths=1)
        for mm in masks_reg[comps]
    ]

Example 4
# Filter components
for session_i, session in enumerate(session_objs):
    logging.info('Filtering session from ' + session['exp_date'])
    session['cnm_obj_filtered'] = filter_components(session['cnm_obj'],
                                                    components_quality_params,
                                                    registration_params,
                                                    session['exp_date'])

# Do multisession registration
spatial_comps = [d['cnm_obj_filtered'].estimates.A for d in session_objs]
templates = [d['template'] for d in session_objs]

spatial_union, assignments, mappings = register_multisession(
    A=spatial_comps,
    dims=session_objs[0]['cnm_obj_filtered'].dims,
    templates=templates,
    thresh_cost=registration_params['thresh_cost'],
    max_dist=registration_params['max_dist'],
    max_thr=registration_params['max_thr'],
    use_opt_flow=True)

# Save updated cnm_obj and upload to drive
for sess_i, session in enumerate(session_objs):
    session_info = session['session_info']
    cnm_obj = session['cnm_obj_filtered']
    cnm_obj.estimates.registered_cell_ids = mappings[sess_i]
    os.makedirs(os.path.join(session['result_dir'], 'filtered'),
                exist_ok=True)
    cnm_obj_fpath = os.path.join(session['result_dir'], 'filtered',
                                 'analysis_results_filtered.hdf5')
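    # a plausible final step (assumption; not shown in the fragment), using
    # CaImAn's CNMF.save to write the filtered object:
    cnm_obj.save(cnm_obj_fpath)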
Example 5
        Cns.append(ld['Cn'])

#%% normalize to sum 1

A = [csc_matrix(A1 / A1.sum(0)) for A1 in data]

#%% register components across multiple days

from caiman.base.rois import register_multisession, extract_active_components, register_ROIs
import matplotlib.pyplot as plt
import matplotlib.lines as mlines

dims = 512, 512

A_union, assignments, matchings = register_multisession(A,
                                                        dims,
                                                        Cns,
                                                        max_thr=0)
#%% register backwards

A_back, assignments_back, matchings_back = register_multisession(A[::-1],
                                                                 dims,
                                                                 Cns[::-1],
                                                                 max_thr=0)

#%%
N = len(A)
trip_forw = extract_active_components(assignments, list(range(N)), only=False)
trip_back = extract_active_components(assignments_back,
                                      list(range(N)),
                                      only=True)
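
# Note: only=False keeps components active in at least the listed sessions,
# only=True in exactly those sessions; over the full range(N) the two
# selections coincide, so the two directions can be compared directly:
print(len(trip_forw), len(trip_back))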
Example 6

#%% normalize sum of each component to 1

dims = CI[0].shape

A = [csc_matrix(A1 / A1.sum(0)) for A1 in A]
masks = [
    np.reshape(A_.toarray(), dims + (-1, ), order='F').transpose(2, 0, 1)
    for A_ in A
]
#%% register components across multiple days

max_thr = 0.1

A_forw, assign_forw, match_forw = register_multisession(
    A, dims, CI, max_thr=max_thr)  # register forward
A_back, assign_back, match_back = register_multisession(
    A[::-1], dims, CI[::-1], max_thr=max_thr)  # register backwards

#%% read all pairings through the union
# for each pair of sessions find the components that were active in both
# sessions of the pair by reading the assignments to the union matrix.
# Repeat the process for the backwards procedure.

N = len(A)
pairs = list(itertools.combinations(range(N), 2))
pairs_forw = []
pairs_back = []
assign_back_rev = assign_back[:, ::-1]
for pair in pairs:
    match_f = extract_active_components(assign_forw, list(pair), only=False)
    match_b = extract_active_components(assign_back_rev, list(pair), only=False)
    pairs_forw.append(match_f)
    pairs_back.append(match_b)
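
# quick consistency check (sketch): the forward and backward runs should find
# (nearly) the same matches for every session pair
for (i, j), mf, mb in zip(pairs, pairs_forw, pairs_back):
    print(f'sessions {i + 1}-{j + 1}: {len(mf)} forward, {len(mb)} backward')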
Example 7
def run_registration(mouse, session, component_evaluation_v):
    """
    This is the main registering function. Is is supposed to be run after trial wise component evaluation.
    Registration takes over different contours of trial wise source extracted contours and do a matching between cells.
    It can use two different methods: Hungarian matching algorithm (RegisterMulti) (as implement in Giovannucci, et al.
    2019) or cell registration (CellReg)using centroids distance and spatial correlation (as implemented in Sheintuch, et al. 2017).
    Default method is registration with no modeling of distributions of centroids and spatial correlation.

    Args:
        component_evaluation_v: version of the previous step
        mouse: mouse's number
        session: which session you want to analyse
    """

    # Sort the dataframe correctly

    sql = "SELECT component_evaluation_main  FROM Analysis WHERE mouse = ? AND session=? AND component_evaluation_v=?"
    val = [mouse, session, component_evaluation_v]
    cursor.execute(sql, val)
    result = cursor.fetchall()
    file = []
    inter = []
    for x in result:
        inter += x
    for y in inter:
        file.append(y)

    trials_order = []
    for i in range(len(file)):
        sql = "SELECT is_rest FROM Analysis WHERE component_evaluation_main=?"
        val = [file[i]]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        is_rest = []
        trial = []
        for x in result:
            is_rest += x
        sql = "SELECT trial FROM Analysis WHERE component_evaluation_main=?"
        val = [file[i]]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        for x in result:
            trial += x

        for j in range(len(trial)):
            if is_rest[j] == 0:
                order = 2 * trial[j] - 1
            else:
                order = 2 * trial[j]

            sql1 = "UPDATE Analysis SET registration_trials_orders=? WHERE trial=? AND is_rest=? AND mouse = ? AND session=? AND component_evaluation_v=? AND component_evaluation_main=?"
            val1 = [
                order, trial[j], is_rest[j], mouse, session,
                component_evaluation_v, file[i]
            ]
            cursor.execute(sql1, val1)
            database.commit()
            trials_order.append(order)

        sql = "SELECT session_wise FROM Analysis WHERE component_evaluation_main=?"
        val = [file[i]]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        session_wise = []
        for x in result:
            session_wise += x
        if session_wise[1] == 'False':
            data_dir = os.environ[
                'DATA_DIR'] + 'data/interim/registration/trial_wise/'
        else:
            data_dir = os.environ[
                'DATA_DIR'] + 'data/interim/registration/session_wise/'

        # determine the file name
        sql = "SELECT mouse,session,trial,is_rest,decoding_v,cropping_v,motion_correction_v,alignment_v,equalization_v,source_extraction_v, component_evaluation_v,registration_v,input,home_path,decoding_main FROM Analysis WHERE component_evaluation_main=?"
        val = [file[i]]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        data = []
        inter = []
        for x in result:
            inter = x
        for y in inter:
            data.append(y)

        # Update the database

        if data[11] == 0:
            data[11] = 1
            file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}.{data[7]}.{data[8]}.{data[9]}.{data[10]}.{data[11]}"
            sql1 = "UPDATE Analysis SET registration_main=?,registration_v=? WHERE component_evaluation_main=? "
            val1 = [file_name, data[11], file[i]]
            cursor.execute(sql1, val1)

        else:
            data[11] += 1
            file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}.{data[7]}.{data[8]}.{data[9]}.{data[10]}.{data[11]}"
            sql2 = "UPDATE Analysis SET registration_main=?,registration_v=?  WHERE motion_correction_main=? "
            val2 = [file_name, data[11], file[i]]
            cursor.execute(sql2, val2)
            database.commit()
        database.commit()

        output_file_path = data_dir + f'{file_name}.pkl'

        # Take alignment data for the timeline of alignment

        sql = "SELECT alignment_timeline FROM Analysis WHERE mouse = ? AND session=? AND component_evaluation_v=?"
        val = [mouse, session, component_evaluation_v]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        alignment_timeline = []
        inter = []
        for x in result:
            inter += x
        for y in inter:
            alignment_timeline.append(y)

        # Multiple lists created to collect the relevant information for the registration and the creation of a unique
        # time-trace matrix (cnm.estimates.A and cnm.estimates.C), both taken after component evaluation

        A_list = []  # List for contour matrix on multiple trials
        FOV_size = []  # List for the cn filter dims (to verify they are always the same)
        A_number_components = []  # List with the total number of components extracted for each trial
        C_dims = []  # Dimension of C, to keep track of timeline
        C_list = []  # List with traces for each trial
        evaluated_trials = []
        typical_size = []

        if isinstance(file[i], str):
            component_evaluation_hdf5_file_path = os.environ['DATA_DIR'] + '/interim/component_evaluation/trial_wise/' + \
                                                  file[i]
            sql = "SELECT source_extraction_corr FROM Analysis WHERE component_evaluation_main=?"
            val = [file[i]]
            cursor.execute(sql, val)
            result = cursor.fetchall()
            source_extraction_corr = []
            for x in result:
                source_extraction_corr += x
            corr_path = source_extraction_corr[i]
            cnm = load_CNMF(component_evaluation_hdf5_file_path)
            cn_filter = np.load(corr_path)
            FOV_size.append(cn_filter.shape)

            A_number_components.append(cnm.estimates.idx_components.shape[0])
            A_list.append(cnm.estimates.A[:, cnm.estimates.idx_components])
            C_dims.append(cnm.estimates.C.shape)
            size = cnm.estimates.A[:, cnm.estimates.idx_components].sum(axis=0)
            sql = "SELECT trial FROM Analysis WHERE component_evaluation_main=?"
            val = [file[i]]
            cursor.execute(sql, val)
            result = cursor.fetchall()
            trial = []
            for x in result:
                trial += x
            sql = "SELECT is_rest FROM Analysis WHERE component_evaluation_main=?"
            val = [file[i]]
            cursor.execute(sql, val)
            result = cursor.fetchall()
            is_rest = []
            for x in result:
                is_rest += x
            evaluated_trials.append(
                (trial[i] - 1) * 2 +
                is_rest[i])  # Number that goes from 0 to 42
            for j in range(len(cnm.estimates.idx_components)):
                typical_size.append(size[0, j])
            sql = "SELECT normalization FROM Analysis WHERE component_evaluation_main=?"
            val = [file[i]]
            cursor.execute(sql, val)
            result = cursor.fetchall()
            normalization_v = []
            for x in result:
                normalization_v += x
            if normalization_v[i] == 'True':
                if cnm.estimates.bl is None:
                    raw_normed, cnm_normed, res_normed, s_normed, noise_levels = normalization.normalize_traces(
                        cnm.estimates.C,
                        cnm.estimates.YrA,
                        cnm.estimates.S,
                        1,
                        offset_method="denoised_floor")
                else:
                    raw_normed, cnm_normed, res_normed, s_normed, noise_levels = normalization.normalize_traces(
                        cnm.estimates.C - cnm.estimates.bl[:, np.newaxis],
                        cnm.estimates.YrA,
                        cnm.estimates.S,
                        1,
                        offset_method="denoised_floor")
                C_list.append(cnm_normed[cnm.estimates.idx_components, :])
            else:
                if cnm.estimates.bl is None:
                    C_list.append(
                        cnm.estimates.C[cnm.estimates.idx_components, :])
                else:
                    C_list.append(
                        cnm.estimates.C[cnm.estimates.idx_components, :] -
                        cnm.estimates.bl[cnm.estimates.idx_components,
                                         np.newaxis])

        # Open the timeline and create the new traces matrix C_matrix
        with open(alignment_timeline[i], 'rb') as f:
            timeline = pickle.load(f)
        total_time1 = 0
        for i in range(len(C_list) - 1):
            total_time1 = total_time1 + C_list[i].shape[1]
        total_time2 = timeline[len(timeline) - 1][1] + C_list[np.argmax(
            evaluated_trials)].shape[1]
        total_time = max(total_time1, total_time2)
        timeline.append(['End', total_time])

        # Add a size restriction on the neurons that will be processed further. The
        # restriction boundaries are based on the histogram of typical neuronal sizes
        # (see the sketch below).
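        # sketch (assumption): inspect typical_size to justify the 10-25 pixel
        # bounds used below, e.g.:
        # plt.hist(typical_size, bins=50); plt.xlabel('footprint size (pixels)')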
        new_A_list = []
        new_C_list = []
        A_components = []
        C_dims_new = []
        new_evaluated_trials = []
        for i in range(len(A_list)):
            accepted_size = []
            size = A_list[i].sum(axis=0)
            for j in range(size.shape[1]):
                if 10 < size[0, j] < 25:
                    accepted_size.append(j)
            if len(accepted_size) > 1:
                new_A_list.append(A_list[i][:, accepted_size])
                new_C_list.append(C_list[i][accepted_size, :])
                A_components.append(A_number_components[i])
                C_dims_new.append(new_C_list[-1].shape)
                new_evaluated_trials.append(evaluated_trials[i])
        A_list = new_A_list
        C_list = new_C_list

        # Run the CaImAn registration routine that uses the Hungarian matching algorithm on the contour list
        sql = "SELECT cost_threshold FROM Analysis WHERE mouse = ? AND session=? AND component_evaluation_v=?"
        val = [mouse, session, component_evaluation_v]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        for x in result:
            cost_threshold = x
        sql = "SELECT max_dist FROM Analysis WHERE mouse = ? AND session=? AND component_evaluation_v=?"
        val = [mouse, session, component_evaluation_v]
        cursor.execute(sql, val)
        result = cursor.fetchall()
        for x in result:
            max_dist = x
        spatial_union, assignments, match = register_multisession(
            A=A_list,
            dims=FOV_size[0],
            thresh_cost=cost_threshold,
            max_dist=max_dist)

        C_matrix = np.zeros((spatial_union.shape[1], total_time))

        new_assignments = np.zeros((spatial_union.shape[1], len(timeline)))
        for i in range(spatial_union.shape[1]):
            for j in range(assignments.shape[1]):
                trial = new_evaluated_trials[j]
                if not math.isnan(assignments[i, j]):
                    new_assignments[i][trial] = assignments[i, j] + 1
        for i in range(spatial_union.shape[1]):
            for j in range(assignments.shape[1]):
                trial = new_evaluated_trials[j]
                if not math.isnan(assignments[i, j]):
                    C_matrix[i][timeline[trial][1]:timeline[trial][1] + (
                        C_list[j])[int(assignments[i, j]), :].shape[0]] = (
                            C_list[j])[int(assignments[i, j]), :]

        cnm_registration = estimates(A=spatial_union, C=C_matrix)
        with open(output_file_path, 'wb') as output_file:
            pickle.dump(cnm_registration, output_file, pickle.HIGHEST_PROTOCOL)

    return
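
# A minimal invocation sketch (argument values are hypothetical; the module's
# `cursor` and `database` globals must already point at the Analysis database):
# run_registration(mouse=56165, session=1, component_evaluation_v=1)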

Example 8

    cnm = load_CNMF(input_hdf5_file_path)
    corr_path = source_extraction_output['meta']['corr']['main']
    cn_filter = np.load(db.get_file(corr_path))
    A_dims.append(cn_filter.shape)
    A_pixel.append(cnm.estimates.A.shape[0])
    A_components.append(cnm.estimates.A.shape[1])
    A_list.append(cnm.estimates.A)

new_dims = A_dims[np.argmin(A_pixel)]
new_pixel = min(A_pixel)
new_A_list = []
for i in range(0, 5):
    current_list = A_list[i]
    new_A_list.append(current_list[:new_pixel])

spatial_union, assignments, match = register_multisession(A=new_A_list,
                                                          dims=new_dims)

figure, axes = plt.subplots(figsize=(15, 15))
coordinates = cm.utils.visualization.get_contours(spatial_union, new_dims, 0.2,
                                                  'max')

coordinates5 = coordinates[146:150]

for i in range(5):
    axes.imshow(cn_filter[:new_dims[0], :new_dims[1]])
    for c in coordinates5:
        v = c['coordinates']
        c['bbox'] = [
            np.floor(np.nanmin(v[:, 1])),
            np.ceil(np.nanmax(v[:, 1])),
            np.floor(np.nanmin(v[:, 0])),
            np.ceil(np.nanmax(v[:, 0]))
        ]

Example 9
import glob
import imageio
import pickle
import matplotlib.pyplot as plt
from caiman.base.rois import register_multisession

#%% load pickled file (spatial components + mean intensity file)

with open(
        '/Users/hheiser/Desktop/testing data/chronic_test/Sample data/alignment.pickle',
        'rb') as infile:
    data = pickle.load(infile)

spatial = data[0]
templates = data[1]

dims = templates[0].shape

spatial_union, assignments, matchings = register_multisession(
    spatial, dims=dims, templates=templates)

n_sess = len(spatial)
fig, ax = plt.subplots(nrows=1, ncols=2)
plt.sca(ax[0])

fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 2, 1)
ax2 = plt.subplot(1, 2, 2)
#ax3 = plt.subplot(1, 3, 3, sharex=ax1, sharey=ax1)

ax1.imshow(shift_map_y, cmap='viridis')
# ax1.set_xlim(200, 300)
# ax1.set_ylim(200, 300)
# ax1.set_axis_off()
ax1.set_title('Reference image')
Example 10
def run_registration(input_file):
    """
    This is the main registering function. Is is supposed to be run after trial wise component evaluation.
    Registration takes over different contours of trial wise source extracted contours and do a matching between cells.
    It can use two different methods: Hungarian matching algorithm (RegisterMulti) (as implement in Giovannucci, et al.
    2019) or cell registration (CellReg)using centroids distance and spatial correlation (as implemented in Sheintuch, et al. 2017).
    Default method is registration with no modeling of distributions of centroids and spatial correlation.

    """
    sql = "SELECT mouse,session,trial,is_rest,decoding_v,cropping_v,motion_correction_v,alignment_v,source_extraction_v,equalization_v,component_evaluation_v,registration_v FROM Analysis WHERE component_evaluation_main=?"
    val = [
        input_file,
    ]
    cursor.execute(sql, val)
    result = cursor.fetchall()
    data = []
    inter = []
    for x in result:
        inter = x
    for y in inter:
        data.append(y)

    # Update the database

    if data[11] == 0:
        data[11] = 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}.{data[7]}.{data[9]}.{data[8]}.{data[10]}.{data[11]}"
        sql1 = "UPDATE Analysis SET registration_main=?,registration_v=? WHERE component_evaluation_main=? "
        val1 = [file_name, data[11], input_file]
        cursor.execute(sql1, val1)

    else:
        data[11] += 1
        file_name = f"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}.{data[7]}.{data[9]}.{data[8]}.{data[10]}.{data[11]}"
        sql2 = "UPDATE Analysis SET registration_main=?,registration_v=? WHERE component_evaluation_main=? "
        val2 = [file_name, data[11], input_file]
        cursor.execute(sql2, val2)
        database.commit()

    database.commit()

    if not parameters['session_wise']:
        data_dir = os.environ[
            'DATA_DIR'] + 'data/interim/registration/trial_wise/main/'
    else:
        data_dir = os.environ[
            'DATA_DIR'] + 'data/interim/registration/session_wise/main/'

    file_name = db.create_file_name(step_index, row_new.name)
    output_file_path = data_dir + f'{file_name}.pkl'

    ##create the dictionary with metadata information
    output = {
        'main': output_file_path,
        'meta': {
            'analysis': {
                'analyst': os.environ['ANALYST'],
                'date': datetime.datetime.today().strftime("%m-%d-%Y"),
                'time': datetime.datetime.today().strftime("%H:%M:%S")
            },
            'duration': {}
        }
    }

    ## take alignment data for the timeline of alignment
    first_row = df.iloc[0]
    alignment_output = eval(first_row['alignment_output'])
    alignment_timeline_file = alignment_output['meta']['timeline']

    ## multiple lists created to collect the relevant information for the registration and the creation of a unique
    ## time-trace matrix (cnm.estimates.A and cnm.estimates.C), both taken after component evaluation
    A_list = []  ## list for contour matrix on multiple trials
    #A_size = []  ## list for the size of A (just to verify it is always the same size)
    FOV_size = []  ## list for the cn filter dims (to verify they are always the same)
    A_number_components = []  ## list with the total number of components extracted for each trial
    C_dims = []  ## dimension of C, to keep track of timeline
    C_list = []  ## list with traces for each trial
    evaluated_trials = []
    evaluated_session = []
    typical_size = []
    for i in range(len(df)):
        row = df.iloc[i]
        component_evaluation_hdf5_file_path = eval(
            row['component_evaluation_output'])['main']
        corr_path = eval(
            row['source_extraction_output'])['meta']['corr']['main']
        cnm = load_CNMF(component_evaluation_hdf5_file_path)
        cn_filter = np.load(db.get_file(corr_path))

        FOV_size.append(cn_filter.shape)
        #A_size.append(cnm.estimates.A.shape[0])
        A_number_components.append(cnm.estimates.idx_components.shape[0])
        A_list.append(cnm.estimates.A[:, cnm.estimates.idx_components])
        C_dims.append(cnm.estimates.C.shape)
        size = cnm.estimates.A[:, cnm.estimates.idx_components].sum(axis=0)
        for j in range(len(cnm.estimates.idx_components)):
            typical_size.append(size[0, j])
        if cnm.estimates.bl is None:
            C_list.append(cnm.estimates.C[cnm.estimates.idx_components, :])
        else:
            C_list.append(cnm.estimates.C[cnm.estimates.idx_components, :] -
                          cnm.estimates.bl[cnm.estimates.idx_components,
                                           np.newaxis])
        evaluated_trials.append(
            (df.iloc[i].name[2] - 1) * 2 +
            df.iloc[i].name[3])  ## number that goes from 0 to 42
        evaluated_session.append(df.iloc[i].name[1])

    ## add a size restriction on the neurons that will be processed further. The
    # restriction boundaries are based on the histogram of typical neuronal sizes
    min_size = parameters['min_cell_size']
    max_size = parameters['max_cell_size']
    new_A_list = []
    new_C_list = []
    A_components = []
    C_dims_new = []
    new_evaluated_trials = []
    new_evaluated_session = []
    for i in range(len(A_list)):
        accepted_size = []
        size = A_list[i].sum(axis=0)
        for j in range(size.shape[1]):
            if min_size < size[0, j] < max_size:
                accepted_size.append(j)
        if len(accepted_size) > 1:
            new_A_list.append(A_list[i][:, accepted_size])
            new_C_list.append(C_list[i][accepted_size, :])
            A_components.append(A_number_components[i])
            C_dims_new.append(new_C_list[-1].shape)
            new_evaluated_trials.append(evaluated_trials[i])
            new_evaluated_session.append(evaluated_session[i])
    A_list = new_A_list
    C_list = new_C_list

    ## run the CaImAn registration routine that uses the Hungarian matching algorithm on the contour list
    spatial_union, assignments, match = register_multisession(
        A=A_list,
        dims=FOV_size[0],
        thresh_cost=parameters['cost_threshold'],
        max_dist=parameters['max_dist'])

    ## open the timeline and create the new traces matrix C_matrix
    with open(alignment_timeline_file, 'rb') as f:
        timeline = pickle.load(f)
    total_time = timeline[len(timeline) - 1][1] + C_list[len(C_list) -
                                                         1].shape[1]
    timeline.append(['End', total_time])
    C_matrix = np.zeros((spatial_union.shape[1], total_time))

    new_assignments = np.zeros((spatial_union.shape[1], len(timeline)))
    for i in range(spatial_union.shape[1]):
        for j in range(assignments.shape[1]):
            trial = new_evaluated_trials[j]
            if not math.isnan(assignments[i, j]):
                new_assignments[i][trial] = assignments[i, j] + 1

    unique_session = []
    for x in evaluated_session:
        if x not in unique_session:
            unique_session.append(x)
    session_vector = np.arange(0, len(unique_session))
    final_evaluated_session = []
    for i in range(assignments.shape[1]):
        for j in range(len(unique_session)):
            if new_evaluated_session[i] == unique_session[j]:
                final_evaluated_session.append(session_vector[j])

    for i in range(spatial_union.shape[1]):
        for j in range(assignments.shape[1]):
            trial = (final_evaluated_session[j] + 1) * new_evaluated_trials[j]
            print(trial)
            if not math.isnan(assignments[i, j]):
                C_matrix[i][timeline[trial][1]:timeline[trial][1] +
                            C_dims_new[j][1]] = (
                                C_list[j])[int(assignments[i, j]), :]

    cnm_registration = estimates(A=spatial_union, C=C_matrix)
    with open(output_file_path, 'wb') as output_file:
        pickle.dump(cnm_registration, output_file, pickle.HIGHEST_PROTOCOL)

    return
Example 11
        ## add a size restriction on the neurons that will be processed further. This restriction boundary
        # decision is based on the histogram of typical neuronal sizes
        new_A_list = []
        new_C_list = []
        for i in range(len(A_list)):
            accepted_size = []
            size = A_list[i].sum(axis=0)
            for j in range(size.shape[1]):
                if size[0, j] > 15 and size[0, j] < 25:
                    accepted_size.append(j)
            new_A_list.append(A_list[i][:, accepted_size])
            new_C_list.append(C_list[i][accepted_size, :])
        A_list = new_A_list
        C_list = new_C_list
        spatial_union, assignments, match = register_multisession(
            A=A_list, dims=FOV_size[0], thresh_cost=0.9, max_dist=15)

        A.append(spatial_union)
        C.append(C_list)
        registration.append(assignments)

    time = 0
    timeline = [0]
    for i in range(42):
        time = time + C[0][i].shape[1]
        timeline.append(timeline[i] + C[0][i].shape[1])

    nneurons = 0
    for i in range(len(C)):
        nneurons = nneurons + A[i].shape[1]

Example 12
A1 = csc_matrix(A1 / A1.sum(0))
A2 = csc_matrix(A2 / A2.sum(0))
A3 = csc_matrix(A3 / A3.sum(0))

#%% match consecutive pairs

from caiman.base.rois import register_ROIs, register_multisession
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import numpy as np


#%% match using multisession function
dims = 512, 512

A_union, assignments, matchings = register_multisession([A1, A2, A3], dims, Cns)
#%%
triplets = assignments[np.where(np.isnan(assignments).sum(1) == 0)].astype(int)
# extract triplets

matches_13 = assignments[np.intersect1d(
    np.where(np.isnan(assignments).sum(1) == 1),
    np.where(np.isnan(assignments[:, 1])))].astype(int)
# example of how to extract components that are active only on days 1 and 3

matches_12 = assignments[np.intersect1d(
    np.where(np.isnan(assignments).sum(1) == 1),
    np.where(np.isnan(assignments[:, 2])))].astype(int)

matches_23 = assignments[np.intersect1d(
    np.where(np.isnan(assignments).sum(1) == 1),
    np.where(np.isnan(assignments[:, 0])))].astype(int)
Example 13
#%% normalize matrices
A1 = csc_matrix(A1 / A1.sum(0))
A2 = csc_matrix(A2 / A2.sum(0))
A3 = csc_matrix(A3 / A3.sum(0))

#%% match consecutive pairs

from caiman.base.rois import register_ROIs, register_multisession
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import numpy as np

#%% match using multisession function
dims = 512, 512

A_union, assignments, matchings = register_multisession([A1, A2, A3], dims,
                                                        Cns)
#%%
triplets = assignments[np.where(np.isnan(assignments).sum(1) == 0)].astype(int)
# extract triplets

matches_13 = assignments[np.intersect1d(
    np.where(np.isnan(assignments).sum(1) == 1),
    np.where(np.isnan(assignments[:, 1])))].astype(int)
# example of how to extract components that are active only on days 1 and 3

matches_12 = assignments[np.intersect1d(
    np.where(np.isnan(assignments).sum(1) == 1),
    np.where(np.isnan(assignments[:, 2])))].astype(int)

matches_23 = assignments[np.intersect1d(
    np.where(np.isnan(assignments).sum(1) == 1),
    np.where(np.isnan(assignments[:, 0])))].astype(int)
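
# the same groups can be obtained by index with a helper used in the other
# examples (sketch):
# from caiman.base.rois import extract_active_components
# idx_12 = extract_active_components(assignments, [0, 1], only=True)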
Example 14
        rigid_template = np.load(rigid_template_fpath)
        #templates.append(rigid_template)
        # Create a template using spatial footprints of the cells
        # Apply a threshold masks on spatial images
        A1 = np.stack(
            [a * (a > max_thr * a.max()) for a in spatial[-1].toarray().T]).T
        # Calculate mean spatial footprint over all cells
        footprint_template = A1.mean(axis=1).reshape(
            cnm_obj.dims[::-1]).transpose()
        templates.append(footprint_template)
        dims = cnm_obj.dims

spatial_union, assignments, mappings = register_multisession(
    A=spatial,
    dims=dims,
    templates=templates,
    thresh_cost=thresh_cost,
    max_dist=max_dist,
    max_thr=max_thr)

# Merge traces
# traces = np.zeros(assignments.shape, dtype=np.ndarray)
# for i in range(traces.shape[0]):
#     for j in range(traces.shape[1]):
#         if np.isnan(assignments[i,j]):
#             traces[i, j] = [np.nan] * cnm_list[j].estimates.C.shape[1]
#         else:
#             traces[i,j] = cnm_list[j].estimates.C[int(assignments[i,j])]


def my_roi_image(A1, A2, dims, matched_ROIs1, matched_ROIs2, non_matched1,
                 non_matched2):
    ...
Example 15
    target_mouse = 'DG_200701_a'
    # define the search query
    search_query = 'slug:' + target_mouse
    # # define the origin model
    # ori_type = 'preprocessing'
    # # get a dictionary with the search terms
    # dict_path = fd.parse_search_string(search_query)

    path_info = bd.query_database('video_experiment', search_query)

    # # get the info and paths
    # path_info, paths_all, parsed_query, date_list, animal_list = \
    #     fd.fetch_preprocessing(search_query + ', =analysis_type:' + ori_type)
    # # get the raw output_path
    # dict_path['analysis_type'] = analysis_type
    raw_path = os.path.join(paths.analysis_path,
                            'cellMatch_' + search_query + '.hdf5')

    # paths_all = os.path.join(paths.analysis_path, '')

# load the data for the matching
# for all the files
for files in path_info:
    current_file = cnmf.online_cnmf.load_OnlineCNMF(files['fluo_path'])
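    # the lists consumed below are not built in this fragment; a plausible
    # per-file completion (assumption: footprints, FOV sizes and templates come
    # from each loaded estimates object):
    # footprint_list.append(current_file.estimates.A)
    # size_list.append(current_file.estimates.dims)
    # template_list.append(current_file.estimates.Cn)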

# run the matching software
spatial_union, assignments, matchings = register_multisession(
    A=footprint_list, dims=size_list[0], templates=template_list)
# save the matching results
# create the appropriate bondjango entry