Example #1
def plot_contours_evaluated(row = None, session_wise = False):
    '''
    Plots the contours of all cells, of the accepted cells and of the rejected cells, and saves the figure in
    figure_path = '/data/interim/component_evaluation/trial_wise/meta/figures/contours/'
    :param row: one analysis state row
    :param session_wise: if True, the figure is saved in the session_wise directory instead
    '''
    index = row.name

    corr_min = round(eval(row['source_extraction_parameters'])['min_corr'],1)
    pnr_min = round(eval(row['source_extraction_parameters'])['min_pnr'],1)
    r_min = eval(row['component_evaluation_parameters'])['rval_thr']
    snr_min = eval(row['component_evaluation_parameters'])['min_SNR']

    output_source_extraction = eval(row.loc['source_extraction_output'])
    corr_path = output_source_extraction['meta']['corr']['main']
    cn_filter = np.load(db.get_file(corr_path))

    output_component_evaluation =  eval(row.loc['component_evaluation_output'])
    cnm_file_path = output_component_evaluation['main']
    cnm = load_CNMF(db.get_file(cnm_file_path))
    figure, axes = plt.subplots(1, 3)
    axes[0].imshow(cn_filter)
    axes[1].imshow(cn_filter)
    axes[2].imshow(cn_filter)

    coordinates = cm.utils.visualization.get_contours(cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
    for c in coordinates:
        v = c['coordinates']
        c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
                     np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
        axes[0].plot(*v.T, c='w')
    axes[0].set_title('All components')
    axes[0].set_ylabel(f'Corr = {corr_min}, PNR = {pnr_min}, PCC = {r_min}, SNR = {snr_min}')

    idx = cnm.estimates.idx_components
    coordinates = cm.utils.visualization.get_contours(cnm.estimates.A[:,idx], np.shape(cn_filter), 0.2, 'max')

    for c in coordinates:
        v = c['coordinates']
        c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
                     np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
        axes[1].plot(*v.T, c='b')
    axes[1].set_title('Accepted components')

    idx_b = cnm.estimates.idx_components_bad
    coordinates_b = cm.utils.visualization.get_contours(cnm.estimates.A[:,idx_b], np.shape(cn_filter), 0.2, 'max')

    for c in coordinates_b:
        v = c['coordinates']
        c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
                     np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
        axes[2].plot(*v.T, c='r')
    axes[2].set_title('Rejected components')

    figure_path = '/home/sebastian/Documents/Melisa/calcium_imaging_analysis/data/interim/component_evaluation/trial_wise/meta/figures/contours/'
    if session_wise:
        figure_path = '/home/sebastian/Documents/Melisa/calcium_imaging_analysis/data/interim/component_evaluation/session_wise/meta/figures/contours/'
    figure_name = figure_path + db.create_file_name(5,index) + '.png'
    figure.savefig(figure_name)
    return figure
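
# Usage sketch: how plot_contours_evaluated might be called for one evaluated analysis state.
# The mouse id and version numbers below are placeholders, and the db helpers are assumed to be
# the same ones used throughout this module (see the db.select calls elsewhere in this file).
states_df = db.open_analysis_states_database()
selected = db.select(states_df, 'component_evaluation', mouse=56165,
                     cropping_v=1, motion_correction_v=1, source_extraction_v=1)
if len(selected) > 0:
    plot_contours_evaluated(row=selected.iloc[0], session_wise=False)
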
Example #2
def plot_crispness_for_parameters(selected_rows = None):
    '''
    Plots crispness for the motion correction states of all the selected rows, to compare crispness results across them.
    :param selected_rows: analysis states for which crispness is to be plotted
    :return: figure that is also saved
    '''
    crispness_mean_original,crispness_corr_original, crispness_mean, crispness_corr = metrics.compare_crispness(selected_rows)
    total_states_number = len(selected_rows)

    fig, axes = plt.subplots(1,2)
    axes[0].set_title('Summary image = Mean')
    axes[0].plot(np.arange(1,total_states_number,1),crispness_mean_original)
    axes[0].plot(np.arange(1,total_states_number,1),crispness_mean)
    axes[0].legend(('Original', 'Motion_corrected'))
    axes[0].set_ylabel('Crispness')
    #axes[0].set_xlabel('#')

    axes[1].set_title('Summary image = Corr')
    axes[1].plot(np.arange(1,total_states_number,1),crispness_corr_original)
    axes[1].plot(np.arange(1,total_states_number,1),crispness_corr)
    axes[1].legend(('Original', 'Motion_corrected'))
    axes[1].set_ylabel('Crispness')
    #axes[0].set_xlabel('#')

    # Get output file paths
    index = selected_rows.iloc[0].name
    data_dir = 'data/interim/motion_correction/'
    step_index = db.get_step_index('motion_correction')
    file_name = db.create_file_name(step_index, index)
    output_meta_crispness = data_dir + f'meta/figures/crispness/{file_name}.png'

    fig.savefig(output_meta_crispness)
    return fig
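
# Usage sketch: comparing crispness across the motion-correction states of one mouse. The
# selection below is illustrative; it assumes db.select accepts 'motion_correction' as a step
# name and that metrics.compare_crispness is importable as in the function above.
states_df = db.open_analysis_states_database()
mc_rows = db.select(states_df, 'motion_correction', mouse=56165, cropping_v=1)
if len(mc_rows) > 1:
    plot_crispness_for_parameters(mc_rows)
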
Example #3
def plot_source_extraction_result(mouse_row_new):

    '''
    Generates and saves a contour plot and a trace plot for the specific mouse_row
    '''
    corr_min = round(eval(mouse_row_new['source_extraction_parameters'])['min_corr'], 1)
    pnr_min = round(eval(mouse_row_new['source_extraction_parameters'])['min_pnr'], 1)

    output_source_extraction = eval(mouse_row_new.loc['source_extraction_output'])
    corr_path = output_source_extraction['meta']['corr']['main']
    cn_filter = np.load(db.get_file(corr_path))

    cnm_file_path = output_source_extraction['main']
    cnm = load_CNMF(db.get_file(cnm_file_path))

    figure, axes = plt.subplots(1)
    axes.imshow(cn_filter)
    coordinates = cm.utils.visualization.get_contours(cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
    for c in coordinates:
        v = c['coordinates']
        c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
                     np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
        axes.plot(*v.T, c='w')
    axes.set_title('min_corr = ' + f'{corr_min}')
    axes.set_ylabel('min_pnr = ' + f'{pnr_min}')

    fig_dir = 'data/interim/source_extraction/session_wise/meta/figures/contours/'
    file_name = db.create_file_name(3, mouse_row_new.name)
    figure.savefig(fig_dir + file_name + '.png')

    fig, ax = plt.subplots(1)
    C = cnm.estimates.C
    C[0] += C[0].min()
    for i in range(1, len(C)):
        C[i] += C[i].min() + C[:i].max()
        ax.plot(C[i])
    ax.set_xlabel('t [frames]')
    ax.set_yticks([])
    ax.set_ylabel('activity')
    fig.set_size_inches([10., .3 * len(C)])

    fig_dir = 'data/interim/source_extraction/session_wise/meta/figures/traces/'
    fig_name = fig_dir + db.create_file_name(3, mouse_row_new.name) + '.png'
    fig.savefig(fig_name)

    return
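
# Standalone sketch of the trace-stacking trick used above: each trace is shifted so that it sits
# above the running maximum of the already-plotted (already shifted) traces, which yields a
# raster-like plot without overlap. Synthetic non-negative data is used here so the example runs
# on its own; in the function above, cnm.estimates.C plays the role of C.
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
C_demo = np.abs(rng.normal(size=(5, 500))).cumsum(axis=1)  # five fake activity traces

fig_demo, ax_demo = plt.subplots(1)
C_demo[0] += C_demo[0].min()
ax_demo.plot(C_demo[0])
for i in range(1, len(C_demo)):
    C_demo[i] += C_demo[i].min() + C_demo[:i].max()  # offset above everything already plotted
    ax_demo.plot(C_demo[i])
ax_demo.set_xlabel('t [frames]')
ax_demo.set_yticks([])
ax_demo.set_ylabel('activity')
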
Example #4
def plot_traces_multiple_evaluated(row = None, session_wise = False):
    '''
    Plots the calcium traces of one evaluated analysis state, coloring accepted components in blue and rejected components in red.
    :param row: one analysis state row
    :param session_wise: if True, the figure is saved in the session_wise directory instead
    :return: None (the figure is saved to disk)
    '''

    corr_min = round(eval(row['source_extraction_parameters'])['min_corr'],1)
    pnr_min = round(eval(row['source_extraction_parameters'])['min_pnr'],1)
    r_min = eval(row['component_evaluation_parameters'])['rval_thr']
    snr_min = eval(row['component_evaluation_parameters'])['min_SNR']

    output_source_extraction = eval(row.loc['source_extraction_output'])
    corr_path = output_source_extraction['meta']['corr']['main']
    cn_filter = np.load(db.get_file(corr_path))
    cnm_file_path = output_source_extraction['main']
    cnm = load_CNMF(db.get_file(cnm_file_path))
    C = cnm.estimates.C

    output_component_evaluation =  eval(row.loc['component_evaluation_output'])
    cnm_file_path = output_component_evaluation['main']
    cnm_eval = load_CNMF(db.get_file(cnm_file_path))
    idx = cnm_eval.estimates.idx_components
    idx_b = cnm_eval.estimates.idx_components_bad

    fig, ax = plt.subplots(1)
    C[0] += C[0].min()
    for i in range(1, len(C)):
        C[i] += C[i].min() + C[:i].max()
        if i in idx_b:
            color = 'red'
        else:
            color = 'blue'
        ax.plot(C[i],color = color)
    ax.set_xlabel('t [frames]')
    ax.set_yticks([])
    ax.set_ylabel('activity')
    ax.set_title(f'Corr = {corr_min}, PNR = {pnr_min}, PCC = {r_min}, SNR = {snr_min}')

    fig.set_size_inches([10., .3 * len(C)])

    fig_dir = 'data/interim/component_evaluation/trial_wise/meta/figures/traces/'
    if session_wise:
        fig_dir = 'data/interim/component_evaluation/session_wise/meta/figures/traces/'
    fig_name = fig_dir + db.create_file_name(5,row.name) + '.png'
    fig.savefig(fig_name)

    return
Example #5
def plot_source_extraction_result_specific_cell(mouse_row_new, cell_number):

    '''
    (Still needs to be finished.)
    The first subplot shows the correlation image and the contours of the selected neurons.
    The second subplot shows the traces of the selected neurons.
    :param mouse_row_new: data base row
    :param cell_number: array with the indices of the cells selected to be plotted
    :return: None
    '''
    corr_min = round(eval(mouse_row_new['source_extraction_parameters'])['min_corr'], 1)
    pnr_min = round(eval(mouse_row_new['source_extraction_parameters'])['min_pnr'], 1)

    output_source_extraction = eval(mouse_row_new.loc['source_extraction_output'])
    corr_path = output_source_extraction['meta']['corr']['main']
    cn_filter = np.load(db.get_file(corr_path))

    cnm_file_path = output_source_extraction['main']
    cnm = load_CNMF(db.get_file(cnm_file_path))

    f, (a0, a1) = plt.subplots(2, 1, gridspec_kw={'height_ratios': [3, 1]})
    a0.imshow(cn_filter)
    coordinates = cm.utils.visualization.get_contours(cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
    for i in cell_number:
        v = coordinates[i]['coordinates']
        coordinates[i]['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
                     np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
        a0.plot(*v.T, c='w')
    a0.set_title('Contour Plot')

    fig, ax = plt.subplots(1)
    C = cnm.estimates.C
    C[0] += C[0].min()
    for i in cell_number:
        C[i] += C[i].min() + C[:i].max()
        a1.plot(C[i])
    a1.set_xlabel('t [frames]')
    a1.set_yticks([])
    a1.set_title('Calcium Traces')
    fig.set_size_inches([10., .3 * len(C)])

    fig_dir = 'data/interim/source_extraction/session_wise/meta/figures/'
    fig_name = fig_dir + db.create_file_name(3, mouse_row_new.name) + '_example.png'
    f.savefig(fig_name)

    return
Example #6
def plot_multiple_contours_session_wise(selected_rows, version = None , corr_array = None, pnr_array = None):
    '''
    Plots different versions of contour images that change the initialization parameters for source extraction.
    The idea is to see the impact of different seed selection in the final source extraction result.
    :param selected_rows: all the selected analysis states
    :param version: array containing the version numbers of source extraction that will be plotted
    :param corr_array: array of the same length as version and pnr_array containing the min_corr values for those versions
    :param pnr_array: array of the same length as version and corr_array containing the min_pnr values for those versions
    :return: figure
    '''

    states_df = db.open_analysis_states_database()

    figure, axes = plt.subplots(len(corr_array), len(pnr_array), figsize=(15, 15))

    color = ['w','b','r','m','c']
    for row in range(len(selected_rows)):
        mouse_row = selected_rows.iloc[row]
        index = mouse_row.name
        output = eval(mouse_row.loc['source_extraction_output'])
        corr_path = output['meta']['corr']['main']
        cn_filter = np.load(db.get_file(corr_path))
        for ii in range(corr_array.shape[0]):
            for jj in range(pnr_array.shape[0]):
                axes[ii, jj].imshow(cn_filter)
                new_row = db.select(states_df, 'component_evaluation', mouse=index[0], session=index[1],
                                    trial=index[2], is_rest=index[3], cropping_v=index[5], motion_correction_v=index[6],
                                    source_extraction_v=version[ii * len(pnr_array) + jj])
                new_row = new_row.iloc[0]
                output = eval(new_row.loc['source_extraction_output'])
                cnm_file_path = output['main']
                cnm = load_CNMF(db.get_file(cnm_file_path))
                coordinates = cm.utils.visualization.get_contours(cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
                for c in coordinates:
                    v = c['coordinates']
                    c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
                                 np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
                    axes[ii, jj].plot(*v.T, c = color[row])
                axes[ii, jj].set_title('min_corr = ' + f'{round(corr_array[ii],2)}')
                axes[ii, jj].set_ylabel('min_pnr = ' + f'{round(pnr_array[jj],2)}')


    fig_dir = 'data/interim/source_extraction/session_wise/meta/figures/contours/'
    fig_name = fig_dir + db.create_file_name(3, new_row.name)+'_corr_min' + f'{round(corr_array[0],1)}'+ '_pnr_min'+f'{round(pnr_array[0],1)}' + '_all.png'
    figure.savefig(fig_name)

    return figure
Example #7
def plot_multiple_contours(rows, version = None , corr_array = None, pnr_array = None,session_wise = False):
    '''
    Plots different versions of contour images that change the initialization parameters for source extraction.
    The idea is to see the impact of different seed selection in the final source extraction result.
    :param rows: analysis state rows, one per source extraction version
    :param version: array containing the version numbers of source extraction that will be plotted
    :param corr_array: array of the same length as version and pnr_array containing the min_corr values for those versions
    :param pnr_array: array of the same length as version and corr_array containing the min_pnr values for those versions
    :return: figure
    '''


    figure, axes = plt.subplots(len(corr_array), len(pnr_array), figsize=(15, 15))

    for ii in range(corr_array.shape[0]):
        for jj in range(pnr_array.shape[0]):
            version_number = ii *corr_array.shape[0] + jj + 1
            if version_number in version:
                new_row = rows.query('(source_extraction_v == ' + f'{version_number}' + ')')
                new_row = new_row.iloc[0]
                output = eval(new_row.loc['source_extraction_output'])
                cnm_file_path = output['main']
                cnm = load_CNMF(db.get_file(cnm_file_path))
                corr_path = output['meta']['corr']['main']
                cn_filter = np.load(db.get_file(corr_path))
                axes[ii, jj].imshow(cn_filter)
                coordinates = cm.utils.visualization.get_contours(cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
                for c in coordinates:
                    v = c['coordinates']
                    c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
                                 np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
                    axes[ii, jj].plot(*v.T, c='w')
                axes[ii, jj].set_title('min_corr = ' + f'{round(corr_array[ii],2)}')
                axes[ii, jj].set_ylabel('min_pnr = ' + f'{round(pnr_array[jj],2)}')

    fig_dir = 'data/interim/source_extraction/trial_wise/meta/figures/contours/'
    if session_wise:
        fig_dir = 'data/interim/source_extraction/session_wise/meta/figures/contours/'
    fig_name = fig_dir + db.create_file_name(3, new_row.name)+'_corr_min' + f'{round(corr_array[0],1)}'+ '_pnr_min'+f'{round(pnr_array[0],1)}' + '_.png'
    figure.savefig(fig_name)

    return figure
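
# Usage sketch: plotting a grid of contour images over a min_corr / min_pnr parameter sweep.
# The arrays below are placeholders; `rows` is assumed to contain one source extraction state per
# version number listed in `version`, selected with the same db helpers used elsewhere in this file.
import numpy as np

corr_array = np.linspace(0.4, 0.6, 3)
pnr_array = np.linspace(3, 7, 3)
version = np.arange(1, len(corr_array) * len(pnr_array) + 1)
states_df = db.open_analysis_states_database()
rows = db.select(states_df, 'source_extraction', mouse=56165,
                 cropping_v=1, motion_correction_v=1)
plot_multiple_contours(rows, version=version, corr_array=corr_array,
                       pnr_array=pnr_array, session_wise=False)
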
Example #8
def plot_session_contours(selected_rows, version = None , corr_array = None, pnr_array = None):
    '''
    Plots different versions of contour images that change the initialization parameters for source extraction.
    The idea is to see the impact of different seed selection in the final source extraction result.
    :param selected_rows: rows corresponding to different trials
    :param version: array containing the version numbers of source extraction that will be plotted
    :param corr_array: array of the same length as version and pnr_array containing the min_corr values for those versions
    :param pnr_array: array of the same length as version and corr_array containing the min_pnr values for those versions
    :return: (saves multiple figures)
    '''

    for ii in range(corr_array.shape[0]):
        for jj in range(pnr_array.shape[0]):
            figure, axes = plt.subplots(len(selected_rows) // 5, 5, figsize=(50, 10 * len(selected_rows) // 5))
            version_rows = selected_rows.query('(source_extraction_v == ' + f'{version[ii * len(pnr_array) + jj]}' + ')')
            for day in range(len(selected_rows) // 5):
                for trial in range(5):
                    new_row = version_rows.iloc[day*5+trial]
                    output = eval(new_row.loc['source_extraction_output'])
                    cnm_file_path = output['main']
                    cnm = load_CNMF(db.get_file(cnm_file_path))
                    corr_path = output['meta']['corr']['main']
                    cn_filter = np.load(db.get_file(corr_path))
                    #axes[i].imshow(np.clip(cn_filter, min_corr, max_corr), cmap='viridis')
                    axes[day,trial].imshow(cn_filter)
                    coordinates = cm.utils.visualization.get_contours(cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
                    for c in coordinates:
                        v = c['coordinates']
                        c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
                                     np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
                        axes[day,trial].plot(*v.T, c='w')
                    axes[day,trial].set_title('Trial = ' + f'{trial + 1}',fontsize=30)
                    axes[day,trial].set_xlabel('#cells = ' + f'{cnm.estimates.A.shape[1]}',fontsize=30)

            figure.suptitle('min_corr = ' + f'{round(corr_array[ii],2)}' + 'min_pnr = ' + f'{round(pnr_array[jj],2)}', fontsize=50)

            fig_dir = 'data/interim/source_extraction/session_wise/meta/figures/contours/'
            fig_name = fig_dir + db.create_file_name(3, new_row.name)+'_version_' + f'{version[ii*len(pnr_array)+jj]}'+'.png'
            figure.savefig(fig_name)

    return
Example #9
def plot_traces_multiple(rows, version = None , corr_array = None, pnr_array = None, session_wise = False):
    '''
    Plots the calcium traces for different source extraction versions, i.e. for different
    initialization parameters, to see the impact of seed selection on the extracted traces.
    :param rows: analysis state rows, one per source extraction version
    :param version: array containing the version numbers of source extraction that will be plotted
    :param corr_array: array of the same length as version and pnr_array containing the min_corr values for those versions
    :param pnr_array: array of the same length as version and corr_array containing the min_pnr values for those versions
    :param session_wise: bool that indicates where the figure is saved
    :return: None
    '''

    for ii in range(corr_array.shape[0]):
        for jj in range(pnr_array.shape[0]):
            fig, ax = plt.subplots(1)
            new_row = rows.query('(source_extraction_v == ' + f'{ii *corr_array.shape[0] + jj + 1}' + ')')
            new_row = new_row.iloc[0]

            output = eval(new_row.loc['source_extraction_output'])
            cnm_file_path = output['main']
            cnm = load_CNMF(db.get_file(cnm_file_path))
            C = cnm.estimates.C
            idx_components = cnm.estimates.idx_components
            C[0] += C[0].min()
            for i in range(1, len(C)):
                C[i] += C[i].min() + C[:i].max()
                ax.plot(C[i])
            ax.set_xlabel('t [frames]')
            ax.set_yticks([])
            ax.set_ylabel('activity')
            fig.set_size_inches([10., .3 * len(C)])


            fig_dir = 'data/interim/source_extraction/trial_wise/meta/figures/traces/'
            if session_wise:
                fig_dir = 'data/interim/source_extraction/session_wise/meta/figures/traces/'
            fig_name = fig_dir + db.create_file_name(3,new_row.name) + '_corr_min' + f'{round(corr_array[ii], 1)}' + '_pnr_min' + f'{round(pnr_array[jj], 1)}' + '_.png'
            fig.savefig(fig_name)

    return
def get_corr_pnr_path(index, gSig_abs=None):
    fname = db.create_file_name(2, index)
    os.chdir(os.environ['PROJECT_DIR'])
    corr_dir = 'data/interim/source_extraction/trial_wise/meta/corr'
    corr_path = None
    for path in os.listdir(corr_dir):
        if fname in path:
            if gSig_abs is None:
                corr_path = os.path.join(corr_dir, path)
            else:
                if path[-5] == str(gSig_abs):
                    corr_path = os.path.join(corr_dir, path)
    pnr_dir = 'data/interim/source_extraction/trial_wise/meta/pnr'
    pnr_path = None
    for path in os.listdir(pnr_dir):
        if fname in path:
            if gSig_abs is None:
                pnr_path = os.path.join(pnr_dir, path)
            else:
                if path[-5] == str(gSig_abs):
                    pnr_path = os.path.join(pnr_dir, path)

    return corr_path, pnr_path
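
# Usage sketch: look up the cached correlation / PNR summary images for one analysis state and
# load them. The index below is a placeholder tuple in the (mouse, session, trial, is_rest, ...)
# layout used by db.create_file_name elsewhere in this module.
import numpy as np

index = (56165, 1, 1, 0, 1, 1, 1, 0)
corr_path, pnr_path = get_corr_pnr_path(index, gSig_abs=5)
if corr_path is not None and pnr_path is not None:
    cn_filter = np.load(corr_path)
    pnr = np.load(pnr_path)
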
Example #11
def get_fig_gSig_filt_vals(row, gSig_filt_vals):
    '''
    Plot original cropped frame and several versions of spatial filtering for comparison
    :param row: analysis state row for which the filtering is computed
    :param gSig_filt_vals: array containing the sizes of the spatial filters that will be applied
    :return: figure
    '''
    output = row['cropping_output']
    cropped_file = eval(output)['main']
    m = cm.load(cropped_file)
    temp = cm.motion_correction.bin_median(m)
    N = len(gSig_filt_vals)
    fig, axes = plt.subplots(int(math.ceil((N + 1) / 2)), 2)
    axes[0, 0].imshow(temp, cmap='gray')
    axes[0, 0].set_title('unfiltered')
    axes[0, 0].axis('off')
    for i in range(0, N):
        gSig_filt = gSig_filt_vals[i]
        m_filt = [high_pass_filter_space(m_, (gSig_filt, gSig_filt)) for m_ in m]
        temp_filt = cm.motion_correction.bin_median(m_filt)
        axes.flatten()[i + 1].imshow(temp_filt, cmap='gray')
        axes.flatten()[i + 1].set_title(f'gSig_filt = {gSig_filt}')
        axes.flatten()[i + 1].axis('off')
    if N + 1 != axes.size:
        for i in range(N + 1, axes.size):
            axes.flatten()[i].axis('off')

    # Get output file paths
    index = row.name
    data_dir = 'data/interim/motion_correction/'
    step_index = db.get_step_index('motion_correction')
    file_name = db.create_file_name(step_index, index)
    output_meta_gSig_filt = data_dir + f'meta/figures/frame_gSig_filt/{file_name}.png'

    fig.savefig(output_meta_gSig_filt)

    return fig
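
# Usage sketch: compare a few spatial high-pass filter sizes on one cropped movie before choosing
# gSig_filt for motion correction. The selection is illustrative and assumes db.select accepts
# 'cropping' as a step name, as with the other steps used in this module.
gSig_filt_vals = [5, 7, 9, 11]
states_df = db.open_analysis_states_database()
cropped_rows = db.select(states_df, 'cropping', mouse=56165, cropping_v=1)
fig = get_fig_gSig_filt_vals(cropped_rows.iloc[0], gSig_filt_vals)
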
Example #12
def plot_temporal_evolution(row,session_wise = False):
    '''
    After decoding, this plots the temporal evolution of some pixel values in the ROI, the histogram of
    pixel values, and the ROI with the positions of the randomly selected pixels marked.
    If session_wise is False it uses the trial video, otherwise it uses the aligned version of the videos.
    If the alignment version == 2, then it uses the equalized version.

    '''
    if session_wise:
        output = row['alignment_output']
    else:
        output = row['decoding_output']

    decoded_file = eval(output)['main']
    if row.name[7] == 2:
        decoded_file = eval(output)['equalization']['main']
    movie_original = cm.load(decoded_file)

    figure = plt.figure(constrained_layout=True)
    gs = figure.add_gridspec(5, 6)

    figure_ax1 = figure.add_subplot(gs[0:2, 0:3])
    figure_ax1.set_title('ROI: ' + f"mouse_{row.name[0]}", fontsize = 15)
    figure_ax1.set_yticks([])
    figure_ax1.set_xticks([])

    figure_ax2 = figure.add_subplot(gs[2:5, 0:3])
    figure_ax2.set_xlabel('Time [s]', fontsize = 15)
    figure_ax2.set_ylabel('Pixel value', fontsize = 15)
    figure_ax2.set_title('Temporal Evolution', fontsize = 15)
    figure_ax2.set_ylim((400,2000))


    figure_ax1.imshow(movie_original[0,:,:], cmap = 'gray')
    color = ['b', 'r' , 'g', 'c', 'm']
    for i in range(5):
        x = randrange(movie_original.shape[1]-5)+5
        y = randrange(movie_original.shape[2]-5)+5
        [x_, _x, y_, _y] = [x-5,x+5,y-5,y+5]
        rect = Rectangle((y_, x_), _y - y_, _x - x_, fill=False, color=color[i], linestyle='-', linewidth=2)
        figure_ax1.add_patch(rect)
        figure_ax2.plot(np.arange(0,movie_original.shape[0],)/10, movie_original[:,x,y], color = color[i])

        figure_ax_i = figure.add_subplot(gs[i, 4:])
        figure_ax_i.hist(movie_original[:,x,y],50, color = color[i])
        figure_ax_i.set_xlim((400,1100))
        figure_ax_i.set_ylabel('#')
        figure_ax_i.set_xlabel('Pixel value')

    path = os.environ['DATA_DIR']+ '/data/interim/decoding/meta/'
    name = db.create_file_name(1, row.name)
    if session_wise:
        path = os.environ['DATA_DIR'] + '/data/interim/alignment/meta/'
        name = db.create_file_name(4,row.name)
    if row.name[7] == 2:
        name = db.create_file_name(4, row.name)
    figure.savefig(path + name + '.png')

    figure2, axes = plt.subplots(1)
    mean_movie = np.mean(movie_original,axis = (1,2) )
    axes.plot(np.arange(0, movie_original.shape[0], ) / 10, mean_movie, color='b')
    axes.set_xlabel('Time [s]', fontsize = 15)
    axes.set_ylabel('Mean Pixel Value', fontsize = 15)
    axes.set_title('Temporal Evolution', fontsize = 15)
    axes.set_ylim((400,2000))
    if row.name[7] == 2:
        name = db.create_file_name(4, row.name)
    figure2.savefig(path + name + '_mean.png')

    return
Example #13
distance_list = []
for i in range(len(A_center_of_mass_list_x)):
    for j in range(i + 1, len(A_center_of_mass_list_x)):
        x1 = A_center_of_mass_list_x[i]
        x2 = A_center_of_mass_list_x[j]
        y1 = A_center_of_mass_list_y[i]
        y2 = A_center_of_mass_list_y[j]
        distance = math.sqrt((x1 - x2)**2 + (y1 - y2)**2)
        distance_list.append(distance)
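
# Equivalent, vectorised computation of the pairwise center-of-mass distances above, shown as a
# sketch with scipy.spatial.distance.pdist. It takes the same A_center_of_mass_list_x /
# A_center_of_mass_list_y inputs and yields the same pairwise distances as the double loop.
import numpy as np
from scipy.spatial.distance import pdist

centers = np.column_stack((A_center_of_mass_list_x, A_center_of_mass_list_y))
distance_array = pdist(centers)  # one entry per unordered pair of cells
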

figure, axes = plt.subplots(1)
axes.hist(distance_list, bins=50)
axes.set_xlabel('Distance [pixels]', fontsize=12)
axes.set_ylabel('Number of pairs', fontsize=12)
figure.suptitle('Distance between center of mass', fontsize=20)
figure_name = db.create_file_name(5, row.name)
figure.savefig(figure_path + figure_name + '.png')

figure1, axes = plt.subplots(1)
for i in range(1, 126):
    new_vector_x = []
    new_vector_y = []
    for j in range(len(A_center_of_mass_list_y)):
        if trial_belonging[j] == i:
            new_vector_x.append(A_center_of_mass_list_x[j])
            new_vector_y.append(A_center_of_mass_list_y[j])
    axes.scatter(new_vector_y, new_vector_x)

## this part computes the pcc between contours templates for the entire session

pcc_list = []
def run_alignmnet(selected_rows, parameters, dview):
    '''
    This is the main function for the alignment step. It applies methods
    from the CaImAn package used originally in motion correction
    to do alignment.

    Args:
        selected_rows: pd.DataFrame
            A dataframe containing the analysis states you want to have aligned.
        parameters: dict
            The alignment parameters.
        dview: object
            The dview object

    Returns:
        df: pd.DataFrame
            A dataframe containing the aligned analysis states.
    '''

    # Sort the dataframe correctly
    df = selected_rows.copy()
    df = df.sort_values(by=paths.multi_index_structure)

    # Determine the mouse and session of the dataset
    index = df.iloc[0].name
    mouse, session, *r = index
    # alignment_v = index[len(paths.data_structure) + step_index]
    alignment_v = len(df)
    alignment_index = (mouse, session, alignment_v)

    # Determine the output .mmap file name
    file_name = f'mouse_{mouse}_session_{session}_v{alignment_v}'
    output_mmap_file_path = os.environ['DATA_DIR'] + f'data/interim/alignment/main/{file_name}.mmap'

    try:
        df.reset_index()[['session','trial', 'is_rest']].set_index(['session','trial', 'is_rest'], verify_integrity=True)
    except ValueError:
        logging.error('You passed multiple of the same trial in the dataframe df')
        return df

    output = {
        'meta': {
            'analysis': {
                'analyst': os.environ['ANALYST'],
                'date': datetime.datetime.today().strftime("%m-%d-%Y"),
                'time': datetime.datetime.today().strftime("%H:%M:%S")
            },
            'duration': {}
        }
    }

    # Get necessary parameters
    motion_correction_parameters_list = []
    motion_correction_output_list = []
    input_mmap_file_list = []
    trial_index_list = []
    x_ = []
    _x = []
    y_ = []
    _y = []
    for idx, row in df.iterrows():
        motion_correction_parameters_list.append(eval(row.loc['motion_correction_parameters']))
        motion_correction_output = eval(row.loc['motion_correction_output'])
        motion_correction_output_list.append(motion_correction_output)
        input_mmap_file_list.append(motion_correction_output['main'])
        trial_index_list.append(db.get_trial_name(idx[2], idx[3]))
        [x1,x2,y1,y2] = motion_correction_output['meta']['cropping_points']
        x_.append(x1)
        _x.append(x2)
        y_.append(y1)
        _y.append(y2)

    new_x1 = max(x_)
    new_x2 = max(_x)
    new_y1 = max(y_)
    new_y2 = max(_y)
    m_list = []
    for i in range(len(input_mmap_file_list)):
        m = cm.load(input_mmap_file_list[i])
        motion_correction_output = eval(df.iloc[i].loc['motion_correction_output'])
        [x1,x2,y1,y2] = motion_correction_output['meta']['cropping_points']
        m = m.crop(new_x1 - x1, new_x2 - x2, new_y1 - y1, new_y2 - y2, 0, 0)
        m_list.append(m)

    # Concatenate them using the concat function
    m_concat = cm.concatenate(m_list, axis=0)
    data_dir = os.environ['DATA_DIR'] + 'data/interim/alignment/main/'
    step_index = 4  # alignment step index; was undefined here (matches db.create_file_name(4, ...) used for alignment elsewhere in this module)
    file_name = db.create_file_name(step_index, index)
    fname= m_concat.save(data_dir + file_name + '.mmap', order='C')

    #meta_pkl_dict['pw_rigid']['cropping_points'] = [x_, _x, y_, _y]
    #output['meta']['cropping_points'] = [x_, _x, y_, _y]
    # Save the movie
    #fname_tot_els  = m_els.save(data_dir + 'main/' + file_name + '_els' + '.mmap',  order='C')
    #logging.info(f'{index} Cropped and saved rigid movie as {fname_tot_els}')

    # MOTION CORRECTING EACH INDIVIDUAL MOVIE WITH RESPECT TO A TEMPLATE MADE OF THE FIRST MOVIE
    logging.info(f'{alignment_index} Performing motion correction on all movies with respect to a template made of \
    the first movie.')
    t0 = datetime.datetime.today()

    # Create a template of the first movie
    template_index = trial_index_list.index(parameters['make_template_from_trial'])
    m0 = cm.load(input_mmap_file_list[template_index ])
    [x1, x2, y1, y2] = motion_correction_output_list[template_index]['meta']['cropping_points']
    m0 = m0.crop(new_x1 - x1, new_x2 - x2, new_y1 - y1, new_y2 - y2, 0, 0)
    m0_filt = cm.movie(
        np.array([high_pass_filter_space(m_, parameters['gSig_filt']) for m_ in m0]))
    template0 = cm.motion_correction.bin_median(
        m0_filt.motion_correct(5, 5, template=None)[0])  # may be improved in the future

    # Setting the parameters
    opts = params.CNMFParams(params_dict=parameters)

    # Create a motion correction object
    mc = MotionCorrect(fname, dview=dview, **opts.get_group('motion'))

    # Perform non-rigid motion correction
    mc.motion_correct(template=template0, save_movie=True)

    # Cropping borders
    x_ = math.ceil(abs(np.array(mc.shifts_rig)[:, 1].max()) if np.array(mc.shifts_rig)[:, 1].max() > 0 else 0)
    _x = math.ceil(abs(np.array(mc.shifts_rig)[:, 1].min()) if np.array(mc.shifts_rig)[:, 1].min() < 0 else 0)
    y_ = math.ceil(abs(np.array(mc.shifts_rig)[:, 0].max()) if np.array(mc.shifts_rig)[:, 0].max() > 0 else 0)
    _y = math.ceil(abs(np.array(mc.shifts_rig)[:, 0].min()) if np.array(mc.shifts_rig)[:, 0].min() < 0 else 0)

    # Load the motion corrected movie into memory
    movie= cm.load(mc.fname_tot_rig[0])
    # Crop all movies to those border pixels
    movie = movie.crop(x_, _x, y_, _y, 0, 0)
    output['meta']['cropping_points'] = [x_, _x, y_, _y]

    #save motion corrected and cropped movie
    output_mmap_file_path_tot = movie.save(data_dir + file_name  + '.mmap', order='C')
    logging.info(f'{index} Cropped and saved rigid movie as {output_mmap_file_path_tot}')
    # Save the path in the output dictionary
    output['main'] = output_mmap_file_path_tot
    # Remove the remaining non-cropped movie
    os.remove(mc.fname_tot_rig[0])

    # Create a timeline and store it
    timeline = [[trial_index_list[0], 0]]
    timepoints = [0]
    for i in range(1, len(m_list)):
        m = m_list[i]
        timeline.append([trial_index_list[i], timeline[i - 1][1] + m.shape[0]])
        timepoints.append(timepoints[i - 1] + m.shape[0])
    timeline_pkl_file_path = os.environ['DATA_DIR'] + f'data/interim/alignment/meta/timeline/{file_name}.pkl'
    with open(timeline_pkl_file_path, 'wb') as f:
        pickle.dump(timeline, f)
    output['meta']['timeline'] = timeline_pkl_file_path
    timepoints.append(movie.shape[0])

    dt = int((datetime.datetime.today() - t0).seconds / 60)  # timedelta in minutes
    output['meta']['duration']['concatenation'] = dt
    logging.info(f'{alignment_index} Performed concatenation. dt = {dt} min.')

    for idx, row in df.iterrows():
        df.loc[idx, 'alignment_output'] = str(output)
        df.loc[idx, 'alignment_parameters'] = str(parameters)

    ## modify all motion correction file to the aligned version
    data_dir = os.environ['DATA_DIR'] + 'data/interim/motion_correction/main/'
    for i in range(len(input_mmap_file_list)):
        row = df.iloc[i].copy()
        aligned_movie = movie[timepoints[i]:timepoints[i+1]]
        file_name = db.create_file_name(2, selected_rows.iloc[i].name)
        motion_correction_output_aligned = aligned_movie.save(data_dir + file_name + '_els' + '.mmap',  order='C')
        new_output= {'main' : motion_correction_output_aligned }
        new_dict = eval(row['motion_correction_output'])
        new_dict.update(new_output)
        row['motion_correction_output'] = str(new_dict)
        df = db.append_to_or_merge_with_states_df(df, row)

    #    # Delete the motion corrected movies
    #    for fname in mc.fname_tot_rig:
    #        os.remove(fname)

    return df
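
# Usage sketch: running the alignment step on all trials of one session. The parameter values are
# placeholders; 'make_template_from_trial' and 'gSig_filt' are the keys actually read by
# run_alignmnet above, and the remaining entries are forwarded to params.CNMFParams for the
# motion-correction group. The trial name '1_R' is a guess at the db.get_trial_name format.
states_df = db.open_analysis_states_database()
session_rows = db.select(states_df, 'motion_correction', mouse=56165, session=1,
                         cropping_v=1, motion_correction_v=1)
alignment_parameters = {
    'make_template_from_trial': '1_R',   # placeholder trial name
    'gSig_filt': (5, 5),
    'pw_rigid': False,
    'max_shifts': (25, 25),
}
c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=psutil.cpu_count(),
                                                 single_thread=False)
aligned_df = run_alignmnet(session_rows, alignment_parameters, dview)
dview.terminate()
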
Example #15
def run_source_extraction(row, parameters, dview, session_wise = False):
    '''
    This is the function for source extraction.
    Its goal is to take in a .mmap file,
    perform source extraction on it using cnmf-e and save the cnmf object as a .pkl file.
    Args:
        row: pd.DataFrame object
            The row corresponding to the analysis state to be source extracted.
        parameters: dict
            The source extraction parameters.
        dview: object
            The dview object for parallel processing.
        session_wise: bool
            Whether the analysis state belongs to the session-wise (aligned) pipeline.

    Returns:
        row: pd.DataFrame object
            The row corresponding to the source extracted analysis state.
    '''
    step_index = 5
    row_local = row.copy()
    row_local.loc['source_extraction_parameters'] = str(parameters)
    row_local = db.set_version_analysis('source_extraction',row_local,session_wise)
    index = row_local.name

    # Determine input path
    if parameters['session_wise']:
        input_mmap_file_path = eval(row_local.loc['alignment_output'])['main']
        if parameters['equalization']:
            input_mmap_file_path =eval(row_local['equalization_output'])['main']
    else: 
        input_mmap_file_path = eval(row_local.loc['motion_correction_output'])['main']
    if not os.path.isfile(input_mmap_file_path):
        logging.error('Input file does not exist. Cancelling.')
        return row_local
    
    # Determine output paths
    file_name = db.create_file_name(step_index, index)
    if parameters['session_wise']:
        data_dir = os.environ['DATA_DIR'] + 'data/interim/source_extraction/session_wise/'
    else:
        data_dir = os.environ['DATA_DIR'] + 'data/interim/source_extraction/trial_wise/'
    output_file_path = data_dir + f'main/{file_name}.hdf5'
   
        
    # Create a dictionary with parameters
    output = {
            'main': output_file_path,
            'meta':{
                'analysis' : {
                        'analyst' : os.environ['ANALYST'],
                        'date' : datetime.datetime.today().strftime("%m-%d-%Y"),
                        'time' : datetime.datetime.today().strftime("%H:%M:%S"),
                        },
                    'duration': {}
                    }
                }
    
    # Load memmory mappable input file
    if os.path.isfile(input_mmap_file_path):
        Yr, dims, T = cm.load_memmap(input_mmap_file_path)
#        logging.debug(f'{index} Loaded movie. dims = {dims}, T = {T}.')
        images = Yr.T.reshape((T,) + dims, order='F')
    else:
        logging.warning(f'{index} .mmap file does not exist. Cancelling')
        return row_local
    
    # SOURCE EXTRACTION
    # Check if the summary images are already there
    corr_npy_file_path, pnr_npy_file_path = fm.get_corr_pnr_path(index, gSig_abs = parameters['gSig'][0])
    
    if corr_npy_file_path is not None and os.path.isfile(corr_npy_file_path):
        # Already computed summary images
        logging.info(f'{index} Already computed summary images')
        cn_filter = np.load(corr_npy_file_path)
        pnr = np.load(pnr_npy_file_path)
    else:
        # Compute summary images
        t0 = datetime.datetime.today()
        logging.info(f'{index} Computing summary images')
        cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1], gSig = parameters['gSig'][0], swap_dim=False)
        dt = int((datetime.datetime.today() - t0).seconds/60) # timedelta in minutes
        output['meta']['duration']['summary_images'] = dt 
        logging.info(f'{index} Computed summary images. dt = {dt} min')
        # Saving summary images as npy files
        gSig = parameters['gSig'][0]
        corr_npy_file_path = data_dir + f'/meta/corr/{db.create_file_name(3, index)}_gSig_{gSig}.npy'
        pnr_npy_file_path = data_dir + f'/meta/pnr/{db.create_file_name(3, index)}_gSig_{gSig}.npy'
        with open(corr_npy_file_path, 'wb') as f:
            np.save(f, cn_filter)
        with open(pnr_npy_file_path, 'wb') as f:
            np.save(f, pnr)
    
    # Store the paths in the meta dictionary 
    output['meta']['corr'] = {'main': corr_npy_file_path, 'meta': {}}
    output['meta']['pnr'] = {'main': pnr_npy_file_path, 'meta': {}}
    
    # Calculate min, mean, max value for cn_filter and pnr
    corr_min, corr_mean, corr_max = cn_filter.min(), cn_filter.mean(), cn_filter.max()
    output['meta']['corr']['meta'] = {'min': corr_min, 'mean': corr_mean, 'max': corr_max}
    pnr_min, pnr_mean, pnr_max = pnr.min(), pnr.mean(), pnr.max()
    output['meta']['pnr']['meta'] = {'min': pnr_min, 'mean': pnr_mean, 'max': pnr_max}
    
    # If min_corr and min_pnr are specified via a linear equation, calculate 
    # this value 
    if type(parameters['min_corr']) == list:
        min_corr = parameters['min_corr'][0]*corr_mean + parameters['min_corr'][1]
        parameters['min_corr'] = min_corr
        logging.info(f'{index} Automatically setting min_corr = {min_corr}')
    if type(parameters['min_pnr']) == list:
        min_pnr =  parameters['min_pnr'][0]*pnr_mean + parameters['min_pnr'][1]
        parameters['min_pnr'] = min_pnr
        logging.info(f'{index} Automatically setting min_pnr = {min_pnr}')

    # Set the parameters for caiman
    opts = params.CNMFParams(params_dict = parameters)   
    
    # SOURCE EXTRACTION 
    logging.info(f'{index} Performing source extraction')
    t0 = datetime.datetime.today()
    n_processes = psutil.cpu_count()
    logging.info(f'{index} n_processes: {n_processes}')
    cnm = cnmf.CNMF(n_processes = n_processes, dview = dview, params = opts)
    cnm.fit(images)
    cnm.estimates.dims = dims    
    
    # Store the number of neurons
    output['meta']['K'] = len(cnm.estimates.C)
    
    # Calculate the center of masses
    cnm.estimates.center = caiman.base.rois.com(cnm.estimates.A, images.shape[1], images.shape[2])
    
    # Save the cnmf object as a hdf5 file 
    logging.info(f'{index} Saving cnmf object')
    cnm.save(output_file_path)
    dt = int((datetime.datetime.today() - t0).seconds/60) # timedelta in minutes
    output['meta']['duration']['source_extraction'] = dt
    logging.info(f'{index} Source extraction finished. dt = {dt} min')
    
    # Write necessary variables in row and return
    row_local.loc['source_extraction_parameters'] = str(parameters)
    row_local.loc['source_extraction_output'] = str(output)
        
    return row_local
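
# Usage sketch: trial-wise source extraction for one motion-corrected state. Only keys that
# run_source_extraction itself reads ('session_wise', 'equalization', 'gSig', 'min_corr',
# 'min_pnr') plus a few standard CNMF-E settings are shown; the numeric values are placeholders.
states_df = db.open_analysis_states_database()
mc_row = db.select(states_df, 'source_extraction', mouse=56165,
                   cropping_v=1, motion_correction_v=1).iloc[0]
source_extraction_parameters = {
    'session_wise': False,
    'equalization': False,
    'gSig': (5, 5),
    'gSiz': (20, 20),
    'min_corr': 0.6,   # or [slope, intercept] to derive it from the mean of the correlation image
    'min_pnr': 5,      # or [slope, intercept] to derive it from the mean of the PNR image
    'p': 1,
    'K': None,
}
c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=psutil.cpu_count(),
                                                 single_thread=False)
new_row = run_source_extraction(mc_row, source_extraction_parameters, dview,
                                session_wise=False)
dview.terminate()
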
Example #16
            output_cropping = mouse_row['cropping_output']
            cropped_file = eval(output_cropping)['main']
            m = cm.load(cropped_file)
            axes[kk, 1].imshow(m[0, :, :], cmap='gray')

            output_source_extraction = eval(
                mouse_row['source_extraction_output'])
            cnm_file_path = output_source_extraction['main']
            cnm = load_CNMF(db.get_file(cnm_file_path))
            corr_path = output_source_extraction['meta']['corr']['main']
            cn_filter = np.load(db.get_file(corr_path))
            axes[kk, 2].imshow(cn_filter)
            coordinates = cm.utils.visualization.get_contours(
                cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
            for c in coordinates:
                v = c['coordinates']
                c['bbox'] = [
                    np.floor(np.nanmin(v[:, 1])),
                    np.ceil(np.nanmax(v[:, 1])),
                    np.floor(np.nanmin(v[:, 0])),
                    np.ceil(np.nanmax(v[:, 0]))
                ]
                axes[kk, 2].plot(*v.T, c='w', linewidth=3)

        fig_dir = '/home/sebastian/Documents/Melisa/calcium_imaging_analysis/data/interim/cropping/meta/figures/cropping_inicialization/'
        fig_name = fig_dir + db.create_file_name(
            2, mouse_row.name
        ) + '_corr_' + f'{round(corr_limits[ii],1)}' + '_pnr_' + f'{round(pnr_limits[jj])}' + '.png'
        figure.savefig(fig_name)
Example #17
def run_cropper(row, parameters):
    '''
    This function takes in a decoded analysis state and crops it according to 
    specified cropping points.
    
    Args:
        row: pd.DataFrame object
            The row corresponding to the analysis state to be cropped.
        parameters: dict
            The cropping parameters ('crop_spatial', 'cropping_points_spatial', ...).

    Returns:
        row: pd.DataFrame object
            The row corresponding to the cropped analysis state.
    '''

    row_local = row.copy()
    index = row_local.name
    # Get the input tif file path
    input_tif_file_path = eval(row_local.loc['decoding_output'])['main']
    if index[4] == 2:
        input_tif_file_path = eval(
            row_local.loc['decoding_output'])['equalizing_output']['main']
    if not os.path.isfile(input_tif_file_path):
        expected_file_path = db.get_expected_file_path('decoding', 'main/', index, '.tif')
        logging.error(f'{index} Input file does not exist (expected {expected_file_path}). Cancelling.')
        return row_local

    # Determine output .tif file path
    step_index = 1
    row_local.loc['cropping_parameters'] = str(parameters)
    row_local = db.set_version_analysis('cropping', row_local)
    index = row_local.name
    file_name = db.create_file_name(step_index, index)
    output_tif_file_path = os.environ[
        'DATA_DIR'] + f"data/interim/cropping/main/{file_name}.tif"

    # Create a dictionary with the output
    output = {
        'main': output_tif_file_path,
        'meta': {
            'analysis': {
                'analyst': os.environ['ANALYST'],
                'date': datetime.datetime.today().strftime("%m-%d-%Y"),
                'time': datetime.datetime.today().strftime("%H:%M:%S"),
            }
        }
    }

    # Spatial cropping
    logging.info(f'{index} Loading movie')
    m = cm.load(input_tif_file_path)
    logging.info(f'{index} Loaded movie')
    [x_, _x, y_, _y] = parameters['cropping_points_spatial']
    if parameters['crop_spatial']:
        logging.info(f'{index} Performing spatial cropping')
        m = m[:, x_:_x, y_:_y]
        logging.info(f'{index} Spatial cropping finished')
    else:
        logging.info(f'{index} No spatial cropping')

    # Temporal cropping
    # if parameters['crop_temporal']:
    #     m, timeline = do_temporal_cropping(m, parameters['cropping_points_temporal'])
    #     The option below is to get a timeline which indicates on which
    #     frames clips are cut out and how long those clips were.
    #     I eventually decided this is not necessary. The temporal cropping points are enough
    #     to reconstruct this and are more easily saved (namely in the
    #     master file list under 'cropping_parameters')
    #     timeline_pkl_file_path = f'data/interim/cropping/meta/timeline/{file_name}.pkl'
    #     output['meta']['timeline'] = timeline_pkl_file_path
    #     with open(timeline_pkl_file_path, 'wb') as f:
    #         pickle.dump(timeline, f)

    # Save the movie
    m.save(output_tif_file_path)
    # Write necessary variables to the trial index and row_local
    row_local.loc['cropping_parameters'] = str(parameters)
    row_local.loc['cropping_output'] = str(output)

    return row_local
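
# Usage sketch: cropping a decoded movie. 'cropping_points_spatial' is [x_, _x, y_, _y] in pixels,
# exactly as unpacked inside run_cropper above; the values and the selection below are
# placeholders (it assumes db.select accepts 'decoding' as a step name).
cropping_parameters = {
    'crop_spatial': True,
    'cropping_points_spatial': [80, 450, 20, 400],
    'crop_temporal': False,
    'cropping_points_temporal': [],
}
states_df = db.open_analysis_states_database()
decoded_row = db.select(states_df, 'decoding', mouse=56165).iloc[0]
cropped_row = run_cropper(decoded_row, cropping_parameters)
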
def run_component_evaluation(row,
                             parameters,
                             set_version=None,
                             session_wise=False,
                             equalization=False):

    step_index = 6
    row_local = row.copy()
    row_local.loc['component_evaluation_parameters'] = str(parameters)
    row_local = db.set_version_analysis('component_evaluation', row_local,
                                        session_wise)
    index = row_local.name

    motion_correction_output = eval(row_local.loc['motion_correction_output'])
    if session_wise:
        motion_correction_output = eval(row_local.loc['alignment_output'])
    if equalization:
        motion_correction_output = eval(row_local['equalization_output'])

    source_extraction_output = eval(row_local.loc['source_extraction_output'])
    source_extraction_parameters = eval(
        row_local.loc['source_extraction_parameters'])
    input_hdf5_file_path = source_extraction_output['main']
    input_mmap_file_path = motion_correction_output['main']
    if source_extraction_parameters['session_wise']:
        data_dir = os.environ['DATA_DIR'] + 'data/interim/component_evaluation/session_wise/'
    else:
        data_dir = os.environ['DATA_DIR'] + 'data/interim/component_evaluation/trial_wise/'
    file_name = db.create_file_name(step_index, index)
    output_file_path = data_dir + f'main/{file_name}.hdf5'

    # Create a dictionary with parameters
    output = {
        'main': output_file_path,
        'meta': {
            'analysis': {
                'analyst': os.environ['ANALYST'],
                'date': datetime.datetime.today().strftime("%m-%d-%Y"),
                'time': datetime.datetime.today().strftime("%H:%M:%S"),
            },
            'duration': {}
        }
    }

    # Load CNMF object
    cnm = load_CNMF(input_hdf5_file_path)

    # Load the original movie
    Yr, dims, T = cm.load_memmap(input_mmap_file_path)
    images = Yr.T.reshape((T, ) + dims, order='F')

    # Set the parmeters
    cnm.params.set('quality', parameters)

    # Stop the cluster if one exists
    n_processes = psutil.cpu_count()
    try:
        cm.cluster.stop_server()
    except:
        pass

    # Start a new cluster
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local',
        n_processes=
        n_processes,  # number of process to use, if you go out of memory try to reduce this one
        single_thread=False)
    # Evaluate components
    cnm.estimates.evaluate_components(images, cnm.params, dview=dview)

    logging.debug('Number of total components: %d', len(cnm.estimates.C))
    logging.debug('Number of accepted components: %d', len(cnm.estimates.idx_components))

    # Stop the cluster
    dview.terminate()

    # Save CNMF object
    cnm.save(output_file_path)

    # Write necessary variables to the trial index and row
    row_local.loc['component_evaluation_parameters'] = str(parameters)
    row_local.loc['component_evaluation_output'] = str(output)

    return row_local
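
# Usage sketch: evaluating the components of one source-extracted state. The threshold keys
# mirror the component_evaluation_parameters read elsewhere in this module ('rval_thr',
# 'min_SNR'); the exact values and the 'use_cnn' flag are placeholders.
component_evaluation_parameters = {
    'min_SNR': 3,
    'rval_thr': 0.85,
    'use_cnn': False,
}
states_df = db.open_analysis_states_database()
se_row = db.select(states_df, 'component_evaluation', mouse=56165, cropping_v=1,
                   motion_correction_v=1, source_extraction_v=1).iloc[0]
evaluated_row = run_component_evaluation(se_row, component_evaluation_parameters,
                                         session_wise=False)
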
Example #19
def plot_multiple_contours_session_wise_evaluated(selected_rows):

    ## IN DEVELOPMENT
    '''
    Plots the contours of all, accepted and rejected components for several trials of one session,
    to compare the source extraction and component evaluation results across trials.
    :param selected_rows: all the selected analysis states
    :return: figure
    '''

    figure, axes = plt.subplots(3, 5, figsize=(50, 30))

    for row in range(len(selected_rows)):
        mouse_row = selected_rows.iloc[row]
        index = mouse_row.name
        output = eval(mouse_row.loc['source_extraction_output'])
        corr_path = output['meta']['corr']['main']
        cn_filter = np.load(db.get_file(corr_path))
        axes[0,row].imshow(cn_filter)
        axes[1,row].imshow(cn_filter)
        axes[2,row].imshow(cn_filter)
        output = eval(mouse_row.loc['source_extraction_output'])
        cnm_file_path = output['main']
        cnm = load_CNMF(db.get_file(cnm_file_path))
        coordinates = cm.utils.visualization.get_contours(cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
        for c in coordinates:
            v = c['coordinates']
            c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
                         np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
            axes[0,row].plot(*v.T, c = 'w', linewidth=3)
        axes[0,row].set_title('Trial = ' + f'{row}')
        axes[0,row].set_ylabel('')

        output = eval(mouse_row.loc['component_evaluation_output'])
        cnm_file_path = output['main']
        cnm = load_CNMF(db.get_file(cnm_file_path))
        idx = cnm.estimates.idx_components
        coordinates = cm.utils.visualization.get_contours(cnm.estimates.A[:, idx], np.shape(cn_filter), 0.2, 'max')
        for c in coordinates:
            v = c['coordinates']
            c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
                         np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
            axes[1,row].plot(*v.T, c='b', linewidth=3)

        idx_b = cnm.estimates.idx_components_bad
        coordinates_b = cm.utils.visualization.get_contours(cnm.estimates.A[:,idx_b], np.shape(cn_filter), 0.2, 'max')

        for c in coordinates_b:
            v = c['coordinates']
            c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
                         np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
            axes[2,row].plot(*v.T, c='r', linewidth=3)


    source_extraction_parameters = eval(mouse_row['source_extraction_parameters'])
    corr_lim = source_extraction_parameters['min_corr']
    pnr_lim = source_extraction_parameters['min_pnr']
    component_evaluation_parameters = eval(mouse_row['component_evaluation_parameters'])
    pcc = component_evaluation_parameters['rval_thr']
    SNR = component_evaluation_parameters['min_SNR']
    figure.suptitle(f'Corr = {corr_lim}, PNR = {pnr_lim}, PCC = {pcc}, SNR = {SNR}',
                    fontsize=50)
    fig_dir = 'data/interim/component_evaluation/session_wise/meta/figures/contours/'
    fig_name = fig_dir + db.create_file_name(3, index)+'_Corr = ' + f'{corr_lim}' + '_PNR = ' + f'{pnr_lim}' + '_PCC = ' + f'{pcc}' + '_SNR = ' + f'{SNR}' +'_.png'
    figure.savefig(fig_name)

    return figure
def make_figures(index, row, force=False):
    # Create file name
    file_name = db.create_file_name(step_index, index)

    # Load meta_pkl file
    output = eval(row.loc['motion_correction_output'])
    metrics_pkl_file_path = output['meta']['metrics']
    with open(metrics_pkl_file_path, 'rb') as f:
        x = pickle.load(f)

    # Possible figures
    figure_names = np.array([
        'rig_template', 'rig_shifts', 'els_template', 'correlations',
        'correlations_orig_vs_rig', 'correlations_rig_vs_els',
        'correlations_orig_vs_els', 'orig_local_correlations',
        'rig_local_correlations', 'els_local_correlations'
    ])

    def figure_flag(i):
        # This function determines which figures can be made. If they cannot be made, either
        # an analysis step has not been performed or metrics have to be computed
        if i == 0:
            return 'rigid' in x
        elif i == 1:
            return 'rigid' in x
        elif i == 2:
            return 'non-rigid' in x
        elif i == 3:
            return [
                'original' in x and 'correlations' in x['original'],
                'rigid' in x and 'correlations' in x['rigid'], 'non-rigid' in x
                and 'correlations' in x['non-rigid']
            ]
        elif i == 4:
            return ('original' in x and 'correlations' in x['original']) and (
                'rigid' in x and 'correlations' in x['rigid'])
        elif i == 5:
            return ('non-rigid' in x and 'correlations'
                    in x['non-rigid']) and ('rigid' in x
                                            and 'correlations' in x['rigid'])
        elif i == 6:
            return ('original' in x and 'correlations' in x['original']) and (
                'non-rigid' in x and 'correlations' in x['non-rigid'])
        elif i == 7:
            return 'original' in x and 'local_correlations' in x['original']
        elif i == 8:
            return 'rigid' in x and 'local_correlations' in x['rigid']
        elif i == 9:
            return 'non-rigid' in x and 'local_correlations' in x['non-rigid']

    def make_figure(i):
        # This function specifies how each of the figures are layed out
        if i == 0:
            plt.imshow(x['rigid']['total_template'], cmap='gray')
            plt.title('Total template rigid motion correction')
        elif i == 1:
            plt.plot(x['rigid']['shifts'])
            plt.legend(['x shifts', 'y shifts'])
            plt.xlabel('frames')
            plt.ylabel('pixels')
            plt.title('Shifts rigid motion correction')
        elif i == 2:
            plt.imshow(x['non-rigid']['total_template'], cmap='gray')
            plt.title('Total template piecewise-rigid motion correction')
        elif i == 3:
            legend = []
            for idx, flag in enumerate(figure_flag(i)):
                if flag:
                    string = ['original', 'rigid', 'non-rigid'][idx]
                    legend.append(string)
                    plt.plot(x[string]['correlations'])
            plt.xlabel('frames')
            plt.legend(legend)
            plt.title('Correlations')
        elif i == 4:
            min_cor, max_cor = min(x['original']['correlations'] +
                                   x['rigid']['correlations']), max(
                                       x['original']['correlations'] +
                                       x['rigid']['correlations'])
            plt.scatter(x['original']['correlations'],
                        x['rigid']['correlations'])
            plt.xlabel('original')
            plt.ylabel('rigid')
            plt.plot([min_cor, max_cor], [min_cor, max_cor], 'r--')
            plt.title('Original v. rigid correlations')
        elif i == 5:
            min_cor, max_cor = min(x['rigid']['correlations'] +
                                   x['non-rigid']['correlations']), max(
                                       x['rigid']['correlations'] +
                                       x['non-rigid']['correlations'])
            plt.scatter(x['rigid']['correlations'],
                        x['non-rigid']['correlations'])
            plt.xlabel('rigid')
            plt.ylabel('non-rigid')
            plt.plot([min_cor, max_cor], [min_cor, max_cor], 'r--')
            plt.title('Rigid v. piecewise-rigid correlations')
        elif i == 6:
            min_cor, max_cor = min(x['original']['correlations'] +
                                   x['non-rigid']['correlations']), max(
                                       x['original']['correlations'] +
                                       x['non-rigid']['correlations'])
            plt.scatter(x['original']['correlations'],
                        x['non-rigid']['correlations'])
            plt.xlabel('original')
            plt.ylabel('non-rigid')
            plt.plot([min_cor, max_cor], [min_cor, max_cor], 'r--')
            plt.title('Original v. piecewise-rigid correlations')
        elif i == 7:
            plt.imshow(x['original']['local_correlations'])
            plt.title('Local correlations original movie')
        elif i == 8:
            plt.imshow(x['rigid']['local_correlations'])
            plt.title('Local correlations rigid motion correction')
        elif i == 9:
            plt.imshow(x['non-rigid']['local_correlations'])
            plt.title('Local correlations piecewise-rigid motion correction')

    for i in range(len(figure_names)):
        # figure_flag(3) returns one boolean per movie version, so check whether any is set
        if (figure_flag(i) if i != 3 else any(figure_flag(i))):
            plt.figure()
            make_figure(i)
            plt.savefig(
                f'data/interim/motion_correction/meta/figures/{file_name}_{figure_names[i]}.png'
            )
            plt.close()

    return
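
# --- Hedged sketch (assumption, reconstructed from the keys accessed above; not part of the
# original source): the structure of the metrics dictionary `x` that figure_flag / make_figure
# expect. Values are empty placeholders, not real data.
x_sketch = {
    'original': {'correlations': [], 'local_correlations': np.zeros((2, 2))},
    'rigid': {'total_template': np.zeros((2, 2)), 'shifts': [],
              'correlations': [], 'local_correlations': np.zeros((2, 2))},
    'non-rigid': {'total_template': np.zeros((2, 2)), 'correlations': [],
                  'local_correlations': np.zeros((2, 2))},
}
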
for kk in range(1, len(source_extraction_v_array)):
    for ll in range(1, len(component_evaluation_v_array)):
        list1.append(number_cell[kk - 1][ll - 1])
        list2.append(1 - fraction[kk - 1][ll - 1])
        list3.append(number_cell5[kk - 1][ll - 1])
        list4.append(1 - fraction5[kk - 1][ll - 1])

# use a float array: np.arange would give an integer array and silently truncate the products below
product = np.zeros(len(list1))
false_positive = np.zeros((len(list1)))
false_positive_next = np.zeros((len(list1)))
for ii in range(len(product) - 1):
    product[ii] = list3[ii] * list2[ii]
    false_positive[ii] = list4[ii]
    false_positive_next[ii] = list4[ii + 1]
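
# --- Hedged sketch (assumption, not in the original): a vectorized equivalent of the loop
# above; it drops the trailing element that the loop leaves at zero.
product_vec = np.asarray(list3[:-1], dtype=float) * np.asarray(list2[:-1], dtype=float)
false_positive_vec = np.asarray(list4[:-1], dtype=float)
false_positive_next_vec = np.asarray(list4[1:], dtype=float)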

file_name_number = '/home/sebastian/Documents/Melisa/calcium_imaging_analysis/data/interim/component_evaluation/session_wise/meta/metrics/' + db.create_file_name(
    5, mouse_row.name) + '_number'
np.save(file_name_number, number_cell5)
file_name_fraction = '/home/sebastian/Documents/Melisa/calcium_imaging_analysis/data/interim/component_evaluation/session_wise/meta/metrics/' + db.create_file_name(
    5, mouse_row.name) + '_fraction'
np.save(file_name_fraction, fraction5)

#%% Working with matching!!!!!!

one_version = 13
selected_rows = db.select(states_df,
                          'source_extraction',
                          56165,
                          cropping_v=1,
                          motion_correction_v=1,
                          source_extraction_v=one_version)
Example #22
0
def create_video(row, time_cropping, session_wise = False):

    '''
    This function creates a composite video showing the raw (motion-corrected) movie, the reconstructed
    source-extracted cells, the estimated background, and the residual, arranged in a 2x2 grid.
    :param row: pandas Series with the analysis state to use. It can use the session-wise or trial-wise movie.
    :param time_cropping: (start, end) frame range to include in the video.
    :param session_wise: if True, load the aligned (session-wise) movie instead of the trial-wise one.
    :return:
    '''

    if session_wise:
        input_mmap_file_path = eval(row.loc['alignment_output'])['main']
    else:
        input_mmap_file_path = eval(row.loc['motion_correction_output'])['main']

    #load the mmap file
    Yr, dims, T = cm.load_memmap(input_mmap_file_path)
    logging.debug(f'{row.name} Loaded movie. dims = {dims}, T = {T}.')
    #create a caiman movie with the mmap file
    images = Yr.T.reshape((T,) + dims, order='F')
    images = cm.movie(images)

    #load source extraction result
    output = eval(row.loc['source_extraction_output'])
    cnm_file_path = output['main']
    cnm = load_CNMF(db.get_file(cnm_file_path))

    #estimate the background from the extraction
    W, b0 = cm.source_extraction.cnmf.initialization.compute_W(Yr, cnm.estimates.A.toarray(), cnm.estimates.C,
                                                               cnm.estimates.dims, 1.4 * 5, ssub=2)
    cnm.estimates.W = W
    cnm.estimates.b0 = b0
    # this part could be used with the latest CaImAn version
    # movie_dir = '/home/sebastian/Documents/Melisa/calcium_imaging_analysis/data/processed/movies/'
    # file_name = db.create_file_name(5,row.name)
    # cnm.estimates.play_movie(cnm.estimates, images, movie_name= movie_dir + file_name + '.avi')

    frame_range = slice(None, None, None)
    # create a movie with the model : estimated A and C matrix
    Y_rec = cnm.estimates.A.dot(cnm.estimates.C[:, frame_range])
    Y_rec = Y_rec.reshape(dims + (-1,), order='F')
    Y_rec = Y_rec.transpose([2, 0, 1])
    # convert the variable to a caiman movie type
    Y_rec = cm.movie(Y_rec)

    ## this part of the function is copied from a CaImAn version (background reconstruction)
    ssub_B = int(round(np.sqrt(np.prod(dims) / W.shape[0])))
    B = images[frame_range].reshape((-1, np.prod(dims)), order='F').T - \
        cnm.estimates.A.dot(cnm.estimates.C[:, frame_range])
    if ssub_B == 1:
        B = b0[:, None] + W.dot(B - b0[:, None])
    else:
        B = b0[:, None] + (np.repeat(np.repeat(W.dot(
            downscale(B.reshape(dims + (B.shape[-1],), order='F'),
                      (ssub_B, ssub_B, 1)).reshape((-1, B.shape[-1]), order='F') -
            downscale(b0.reshape(dims, order='F'),
                      (ssub_B, ssub_B)).reshape((-1, 1), order='F'))
            .reshape(
            ((dims[0] - 1) // ssub_B + 1, (dims[1] - 1) // ssub_B + 1, -1), order='F'),
            ssub_B, 0), ssub_B, 1)[:dims[0], :dims[1]].reshape(
            (-1, B.shape[-1]), order='F'))
    B = B.reshape(dims + (-1,), order='F').transpose([2, 0, 1])

    Y_rec_2 = Y_rec + B
    Y_res = images[frame_range] - Y_rec - B

    # crop each movie to the requested frame range and normalize to its maximum
    images_np = images[time_cropping[0]:time_cropping[1], :, :]
    images_np = cm.movie(images_np / np.max(images_np))

    Y_rec_np = Y_rec[time_cropping[0]:time_cropping[1], :, :]
    Y_rec_np = cm.movie(Y_rec_np / np.max(Y_rec_np))

    Y_res_np = Y_res[time_cropping[0]:time_cropping[1], :, :]
    Y_res_np = cm.movie(Y_res_np / np.max(Y_res_np))

    B_np = B[time_cropping[0]:time_cropping[1], :, :]
    B_np = cm.movie(B_np / np.max(B_np))

    mov1 = cm.concatenate((images_np, Y_rec_np), axis=2)

    mov2 = cm.concatenate((B_np, Y_res_np), axis=2)

    mov = cm.concatenate((mov1, mov2), axis=1)

    figure_path = '/home/sebastian/Documents/Melisa/calcium_imaging_analysis/data/interim/movies/'
    figure_name = db.create_file_name(5,row.name)
    #mov.save(figure_path+figure_name+'.tif')
    mov.save(figure_path+figure_name+'_'+f'{time_cropping[0]}' + '_' + f'{time_cropping[1]}'+'.tif')

    return
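
# --- Hedged usage sketch (assumption, not part of the original source) ---
# Assumes `states_df` is the analysis states database and that db.select works as in the
# matching snippet above; the mouse number, versions and frame range are placeholders.
selected_rows = db.select(states_df,
                          'source_extraction',
                          56165,
                          cropping_v=1,
                          motion_correction_v=1,
                          source_extraction_v=1)
create_video(selected_rows.iloc[0], time_cropping=(0, 1000), session_wise=False)
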
def run_motion_correction(row, parameters, dview):
    '''
    This is the function for motion correction. Its goal is to take in a decoded and
    cropped .tif file, perform motion correction, and save the result as a .mmap file.

    This function is only runnable on the cn76 server because it requires parallel processing.

    Args:
        row: pd.Series
            The row corresponding to the analysis state to be motion corrected.
        parameters: dict
            Motion correction parameters.
        dview: cluster handle used for parallel processing.

    Returns:
        row: pd.Series
            The row corresponding to the motion-corrected analysis state.
    '''
    step_index = 2
    row_local = row.copy()
    index = row_local.name
    # Forcing parameters
    if not parameters['pw_rigid']:
        parameters['save_movie_rig'] = True

    # Get input file
    input_tif_file_path = eval(row_local.loc['cropping_output'])['main']
    if not os.path.isfile(input_tif_file_path):
        input_tif_file_path = db.get_expected_file_path(
            'cropping', 'main/', index, 'tif')
        if not os.path.isfile(input_tif_file_path):
            logging.error(
                'Cropping file not found. Cancelling motion correction.')
            return row_local

    row_local.loc['motion_correction_parameters'] = str(parameters)
    row_local = db.set_version_analysis('motion_correction', row_local)
    index = row_local.name

    # Get output file paths
    data_dir = os.environ['DATA_DIR'] + 'data/interim/motion_correction/'
    file_name = db.create_file_name(step_index, index)
    output_meta_pkl_file_path = data_dir + f'meta/metrics/{file_name}.pkl'

    # Create a dictionary with the output
    output = {
        'meta': {
            'analysis': {
                'analyst': os.environ['ANALYST'],
                'date': datetime.datetime.today().strftime("%m-%d-%Y"),
                'time': datetime.datetime.today().strftime("%H:%M:%S")
            },
            'metrics': {
                'other': output_meta_pkl_file_path
            }
        }
    }

    # Calculate movie minimum to subtract from movie
    min_mov = np.min(cm.load(input_tif_file_path))
    # Apply the parameters to the CaImAn algorithm
    caiman_parameters = parameters.copy()
    caiman_parameters['min_mov'] = min_mov
    opts = params.CNMFParams(params_dict=caiman_parameters)

    # Rigid motion correction (in both cases)
    logging.info(f'{index} Performing rigid motion correction')
    t0 = datetime.datetime.today()

    # Create a MotionCorrect object
    mc = MotionCorrect([input_tif_file_path],
                       dview=dview,
                       **opts.get_group('motion'))
    # Perform rigid motion correction
    mc.motion_correct_rigid(save_movie=parameters['save_movie_rig'],
                            template=None)
    dt = int(
        (datetime.datetime.today() - t0).seconds / 60)  # timedelta in minutes
    logging.info(f'{index} Rigid motion correction finished. dt = {dt} min')
    # Obtain template, rigid shifts and border pixels
    total_template_rig = mc.total_template_rig
    shifts_rig = mc.shifts_rig
    # Save template, rigid shifts and border pixels in a dictionary
    meta_pkl_dict = {
        'rigid': {
            'template': total_template_rig,
            'shifts': shifts_rig,
        }
    }
    output['meta']['duration'] = {'rigid': dt}
    output['meta']['cropping_points'] = [0, 0, 0, 0]

    if parameters['save_movie_rig']:
        # Load the movie saved by CaImAn, which is in the wrong
        # directory and is not yet cropped
        logging.info(f'{index} Loading rigid movie for cropping')
        m_rig = cm.load(mc.fname_tot_rig[0])
        logging.info(f'{index} Loaded rigid movie for cropping')
        # Get the cropping points determined by the maximal rigid shifts
        x_, _x, y_, _y = get_crop_from_rigid_shifts(shifts_rig)
        # Crop the movie
        logging.info(
            f'{index} Cropping and saving rigid movie with cropping points: [x_, _x, y_, _y] = {[x_, _x, y_, _y]}'
        )
        m_rig = m_rig.crop(x_, _x, y_, _y, 0, 0)
        meta_pkl_dict['rigid']['cropping_points'] = [x_, _x, y_, _y]
        output['meta']['cropping_points'] = [x_, _x, y_, _y]
        # Save the movie
        rig_role = 'alternate' if parameters['pw_rigid'] else 'main'
        fname_tot_rig = m_rig.save(data_dir + rig_role + '/' + file_name +
                                   '_rig' + '.mmap',
                                   order='C')
        logging.info(
            f'{index} Cropped and saved rigid movie as {fname_tot_rig}')
        # Store the total path in output
        output[rig_role] = fname_tot_rig
        # Remove the remaining non-cropped movie
        os.remove(mc.fname_tot_rig[0])

    # If specified in the parameters, apply piecewise-rigid motion correction
    if parameters['pw_rigid']:
        logging.info(f'{index} Performing piecewise-rigid motion correction')
        t0 = datetime.datetime.today()
        # Perform non-rigid (piecewise rigid) motion correction. Use the rigid result as a template.
        mc.motion_correct_pwrigid(save_movie=True, template=total_template_rig)
        # Obtain template and filename
        total_template_els = mc.total_template_els
        fname_tot_els = mc.fname_tot_els[0]

        dt = int((datetime.datetime.today() - t0).seconds /
                 60)  # timedelta in minutes
        meta_pkl_dict['pw_rigid'] = {
            'template': total_template_els,
            'x_shifts': mc.x_shifts_els,
            'y_shifts': mc.y_shifts_els  # initially omitted because the per-patch shifts take up a lot of space
        }
        output['meta']['duration']['pw_rigid'] = dt
        logging.info(
            f'{index} Piecewise-rigid motion correction finished. dt = {dt} min'
        )

        # Load the movie saved by CaImAn, which is in the wrong
        # directory and is not yet cropped
        logging.info(f'{index} Loading pw-rigid movie for cropping')
        m_els = cm.load(fname_tot_els)
        logging.info(f'{index} Loaded pw-rigid movie for cropping')
        # Get the cropping points determined by the maximal rigid shifts
        x_, _x, y_, _y = get_crop_from_pw_rigid_shifts(
            np.array(mc.x_shifts_els), np.array(mc.y_shifts_els))
        # Crop the movie
        logging.info(
            f'{index} Cropping and saving pw-rigid movie with cropping points: [x_, _x, y_, _y] = {[x_, _x, y_, _y]}'
        )
        m_els = m_els.crop(x_, _x, y_, _y, 0, 0)
        meta_pkl_dict['pw_rigid']['cropping_points'] = [x_, _x, y_, _y]
        output['meta']['cropping_points'] = [x_, _x, y_, _y]
        # Save the movie
        fname_tot_els = m_els.save(data_dir + 'main/' + file_name + '_els' +
                                   '.mmap',
                                   order='C')
        logging.info(
            f'{index} Cropped and saved pw-rigid movie as {fname_tot_els}')

        # Remove the remaining non-cropped movie
        os.remove(mc.fname_tot_els[0])

        # Store the total path in output
        output['main'] = fname_tot_els
        output['meta']['cropping_points'] = [x_, _x, y_, _y]

    # Write meta results dictionary to the pkl file
    pkl_file = open(output_meta_pkl_file_path, 'wb')
    pickle.dump(meta_pkl_dict, pkl_file)
    pkl_file.close()

    # Write necessary variables to the trial index and row
    row_local.loc['motion_correction_output'] = str(output)
    row_local.loc['motion_correction_parameters'] = str(parameters)

    return row_local
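
# --- Hedged usage sketch (assumption, not part of the original source) ---
# Assumes `selected_rows` holds cropped analysis states and that the parameters dict carries
# the keys this function reads ('pw_rigid', 'save_movie_rig') plus standard CaImAn motion
# parameters; all values below are placeholders.
c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                 n_processes=None,
                                                 single_thread=False)
mc_parameters = {'pw_rigid': True, 'save_movie_rig': False,
                 'gSig_filt': (5, 5), 'max_shifts': (25, 25),
                 'strides': (48, 48), 'overlaps': (24, 24),
                 'max_deviation_rigid': 15, 'border_nan': 'copy'}
row_mc = run_motion_correction(selected_rows.iloc[0], mc_parameters, dview)
cm.stop_server(dview=dview)
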
Example #24
0
def run_equalizer(selected_rows, states_df, parameters, session_wise=False):
    '''
    This function is meant to compensate for differences in contrast across trials and sessions, to equalize general
    brightness or reduce photobleaching. It corrects the videos and saves the corrected versions. It can be run
    on the already aligned (session-wise) video or trial by trial; for trial by trial, a template is required.

    :param selected_rows: pd.DataFrame -> A dataframe containing the analysis states to be equalized
    :param states_df: pd.DataFrame -> A dataframe containing the whole analysis states database
    :param parameters: dict -> parameters concerning equalization
    :param session_wise: bool -> if True, equalize the aligned session-wise movie instead of individual trials

    :return: None
    '''

    step_index = 4
    # Sort the dataframe correctly
    df = selected_rows.sort_values(by=paths.multi_index_structure)
    # Determine the output path
    output_tif_file_path = os.environ[
        'DATA_DIR'] + f'data/interim/equalizer/main/'
    mouse, session, init_trial, *r = df.iloc[0].name

    #histogram_name = f'mouse_{mouse}_session_{session}_init_trial_{init_trial}'
    #output_steps_file_path = f'data/interim/equalizer/meta/figures/histograms/'+histogram_name

    try:
        df.reset_index()[['session', 'trial', 'is_rest'
                          ]].set_index(['session', 'trial', 'is_rest'],
                                       verify_integrity=True)
    except ValueError:
        logging.error(
            'You passed multiple of the same trial in the dataframe df')
        return df

    #creates an output dictionary for the data base
    output = {
        'main': {},
        'meta': {
            'analysis': {
                'analyst': os.environ['ANALYST'],
                'date': datetime.datetime.today().strftime("%m-%d-%Y"),
                'time': datetime.datetime.today().strftime("%H:%M:%S")
            },
            'duration': {}
        }
    }

    if session_wise:
        row_local = df.iloc[0]
        input_tif_file_list = eval(row_local['alignment_output'])['main']
        movie_original = cm.load(
            input_tif_file_list)  # load video as 3d array already concatenated
        if parameters['make_template_from_trial'] == 0:
            movie_equalized = do_equalization(movie_original)
        else:
            movie_equalized = np.empty_like(movie_original)
            source = movie_original[0:100, :, :]
            # equalize the aligned movie in 100-frame chunks using the histogram of the first 100 frames as source
            for j in range(int(movie_original.shape[0] / 100)):
                want_to_equalize = movie_original[j * 100:(j + 1) * 100, :, :]
                movie_equalized[j * 100:(j + 1) *
                                100, :, :] = do_equalization_from_template(
                                    reference=want_to_equalize, source=source)
        #Save the movie
        index = row_local.name
        new_index = db.replace_at_index1(index, 4 + 4,
                                         2)  ## version 2 is for session wise
        row_local.name = new_index
        equalized_path = movie_equalized.save(
            output_tif_file_path + db.create_file_name(4, row_local.name) +
            '.mmap',
            order='C')
        output['main'] = equalized_path
        #auxiliar = eval(row_local.loc['alignment_output'])
        #auxiliar.update({'equalizing_output' : output})
        # row_local.loc['alignment_output'] = str(auxiliar)
        row_local.loc['equalization_output'] = str(output)  # store as a string, like the other output columns
        states_df = db.append_to_or_merge_with_states_df(states_df, row_local)

    else:
        # Get necessary parameters and create a list with the paths to the relevant files
        decoding_output_list = []
        input_tif_file_list = []
        trial_index_list = []
        for idx, row in df.iterrows():
            decoding_output = eval(row.loc['decoding_output'])
            decoding_output_list.append(decoding_output)
            input_tif_file_list.append(decoding_output['main'])
            trial_index_list.append(db.get_trial_name(idx[2], idx[3]))

        # this was something for plotting while testing; it can be removed
        #colors = []
        #for i in range(len(df)):
        #    colors.append('#%06X' % randint(0, 0xFFFFFF))

        #load the videos as np.array to be able to manipulate them
        m_list = []
        legend = []
        shape_list = []
        h_step = parameters['histogram_step']
        for i in range(len(input_tif_file_list)):
            im = io.imread(input_tif_file_list[i])  #load video as 3d array
            m_list.append(im)  # and adds all the videos to a list
            # keep each movie's shape so all videos can later be cut in time to the same length
            shape_list.append(im.shape)
            #legend.append('trial = ' + f'{df.iloc[i].name[2]}')

        min_shape = min(shape_list)
        # common shape: shortest movie, rounded down to a multiple of 100 frames
        new_shape = (100 * int(min_shape[0] / 100), min_shape[1], min_shape[2])
        m_list_reshape = []
        m_list_equalized = []
        source = m_list[0][0:100, :, :]
        #equalize all the videos loaded in m_list_reshape with the histogram of source

        for i in range(len(input_tif_file_list)):
            video = m_list[i]
            if parameters['make_template_from_trial'] == 0:
                equalized_video = do_equalization(video)
            else:
                m_list_reshape.append(video[:new_shape[0], :, :])
                equalized_video = np.empty_like(video[:new_shape[0], :, :])
                for j in range(int(min_shape[0] / 100)):
                    want_to_equalize = m_list_reshape[i][j * 100:(j + 1) *
                                                         100, :, :]
                    equalized_video[j * 100:(j + 1) *
                                    100, :, :] = do_equalization_from_template(
                                        reference=want_to_equalize,
                                        source=source)
            m_list_equalized.append(equalized_video)

        # convert each 3d np.array to a caiman movie and save it as a tif file, so it can be read by the motion correction script
        for i in range(len(input_tif_file_list)):
            # Save the movie
            row_local = df.iloc[i]
            # m_list_reshape is only filled when a template trial is used, so save the
            # equalized movie, which is always available
            movie_equalized = cm.movie(m_list_equalized[i])
            # Write necessary variables to the trial index and row_local
            index = row_local.name
            new_index = db.replace_at_index1(index, 4 + 0,
                                             1)  ## version 1 is for trial wise
            row_local.name = new_index
            output['main'] = output_tif_file_path + db.create_file_name(
                4, row_local.name) + '.tif'
            #auxiliar = eval(row_local.loc['decoding_output'])
            #auxiliar.update({'equalizing_output' : output})
            #row_local.loc['decoding_output'] = str(auxiliar)
            row_local.loc['equalization_output'] = str(output)  # store as a string, like the other output columns
            movie_equalized.save(output_tif_file_path +
                                 db.create_file_name(4, row_local.name) +
                                 '.tif')
            states_df = db.append_to_or_merge_with_states_df(
                states_df, row_local)

    db.save_analysis_states_database(states_df,
                                     paths.analysis_states_database_path,
                                     paths.backup_path)

    return
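
# --- Hedged usage sketch (assumption, not part of the original source) ---
# Assumes `selected_rows` and `states_df` as in the snippets above. The parameters dict only
# needs the two keys this function reads: 'make_template_from_trial' (0 = plain equalization,
# otherwise equalize 100-frame chunks against the first 100 frames) and 'histogram_step'.
eq_parameters = {'make_template_from_trial': 1, 'histogram_step': 10}
run_equalizer(selected_rows, states_df, eq_parameters, session_wise=False)
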
Example #25
0
def run_equalizer(states_df, parameters):
    '''
    This function is meant to compensate for brightness differences across trials and sessions, to equalize general
    brightness or reduce photobleaching. It corrects the videos and saves the corrected versions.

    :param states_df: pd.DataFrame -> A dataframe containing the analysis states to be equalized
    :param parameters: dict -> parameters concerning equalization

    :return: df: pd.DataFrame -> A dataframe containing the equalized analysis states
    '''

    # Sort the dataframe correctly
    df = states_df.sort_values(by=paths.multi_index_structure)
    # Determine the output path
    output_mmap_file_path = f'data/interim/equalizer/main/'
    mouse, session, init_trial, *r = df.iloc[0].name

    histogram_name = f'mouse_{mouse}_session_{session}_init_trial_{init_trial}'
    output_steps_file_path = f'data/interim/equalizer/meta/figures/histograms/' + histogram_name

    try:
        df.reset_index()[['trial', 'is_rest']].set_index(['trial', 'is_rest'],
                                                         verify_integrity=True)
    except ValueError:
        logging.error(
            'You passed multiple of the same trial in the dataframe df')
        return df

    output = {
        'meta': {
            'analysis': {
                'analyst': os.environ['ANALYST'],
                'date': datetime.datetime.today().strftime("%m-%d-%Y"),
                'time': datetime.datetime.today().strftime("%H:%M:%S")
            },
            'duration': {}
        }
    }

    # Get necessary parameters
    decoding_output_list = []
    input_tif_file_list = []
    trial_index_list = []
    for idx, row in df.iterrows():
        decoding_output = eval(row.loc['decoding_output'])
        decoding_output_list.append(decoding_output)
        input_tif_file_list.append(decoding_output['main'])
        trial_index_list.append(db.get_trial_name(idx[2], idx[3]))

    colors = []
    for i in range(len(df)):
        colors.append('#%06X' % randint(0, 0xFFFFFF))

    m_list = []
    m_reshape_list = []
    legend = []
    min_list = []
    max_list = []
    h_step = parameters['histogram_step']
    fig, ax = plt.subplots(1, 2)

    for i in range(len(input_tif_file_list)):
        im = io.imread(input_tif_file_list[i])
        m_list.append(im)  # needed below for concatenation and the timeline
        reshape_m = np.reshape(im, (im.shape[0] * im.shape[1] * im.shape[2]))
        m_reshape_list.append(reshape_m)
        legend.append('trial = ' + f'{df.iloc[i].name[2]}')

    total_min = 200
    total_max = 1500
    cdf_list = []
    for i in range(len(input_tif_file_list)):
        hist, bins = np.histogram(
            m_reshape_list[i],
            bins=np.linspace(total_min, total_max,
                             int((total_max - total_min) / h_step)),
            density=True)
        cdf = np.cumsum(hist) * h_step
        cdf_list.append(cdf)
        ax[0].plot(bins[0:-1], hist, color=colors[i])
        ax[1].plot(bins[0:-1], cdf, color=colors[i])

    ax[0].set_xlabel('Pixel Intensity')
    ax[1].set_xlabel('Pixel Intensity')
    ax[0].set_ylabel('Density')
    ax[1].set_ylabel('CDF')
    ax[0].legend((legend))
    fig.savefig(output_steps_file_path + '.png')

    # Concatenate them using the concat function
    m_concat = cm.concatenate(m_list, axis=0)
    data_dir = 'data/interim/alignment/main'
    file_name = db.create_file_name(step_index, index)
    fname = m_concat.save(data_dir + '/' + file_name + '_pw_rig' + '.mmap',
                          order='C')

    cdf_m = np.ma.masked_equal(cdf, 0)
    cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())
    cdf = np.ma.filled(cdf_m, 0).astype('uint8')

    #meta_pkl_dict['pw_rigid']['cropping_points'] = [x_, _x, y_, _y]
    #output['meta']['cropping_points'] = [x_, _x, y_, _y]
    # Save the movie
    #fname_tot_els  = m_els.save(data_dir + 'main/' + file_name + '_els' + '.mmap',  order='C')
    #logging.info(f'{index} Cropped and saved rigid movie as {fname_tot_els}')

    # MOTION CORRECTING EACH INDIVIDUAL MOVIE WITH RESPECT TO A TEMPLATE MADE OF THE FIRST MOVIE
    logging.info(
        f'{alignment_index} Performing motion correction on all movies with respect to '
        'a template made of the first movie.')
    t0 = datetime.datetime.today()

    # Create a template of the first movie
    template_index = trial_index_list.index(
        parameters['make_template_from_trial'])
    m0 = cm.load(input_mmap_file_list[template_index])
    [x1, x2, y1, y2] = motion_correction_output_list[template_index]['meta'][
        'cropping_points']
    m0 = m0.crop(new_x1 - x1, new_x2 - x2, new_y1 - y1, new_y2 - y2, 0, 0)
    m0_filt = cm.movie(
        np.array([
            high_pass_filter_space(m_, parameters['gSig_filt']) for m_ in m0
        ]))
    template0 = cm.motion_correction.bin_median(
        m0_filt.motion_correct(
            5, 5, template=None)[0])  # may be improved in the future

    # Setting the parameters
    opts = params.CNMFParams(params_dict=parameters)

    # Create a motion correction object
    mc = MotionCorrect(fname, dview=dview, **opts.get_group('motion'))

    # Perform non-rigid motion correction
    mc.motion_correct(template=template0, save_movie=True)

    # Cropping borders
    x_ = math.ceil(
        abs(np.array(mc.shifts_rig)[:, 1].max()
            ) if np.array(mc.shifts_rig)[:, 1].max() > 0 else 0)
    _x = math.ceil(
        abs(np.array(mc.shifts_rig)[:, 1].min()
            ) if np.array(mc.shifts_rig)[:, 1].min() < 0 else 0)
    y_ = math.ceil(
        abs(np.array(mc.shifts_rig)[:, 0].max()
            ) if np.array(mc.shifts_rig)[:, 0].max() > 0 else 0)
    _y = math.ceil(
        abs(np.array(mc.shifts_rig)[:, 0].min()
            ) if np.array(mc.shifts_rig)[:, 0].min() < 0 else 0)

    dt = int(
        (datetime.datetime.today() - t0).seconds / 60)  # timedelta in minutes
    output['meta']['duration']['motion_correction'] = dt
    logging.info(
        f'{alignment_index} Performed motion correction. dt = {dt} min.')

    # Create a timeline and store it
    timeline = [[trial_index_list[0], 0]]
    for i in range(1, len(m_list)):
        m = m_list[i]
        timeline.append([trial_index_list[i], timeline[i - 1][1] + m.shape[0]])
    #    timeline_pkl_file_path = f'data/interim/alignment/meta/timeline/{file_name}.pkl'
    #    with open(timeline_pkl_file_path,'wb') as f:
    #        pickle.dump(timeline,f)
    #    output['meta']['timeline'] = timeline_pkl_file_path
    output['meta']['timeline'] = timeline

    # Save the concatenated movie
    output_mmap_file_path_tot = m_concat.save(output_mmap_file_path, order='C')
    output['main'] = output_mmap_file_path_tot

    #    # Delete the motion corrected movies
    #    for fname in mc.fname_tot_rig:
    #        os.remove(fname)

    dt = int(
        (datetime.datetime.today() - t0).seconds / 60)  # timedelta in minutes
    output['meta']['duration']['concatenation'] = dt
    logging.info(f'{alignment_index} Performed concatenation. dt = {dt} min.')

    for idx, row in df.iterrows():
        df.loc[idx, 'alignment_output'] = str(output)
        df.loc[idx, 'alignment_parameters'] = str(parameters)

    return df
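
# --- Hedged sketch (assumption, not the original do_equalization implementation) ---
# Illustrates the CDF-based intensity mapping implied by the masked-CDF normalization above:
# build a normalized cumulative histogram, rescale it to 0-255 and use it as a lookup table.
def equalize_frame_sketch(frame, n_bins=256):
    hist, bins = np.histogram(frame.ravel(), bins=n_bins, density=True)
    cdf = np.cumsum(hist) * np.diff(bins)              # cumulative distribution in [0, 1]
    cdf_m = np.ma.masked_equal(cdf, 0)                 # ignore empty bins, as above
    cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())
    lut = np.ma.filled(cdf_m, 0).astype('uint8')       # lookup table, 0..255
    bin_idx = np.clip(np.digitize(frame.ravel(), bins[:-1]) - 1, 0, n_bins - 1)
    return lut[bin_idx].reshape(frame.shape)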