def main():
    """Build and save signal correlations for strongly responding cell pairs.

    For each region in rc.regions, selects cells of args.group, finds 30
    strongly responding pairs, measures signal correlations at every bin
    width in rc.bin_widths, and writes the combined frame to
    csv_dir/signal_correlations_strong.csv.

    Relies on module-level names: rc, args, csv_dir, mat_dir, posterior_dir,
    frontal_dir, and the sibling getBinWidthSignalCorrFrame.
    """
    print(dt.datetime.now().isoformat() + ' INFO: ' + 'Loading cell info...')
    cell_info, id_adjustor = rc.loadCellInfo(csv_dir)
    print(dt.datetime.now().isoformat() + ' INFO: ' + 'Loading stim info...')
    stim_info = loadmat(os.path.join(mat_dir, 'experiment2stimInfo.mat'))
    trials_info = rc.getStimTimesIds(stim_info)
    columns = [
        'region', 'first_cell_id', 'second_cell_id', 'signal_corr_coef',
        'p_values', 'bin_width'
    ]
    # Collect the per-(region, bin_width) frames and concatenate once at the
    # end: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0,
    # and appending inside a loop is quadratic in the number of frames.
    frames = []
    for region in rc.regions:
        print(dt.datetime.now().isoformat() + ' INFO: ' +
              'Making signal correlation frame for ' + region + '...')
        cell_ids = cell_info[(cell_info.region == region)
                             & (cell_info.group == args.group)].index.values
        spike_time_dict = rc.loadSpikeTimes(posterior_dir, frontal_dir,
                                            cell_ids, id_adjustor)
        responding_pairs = rc.getRespondingPairs(cell_ids,
                                                 trials_info,
                                                 spike_time_dict,
                                                 cell_info,
                                                 30,
                                                 is_strong=True)
        for bin_width in rc.bin_widths:
            frames.append(
                getBinWidthSignalCorrFrame(responding_pairs, trials_info,
                                           spike_time_dict, cell_info,
                                           bin_width, region))
    # pd.concat raises on an empty list; fall back to an empty frame with the
    # expected columns so to_csv still produces a valid (header-only) file.
    signal_corr_frame = (pd.concat(frames, ignore_index=True)
                         if frames else pd.DataFrame(columns=columns))
    csv_filename = os.path.join(csv_dir, 'signal_correlations_strong.csv')
    signal_corr_frame.to_csv(csv_filename, index=False)
    print(dt.datetime.now().isoformat() + ' INFO: ' + csv_filename + ' saved.')
def getAllWidthFrameForRegionStim(cell_info, stim_info, id_adjustor, region,
                                  stim_id, group, wanted_num_pairs, is_weak,
                                  bin_widths, threshold):
    """Measure correlations for responding pairs in one region and stimulus,
    concatenated over all requested bin widths.

    Args:
        cell_info: frame of cell metadata indexed by cell id (has .region
            and .group columns).
        stim_info: loaded stimulus-info .mat structure.
        id_adjustor: cell-id adjustment returned by rc.loadCellInfo.
        region: region name used to select cells.
        stim_id: stimulus id used to restrict the trials.
        group: cell group used to select cells.
        wanted_num_pairs: number of responding pairs to request.
        is_weak: passed through to rc.getRespondingPairs.
        bin_widths: iterable of bin widths to measure at.
        threshold: strong-response threshold for pair selection.

    Returns:
        pd.DataFrame: one sub-frame per bin width from getCorrFrameForWidth,
        concatenated with a reset index.
    """
    print(dt.datetime.now().isoformat() +
          ' INFO: Getting correlations for all widths for region = ' + region +
          ', stim ID = ' + str(stim_id) + '...')
    cell_ids = cell_info[(cell_info.region == region)
                         & (cell_info.group == group)].index.values
    print(dt.datetime.now().isoformat() + ' INFO: ' + 'Loading trial info...')
    trials_info = rc.getStimTimesIds(stim_info, stim_id)
    spike_time_dict = rc.loadSpikeTimes(posterior_dir, frontal_dir, cell_ids,
                                        id_adjustor)
    print(dt.datetime.now().isoformat() + ' INFO: ' +
          'Getting pairs of responding cells...')
    responding_pairs = rc.getRespondingPairs(cell_ids,
                                             trials_info,
                                             spike_time_dict,
                                             cell_info,
                                             wanted_num_pairs,
                                             is_weak,
                                             strong_threshold=threshold)
    # Keep spike times only for cells that appear in a responding pair.
    # (Removed the num_pairs local, which was assigned but never used.)
    responding_cells = np.unique(responding_pairs)
    spike_time_dict = {k: spike_time_dict[k] for k in responding_cells}
    return pd.concat([
        getCorrFrameForWidth(bin_width, responding_pairs, trials_info,
                             spike_time_dict, cell_info, stim_id, region)
        for bin_width in bin_widths
    ],
                     ignore_index=True)
# --- 예제 #3 (Example #3) — scraped listing separator; vote count: 0 ---
def main():
    """Compute firing rates for responding cells across all bin widths and
    save them to csv_dir/args.filename.

    Relies on module-level names: rc, args, csv_dir, mat_dir, posterior_dir,
    frontal_dir, and the sibling getFiringRatesForWidth.
    """
    print(dt.datetime.now().isoformat() + ' INFO: ' + 'Loading cell info...')
    cell_info, id_adjustor = rc.loadCellInfo(csv_dir)
    print(dt.datetime.now().isoformat() + ' INFO: ' + 'Loading stim info...')
    stim_info = loadmat(os.path.join(mat_dir, 'experiment2stimInfo.mat'))
    cell_ids = cell_info[cell_info.group==args.group].index.values
    trials_info = rc.getStimTimesIds(stim_info)
    print(dt.datetime.now().isoformat() + ' INFO: ' + 'Getting responding cells...')
    spike_time_dict = rc.loadSpikeTimes(posterior_dir, frontal_dir, cell_ids, id_adjustor)
    # num_cells=0 requests all responding cells rather than a fixed number.
    responding_cells = rc.getRespondingCells(cell_ids, trials_info, spike_time_dict, cell_info, num_cells=0, is_strong=args.is_strong, strong_threshold=args.threshold)
    # Keep spike times only for responding cells. (Removed the unused
    # num_responding_cells local.)
    spike_time_dict = {k: spike_time_dict[k] for k in responding_cells}
    all_rates = pd.concat([getFiringRatesForWidth(responding_cells, trials_info, spike_time_dict, cell_info, bin_width) for bin_width in rc.bin_widths], ignore_index=True)
    all_rates.to_csv(os.path.join(csv_dir, args.filename))
    print(dt.datetime.now().isoformat() + ' INFO: ' + 'Done.')
def main():
    """Plot cross-correlograms for strongly responding cell pairs, region by
    region, for the stimulus given by args.stim_id.

    Relies on module-level names: rc, args, csv_dir, mat_dir, posterior_dir,
    frontal_dir, and the siblings getCrossCorrelogramByPair and
    plotWithStdErrors.
    """
    def log(message):
        # Timestamped info line, matching the module's logging style.
        print(dt.datetime.now().isoformat() + ' INFO: ' + message)

    log('Loading cell info...')
    cell_info, id_adjustor = rc.loadCellInfo(csv_dir)
    log('Loading stim info...')
    stim_info = loadmat(os.path.join(mat_dir, 'experiment2stimInfo.mat'))
    trials_info = rc.getStimTimesIds(stim_info, args.stim_id)
    num_trials = trials_info.shape[0]
    # Correlogram delays in milliseconds: -1000 ... +1000 inclusive.
    delays = np.arange(-1000, 1001)
    regions = ['motor_cortex', 'striatum', 'hippocampus', 'thalamus', 'v1']
    for region in regions:
        log('Starting region ' + region + '...')
        cell_ids = cell_info[(cell_info.region==region)&(cell_info.group==args.group)].index.values
        spike_time_dict = rc.loadSpikeTimes(posterior_dir, frontal_dir, cell_ids, id_adjustor)
        responding_pairs = rc.getRespondingPairs(cell_ids, trials_info, spike_time_dict, cell_info, 5, is_strong=True)
        responding_cells = np.unique(responding_pairs)
        # 1 ms bins so the correlogram has millisecond resolution.
        exp_frame = rc.getExperimentFrame(responding_cells, trials_info, spike_time_dict, cell_info, 0.001)
        for pair in responding_pairs:
            log('Processing ' + str(pair) + '...')
            correlations, corr_std_errors = getCrossCorrelogramByPair(pair, exp_frame, num_trials, delays)
            filepath = plotWithStdErrors(correlations, corr_std_errors, delays, "Correlation Coefficient (a.u.)", pair, region, args.stim_id)
            log('Saved:' + filepath)
    log('Done.')
# --- 예제 #5 (Example #5) — scraped listing separator; vote count: 0 ---
def main():
    """Measure pairwise correlation matrices for a random cell selection,
    per stimulus and for all stimuli combined, then optionally plot or save
    the all-stimulus matrix.

    Relies on module-level names: rc, args, csv_dir, image_dir, stim_info,
    posterior_dir, frontal_dir, plt, and the siblings
    getCorrelationMatrixForStim, plotCorrMatrixForStim, showCellInfoTable.
    """
    def log(message):
        # Timestamped info line, matching the module's logging style.
        print(dt.datetime.now().isoformat() + ' INFO: ' + message)

    log('Loading cell info...')
    cell_info, id_adjustor = rc.loadCellInfo(csv_dir)
    log('Selecting cells...')
    cell_ids = rc.getRandomSelection(cell_info, args.number_of_cells,
                                     args.group, args.probe, args.region)
    spike_time_dict = rc.loadSpikeTimes(posterior_dir, frontal_dir, cell_ids,
                                        id_adjustor)
    log('Loading trial info...')
    trials_info = rc.getStimTimesIds(stim_info, args.stim_id)
    log('Creating experiment frame...')
    exp_frame = rc.getExperimentFrame(cell_ids, trials_info, spike_time_dict,
                                      cell_info, args.bin_length)
    log('Measuring correlations...')
    stim_ids = np.unique(stim_info['stimIDs'][0]).astype(int)
    correlation_dict = {
        stim_id: getCorrelationMatrixForStim(exp_frame, stim_id)
        for stim_id in stim_ids
    }
    # stim_id == -1 pools every stimulus into one matrix.
    correlation_dict[-1] = getCorrelationMatrixForStim(exp_frame, -1)
    if not args.plot_correlation_matrix:
        return
    # Show the example correlation matrix for all stimuli combined.
    log('Plotting correlations...')
    plotCorrMatrixForStim(correlation_dict[-1][0], cell_info,
                          exp_frame['cell_id'].unique())
    if args.correlation_figure_filename == '':
        # No filename given: display interactively, with a cell-info table.
        plt.show(block=False)
        showCellInfoTable(cell_info, exp_frame['cell_id'].unique())
        plt.show(block=False)
    else:
        plt.savefig(
            os.path.join(image_dir, 'pairwise_correlation_matrices',
                         args.correlation_figure_filename))
def getDataFrames(single_file, paired_file, bin_width):
    """Load the single-cell firing frame and the pairwise frame from csv_dir,
    each filtered down to the given bin width.

    Args:
        single_file: CSV filename (within csv_dir) of single-cell firing rates.
        paired_file: CSV filename (within csv_dir) of pairwise measurements.
        bin_width: bin width value to select in both frames.

    Returns:
        (firing_frame, pairwise_frame): the two frames restricted to rows
        whose bin_width column equals bin_width.
    """
    # Bug fix: the original body read args.single_file / args.paired_file,
    # silently ignoring this function's own parameters. The visible caller
    # passes exactly those values, so behavior is unchanged there, but the
    # function now honors its signature.
    # The usecols predicate drops a stray index column written by a previous
    # to_csv without index=False.
    firing_frame = pd.read_csv(os.path.join(csv_dir, single_file),
                               usecols=lambda x: x != 'Unnamed: 0')
    pairwise_frame = pd.read_csv(os.path.join(csv_dir, paired_file),
                                 usecols=lambda x: x != 'Unnamed: 0')
    firing_frame = firing_frame[firing_frame.bin_width == bin_width]
    pairwise_frame = pairwise_frame[pairwise_frame.bin_width == bin_width]
    return firing_frame, pairwise_frame

# --- Script body: inspect one cell pair at its region's best stimulus and
# --- start a scatter plot of the pair's joint per-trial spike counts.
# --- Relies on module-level names: args, rc, csv_dir, mat_dir,
# --- posterior_dir, frontal_dir, np, plt, loadmat.
pair = np.array(args.pair)  # the two cell ids to compare
cell_info, id_adjustor = rc.loadCellInfo(csv_dir)
stim_info = loadmat(os.path.join(mat_dir, 'experiment2stimInfo.mat'))
firing_frame, pairwise_frame = getDataFrames(args.single_file, args.paired_file, args.bin_width)
# Region is taken from the first cell of the pair; presumably both cells
# share a region — TODO confirm against how pairs are constructed.
region = firing_frame.loc[firing_frame.cell_id == pair[0]].region.unique()[0]
best_stim = rc.getBestStimFromRegion(pairwise_frame, region)
# Restrict pairwise measurements and firing rates to this region's best stim.
pairwise_region_frame = pairwise_frame[(pairwise_frame.region == region) & (pairwise_frame.stim_id == best_stim)]
stim_firing_frame = firing_frame.loc[firing_frame.stim_id == best_stim,:]
trials_info = rc.getStimTimesIds(stim_info, best_stim)
spike_time_dict = rc.loadSpikeTimes(posterior_dir, frontal_dir, pair, id_adjustor)
# 1.0 s bins for the per-trial spike counts.
exp_frame = rc.getExperimentFrame(pair, trials_info, spike_time_dict, cell_info, 1.0)

# Scalar measurements for the specific (first_cell_id, second_cell_id) row.
corr = pairwise_region_frame.loc[(pairwise_region_frame.first_cell_id == pair[0]) & (pairwise_region_frame.second_cell_id == pair[1]), 'corr_coef'].iloc[0]
info = pairwise_region_frame.loc[(pairwise_region_frame.first_cell_id == pair[0]) & (pairwise_region_frame.second_cell_id == pair[1]), 'mutual_info_qe'].iloc[0]
first_firing_rate = stim_firing_frame.loc[firing_frame.cell_id == pair[0], 'firing_rate_mean'].iloc[0]
second_firing_rate = stim_firing_frame.loc[firing_frame.cell_id == pair[1], 'firing_rate_mean'].iloc[0]
geom_mean_firing_rate = np.sqrt(first_firing_rate * second_firing_rate)
# Per-trial spike counts for each cell of the pair.
first_responses = exp_frame.loc[exp_frame.cell_id == pair[0], 'num_spikes']
second_responses = exp_frame.loc[exp_frame.cell_id == pair[1], 'num_spikes']

# Left panel: joint scatter of the two cells' per-trial responses.
# (The listing is truncated here; the figure continues beyond this chunk.)
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.scatter(first_responses, second_responses)
plt.xlabel('Spike counts, cell ' + str(pair[0]), fontsize='large')
# --- 예제 #7 (Example #7) — scraped listing separator; vote count: 0 ---
                      cax=cax,
                      cmap=cmap,
                      norm=norm,
                      boundaries=bounds,
                      ticks=np.array(bounds)[1:] - 0.5)
    cb.set_ticklabels(
        [c.replace('_', ' ').capitalize() for c in [''] + colours])
    plt.tight_layout()


# --- Script body: load cell/stim/trial info and, when a numpy file prefix
# --- is supplied, reload previously computed responding-cell results from
# --- disc. Relies on module-level names: args, rc, csv_dir, mat_dir,
# --- npy_dir, np, pd, loadmat. (The if-block may continue past this chunk.)
print(dt.datetime.now().isoformat() + ' INFO: ' + 'Loading cell info...')
cell_info, id_adjustor = rc.loadCellInfo(csv_dir)
print(dt.datetime.now().isoformat() + ' INFO: ' + 'Loading stim info...')
stim_info = loadmat(os.path.join(mat_dir, 'experiment2stimInfo.mat'))
print(dt.datetime.now().isoformat() + ' INFO: ' + 'Loading trials info...')
trials_info = rc.getStimTimesIds(stim_info, args.stim_id)
if args.numpy_file_prefix != '':
    print(dt.datetime.now().isoformat() + ' INFO: ' +
          'Loading data from disc...')
    # Cell ids sorted by region, saved by an earlier run under this prefix.
    # allow_pickle=True is required because these arrays hold Python objects.
    region_sorted_cell_ids = np.load(os.path.join(
        npy_dir, args.numpy_file_prefix + '_responding_sorted_cell_ids.npy'),
                                     allow_pickle=True)
    # Responding cell pairs saved by an earlier run.
    pairs = np.load(os.path.join(
        npy_dir, args.numpy_file_prefix + '_responding_pairs.npy'),
                    allow_pickle=True)
    # Pairwise measurement frame (pickled DataFrame).
    pairwise_measurements = pd.read_pickle(
        os.path.join(npy_dir,
                     args.numpy_file_prefix + '_pairwise_measurements.pkl'))
    # Mutual-information matrix saved by an earlier run.
    info_matrix = np.load(os.path.join(
        npy_dir, args.numpy_file_prefix + '_info_matrix.npy'),
                          allow_pickle=True)