Code example #1
File: neural.py Project: qihongl/learn-hippo
def compute_cell_memory_similarity_stats(sim_dict, cond_ids):
    # re-organize as a hierarchical dict
    sim_stats = {cn: {'targ': {}, 'lure': {}} for cn in cond_ids.keys()}
    # for DM, RM conditions...
    for cond in ['DM', 'RM']:
        # compute stats for target & lure activation
        for m_type in sim_stats[cond].keys():
            s_ = compute_stats(np.mean(sim_dict[cond][m_type], axis=-1))
            sim_stats[cond][m_type]['mu'], sim_stats[cond][m_type]['er'] = s_
    # for NM trials, only compute lure activations, since there is no target
    s_ = compute_stats(np.mean(sim_dict['NM']['lure'], axis=-1))
    sim_stats['NM']['lure']['mu'], sim_stats['NM']['lure']['er'] = s_
    return sim_stats
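
Note: most examples on this page call a compute_stats helper from the learn-hippo project that the page itself never shows. Judging from the call sites (it takes an array, an optional axis, and an optional n_se, and returns a mean and an error term), a minimal sketch might look like the following; the actual implementation may differ. (Code example #4 uses a different, project-specific compute_stats; see the note there.)

import numpy as np

def compute_stats(matrix, axis=0, n_se=2):
    """Hypothetical sketch: mean and n_se * standard error along an axis."""
    mu = np.mean(matrix, axis=axis)
    n = np.shape(matrix)[axis]
    er = np.std(matrix, axis=axis) / np.sqrt(n) * n_se
    return mu, er
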
Code example #2
File: preprocessing.py Project: qihongl/learn-hippo
def sep_by_qsource(matrix_p2, q_source_info, n_se=3):
    """separate values by query source and then compute statistics

    Parameters
    ----------
    matrix_p2 : type
        Description of parameter `matrix_p2`.
    q_source_info : type
        Description of parameter `q_source_info`.
    n_se : type
        Description of parameter `n_se`.

    Returns
    -------
    type
        Description of returned object.

    """
    stats = {}
    # loop over sources
    for q_source_name, q_source_id in q_source_info.items():
        T_ = np.shape(q_source_id)[1]
        mu_, er_ = np.zeros(T_), np.zeros(T_)
        # loop over time
        for t in range(T_):
            # compute stats
            mu_[t], er_[t] = compute_stats(matrix_p2[q_source_id[:, t], t],
                                           n_se=n_se)
        # collect stats for this source
        stats[q_source_name] = [mu_, er_]
    return stats
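
A hypothetical usage sketch for sep_by_qsource; the shapes and source names below are assumptions inferred from the indexing in the function, not taken from the project:

import numpy as np

rng = np.random.default_rng(0)
matrix_p2 = rng.random((50, 10))               # 50 trials x 10 time points
q_source_info = {                              # boolean trial masks per time point
    'source_a': rng.random((50, 10)) > .5,
    'source_b': rng.random((50, 10)) > .5,
}
stats = sep_by_qsource(matrix_p2, q_source_info, n_se=3)
mu_a, er_a = stats['source_a']                 # two length-10 arrays
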
Code example #3
def plot_time_course_for_all_conds(
    matrix,
    cond_ids,
    ax,
    n_se=2,
    axis1_start=0,
    xlabel=None,
    ylabel=None,
    title=None,
    frameon=False,
    add_legend=True,
):
    for i, cond_name in enumerate(TZ_COND_DICT.values()):
        submatrix_ = matrix[cond_ids[cond_name], axis1_start:]
        M_, T_ = np.shape(submatrix_)
        mu_, er_ = compute_stats(submatrix_, axis=0, n_se=n_se)
        ax.errorbar(x=range(T_), y=mu_, yerr=er_, label=cond_name)
    if add_legend:
        ax.legend(frameon=frameon)
    ax.set_title(title)
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
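
A hypothetical usage sketch; TZ_COND_DICT is assumed here to map integer ids to the condition names seen elsewhere on this page ('RM', 'DM', 'NM'), and the data are made up:

import numpy as np
import matplotlib.pyplot as plt

TZ_COND_DICT = {0: 'RM', 1: 'DM', 2: 'NM'}        # assumed mapping
rng = np.random.default_rng(0)
matrix = rng.random((90, 20))                     # 90 trials x 20 time points
cond_ids = {cn: np.arange(i * 30, (i + 1) * 30)   # 30 trials per condition
            for i, cn in TZ_COND_DICT.items()}
f, ax = plt.subplots(1, 1, figsize=(7, 4))
plot_time_course_for_all_conds(matrix, cond_ids, ax,
                               xlabel='Time', ylabel='Value')
plt.show()
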
Code example #4
import time

import analysis  # project-local module providing the graph helpers used below

start = time.time()
filenames = ['./data/soc-Epinions1/soc-Epinions1.txt']

g = analysis.load_graph(filenames[0], directed=True)
m, s = divmod(time.time() - start, 60)
print('load graph: \t', int(m), 'min:', int(s), 's')

print('=====LSCC=====')
start_lscc = time.time()
lscc = analysis.calculate_largest_strongly_connected_comp(g)
m, s = divmod(time.time() - start_lscc, 60)
print('calculate LSCC: \t', int(m), 'min:', int(s), 's')
print('LSCC edges: \t', lscc.num_edges())
print('LSCC nodes: \t', lscc.num_vertices())

lscc_dists = analysis.calculate_distances(lscc)
m, s = divmod(time.time() - start_lscc, 60)
print('calculate distances LSCC: \t', int(m), 'min:', int(s), 's')

s_median, s_mean, s_diam, s_eff_diam = analysis.compute_stats(lscc_dists)
print('median distance:\t', s_median)
print('mean distance:\t', s_mean)
print('diameter:\t', s_diam)
print('effective diameter:\t', s_eff_diam)
m, s = divmod(time.time() - start_lscc, 60)
print('LSCC done: \t', int(m), 'min:', int(s), 's')
del lscc, lscc_dists

print('=====LWCC=====')
start_lwcc = time.time()
lwcc = analysis.calculate_largest_weakly_connected_comp(g)
m, s = divmod(time.time() - start_lwcc, 60)
print('calculate LWCC: \t', int(m), 'min:', int(s), 's')
print('LWCC edges: \t', lwcc.num_edges())
print('LWCC nodes: \t', lwcc.num_vertices())

lwcc_dists = analysis.calculate_distances(lwcc)
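
Note that compute_stats here belongs to the project-local analysis module and summarizes a distance distribution rather than returning (mean, error) as in the other examples. A hypothetical sketch of such a function, assuming the common 90th-percentile definition of effective diameter:

import numpy as np

def compute_stats(dists):
    """Hypothetical sketch; the real analysis.compute_stats is not shown."""
    dists = np.asarray(dists, dtype=float)
    median = np.median(dists)
    mean = np.mean(dists)
    diam = np.max(dists)                    # longest shortest path observed
    eff_diam = np.percentile(dists, 90)     # distance covering 90% of pairs
    return median, mean, diam, eff_diam
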
Code example #5
    p_rm_ob_rcl=p_rm_ob_rcl,
)
# sample
n_samples = 256
X, Y = task.sample(n_samples, to_torch=False)
# unpack
print(f'X shape: {np.shape(X)}, n_samples x T x x_dim')
print(f'Y shape: {np.shape(Y)}, n_samples x T x y_dim')
'''show uncertainty'''

dk_wm, dk_em = batch_compute_true_dk(X, task)
print(f'np.shape(dk_wm): {np.shape(dk_wm)}')
print(f'np.shape(dk_em): {np.shape(dk_em)}')

# compute stats
dk_em_mu, dk_em_er = compute_stats(dk_em)
dk_wm_mu, dk_wm_er = compute_stats(dk_wm)

# plot
f, ax = plt.subplots(1, 1, figsize=(7, 4))
ax.errorbar(x=range(len(dk_em_mu)),
            y=1 - dk_em_mu,
            yerr=dk_em_er,
            label='w/ EM')
ax.errorbar(x=np.arange(n_param, n_param * task.n_parts),
            y=1 - dk_wm_mu,
            yerr=dk_wm_er,
            label='w/o EM')
ax.axvline(n_param, color='grey', linestyle='--')
ax.set_title(f'Expected performance, delay = {pad_len} / {n_param}')
ax.set_xlabel('Time, 0 = prediction onset')
Code example #6
'''plot spatial pattern ISC (inter-subject correlation)'''
c_pal = sns.color_palette('colorblind', n_colors=8)
# sns.palplot(c_pal)
color_id_pick = [0, 4]
c_pal = [c_pal[color_id] for color_id in color_id_pick]

# compute stats
n_se = 1
mu_ = {rcn: {cn: [] for cn in all_conds} for rcn in all_conds}
er_ = {rcn: {cn: [] for cn in all_conds} for rcn in all_conds}
for ref_cond in cond_ids.keys():
    for cond in cond_ids.keys():
        d_ = np.array(bs_bc_sisc[ref_cond][cond])
        if len(d_) > 0:
            mu_[ref_cond][cond], er_[ref_cond][cond] = compute_stats(
                np.mean(d_, axis=1), n_se=n_se)

# plot
f, ax = plt.subplots(1, 1, figsize=(7, 5))
color_id = 0
i_rc, ref_cond = 0, 'RM'
for i_c, cond in enumerate(['RM', 'DM']):
    if i_c >= i_rc:
        ax.errorbar(x=range(T_part),
                    y=mu_[ref_cond][cond][T_part:],
                    yerr=er_[ref_cond][cond][T_part:],
                    label=f'{ref_cond}-{cond}',
                    color=c_pal[color_id])
        color_id += 1
Code example #7
                            def_prob=def_prob)
    X, Y, Misc = task.sample(n_samples, to_torch=False, return_misc=True)

    # compute inter-event similarity
    similarity_matrix = compute_event_similarity_matrix(Y, normalize=True)
    similarity_matrix_tril = similarity_matrix[np.tril_indices(n_samples,
                                                               k=-1)]

    one_matrix = np.ones((n_samples, n_samples))
    tril_mask = np.tril(one_matrix, k=-1).astype(bool)
    tril_k_mask = np.tril(one_matrix, k=-task.similarity_cap_lag).astype(bool)
    similarity_mask_recent = np.logical_and(tril_mask, ~tril_k_mask)

    event_sims[i] = similarity_matrix[similarity_mask_recent]
'''plot'''
mu, se = compute_stats(event_sims, axis=1)
cps = sns.color_palette(n_colors=len(mu))
f, ax = plt.subplots(1, 1, figsize=(9, 6))
for i in range(n_conditions):
    sns.kdeplot(event_sims[i], ax=ax, label=similarity_labels[i])
ax.legend()
for j, mu_j in enumerate(mu):
    ax.axvline(mu_j, color=cps[j], linestyle='--', alpha=.6)
ax.set_title('Event similarity by condition')
ax.set_xlabel('Event similarity')
sns.despine()
f.tight_layout()

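
Both this example and code example #12 build the same 'recent pairs' mask from two lower-triangular masks. A tiny standalone demonstration of the masking logic, with a made-up size and cap lag:

import numpy as np

n_samples, cap_lag = 5, 2                      # made-up sizes for illustration
one_matrix = np.ones((n_samples, n_samples))
tril_mask = np.tril(one_matrix, k=-1).astype(bool)          # pairs with i > j
tril_k_mask = np.tril(one_matrix, k=-cap_lag).astype(bool)  # pairs with i - j >= cap_lag
recent = np.logical_and(tril_mask, ~tril_k_mask)            # 0 < i - j < cap_lag
print(recent.astype(int))
# [[0 0 0 0 0]
#  [1 0 0 0 0]
#  [0 1 0 0 0]
#  [0 0 1 0 0]
#  [0 0 0 1 0]]
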
Code example #8
        actual_n_subjs = len(def_path_int_g)

        # get the target memory activation for each subject
        tma_dmp2_g = np.array([
            ma_g[i_s]['DM']['targ']['mu'][n_param:]
            for i_s in range(actual_n_subjs)
        ])

        # split the data according to whether t is schematic ...

        # ... for target memory activation
        tma_s = ma.masked_array(tma_dmp2_g, np.logical_not(def_tps_g))
        tma_ns = ma.masked_array(tma_dmp2_g, def_tps_g)
        # compute mean
        tma_s_mu[pi, dpi], tma_s_se[pi, dpi] = compute_stats(
            np.mean(tma_s, axis=1))
        tma_ns_mu[pi, dpi], tma_ns_se[pi, dpi] = compute_stats(
            np.mean(tma_ns, axis=1))

        # ... for the em gates
        inpt_s_l, inpt_ns_l = [[] for t in range(T)], [[] for t in range(T)]
        ms_s_l, ms_ns_l = [[] for t in range(T)], [[] for t in range(T)]

        for i_s in range(actual_n_subjs):
            n_trials = np.shape(targets_dmp2_g[i_s])[0]
            # compute the proto time point mask for subj i
            def_path_int_i_s_rep = np.tile(def_path_int_g[i_s], (n_trials, 1))
            def_tps_g_i_s_rep = np.tile(def_tps_g[i_s], (n_trials, 1))
            mask_s = def_tps_g_i_s_rep
            inpt_s_ = inpt_dmp2_g[i_s][:, def_tps_g[i_s]]
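
The schematic / non-schematic split above relies on numpy.ma semantics: entries whose mask value is True are excluded from reductions such as the mean. A tiny standalone illustration with made-up data:

import numpy as np
import numpy.ma as ma

x = np.arange(6.).reshape(2, 3)                # toy trials x time points
schematic = np.array([[True, True, False],     # toy schematic-time mask
                      [False, False, True]])
x_s = ma.masked_array(x, np.logical_not(schematic))  # keep schematic entries
x_ns = ma.masked_array(x, schematic)                 # keep the rest
print(np.mean(x_s, axis=1))                    # [0.5 5.0]
print(np.mean(x_ns, axis=1))                   # [2.0 3.5]
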
Code example #9
    ma_cos_list_nonone = remove_none(ma_cos_list)
    n_actual_subjs = len(ma_cos_list_nonone)
    ma_cos_mu = {cond: np.zeros(n_actual_subjs) for cond in all_cond}
    ma_cos = {cond: np.zeros((n_actual_subjs, T)) for cond in all_cond}
    for i_s in range(n_actual_subjs):
        for c_i, c_name in enumerate(all_cond):
            lure_mu_p2 = ma_cos_list_nonone[i_s][c_name]['lure']['mu'][T:]
            ma_cos[c_name][i_s] = lure_mu_p2
            # average over time
            ma_cos_mu[c_name][i_s] = np.mean(lure_mu_p2)

    # compute stats across subjects
    for c_i, c_name in enumerate(all_cond):
        ma_cos_mumu[p_i][c_i], ma_cos_muse[p_i][c_i] = compute_stats(
            ma_cos_mu[c_name])
        ma_cos_tmu[p_i][c_i], ma_cos_tse[p_i][c_i] = compute_stats(
            ma_cos[c_name])

    # compute memory similarity
    memory_sim_mu[p_i], memory_sim_se[p_i] = compute_stats(
        remove_none(memory_sim_g))
'''plot the data'''

cpal = sns.color_palette("Blues", n_colors=len(all_cond))
f, ax = plt.subplots(1, 1, figsize=(6, 5))
for p_i, penalty_test in enumerate(penalty_test_list):
    ax.errorbar(x=range(len(all_cond)),
                y=ma_cos_mumu[p_i],
                yerr=ma_cos_muse[p_i],
                color=cpal[p_i])
Code example #10
                for lca_pid, lca_pname in lca_pnames.items():
                    del lca_param[ptest][lca_pid][cond]['mu'][i_ms]
                    del lca_param[ptest][lca_pid][cond]['er'][i_ms]
            del ma_lca[ptest][i_ms]
    '''compute average memory activation'''
    ma_dmp2, sum_ma_dmp2 = dict(), dict()
    ma_dmp2_mu, ma_dmp2_se = dict(), dict()

    for p_test in penaltys_test:
        ma_dmp2[p_test] = np.array([
            ma_lca[p_test][s]['DM']['targ']['mu'][n_param:]
            for s in range(n_subjs)
        ])
        sum_ma_dmp2[p_test] = np.sum(ma_dmp2[p_test], axis=1)
        ma_dmp2_mu[p_test], ma_dmp2_se[p_test] = compute_stats(
            np.mean(ma_dmp2[p_test], axis=1))

    ax.errorbar(x=range(len(penaltys_test)),
                y=list(ma_dmp2_mu.values()),
                yerr=list(ma_dmp2_se.values()),
                label=exp_name,
                color=cpal[ei])

    ax.set_xticks(range(len(penaltys_test)))
    ax.set_xticklabels(penaltys_test)
    ax.set_xlabel('Penalty, test')
    ax.set_ylabel('Average memory activation')
    ax.legend()
    sns.despine()
    f.tight_layout()
Code example #11
# load data
data_dict = pickle_load_dict(fpath)
Yhat_all = np.array(data_dict['Yhat_all'])
Yob_all = np.array(data_dict['Yob_all'])
o_keys_p1_all = data_dict['o_keys_p1_all']
o_keys_p2_all = data_dict['o_keys_p2_all']

'''visualize the results'''
Y_match = Yhat_all == Yob_all
Y_pred_match = np.logical_and(np.logical_and(
    Yhat_all != -1, Yob_all != -1), Y_match)
# print(f'overall acc: {np.mean(Y_match)}')

# compute the decoding accuracy OVER TIME, averaged across subjects
match_ovt_mu, match_ovt_se = compute_stats(
    np.mean(np.mean(Y_match, axis=1), axis=0), axis=1)
pmatch_ovt_mu, pmatch_ovt_se = compute_stats(
    np.mean(np.mean(Y_pred_match, axis=1), axis=0), axis=1)

f, axes = plt.subplots(1, 2, figsize=(12, 5))
# decoding accuracy over time, with vertical error bars (se of the mean)
axes[0].errorbar(
    x=range(n_param), y=pmatch_ovt_mu[:n_param], yerr=pmatch_ovt_se[:n_param])
axes[0].errorbar(
    x=range(n_param), y=match_ovt_mu[:n_param], yerr=match_ovt_se[:n_param])
axes[1].errorbar(
    x=range(n_param), y=pmatch_ovt_mu[n_param:], yerr=pmatch_ovt_se[n_param:])
axes[1].errorbar(
    x=range(n_param), y=match_ovt_mu[n_param:], yerr=match_ovt_se[n_param:])

axes[0].set_xlabel('Part 1')
axes[1].set_xlabel('Part 2')
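
Y_pred_match above counts a time point as correct only when both the prediction and the observation are defined; -1 appears to serve as the 'undefined' code. A tiny illustration of how it differs from the raw match, with made-up labels:

import numpy as np

yhat = np.array([2, -1, 3])                      # made-up decoded labels; -1 = undefined
yob = np.array([2, -1, 1])                       # made-up observed labels
y_match = yhat == yob                            # [ True  True False]
defined = np.logical_and(yhat != -1, yob != -1)  # [ True False  True]
y_pred_match = np.logical_and(defined, y_match)  # [ True False False]
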
Code example #12
sns.heatmap(similarity_matrix,
            xticklabels=n_samples // 2,
            yticklabels=n_samples // 2,
            cmap='viridis',
            ax=ax)
ax.set_xlabel('event i')
ax.set_ylabel('event j')
ax.set_title('inter-event similarity')

one_matrix = np.ones((n_samples, n_samples))
tril_mask = np.tril(one_matrix, k=-1).astype(bool)
tril_k_mask = np.tril(one_matrix, k=-task.similarity_cap_lag).astype(bool)
similarity_mask_recent = np.logical_and(tril_mask, ~tril_k_mask)
similarity_mask_distant = tril_k_mask

mu_rc, er_rc = compute_stats(similarity_matrix[similarity_mask_recent])
mu_dt, er_dt = compute_stats(similarity_matrix[similarity_mask_distant])
bar_height = [mu_rc, mu_dt]
bar_yerr = [er_rc, er_dt]
xticks = range(len(bar_height))
xlabs = ['recent', 'distant']

f, ax = plt.subplots(1, 1, figsize=(5, 4))
ax.bar(x=xticks, height=bar_height, yerr=bar_yerr)
ax.set_title('Event similarity')
ax.set_ylabel('Param overlap')
ax.set_xticks(xticks)
ax.set_xticklabels(xlabs)
sns.despine()
f.tight_layout()
'''plot the distribution (for the lower triangular part)'''