コード例 #1
0
            # NOTE(review): these keyword args close a call whose opening line
            # is outside this fragment (presumably a parameter/model-config
            # constructor) -- confirm against the full file.
            p_rm_ob_enc=p_rm_ob_enc_test,
            p_rm_ob_rcl=p_rm_ob_rcl_test,
        )
        # create logging dirs
        log_path, log_subpath = build_log_path(subj_id,
                                               p,
                                               log_root=log_root,
                                               verbose=False)

        # locate the pickled test data for this subject / training epoch;
        # `slience_recall_time` spelling kept as-is: it must match the name
        # used when the data were saved
        test_params = [penalty_test, pad_len_test, slience_recall_time]
        test_data_dir, test_data_subdir = get_test_data_dir(
            log_subpath, epoch_load, test_params)
        test_data_fname = get_test_data_fname(n_examples_test, fix_cond)
        fpath = os.path.join(test_data_dir, test_data_fname)

        # load the saved simulation output; dict with 'results' (model
        # outputs) and 'XY' (raw stimuli/targets)
        test_data_dict = pickle_load_dict(fpath)
        results = test_data_dict['results']
        XY = test_data_dict['XY']

        # unpack: action distributions, targets, cached logs, condition ids
        [dist_a_, Y_, log_cache_, log_cond_] = results
        [X_raw, Y_raw] = XY

        # compute ground truth / objective uncertainty (delay phase removed)
        true_dk_wm_, true_dk_em_ = batch_compute_true_dk(X_raw, task)
        '''precompute some constants'''
        # figure out max n-time-steps across for all trials
        # T_part: time steps per task part; T_total: across all parts
        T_part = n_param + pad_len_test
        T_total = T_part * task.n_parts
        # number of trial conditions and the two memory types compared
        n_conds = len(TZ_COND_DICT)
        memory_types = ['targ', 'lure']
コード例 #2
0
        # NOTE(review): this `)` closes a call opened before this fragment.
        )

        # group-data pickle named by train/test penalty pair
        fname = 'p%d-%d.pkl' % (penalty_train, penalty_test)
        # pickle_save_dict(gdata_dict, os.path.join(dir_all_subjs, fname))
        # subj_id=0 / mkdir=False: only used to derive the shared parent dir
        log_path, _ = build_log_path(0,
                                     p,
                                     log_root=log_root,
                                     mkdir=False,
                                     verbose=False)
        data_load_path = os.path.join(os.path.dirname(log_path), fname)
        # load data
        # fname = '%s-dp%.2f-p%d-%d.pkl' % (
        #     exp_name, def_prob, penalty_train, penalty_test)
        # print(fname)
        # data_load_path = os.path.join(gdata_outdir, fname)
        data = pickle_load_dict(data_load_path)
        # unpack data, dropping subjects whose entry is None (failed runs)
        inpt_dmp2_g = remove_none(data['inpt_dmp2_g'])
        actions_dmp2_g = remove_none(data['actions_dmp2_g'])
        targets_dmp2_g = remove_none(data['targets_dmp2_g'])
        def_path_int_g = remove_none(data['def_path_int_g'])
        def_tps_g = remove_none(data['def_tps_g'])
        ma_g = remove_none(data['lca_ma_list'])

        # clip the number of subjects
        # NOTE(review): len(def_tps_g) - n_rm simplifies to clip_subj, and
        # def_tps_g / ma_g themselves are never clipped here -- if later code
        # indexes them past clip_subj this is a latent inconsistency; confirm.
        n_rm = len(def_tps_g) - clip_subj
        if n_rm > 0:
            inpt_dmp2_g = inpt_dmp2_g[:len(def_tps_g) - n_rm]
            actions_dmp2_g = actions_dmp2_g[:len(def_tps_g) - n_rm]
            targets_dmp2_g = targets_dmp2_g[:len(def_tps_g) - n_rm]
            def_path_int_g = def_path_int_g[:len(def_tps_g) - n_rm]
コード例 #3
0
# NOTE(review): defaultdict() with no default_factory behaves like a plain
# dict (missing keys still raise KeyError) -- presumably intentional, since
# every key is assigned explicitly below; confirm.
ma_cosine = defaultdict()

# for penalty_train in penaltys_train:

# collect the group-level pickles for one fixed penalty_train across all
# test penalties (penalty_train is assumed set earlier in the file)
for ptest in penaltys_test:
    print(f'penalty_train={penalty_train}, ptest={ptest}')
    # create logging dirs
    # subj_id=0 / mkdir=False: only used to derive the shared parent dir
    log_path, _ = build_log_path(0,
                                 p,
                                 log_root=log_root,
                                 mkdir=False,
                                 verbose=False)
    # load data
    dir_all_subjs = os.path.dirname(log_path)
    fname = 'p%d-%d.pkl' % (penalty_train, ptest)
    data = pickle_load_dict(os.path.join(dir_all_subjs, fname))
    # unpack data, keyed by the test penalty
    lca_param[ptest] = data['lca_param_dicts']
    auc[ptest] = data['auc_list']
    acc[ptest] = data['acc_dict']
    mis[ptest] = data['mis_dict']
    dk[ptest] = data['dk_dict']
    ma_lca[ptest] = data['lca_ma_list']
    ma_cosine[ptest] = data['cosine_ma_list']

# NOTE(review): relies on the leaked loop variable `ptest` (last test
# penalty); assumes every penalty has the same number of subjects -- confirm
n_subjs_total = len(auc[ptest])

# process the data - identify missing subjects
missing_subjects = []
for ptest in penaltys_test:
    for lca_pid, lca_pname in lca_pnames.items():
コード例 #4
0
# per-test-penalty accumulators:
#   *_mumu / *_muse -- mean / SE over subjects of the time-averaged
#                      activation, one value per condition
#   *_tmu / *_tse   -- mean / SE over subjects of the (condition, time)
#                      activation time course
ma_cos_mumu = [np.zeros(len(all_cond)) for _ in range(len(penalty_test_list))]
ma_cos_muse = [np.zeros(len(all_cond)) for _ in range(len(penalty_test_list))]
ma_cos_tmu = [
    np.zeros((len(all_cond), T)) for _ in range(len(penalty_test_list))
]
ma_cos_tse = [
    np.zeros((len(all_cond), T)) for _ in range(len(penalty_test_list))
]
memory_sim_mu = np.zeros(len(penalty_test_list))
memory_sim_se = np.zeros(len(penalty_test_list))

for p_i, penalty_test in enumerate(penalty_test_list):
    # group pickle for this (train, test) penalty pair
    fname = '%s-dp%.2f-p%d-%d.pkl' % (exp_name, def_prob, penalty_train,
                                      penalty_test)

    data = pickle_load_dict(os.path.join(gdata_outdir, fname))
    ma_cos_list = data['cosine_ma_list']
    memory_sim_g = data['memory_sim_g']
    '''group level memory activation by condition, averaged over time'''

    # drop subjects with missing (None) entries
    ma_cos_list_nonone = remove_none(ma_cos_list)
    n_actual_subjs = len(ma_cos_list_nonone)
    ma_cos_mu = {cond: np.zeros(n_actual_subjs, ) for cond in all_cond}
    ma_cos = {cond: np.zeros((n_actual_subjs, T)) for cond in all_cond}
    for i_s in range(n_actual_subjs):
        for c_i, c_name in enumerate(all_cond):
            # 2nd-half time course of the lure-memory activation;
            # NOTE(review): the [T:] slice assumes the saved 'mu' trace has
            # length 2*T (two task parts) -- confirm against the saver
            ma_cos[c_name][i_s] = ma_cos_list_nonone[i_s][c_name]['lure'][
                'mu'][T:]
            # average over time
            ma_cos_mu[c_name][i_s] = np.mean(
                ma_cos_list_nonone[i_s][c_name]['lure']['mu'][T:])