Example #1
def consistency(Model: Type[models.Model]):
    """Consistency of human choices, model predictions, and confidence reports
    across repeated trial presentations, per participant and ground-truth structure."""
    df_model = pd.DataFrame({s: [] for s in Exp1.structures})
    df_human = pd.DataFrame({s: [] for s in Exp1.structures})
    df_conf = pd.DataFrame({s: [] for s in Exp1.structures})
    for pid in DataExp1.pids:
        data = DataExp1(pid)
        # model = data.build_model(Model)

        idx = data.match()
        choice = data.df['choice'].to_numpy()[idx]
        human_consistency = np.zeros(idx.shape[0] * idx.shape[1])
        human_consistency[idx[:, 0]] = human_consistency[
            idx[:, 1]] = choice[:, 0] == choice[:, 1]
        confidence = data.df['confidence'].to_numpy()[idx]
        confidence_consistency = np.zeros(idx.shape[0] * idx.shape[1])
        confidence_consistency[idx[:, 0]] = confidence_consistency[
            idx[:, 1]] = confidence[:, 0] == confidence[:, 1]
        df = data.cross_validate(Model)
        model_consistency = (df**2).sum(axis=1)

        df['human_consistency'] = human_consistency
        df['model_consistency'] = model_consistency
        df['confidence_consistency'] = confidence_consistency
        df['ground_truth'] = data.df['ground_truth']
        # One row per participant: mean consistency per ground-truth structure.
        # (DataFrame.append was removed in pandas 2.0, so build the rows with pd.concat.)
        df_human = pd.concat(
            [df_human,
             df.groupby('ground_truth')['human_consistency'].mean().to_frame().T],
            ignore_index=True)
        df_model = pd.concat(
            [df_model,
             df.groupby('ground_truth')['model_consistency'].mean().to_frame().T],
            ignore_index=True)
        df_conf = pd.concat(
            [df_conf,
             df.groupby('ground_truth')['confidence_consistency'].mean().to_frame().T],
            ignore_index=True)
    return {'human': df_human, 'model': df_model, 'confidence': df_conf}
def plot_3E(ax: plt.Axes):
    """Per-participant Exp. 2 log-likelihood difference between the model
    transferred from Exp. 1 and the cross-validated fitted model."""
    sns.set_palette(sns.color_palette(['white']))
    L = []
    for pid in DataExp2.pids:
        data = DataExp2(pid)
        m1 = data.load_model(
            models.ChoiceModel4Param,
            DataExp1(pid).build_model(models.ChoiceModel4Param).fit())
        df = data.cross_validate(models.ChoiceModel4Param)
        df['choice'] = data.df['choice']
        L.append(
            m1.fit().log_likelihood -
            np.log(df.apply(lambda row: row[row['choice']], axis=1)).sum())
    sns.boxplot(data=L, fliersize=0, ax=ax, linewidth=0.5, width=0.2)
    sns.scatterplot(x=np.linspace(-0.09, 0.09, 12),
                    y=L,
                    fc='white',
                    ec=sns_edge_color('white'),
                    ax=ax,
                    s=5,
                    linewidth=0.5,
                    zorder=11,
                    clip_on=False,
                    legend=False)
    # sns.stripplot(data=L, jitter=True, ax=ax, size=2.5, linewidth=0.5, zorder=10, clip_on=False)
    ax.axhline(0, 0, 1, color='k', zorder=1)
    ax.set_xlim(-0.2, 0.2)
    ax.set_ylabel(r'$\mathcal{L}$(transfer) - $\mathcal{L}$(fitted)')
    ax.set_xticks([])
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    plt.tight_layout()
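A minimal, self-contained sketch (toy numbers, not experiment data) of the per-trial likelihood lookup used above: each row of the cross-validated prediction table holds one probability per structure, and `row[row['choice']]` picks out the probability assigned to the option the participant actually chose, so summing the logs gives the cross-validated log-likelihood.

import numpy as np
import pandas as pd

# Toy prediction table: two trials, two candidate structures.
toy = pd.DataFrame({'I': [0.7, 0.2], 'G': [0.3, 0.8]})
toy['choice'] = ['I', 'G']  # hypothetical observed choices
log_likelihood = np.log(toy.apply(lambda row: row[row['choice']], axis=1)).sum()
print(log_likelihood)  # log(0.7) + log(0.8) ≈ -0.58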
Example #3
def plot_2C(ax: plt.Axes):
    """Each participant's choice consistency across repeated Exp. 1 trials."""
    sns.set_palette(sns.color_palette([colors['consistency_human']]))
    human_consistency = []
    for pid in DataExp1.pids:
        data = DataExp1(pid)
        choice = np.array(data.extract('choice'))[data.match()]
        human_consistency.append(
            (choice[:, 0] == choice[:, 1]).sum() / len(choice))
    sns.boxplot(data=human_consistency,
                fliersize=0,
                ax=ax,
                linewidth=0.5,
                width=0.2)
    sns.scatterplot(x=np.linspace(-0.09, 0.09, 12),
                    y=human_consistency,
                    fc=colors['consistency_human'],
                    ec=sns_edge_color(colors['consistency_human']),
                    ax=ax,
                    s=5,
                    linewidth=0.5,
                    zorder=11,
                    clip_on=False,
                    legend=False)
    # m = np.mean(human_consistency)
    # print(m, np.sqrt(m * (1 - m) / 1200))
    ax.set_xlim(-0.2, 0.2)
    ax.set_ylim(0, 1)
    ax.set_ylabel('Choice consistency\nacross trial-repetitions')
    ax.set_xticks([])
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    plt.tight_layout()
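A toy sketch of the repetition-consistency measure computed above, assuming `match()` returns an (n_pairs, 2) array pairing each trial with its repetition; consistency is then the fraction of pairs whose two choices agree.

import numpy as np

choices = np.array(['I', 'G', 'C', 'I', 'G', 'H'])  # toy choices on 6 trials
pairs = np.array([[0, 3], [1, 4], [2, 5]])          # hypothetical repeated-trial pairs
paired = choices[pairs]                              # shape (n_pairs, 2)
print((paired[:, 0] == paired[:, 1]).mean())         # 2/3 of pairs are consistent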
Example #4
def plot_2E(ax: plt.Axes,
            bin_edges_human: np.ndarray = np.array(
                [-1000, -90, -60, -30, -4, -2, 0, 2, 4, 30, 60, 1000]),
            bin_edges_model: np.ndarray = np.linspace(-160, 100, 40)):
    """P(choice = S | X) as a function of the ideal observer's logit, for human
    choices and cross-validated model predictions (Exp. 1)."""
    Δ, p_human, p_model = [], [], []
    for pid in DataExp1.pids:
        data = DataExp1(pid)
        model = data.build_model(models.BayesianIdealObserver)
        df = model.predict(model.fit())
        δ = np.stack([
            np.log(df[s]) - np.log(np.sum(df.loc[:, df.columns != s], axis=1))
            for s in data.structures
        ])
        Δ += list(δ.T.flatten())
        p_model += list(
            data.cross_validate(models.ChoiceModel4Param).to_numpy().flatten())
        p_human += list(
            np.array([data.df['choice'] == s
                      for s in Exp1.structures]).T.flatten())
    df = pd.DataFrame({'Δ': Δ, 'p_human': p_human, 'p_model': p_model})
    x_human, y_human, yerr_human, x_model, y_model, yerr_model = [], [], [], [], [], []
    df['bin'] = pd.cut(df['Δ'], bin_edges_human, labels=False)
    for i in range(len(bin_edges_human) - 1):
        _df = df[df['bin'] == i]
        x_human.append(_df['Δ'].mean())
        y_human.append(_df['p_human'].mean())
        yerr_human.append(_df['p_human'].sem())
    df['bin'] = pd.cut(df['Δ'], bin_edges_model, labels=False)
    for i in range(len(bin_edges_model) - 1):
        _df = df[df['bin'] == i]
        x_model.append(_df['Δ'].mean())
        y_model.append(_df['p_model'].mean())
        yerr_model.append(_df['p_model'].sem())
    ax.errorbar(x_human,
                y_human,
                yerr_human,
                label='Human ± sem',
                color=colors['decision_human'],
                fmt='.',
                capsize=2,
                ms=2,
                capthick=0.5,
                zorder=1)
    ax.plot(x_model,
            y_model,
            color=colors['decision_model'],
            label='Model',
            ms=1,
            zorder=0)
    ax.set_xlabel(r'logit( $P_\mathregular{ideal}$($S\,|\,\bf{X}$) )')
    ax.set_ylabel(r'$P($choice=$S\,|\,\bf{X}$)')
    ax.set_ylim(0, 1)
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles[::-1],
              labels[::-1],
              loc='upper left',
              handler_map={ErrorbarContainer: HandlerErrorbar(yerr_size=0.25)})
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    plt.tight_layout()
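A self-contained sketch (synthetic data) of the binning pattern used in plot_2E and again in plot_3C later: `pd.cut` assigns every Δ value to a bin, and the per-bin mean and standard error of the response give one point (with error bar) per bin; `groupby` is used here as shorthand for the explicit loop above.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
toy = pd.DataFrame({'Δ': rng.normal(0, 3, 500)})
toy['p'] = 1 / (1 + np.exp(-toy['Δ']))              # noiseless logistic response
edges = np.array([-1000, -4, -2, 0, 2, 4, 1000])
toy['bin'] = pd.cut(toy['Δ'], edges, labels=False)
print(toy.groupby('bin')['p'].agg(['mean', 'sem']))  # one (mean, sem) pair per bin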
Example #5
def bayesian_confidence(ax: plt.Axes,
                        bin_edges: np.ndarray = np.array([-1000, -40, -20, -10, -4.5, -1.5, 1.5, 4.5, 10, 20, 40, 1000])):
    """Average reported confidence as a function of the log-odds of the chosen structure."""
    x, y = [], []
    for pid in DataExp1.pids:
        data = DataExp1(pid)
        model = data.build_model(models.ChoiceModel4Param)
        L = model.L + model.L_uniform
        x += list(logsumexp(L, b=model.is_chosen, axis=1) - logsumexp(L, b=1 - model.is_chosen, axis=1))
        y += list((model.df['confidence'] == 'high').astype(float))
    df = pd.DataFrame({'x': x, 'y': y})
    df['bin'] = pd.cut(df['x'], bin_edges, labels=False)
    x, y, yerr = [], [], []
    for i in range(len(bin_edges) - 1):
        _df = df[df['bin'] == i]
        if len(_df) == 0:
            continue
        x.append(_df['x'].mean())
        y.append(_df['y'].mean())
        yerr.append(_df['y'].sem())
    ax.errorbar(x, y, yerr, label=r'Human $\pm$ sem', c='darkgreen', fmt='.-', capsize=2, ms=2, capthick=0.5)
    ax.set_yticks([0, 1])
    ax.set_yticklabels(['Low', 'High'])
    ax.set_ylabel('Avg. reported\nconfidence', labelpad=-16)
    # ax.set_xticks([0, 0.5, 1])
    ax.set_xlabel(r'logit$(\,P_{\mathregular{ideal}}(S\,|\,\bf{X}$$)\,)$')
    ax.legend(loc='lower right', handler_map={ErrorbarContainer: HandlerErrorbar(yerr_size=0.25)})
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    plt.tight_layout()
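A toy example of the weighted `logsumexp` trick used above: given a per-trial matrix of log evidence `L` and a 0/1 mask marking the chosen structure, `logsumexp(L, b=mask) - logsumexp(L, b=1-mask)` is the log-odds (logit) of the chosen option against all alternatives.

import numpy as np
from scipy.special import logsumexp

L = np.log([[0.6, 0.3, 0.1],
            [0.2, 0.5, 0.3]])        # toy per-trial evidence
chosen = np.array([[1., 0., 0.],
                   [0., 1., 0.]])    # one-hot mask of the chosen structure
logit = logsumexp(L, b=chosen, axis=1) - logsumexp(L, b=1 - chosen, axis=1)
print(logit)                          # [log(0.6/0.4), log(0.5/0.5)] ≈ [0.405, 0.0]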
Example #6
def exp2all():
    """One panel per Exp. 2 participant: stacked choice fractions with human data,
    transfer-model, and fitted-model predictions overlaid."""
    n_col = len(DataExp2.pids) // 3
    _, axes = plt.subplots(3, n_col, figsize=(7.5, 5))
    h, l = None, None
    for i in range(len(DataExp2.pids)):
        pid = DataExp2.pids[i]
        ax = axes[i // n_col, i % n_col]
        ax.set_title(f'#{i + 1}')
        data = DataExp2(pid)
        mpl.rcParams['font.size'] -= 2
        h, l = data.plot_stacked_bar(ax, plot_legend=False)
        mpl.rcParams['font.size'] += 2
        y_human, err = data.plot_line_human()
        ax.errorbar(DataExp2.x, y_human, err, label=r'Human $\pm$ sem', color=colors['decision_human'],
                    capsize=3, capthick=1, lw=1, ms=2, fmt='o', zorder=3)
        m1 = data.load_model(models.ChoiceModel4Param, DataExp1(pid).build_model(models.ChoiceModel4Param).fit())
        y1 = data.plot_line_model(m1.predict(m1.fit()))
        ax.plot(DataExp2.x, y1, 'o--', label='Transfer model', color=colors['decision_transfer'], lw=1, ms=2, zorder=2)
        y2 = data.plot_line_model(data.cross_validate(models.ChoiceModel4Param))
        ax.plot(DataExp2.x, y2, 'o-', label='Fitted model', color=colors['decision_model'], lw=1, ms=2, zorder=2)
        handles, labels = ax.get_legend_handles_labels()
        h += handles[::-1]
        l += labels[::-1]
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.set_xlabel(' ', labelpad=18)
    plt.gcf().legend(h, l, loc='lower center', ncol=7,
                     handler_map={ErrorbarContainer: HandlerErrorbar(yerr_size=0.35)})
    plt.tight_layout()
Example #7
def plot_2B(ax: plt.Axes):
    """Average human confusion matrix (choice vs. true structure) in Exp. 1."""
    cm = np.zeros((len(Exp1.structures), len(Exp1.structures)))
    for pid in DataExp1.pids:
        data = DataExp1(pid)
        cm += data.plot_confusion_matrix()
    cm /= len(DataExp1.pids)
    ticklabels = list(map(lambda s: f'${s}$', Exp1.structures))
    plot_confusion_matrix(cm, ticklabels, ticklabels, ax)
    ax.set_title('Human avg.')
    ax.set_xlabel('Choice')
    ax.set_ylabel('True Structure')
    plt.tight_layout()
def plot_3B(ax: plt.Axes):
    """Exp. 2 choice fractions pooled across participants, with transfer-model
    and fitted-model predictions."""
    data = pool(DataExp2)
    data.plot_stacked_bar(ax)
    n = len(ExpConfig.glo_exp2)
    y_human, y1, y2 = np.zeros(n), np.zeros(n), np.zeros(n)
    for pid in DataExp2.pids:
        data = DataExp2(pid)
        y_human += data.plot_line_human()[0]
        m1 = data.load_model(
            models.ChoiceModel4Param,
            DataExp1(pid).build_model(models.ChoiceModel4Param).fit())
        y1 += data.plot_line_model(m1.predict(m1.fit()))
        y2 += data.plot_line_model(
            data.cross_validate(models.ChoiceModel4Param))
    y_human, y1, y2 = y_human / len(DataExp2.pids), y1 / len(
        DataExp2.pids), y2 / len(DataExp2.pids)
    err = [np.sqrt(p * (1 - p) / len(DataExp2.pids) / 20) for p in y_human]
    ax.errorbar(DataExp2.x,
                y_human,
                err,
                label=r'Human $\pm$ sem',
                color=colors['decision_human'],
                capsize=5,
                capthick=1,
                lw=1,
                ms=3,
                fmt='o',
                zorder=3)
    ax.plot(DataExp2.x,
            y1,
            'o--',
            label='Transfer model',
            color=colors['decision_transfer'],
            lw=1,
            ms=3,
            zorder=2)
    ax.plot(DataExp2.x,
            y2,
            'o-',
            label='Fitted model',
            color=colors['decision_model'],
            lw=1,
            ms=3,
            zorder=2)
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles[::-1],
              labels[::-1],
              loc='upper right',
              handler_map={ErrorbarContainer: HandlerErrorbar(yerr_size=0.35)})
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    plt.tight_layout()
Example #9
def cross_evaluation(ax: plt.Axes, file='../data/cross_evaluation.dat'):
    """Cross-participant evaluation: parameters fitted to one participant are
    evaluated on every participant's Exp. 1 data (matrix cached in `file`)."""
    if exists(file):
        with open(file, 'rb') as f:
            cm = pickle.load(f)
    else:
        n = len(DataExp1.pids)
        cm = np.zeros((n, n))
        for i in range(len(DataExp1.pids)):
            res = DataExp1(DataExp1.pids[i]).build_model(models.ChoiceModel4Param).fit()
            for j in range(len(DataExp1.pids)):
                cm[i][j] = DataExp1(DataExp1.pids[j]).load_model(models.ChoiceModel4Param, res).fit().log_likelihood
        cm = np.exp(cm - np.max(cm, axis=0))
        with open(file, 'wb+') as f:
            pickle.dump(cm, f)
    im = ax.imshow(cm)
    plt.colorbar(im, ax=ax)
    ax.set_xticks(np.arange(len(DataExp1.pids)))
    ax.set_yticks(np.arange(len(DataExp1.pids)))
    ax.set_xticklabels(map(str, np.arange(1, len(DataExp1.pids) + 1)), size=6)
    ax.set_yticklabels(map(str, np.arange(1, len(DataExp1.pids) + 1)), size=6)
    ax.set_xlabel('Participant fitted')
    ax.set_ylabel('Participant predicted')
    plt.tight_layout()
Example #10
def plot_2F(ax: plt.Axes):
    """Average model confusion matrix (cross-validated prediction vs. true
    structure) in Exp. 1."""
    cm = np.zeros((len(Exp1.structures), len(Exp1.structures)))
    for pid in DataExp1.pids:
        data = DataExp1(pid)
        model = data.build_model(models.ChoiceModel4Param)
        pred = data.cross_validate(models.ChoiceModel4Param)
        cm += model.plot_confusion_matrix(pred)
    cm /= len(DataExp1.pids)
    ticklabels = list(map(lambda s: f'${s}$', Exp1.structures))
    plot_confusion_matrix(cm, ticklabels, ticklabels, ax)
    ax.set_title('Model avg.')
    ax.set_xlabel('Prediction')
    ax.set_ylabel('True Structure')
    plt.tight_layout()
Example #11
def plot_2H(ax: plt.Axes, pids=(3, 10)):
    """Human and model confusion matrices for two example participants."""
    pos = ax.get_position()
    ax.set_position((pos.x0, pos.y0 + 0.08, pos.width, pos.height))
    ax.set_frame_on(False)
    ax.set_xticks([])
    ax.set_xlabel('Human choice/model prediction', labelpad=10)
    ax.set_yticks([])
    ax.set_ylabel('True Structure', labelpad=16)
    fig = plt.gcf()
    gs = gridspec.GridSpecFromSubplotSpec(len(pids),
                                          2,
                                          subplot_spec=ax,
                                          wspace=0.1,
                                          hspace=0.1)
    mpl.rcParams['font.size'] -= 2
    mpl.rcParams['xtick.labelsize'] -= 2
    mpl.rcParams['ytick.labelsize'] -= 2
    for j in range(len(pids)):
        ax = plt.Subplot(fig, gs[0, j])
        data = DataExp1(DataExp1.pids[pids[j] - 1])
        data.plot_confusion_matrix(ax)
        ax.set_title(f'\uf007$\#${pids[j]}', size=7, fontproperties=fp)
        ax.set_xticks([])
        ax.set_xlabel('')
        if j == 0:
            ax.set_ylabel('Human')
        else:
            ax.set_yticks([])
            ax.set_ylabel('')
            # ax.yaxis.set_label_coords(-0.4, 0.6 - i * 0.2)
        fig.add_subplot(ax)

        ax = plt.Subplot(fig, gs[1, j])
        model = data.build_model(models.ChoiceModel4Param)
        model.plot_confusion_matrix(
            data.cross_validate(models.ChoiceModel4Param), ax)
        ax.set_xlabel('')
        if j == 0:
            ax.set_ylabel('Model')
        else:
            ax.set_yticks([])
            ax.set_ylabel('')
        fig.add_subplot(ax)
    mpl.rcParams['font.size'] += 2
    mpl.rcParams['xtick.labelsize'] += 2
    mpl.rcParams['ytick.labelsize'] += 2
Example #12
def correlation(ax: plt.Axes, file='../data/correlation.dat'):
    """3-D scatter of pairwise empirical velocity correlations per trial, colored
    by ground-truth structure, with theoretical values overlaid (cached in `file`)."""
    if exists(file):
        with open(file, 'rb') as f:
            df = pickle.load(f)
    else:
        ρ01, ρ02, ρ12, g = [], [], [], []
        for pid in DataExp1.pids:
            print(pid)
            data = DataExp1(pid)
            for i in data.idx:
                g.append(data.data[i]['ground_truth'])
                V = data.empirical_velocity()[i][:, data.data[i]['permutation']]
                ρ01.append(pearsonr(V[:, 0], V[:, 1])[0])
                ρ02.append(pearsonr(V[:, 0], V[:, 2])[0])
                ρ12.append(pearsonr(V[:, 1], V[:, 2])[0])
        df = pd.DataFrame({'ρ01': ρ01, 'ρ02': ρ02, 'ρ12': ρ12, 'g': g})
        with open(file, 'wb+') as f:
            pickle.dump(df, f)

    ρ_g, ρ_c = Exp1.presets['H'].Σ[0, 1:3] / 4
    R = {'I': ([0], [0], [0]),
         'G': ([ρ_g], [ρ_g], [ρ_g]),
         'C': ([ρ_g, 0, 0], [0, ρ_g, 0], [0, 0, ρ_g]),
         'H': ([ρ_g, ρ_c, ρ_c], [ρ_c, ρ_g, ρ_c], [ρ_c, ρ_c, ρ_g])}
    for s, c, m in zip(Exp1.structures, ['b', 'g', 'y', 'r'], ['x', 'o', '^', '+']):
        _df = df[df['g'] == s]
        alpha = 0.3 if s == 'G' else 0.1
        ax.scatter(_df['ρ01'], _df['ρ02'], _df['ρ12'], color=c, marker=m, alpha=alpha, s=16, label=s, linewidths=0.8)
        ax.scatter(R[s][0], R[s][1], R[s][2], color=c, marker=m, alpha=0.8, s=128, linewidths=0.8)

    ax.view_init(30, -45)
    ax.set_xlabel(r'$ρ_\mathregular{blue, green}$', labelpad=-4)
    ax.set_xlim(-1, 1)
    ax.set_xticks([-1, -0.5, 0, 0.5, 1])
    ax.set_ylabel(r'$ρ_\mathregular{blue, red}$', labelpad=-4)
    ax.set_ylim(-1, 1)
    ax.set_yticks([-1, -0.5, 0, 0.5, 1])
    ax.set_zlabel(r'$ρ_\mathregular{green, red}$', labelpad=-4)
    ax.set_zlim(-1, 1)
    ax.set_zticks([-1, -0.5, 0, 0.5, 1])
    legend = ax.legend(loc='lower right', handlelength=1, handletextpad=0)
    for lh in legend.legendHandles:
        lh.set_alpha(1)
    plt.tight_layout()
Example #13
def exp1all(ax: plt.Axes):
    """Human and cross-validated model confusion matrices for every Exp. 1 participant."""
    ax.set_frame_on(False)
    ax.set_xticks([])
    ax.set_xlabel('Human choice/model prediction', labelpad=10)
    ax.set_yticks([])
    ax.set_ylabel('True structure', labelpad=20)
    fig = plt.gcf()
    n_col = len(DataExp1.pids) // 2
    outer = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=ax, wspace=0.1, hspace=0.2)
    half1 = gridspec.GridSpecFromSubplotSpec(2, n_col, subplot_spec=outer[0, 0], wspace=0.1, hspace=0.1)
    half2 = gridspec.GridSpecFromSubplotSpec(2, n_col, subplot_spec=outer[1, 0], wspace=0.1, hspace=0.1)
    mpl.rcParams['font.size'] -= 2
    mpl.rcParams['xtick.labelsize'] -= 2
    mpl.rcParams['ytick.labelsize'] -= 2
    for i in range(len(DataExp1.pids)):
        half = half1 if i < n_col else half2
        ax = plt.Subplot(fig, half[0, i % n_col])
        data = DataExp1(DataExp1.pids[i])
        data.plot_confusion_matrix(ax)
        ax.set_title(f'#{i + 1}')
        ax.set_xlabel('')
        ax.set_xticks([])
        if i % n_col == 0:
            ax.set_ylabel('Human')
        else:
            ax.set_ylabel('')
            ax.set_yticks([])
        fig.add_subplot(ax)

        ax = plt.Subplot(fig, half[1, i % n_col])
        model = data.build_model(models.ChoiceModel4Param)
        model.plot_confusion_matrix(data.cross_validate(models.ChoiceModel4Param), ax)
        ax.set_xlabel('')
        ax.set_ylabel('')
        if i % n_col == 0:
            ax.set_ylabel('Model')
        else:
            ax.set_ylabel('')
            ax.set_yticks([])
        fig.add_subplot(ax)
    mpl.rcParams['font.size'] += 2
    mpl.rcParams['xtick.labelsize'] += 2
    mpl.rcParams['ytick.labelsize'] += 2
Example #14
def proximity(ax: plt.Axes):
    """Average angular distance between the two clustered dots on C trials,
    split by whether the participant chose C."""
    proximity_all, accuracy_all = [], []
    for pid in DataExp1.pids:
        proximity, accuracy = [], []
        data = DataExp1(pid).data
        for trial in data:
            if trial['ground_truth'] == 'C':
                x = np.array(trial['φ'])[:, :3]
                dx = np.abs(x[:, 0] - x[:, 1])
                proximity.append(np.minimum(dx, 2 * np.pi - dx).mean())
                accuracy.append(trial['choice'] == 'C')
        proximity_all += proximity
        accuracy_all += accuracy
        df = pd.DataFrame({'proximity': proximity, 'accuracy': accuracy})
        df['accuracy'] = df['accuracy'].astype(float)
        print(pearsonr(df['proximity'], df['accuracy']))
    proximity_all, accuracy_all = np.array(proximity_all), np.array(accuracy_all)
    ax.boxplot([proximity_all[~accuracy_all], proximity_all[accuracy_all]])
    ax.set_xticklabels(['Non-$C$', '$C$'])
    ax.set_xlabel('Human choice')
    ax.set_ylabel('Avg. angular distance b/w clustered dots')
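# A small sketch (toy angles, in radians) of the circular-distance step in
# proximity() above: the separation between two angles is the smaller of the
# direct difference and the wrap-around difference, so it never exceeds π.
_a, _b = np.array([0.1, 3.0]), np.array([6.2, 2.0])
_dx = np.abs(_a - _b)
print(np.minimum(_dx, 2 * np.pi - _dx))  # ≈ [0.183, 1.0] rather than [6.1, 1.0]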
def plot_3C(ax: plt.Axes,
            bin_edges_human: np.ndarray = np.array(
                [-1000, -4.5, -3, -1.5, 0, 1.5, 3, 4.5, 1000]),
            bin_edges_model: np.ndarray = np.array(
                [-1000, -4.5, -3, -1.5, 0, 1.5, 3, 4.5, 1000])):
    """P(choice = C | X) as a function of the ideal observer's logit in Exp. 2,
    for humans, the transfer model, and the fitted model."""
    Δ, p_human, p1, p2 = [], [], [], []
    for pid in DataExp2.pids:
        data = DataExp2(pid)
        model = data.build_model(models.BayesianIdealObserver)
        df = model.predict(model.fit())
        Δ += list(np.log(df['C']) - np.log(df['H']))
        model = data.load_model(
            models.ChoiceModel4Param,
            DataExp1(pid).build_model(models.ChoiceModel4Param).fit())
        p1 += list(model.predict(model.fit())['C'])
        p2 += list(data.cross_validate(models.ChoiceModel4Param)['C'])
        p_human += list((data.df['choice'] == 'C') * 1.0)
    df = pd.DataFrame({'Δ': Δ, 'p_human': p_human, 'p1': p1, 'p2': p2})
    x_human, y_human, yerr_human, x_model, y1, yerr1, y2, yerr2 = [], [], [], [], [], [], [], []
    df['bin'] = pd.cut(df['Δ'], bin_edges_human, labels=False)
    for i in range(len(bin_edges_human) - 1):
        _df = df[df['bin'] == i]
        x_human.append(_df['Δ'].mean())
        y_human.append(_df['p_human'].mean())
        yerr_human.append(_df['p_human'].sem())
    df['bin'] = pd.cut(df['Δ'], bin_edges_model, labels=False)
    for i in range(len(bin_edges_model) - 1):
        _df = df[df['bin'] == i]
        x_model.append(_df['Δ'].mean())
        y1.append(_df['p1'].mean())
        yerr1.append(_df['p1'].sem())
        y2.append(_df['p2'].mean())
        yerr2.append(_df['p2'].sem())
    ax.errorbar(x_human,
                y_human,
                yerr_human,
                label=r'Human $\pm$ sem',
                color=colors['decision_human'],
                fmt='.',
                capsize=2,
                ms=2,
                capthick=0.5,
                zorder=1)
    ax.plot(x_model,
            y1,
            '--',
            color=colors['decision_transfer'],
            label='Transfer model',
            ms=1,
            zorder=0)
    ax.plot(x_model,
            y2,
            '-',
            color=colors['decision_model'],
            label='Fitted model',
            ms=1,
            zorder=0)
    ax.set_xlabel(r'logit( $P_\mathregular{ideal}(S=C\,|\,\bf{X}$) )')
    ax.set_ylabel(r'$P$(choice=$C\,|\,\bf{X}$)')
    ax.set_ylim(0, 1)
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles[::-1],
              labels[::-1],
              loc='lower right',
              handler_map={ErrorbarContainer: HandlerErrorbar(yerr_size=0.25)})
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    plt.tight_layout()
from typing import Dict, List, Type
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

from analysis.data_exp1 import DataExp1
import analysis.models as models
from analysis.utils.groupBMC import GroupBMC

# Cross-validated log-likelihood of each candidate choice model for every
# Exp. 1 participant; used by the model-comparison panels below.
Models = [
    models.ChoiceModel4Param, models.BiasFreeChoiceModel,
    models.LapseFreeChoiceModel, models.NonBayesianChoiceModel4Param
]
L: Dict[Type[models.Model], List[float]] = {Model: [] for Model in Models}
for pid in DataExp1.pids:
    data = DataExp1(pid)
    for Model in Models:
        df = data.cross_validate(Model)
        df['choice'] = data.df['choice']
        L[Model].append(
            np.log(df.apply(lambda row: row[row['choice']], axis=1)).sum())


def plot_4A(ax: plt.Axes):
    """Cross-validated log-likelihood of each alternative model relative to the
    full 4-parameter choice model, for every Exp. 1 participant."""
    x = np.arange(1, len(DataExp1.pids) + 1)
    ax.hlines(0,
              0,
              len(DataExp1.pids) + 1,
              label='Full model',
              colors=models.ChoiceModel4Param.color)
    baseline = np.array(L[models.ChoiceModel4Param])