Example #1
def run(dest, results_path, version, block, query):
    """Write LaTeX commands for the best-fitting number of samples.

    Reads num_samples.csv, selects the fits for the given (version,
    query, block) combination indexed by k, finds the k with minimum
    MSE, and writes \\BestFitK and \\BestFitOmega commands to `dest`.
    """
    fits = pd.read_csv(os.path.join(results_path, "num_samples.csv"))\
        .groupby(['version', 'query', 'block'])\
        .get_group((version, query, block))\
        .set_index('k')

    # idxmin returns the index *label* (the value of k); the old
    # Series.argmin alias is deprecated and now means positional argmin.
    best_k = fits['mse'].idxmin()
    # .loc avoids chained indexing on the selected row.
    omega = np.sqrt(fits.loc[best_k, 'intercept'])

    # Context manager guarantees the file is closed even on error.
    with open(dest, "w") as fh:
        fh.write(util.newcommand("BestFitK", r"$n={:d}$".format(int(best_k))))
        fh.write(util.newcommand(
            "BestFitOmega", r"$\omega={:.2f}$".format(omega)))
Example #2
def _fmt_pct(value):
    """Round *value* (already scaled to percent) to one decimal and
    render it as a LaTeX percentage, dropping a trailing .0."""
    value = np.round(float(value), 1)
    if int(value) == value:
        return r"{}\%".format(int(value))
    return r"{:.1f}\%".format(value)


def run(dest, results_path):
    """Write per-participant mass-accuracy summary commands.

    For each (version, num_mass_trials) group, emits the minimum,
    maximum, and 5th-percentile accuracy across participants as
    \\SubjAccExp...Min / ...Max / ...Most LaTeX commands.
    """
    results = pd.read_csv(
        os.path.join(results_path, "human_mass_accuracy_by_participant.csv"))

    results = results.set_index(
        ['version', 'num_mass_trials', 'kappa0', 'pid'])

    # Map experiment codes / trial counts to LaTeX-safe name fragments.
    replace = {
        'G': 'Two',
        'H': 'One',
        'I': 'Three',
        8: '',
        20: '',
        1: 'OneTrial',
        2: 'TwoTrials',
        3: 'ThreeTrials',
        4: 'FourTrials',
        5: 'FiveTrials'
    }

    with open(dest, "w") as fh:
        groups = results.groupby(level=['version', 'num_mass_trials'])
        for (version, num_mass_trials), stats in groups:
            prefix = "SubjAccExp{}{}".format(
                replace[version], replace[num_mass_trials])

            fh.write(util.newcommand(
                prefix + "Min", _fmt_pct(100 * float(stats.min()))))
            fh.write(util.newcommand(
                prefix + "Max", _fmt_pct(100 * float(stats.max()))))

            # "Most": 95% of participants are at or above this accuracy.
            pct, = np.percentile(stats, [5])
            fh.write(util.newcommand(
                prefix + "Most", _fmt_pct(100 * float(pct))))
Example #3
def run(dest, results_path, query):
    """Write \\SigmoidCoef... commands for each fitted sigmoid.

    Keeps only the empirical likelihood and the IPE likelihood for the
    given query, then formats each row's coefficient with the LaTeX
    beta template from the config.
    """
    results = pd\
        .read_csv(os.path.join(results_path, 'fit_sigmoids.csv'))\
        .query('(likelihood == "empirical") | (likelihood == "ipe_{}")'.format(query))\
        .set_index(['likelihood', 'counterfactual', 'random'])
    latex_beta = util.load_config()["latex"]["beta"]

    # Context manager closes the file even if formatting raises.
    with open(dest, "w") as fh:
        for (lh, cf, random), row in results.iterrows():
            # Build a CamelCase command name from the index levels.
            lh = 'Ipe' if lh == ("ipe_" + query) else lh.capitalize()
            cf = "CF" if cf else ""
            random = "Random" if random else ""

            cmdname = "SigmoidCoef{}{}{}".format(lh, cf, random)
            fh.write(util.newcommand(cmdname, latex_beta.format(**row)))
def run(data, results_path, seed):
    """Determine which stimuli are at chance for each model, save the
    table to CSV, and emit \\...NumChance LaTeX commands.

    Returns the path of the written CSV.
    """
    np.random.seed(seed)

    skip = ['expA', 'expB', 'gs']
    chance_by_model = {}
    for name, df in data.iteritems():
        if name in skip:
            continue
        grouped = df.groupby(['stimulus', 'theta', 'flipped'])['correct']
        # Correction across all stimulus groups.
        alpha = 0.05 / len(grouped.groups)
        lower = grouped.apply(util.beta, [alpha]).unstack(-1)[alpha]
        chance_by_model[name] = lower <= 0.5

    table = pd.DataFrame.from_dict(chance_by_model).stack().reset_index()
    table.columns = ['stimulus', 'theta', 'flipped', 'model', 'chance']

    pth = results_path.joinpath(filename)
    table.set_index("stimulus").to_csv(pth)

    with open(results_path.joinpath(texname), "w") as fh:
        fh.write("%% AUTOMATICALLY GENERATED -- DO NOT EDIT!\n")
        for model, flags in table.groupby('model')['chance']:
            count = flags.sum()
            # Spell out small counts as words, if we have a word for it.
            if count < len(words):
                count = words[count]
            fh.write(util.newcommand(
                "%sNumChance" % model.capitalize(), count))

    return pth
def run(data, results_path, seed):
    """Correlate trial number with median accuracy for each experiment
    and write the results as CSV plus \\...TrialAccuracyCorr commands.

    Returns the path of the written CSV.
    """
    np.random.seed(seed)

    means = pd.read_csv(results_path.joinpath("trial_accuracy_means.csv"))
    by_model = means.groupby('model')

    corrs = {}
    for key in ['exp', 'expA', 'expB']:
        group = by_model.get_group(key)
        corrs[key] = util.bootcorr(
            group['trial'], group['median'], method='spearman')

    corrs = pd.DataFrame.from_dict(corrs, orient='index')
    corrs.index.name = 'model'
    pth = results_path.joinpath(filename)
    corrs.to_csv(pth)

    with open(results_path.joinpath(texname), "w") as fh:
        fh.write("%% AUTOMATICALLY GENERATED -- DO NOT EDIT!\n")
        for model, stats in corrs.iterrows():
            fh.write(util.newcommand(
                "%sTrialAccuracyCorr" % model.capitalize(),
                util.latex_spearman.format(**dict(stats))))

    return pth
def run(dest, results_path):
    """Write \\Exp...MassTrialCorr commands for the trial-by-accuracy
    correlations of each experiment version.

    Returns the destination path.
    """
    results = pd.read_csv(
        os.path.join(results_path, "mass_accuracy_by_trial_corrs.csv"))

    # .loc replaces the deprecated .ix for label-based lookup of the
    # specific (version, num_mass_trials) rows we report.
    results = results\
        .set_index(['version', 'num_mass_trials'])\
        .loc[[('G', 8), ('H', 20), ('I', -1), ('I', 5)]]

    replace = {
        'H': 'One',
        'G': 'Two',
        'I': 'Three'
    }

    # NOTE(review): the config key really is "spearman"; the previous
    # local name ("latex_pearson") was misleading.
    latex_spearman = util.load_config()["latex"]["spearman"]

    with open(dest, "w") as fh:
        for (version, num), corrs in results.iterrows():
            if version == 'I' and num == 5:
                # Experiment three also reports a within-subjects value.
                cmdname = "Exp{}MassTrialCorrWithinSubjs".format(
                    replace[version])
            else:
                cmdname = "Exp{}MassTrialCorr".format(
                    replace[version])
            fh.write(util.newcommand(cmdname, latex_spearman.format(**corrs)))

    return dest
Example #7
def run(data, results_path, seed):
    """Bootstrap the harmonic-mean response time of correct trials for
    each model, save to CSV, and emit \\...Time LaTeX commands.

    Returns the path of the written CSV.
    """
    np.random.seed(seed)

    times = {}
    for name in sorted(data.keys()):
        correct_trials = data[name][data[name]['correct']]
        # Bootstrap on the reciprocal, then invert back (harmonic mean).
        times[name] = 1. / util.bootstrap_mean(1. / correct_trials['time'])

    times = pd.DataFrame.from_dict(times, orient='index')
    times.index.name = 'model'
    # Inverting swaps the interval endpoints, so swap the labels back.
    times = times.rename(columns={
        'lower': 'upper',
        'upper': 'lower'})

    pth = results_path.joinpath(filename)
    times.to_csv(pth)

    with open(results_path.joinpath(texname), "w") as fh:
        fh.write("%% AUTOMATICALLY GENERATED -- DO NOT EDIT!\n")
        for model, stats in times.iterrows():
            fh.write(util.newcommand(
                "%sTime" % model.capitalize(),
                util.latex_msec.format(**dict(stats))))

    return pth
def run(data, results_path, seed):
    """Correlate angle (modtheta) with median accuracy for each model
    and orientation, save to CSV, and emit \\...ThetaAccuracyCorr...
    LaTeX commands.

    Returns the path of the written CSV.
    """
    np.random.seed(seed)

    means = pd.read_csv(results_path.joinpath("accuracy_means.csv"))
    skip = ['expA', 'expB', 'gs']

    corrs = {}
    for (model, flipped), df in means.groupby(['model', 'flipped']):
        if model in skip:
            continue
        corrs[(model, flipped)] = util.bootcorr(
            df['modtheta'], df['median'], method='spearman')

    corrs = pd.DataFrame.from_dict(corrs, orient='index')
    corrs.index = pd.MultiIndex.from_tuples(
        corrs.index, names=['model', 'flipped'])
    pth = results_path.joinpath(filename)
    corrs.to_csv(pth)

    with open(results_path.joinpath(texname), "w") as fh:
        fh.write("%% AUTOMATICALLY GENERATED -- DO NOT EDIT!\n")
        for (model, flipped), stats in corrs.iterrows():
            name = "%sThetaAccuracyCorr%s" % (
                model.capitalize(), flipped.capitalize())
            fh.write(util.newcommand(
                name, util.latex_spearman.format(**dict(stats))))

    return pth
Example #9
def run(dest, results_path):
    """Write \\MassAccNumChanceExp... commands giving, per experiment
    version, the number of participants at chance plus the correction
    level used."""
    results = pd.read_csv(os.path.join(results_path, "num_chance.csv"))

    # The last column's name is the (already corrected) alpha level.
    alpha = results.columns[-1]
    counts = results\
        .groupby('version')\
        .sum()[alpha]

    replace = {'H': 'One', 'G': 'Two', 'I': 'Three'}

    # Context manager closes the file even if a write raises.
    with open(dest, "w") as fh:
        for version, num in counts.iteritems():
            name = "MassAccNumChanceExp{}".format(replace[version])
            fh.write(util.newcommand(name, int(num)))
            fh.write(util.newcommand("{}Correction".format(name), alpha))
Example #10
def run(dest, results_path):
    """Write precision/recall/F1/accuracy commands for the static,
    unfitted model of experiment one ('H'), one set per likelihood and
    counterfactual condition."""
    results = pd.read_csv(
        os.path.join(results_path, "precision_recall.csv"))

    results = results\
        .groupby(['version', 'model', 'fitted'])\
        .get_group(('H', 'static', False))\
        .set_index(['likelihood', 'counterfactual'])

    query = util.load_query()

    # Context manager closes the file even if formatting raises.
    with open(dest, "w") as fh:
        for (lh, cf), res in results.iterrows():
            # CamelCase the likelihood, special-casing the IPE query.
            if lh == 'ipe_' + query:
                lh = 'Ipe'
            else:
                lh = "".join([x.capitalize() for x in lh.split('_')])
            suffix = "{}CF".format(lh) if cf else "{}NoCF".format(lh)

            fh.write(util.newcommand(
                "ExpOneFScore{}".format(suffix),
                "F_1={F1:.2f}".format(**res)))
            fh.write(util.newcommand(
                "ExpOnePrecision{}".format(suffix),
                "{precision:.2f}".format(**res)))
            fh.write(util.newcommand(
                "ExpOneRecall{}".format(suffix),
                "{recall:.2f}".format(**res)))
            # Accuracy is stored as a fraction; report a percentage.
            fh.write(util.newcommand(
                "ExpOneAccuracy{}".format(suffix),
                r"{:.1f}\%".format(res['accuracy'] * 100)))
Example #11
def run(dest, results_path):
    """Write \\Exp...MedianTime commands giving each experiment
    version's median completion time in minutes."""
    results = pd.read_csv(os.path.join(results_path,
                                       "payrate.csv")).set_index('version').T

    replace = {'H': 'One', 'G': 'Two', 'I': 'Three'}

    # Context manager closes the file even if a write raises.
    with open(dest, "w") as fh:
        for version, data in results.iteritems():
            cmdname = "Exp{}MedianTime".format(replace[version])
            # median_time is a timedelta string; report it in minutes.
            minutes = pd.to_timedelta(
                data['median_time']).total_seconds() / 60.0
            fh.write(util.newcommand(cmdname, "{:.1f}".format(minutes)))
def run(dest, results_path):
    """Write \\FallCorr... commands for correlations between fall
    responses, one command per (block, X, Y) row."""
    # NOTE: a previous version also loaded the query here but never
    # used it; that dead local has been removed.
    results = pd\
        .read_csv(os.path.join(results_path, "fall_response_corrs.csv"))\
        .set_index(['block', 'X', 'Y'])

    format_pearson = util.load_config()["latex"]["pearson"]

    # Context manager closes the file even if formatting raises.
    with open(dest, "w") as fh:
        for (block, x, y), corrs in results.iterrows():
            # CamelCase the X variable name for the command.
            x = "".join([i.capitalize() for i in x.split("_")])
            cmdname = "FallCorr{}v{}{}".format(x, y, block)
            fh.write(util.newcommand(cmdname, format_pearson.format(**corrs)))
Example #13
def run(dest, results_path):
    """Write \\Exp...<Note> commands giving participant counts for each
    experiment version and bookkeeping category."""
    results = pd.read_csv(os.path.join(results_path, "num_participants.csv"))

    results = results\
        .set_index('version')\
        .stack()

    replace = {'H': 'One', 'G': 'Two', 'I': 'Three'}

    # Context manager closes the file even if a write raises.
    with open(dest, "w") as fh:
        for (version, note), num in results.iteritems():
            # e.g. "num_excluded" -> "NumExcluded"
            note = ''.join([x.capitalize() for x in note.split('_')])
            cmdname = "Exp{}{}".format(replace[version], note)
            fh.write(util.newcommand(cmdname, int(num)))
def run(data, results_path, seed):
    """Correlate each model's median response times with the human data
    (overall and per orientation), save to CSV, and emit \\...TimeCorr
    commands for the overall correlations.

    Returns the path of the written CSV.
    """
    np.random.seed(seed)

    medians = pd.read_csv(results_path.joinpath("response_time_means.csv"))
    medians = medians\
        .set_index(['stimulus', 'theta', 'flipped', 'model'])['median']\
        .unstack('model')

    skip = ['exp', 'expA', 'expB', 'gs']
    human = medians['exp']

    corrs = {}
    for model in medians:
        if model in skip:
            continue

        # Per-orientation correlations against the human data.
        for flipped in ['same', 'flipped']:
            corrs[(model, flipped)] = util.bootcorr(
                human.unstack('flipped')[flipped],
                medians[model].unstack('flipped')[flipped],
                method='pearson')

        # Overall correlation across both orientations.
        corrs[(model, 'all')] = util.bootcorr(
            human, medians[model], method='pearson')

    corrs = pd.DataFrame.from_dict(corrs, orient='index')
    corrs.index = pd.MultiIndex.from_tuples(
        corrs.index, names=['model', 'flipped'])
    pth = results_path.joinpath(filename)
    corrs.to_csv(pth)

    with open(results_path.joinpath(texname), "w") as fh:
        fh.write("%% AUTOMATICALLY GENERATED -- DO NOT EDIT!\n")
        for (model, flipped), stats in corrs.iterrows():
            # Only the 'all' rows become LaTeX commands.
            if flipped != 'all':
                continue
            fh.write(util.newcommand(
                "%sTimeCorr" % model.capitalize(),
                util.latex_pearson.format(**dict(stats))))

    return pth
def run(data, results_path, seed):
    """Compute beta-distributed accuracy estimates (as percentages) for
    each model, save to CSV, and emit \\...Accuracy LaTeX commands.

    Returns the path of the written CSV.
    """
    np.random.seed(seed)

    accuracy = {}
    for name in sorted(data.keys()):
        accuracy[name] = util.beta(data[name]['correct']) * 100

    accuracy = pd.DataFrame.from_dict(accuracy, orient='index')
    accuracy.index.name = 'model'
    pth = results_path.joinpath(filename)
    accuracy.to_csv(pth)

    with open(results_path.joinpath(texname), "w") as fh:
        fh.write("%% AUTOMATICALLY GENERATED -- DO NOT EDIT!\n")
        for model, stats in accuracy.iterrows():
            fh.write(util.newcommand(
                "%sAccuracy" % model.capitalize(),
                util.latex_percent.format(**dict(stats))))

    return pth
Example #16
def run(dest, results_path, counterfactual):
    """Write \\BayesFactor... commands for the requested counterfactual
    condition, one per likelihood/version/trial-count combination."""
    results = pd\
        .read_csv(os.path.join(results_path, 'bayes_factors.csv'))\
        .groupby('counterfactual')\
        .get_group(counterfactual)\
        .set_index(['likelihood', 'version', 'num_mass_trials'])

    # Map index values to LaTeX-safe command-name fragments.
    replace = {
        'G': 'ExpTwo',
        'H': 'ExpOne',
        'I': 'ExpThree',
        1: 'OneTrial',
        2: 'TwoTrials',
        3: 'ThreeTrials',
        4: 'FourTrials',
        5: 'FiveTrials',
        -1: 'AcrossSubjs',
        8: '',
        20: ''
    }

    query = util.load_query()

    # Context manager closes the file even if formatting raises.
    with open(dest, "w") as fh:
        for (lh, version, num_trials), logk in results.iterrows():
            # CamelCase the likelihood, special-casing the IPE query.
            if lh == "ipe_" + query:
                lh = 'Ipe'
            else:
                lh = "".join([x.capitalize() for x in lh.split("_")])

            cmdname = "BayesFactor{}{}{}".format(
                lh, replace[version], replace[num_trials])
            fh.write(util.newcommand(cmdname, r"{logK:.2f}".format(**logk)))
def run(dest, results_path):
    """Write \\MassAccExp... commands giving human mass accuracy (as a
    percentage) per experiment version and kappa0 condition."""
    results = pd\
        .read_csv(os.path.join(results_path, "human_mass_accuracy.csv"))\
        .set_index(['version', 'kappa0']) * 100

    latex_percent = util.load_config()["latex"]["percent"]

    # kappa0 arrives as a string ('all', '-1.0', '1.0') from the CSV.
    replace = {
        'H': 'One',
        'G': 'Two',
        'I': 'Three',
        '-1.0': 'KappaLow',
        '1.0': 'KappaHigh',
        'all': ''
    }

    # Context manager closes the file even if formatting raises.
    with open(dest, "w") as fh:
        for (version, kappa0), accuracy in results.iterrows():
            cmdname = "MassAccExp{}{}".format(
                replace[version], replace[kappa0])
            fh.write(util.newcommand(
                cmdname, latex_percent.format(**accuracy)))
def _camelcase_likelihood(lh, query):
    """CamelCase a likelihood name, special-casing the IPE query."""
    if lh == 'ipe_' + query:
        return 'Ipe'
    return "".join([x.capitalize() for x in lh.split('_')])


def _write_stim_corrs(fh, corrs, prefix, query, latex_pearson):
    """Write one <prefix><Likelihood>{CF,NoCF} command per row of
    *corrs* (indexed by likelihood and counterfactual flag)."""
    for (lh, cf), row in corrs.iterrows():
        lh = _camelcase_likelihood(lh, query)
        suffix = "{}CF".format(lh) if cf else "{}NoCF".format(lh)
        fh.write(util.newcommand(
            "{}{}".format(prefix, suffix), latex_pearson.format(**row)))


def run(dest, results_path):
    """Write experiment-one by-stimulus correlation commands for model
    accuracy, model responses, and human-human agreement.

    The accuracy and response loops were previously duplicated inline;
    they now share _write_stim_corrs.
    """
    accuracy = pd\
        .read_csv(os.path.join(results_path, "mass_accuracy_by_stimulus_corrs.csv"))\
        .groupby('version')\
        .get_group('H')\
        .set_index(['likelihood', 'counterfactual'])

    responses = pd\
        .read_csv(os.path.join(results_path, "mass_responses_by_stimulus_corrs.csv"))\
        .groupby('version')\
        .get_group('H')\
        .set_index(['likelihood', 'counterfactual'])

    human = pd\
        .read_csv(os.path.join(results_path, "mass_by_stimulus_human_corrs.csv"))\
        .set_index('judgment')\
        .T

    latex_pearson = util.load_config()["latex"]["pearson"]
    query = util.load_query()

    # Context manager closes the file even if formatting raises.
    with open(dest, "w") as fh:
        _write_stim_corrs(fh, accuracy, "ExpOneMassAccStimCorr",
                          query, latex_pearson)
        _write_stim_corrs(fh, responses, "ExpOneMassRespStimCorr",
                          query, latex_pearson)

        fh.write(
            util.newcommand("MassRespHumanCorr",
                            latex_pearson.format(**human['mass? response'])))
        fh.write(
            util.newcommand("MassAccHumanCorr",
                            latex_pearson.format(**human['mass? correct'])))
def run(data, results_path, seed):
    """Estimate split-half reliability of the human experiment data.

    Repeatedly splits participants into two random halves, computes
    per-stimulus response-time and accuracy summaries for each half,
    and correlates the two halves.  Saves the 95% interval and median
    of the correlations to CSV and writes Exp...Corr LaTeX commands.

    Returns the path of the written CSV.
    """
    np.random.seed(seed)

    exp = data['exp'].set_index(['pid', 'trial'])

    # Unique participant ids, sorted, reindexed 0..n-1 so that the
    # positional lookups via the shuffled idx below line up.
    pids = pd.Series(exp.index.get_level_values('pid')).drop_duplicates()
    pids.sort_values(inplace=True)
    pids = pids.reset_index(drop=True)

    n = len(pids)
    m = 100  # number of random split-half iterations
    time_corrs = np.empty(m)
    acc_corrs = np.empty(m)

    for i in xrange(m):
        # Random permutation of participants, split into two halves.
        idx = np.arange(n)
        np.random.shuffle(idx)

        p0 = pids[idx[:int(n / 2)]]
        p1 = pids[idx[int(n / 2):]]

        # Dropping one half's pids keeps the *other* half's data.
        df0 = exp.drop(p0, axis=0, level='pid').reset_index()
        df1 = exp.drop(p1, axis=0, level='pid').reset_index()

        # Per-stimulus log-mean response time on correct trials only.
        tm0 = df0[df0['correct']]\
            .groupby(['stimulus', 'theta', 'flipped'])['time']\
            .apply(util.logmean)
        tm1 = df1[df1['correct']]\
            .groupby(['stimulus', 'theta', 'flipped'])['time']\
            .apply(util.logmean)

        # Per-stimulus median accuracy estimate for each half.
        am0 = df0\
            .groupby(['stimulus', 'theta', 'flipped'])['correct']\
            .apply(util.beta).unstack(-1)['median']
        am1 = df1\
            .groupby(['stimulus', 'theta', 'flipped'])['correct']\
            .apply(util.beta).unstack(-1)['median']

        # Align both halves on stimuli, then correlate half vs. half.
        means = pd.DataFrame({'tm0': tm0, 'tm1': tm1, 'am0': am0, 'am1': am1})
        time_corrs[i] = scipy.stats.pearsonr(means['tm0'], means['tm1'])[0]
        acc_corrs[i] = scipy.stats.pearsonr(means['am0'], means['am1'])[0]

    # 95% interval and median across the m split-half correlations.
    time_stats = np.percentile(time_corrs, [2.5, 50, 97.5])
    acc_stats = np.percentile(acc_corrs, [2.5, 50, 97.5])

    results = pd.DataFrame({
        'time': time_stats,
        'accuracy': acc_stats
    }, index=['lower', 'median', 'upper']).T.reset_index()
    results['model'] = 'exp'
    results = results\
        .rename(columns={'index': 'measure'})\
        .set_index(['model', 'measure'])

    pth = results_path.joinpath(filename)
    results.to_csv(pth)

    with open(results_path.joinpath(texname), "w") as fh:
        fh.write("%% AUTOMATICALLY GENERATED -- DO NOT EDIT!\n")
        for (model, measure), stats in results.iterrows():
            cmd = util.newcommand(
                "Exp%sCorr" % measure.capitalize(),
                util.latex_pearson.format(**dict(stats)))
            fh.write(cmd)

    return pth