import os

import numpy as np
import pandas as pd

import util


def run(dest, results_path, seed, version):
    np.random.seed(seed)

    human = pd\
        .read_csv(os.path.join(results_path, "human_fall_responses.csv"))\
        .set_index(['version', 'block', 'stimulus', 'kappa0'])['median']\
        .sort_index()

    human_raw = pd\
        .read_csv(os.path.join(results_path, "human_fall_responses_raw.csv"))\
        .set_index(['version', 'block', 'stimulus', 'kappa0', 'pid'])['fall? response']\
        .sort_index()

    human_version = human.loc[version]
    human_exp1 = human.loc['H']
    human_exp2 = human.loc['G']

    model = pd.read_csv(
        os.path.join(results_path, "single_model_fall_responses.csv"))

    results = {}
    for block in ['A', 'B']:
        h1 = human_exp1.loc[block]
        h2 = human_exp2.loc[block]
        results[(block, 'H', 'G')] = util.bootcorr(h1, h2)

        for query in model['query'].unique():
            hv = human_version.loc[block]
            m = model\
                .groupby(['block', 'query'])\
                .get_group((block, query))\
                .set_index(['stimulus', 'kappa0'])['median']\
                .sort_index()\
                .reindex(hv.index)

            results[(block, query, 'Human')] = util.bootcorr(m, hv)

        # `v` avoids shadowing the `version` argument above; `corr` is a
        # module-level helper (a hedged sketch appears after this function).
        for v in ['H', 'G']:
            hraw = np.asarray(human_raw.loc[(v, block)].unstack('pid'))
            corrs = np.array([corr(hraw) for _ in range(10000)])
            results[(block, v, v)] = pd.Series(
                np.percentile(corrs, [2.5, 50, 97.5]),
                index=['lower', 'median', 'upper'])

    results = pd.DataFrame.from_dict(results).T
    results.index = pd.MultiIndex.from_tuples(results.index,
                                              names=['block', 'X', 'Y'])

    results.to_csv(dest)
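
# `corr`, used in run() above, is a module-level helper that does not appear
# in this listing.  A minimal sketch of what it might look like, assuming it
# estimates split-half consistency by randomly partitioning participants
# (the columns of `hraw`) into two halves and correlating the halves' mean
# responses; this is an assumption, not the original implementation:
def corr(arr):
    # arr is a (stimuli x participants) array of raw fall responses
    n = arr.shape[1]
    ix = np.random.permutation(n)
    half1 = np.nanmean(arr[:, ix[:n // 2]], axis=1)
    half2 = np.nanmean(arr[:, ix[n // 2:]], axis=1)
    return np.corrcoef(half1, half2)[0, 1]
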
def run(data, results_path, seed):
    np.random.seed(seed)

    means = pd.read_csv(results_path.joinpath("accuracy_means.csv"))
    results = {}
    exclude = ['expA', 'expB', 'gs']
    for (model, flipped), df in means.groupby(['model', 'flipped']):
        if model in exclude:
            continue
        results[(model, flipped)] = util.bootcorr(
            df['modtheta'], df['median'], method='spearman')

    results = pd.DataFrame.from_dict(results, orient='index')
    results.index = pd.MultiIndex.from_tuples(
        results.index, names=['model', 'flipped'])
    pth = results_path.joinpath(filename)
    results.to_csv(pth)

    with open(results_path.joinpath(texname), "w") as fh:
        fh.write("%% AUTOMATICALLY GENERATED -- DO NOT EDIT!\n")
        for (model, flipped), stats in results.iterrows():
            cmd = util.newcommand(
                "%sThetaAccuracyCorr%s" % (
                    model.capitalize(),
                    flipped.capitalize()),
                util.latex_spearman.format(**dict(stats)))
            fh.write(cmd)

    return pth
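
# `filename` and `texname` are module-level constants that do not appear in
# this listing.  A plausible sketch, assuming each analysis script names its
# CSV and .tex outputs after itself (an assumption, not the original
# definitions):
filename = os.path.splitext(os.path.basename(__file__))[0] + ".csv"
texname = os.path.splitext(os.path.basename(__file__))[0] + ".tex"
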
def run(data, results_path, seed):
    np.random.seed(seed)
    keys = ['exp', 'expA', 'expB']

    means = pd.read_csv(results_path.joinpath("trial_accuracy_means.csv"))

    results = {}
    for key in keys:
        df = means.groupby('model').get_group(key)
        trials = df['trial']
        accuracy = df['median']
        corr = util.bootcorr(trials, accuracy, method='spearman')
        results[key] = corr

    results = pd.DataFrame.from_dict(results, orient='index')
    results.index.name = 'model'
    pth = results_path.joinpath(filename)
    results.to_csv(pth)

    with open(results_path.joinpath(texname), "w") as fh:
        fh.write("%% AUTOMATICALLY GENERATED -- DO NOT EDIT!\n")
        for model, stats in results.iterrows():
            cmd = util.newcommand(
                "%sTrialAccuracyCorr" % model.capitalize(),
                util.latex_spearman.format(**dict(stats)))
            fh.write(cmd)

    return pth
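
# `util.newcommand`, `util.latex_spearman`, and `util.latex_pearson` come from
# the project's util module, which is not shown here.  Judging from the usage
# above, the format strings appear to expect the 'lower', 'median', and
# 'upper' fields returned by util.bootcorr, and newcommand wraps a value in a
# LaTeX \newcommand definition.  A hypothetical sketch of such helpers
# (assumed, not the actual util module):
latex_spearman = r"$\rho_s={median:.2f}$, 95\% CI $[{lower:.2f}, {upper:.2f}]$"
latex_pearson = r"$r={median:.2f}$, 95\% CI $[{lower:.2f}, {upper:.2f}]$"


def newcommand(name, value):
    # e.g. newcommand("ExpTrialAccuracyCorr", latex_spearman.format(...))
    return "\\newcommand{\\%s}{%s}\n" % (name, value)
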
def run(data, results_path, seed):
    np.random.seed(seed)

    means = pd.read_csv(results_path.joinpath("response_time_means.csv"))
    means = means\
        .set_index(['stimulus', 'theta', 'flipped', 'model'])['median']\
        .unstack('model')

    results = {}
    exclude = ['exp', 'expA', 'expB', 'gs']
    for key in means:
        if key in exclude:
            continue

        for flipped in ['same', 'flipped']:
            corr = util.bootcorr(
                means['exp'].unstack('flipped')[flipped],
                means[key].unstack('flipped')[flipped],
                method='pearson')
            results[(key, flipped)] = corr

        corr = util.bootcorr(
            means['exp'],
            means[key],
            method='pearson')
        results[(key, 'all')] = corr

    results = pd.DataFrame.from_dict(results, orient='index')
    results.index = pd.MultiIndex.from_tuples(
        results.index, names=['model', 'flipped'])
    pth = results_path.joinpath(filename)
    results.to_csv(pth)

    with open(results_path.joinpath(texname), "w") as fh:
        fh.write("%% AUTOMATICALLY GENERATED -- DO NOT EDIT!\n")
        for (model, flipped), stats in results.iterrows():
            if flipped != 'all':
                continue
            cmd = util.newcommand(
                "%sTimeCorr" % model.capitalize(),
                util.latex_pearson.format(**dict(stats)))
            fh.write(cmd)

    return pth
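
# `util.bootcorr` is the project's bootstrapped-correlation helper.  From its
# use above, it takes two aligned sequences plus a `method` keyword and
# appears to return a Series with 'lower', 'median', and 'upper' entries.  A
# minimal sketch of such a helper, assuming a percentile bootstrap over paired
# samples; this is not the project's implementation:
import scipy.stats


def _bootcorr_sketch(x, y, method='pearson', nsamples=10000):
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    corrfunc = {'pearson': scipy.stats.pearsonr,
                'spearman': scipy.stats.spearmanr}[method]
    corrs = np.empty(nsamples)
    for i in range(nsamples):
        # resample the paired observations with replacement
        ix = np.random.randint(0, len(x), len(x))
        corrs[i] = corrfunc(x[ix], y[ix])[0]
    return pd.Series(
        np.percentile(corrs, [2.5, 50, 97.5]),
        index=['lower', 'median', 'upper'])
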
def bootcorr(x, y, **kwargs):
    # Thin wrapper around util.bootcorr for (name, column) pairs, such as the
    # items yielded when iterating over a DataFrame's columns: unpack the
    # pair, compute the bootstrapped correlation, and name the result.
    name, x = x
    corr = util.bootcorr(x, y, **kwargs)
    corr.name = name
    return corr
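
# A hypothetical usage sketch for the wrapper above (synthetic data, not taken
# from the analyses in this file; it still requires the project's util
# module).  Because `x` arrives as a (name, column) pair, the wrapper can be
# mapped directly over DataFrame.items(), and each resulting Series keeps its
# column's name:
if __name__ == "__main__":
    np.random.seed(0)
    target = pd.Series(np.random.rand(20))
    models = pd.DataFrame(np.random.rand(20, 2), columns=['modelA', 'modelB'])
    corrs = pd.DataFrame([bootcorr(col, target, method='pearson')
                          for col in models.items()])
    print(corrs)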