def get_unconditional_results(df, action, monitor_num, dep_var):
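    # Bootstrapped difference in means of `dep_var` between the `action` and
    # 'control' conditions, resampling by 'coin_name' groups (1000 samples),
    # using the project's utils helpers.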

    data = utils.get_data(df, action, monitor_num)

    results = utils.group_bootstrap(lambda x: utils.diff_in_means(
        x, dep_var, 'condition', 'control', action),
                                    data,
                                    'coin_name',
                                    samples=1000)

    return results


def empirical_state_actions(version):
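    """Return (trial_id, clicked-state targets) pairs for the test-block
    trials of the mouselab-mdp task in the given experiment `version`."""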
    import sys
    sys.path.append('../experiment/lib')
    from analysis_utils import get_data

    data = get_data(version, '../experiment/data')
    tdf = data['mouselab-mdp'].query('block == "test"')

    def extract(q):
        return list(map(int, q['click']['state']['target']))

    return zip(tdf.trial_id, tdf.queries.apply(extract))
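

# The main() fragments below were extracted from larger analysis scripts and
# rely on module-level imports that this listing does not show. A minimal
# sketch of what they appear to need (the PsychoPy and analysis_utils lines
# are project-specific guesses, so they are left commented out):
import os

import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import leastsq

# from psychopy.gui import fileOpenDlg  # used as fileOpenDlg() below
# from psychopy import gui              # used as gui.fileOpenDlg(...) below
# import analysis_utils as utils        # used as utils.get_data(...) below
# import analysis_utils as ana          # used as ana.get_data(...) below
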
def main(file_name=None):
    """ Run the analysis on data in a file"""

    # Define these two within the scope of main:
    def func(pars):
        a,b,c = pars
        return a*(x**2) + b*x + c 

    def errfunc(pars):
        return y-func(pars)

    if file_name is None: 
        #path_to_files = '/Volumes/Plata1/Shared/Ariel/texture_data/'
        file_name =  fileOpenDlg()[0]
    
    p,l,data_rec = utils.get_data(file_name)
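    # utils.get_data appears to return the run parameters (p), the data-file
    # header/labels (l), and a record array of per-trial data (data_rec).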
    neutral = data_rec[np.where(data_rec['neutral'])]
    peripheral = data_rec[np.where(data_rec['neutral']==0)]
    cond_str = ['Neutral', 'Cued']
    colors = ['b','r']
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)

    print("SOA used was: %s msec"%(1000*p[' texture_dur']))
    print("% correct: ")

    for cond_idx,cond_rec in enumerate([neutral,peripheral]):
        correct = cond_rec['correct']
        ecc = cond_rec['target_ecc']

        #Bin the eccentricities: 
        a = np.floor(ecc)
        eccs_used = np.unique(a)

        #Initialize counters: 
        b = np.zeros(len(eccs_used))
        c = np.zeros(len(eccs_used))

        #Loop over the trials and add the correct to one counter and the number of
        #trials to the other: 
        for i in range(len(correct)):
            idx = np.where(eccs_used == np.floor(ecc[i]))
            b[idx] += correct[i]
            c[idx] += 1.0

        p_correct = b/c
        print("%s: %s "%(cond_str[cond_idx], np.mean(p_correct)*100))
        
        for i, p_corr in enumerate(p_correct):
            ax.plot(eccs_used[i], p_corr, 'o', color=colors[cond_idx],
                    markersize=c[i])
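
        # Repeat each binned eccentricity and its accuracy by that bin's trial
        # count, so the quadratic fit below is weighted by trial number, then
        # fit a*x**2 + b*x + c with leastsq and plot it on a fine grid.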

        x = []
        y = []

        for i,this_ecc in enumerate(eccs_used):
            x = np.hstack([x,c[i]*[this_ecc]])    
            y = np.hstack([y,c[i]*[p_correct[i]]])

        guess = 1,1,1
        fit, mesg = leastsq(errfunc,guess)
        x = np.arange(0,np.max(x),0.01)        
        ax.plot(x,func(fit),'--',color=colors[cond_idx],
                label=cond_str[cond_idx])
        
    ax.legend()
    ax.set_xlim([-1,13])
    ax.set_ylim([0,1.1])
    ax.set_xlabel('Eccentricity (degrees)')
    ax.set_ylabel('Proportion correct responses')

    fig_name = 'figures/' + file_name.split('.')[0].split('/')[-1] + '.png'
    fig.savefig(fig_name)
    os.system('open %s'%fig_name)
Example #4
import sys

from matplotlib.mlab import csv2rec
import matplotlib.pyplot as plt
import numpy as np

import analysis_utils as a

if __name__ == "__main__":

    path_to_files = '/Volumes/Plata1/Shared/Ariel/fiorentini_data/'
    file_name = sys.argv[1]
    p, l, data_rec = a.get_data('%s/%s' % (path_to_files, file_name))
    per_idx = np.where(data_rec['task'] == 'periphery')
    fix_idx = np.where(data_rec['task'] == 'fixation')
    c = data_rec['correct']
    amp = data_rec['contrast']
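
    # Fit psychometric functions to contrast vs. accuracy separately for the
    # peripheral and fixation tasks (a.analyze appears to return the fit
    # parameters along with the binned data plotted below).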

    th_per = a.analyze(amp[per_idx], c[per_idx])
    th_fix = a.analyze(amp[fix_idx], c[fix_idx])

    x, y = th_per[4], th_per[5]
    fig = plt.figure()
    ax = fig.add_subplot(1, 2, 1)
    ax.plot(x, y, 'o')
    x_for_plot = np.linspace(np.min(x), np.max(x), 100)

    ax.plot(x_for_plot, a.weibull(x_for_plot, th_per[0], th_per[3]))


def main(file_name=None):
    """Run the analysis on data in a file."""

    # Define these two within the scope of main:
    def func(pars):
        a, b, c = pars
        return a * (x ** 2) + b * x + c

    def errfunc(pars):
        return y - func(pars)

    if file_name is None:
        # path_to_files = '/Volumes/Plata1/Shared/Ariel/texture_data/'
        file_name = fileOpenDlg()[0]

    p, l, data_rec = utils.get_data(file_name)

    # For backwards compatibility, check if this variable exists:
    if "eye_moved" in l:
        data_rec = data_rec[np.where(data_rec["eye_moved"] == 0)]

    neutral = data_rec[np.where(data_rec["neutral"])]
    peripheral = data_rec[np.where(data_rec["neutral"] == 0)]
    cond_str = ["Neutral", "Cued"]
    colors = ["b", "r"]
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)

    print("SOA used was: %s msec" % (1000 * p[" texture_dur"]))
    print("% correct: ")

    for cond_idx, cond_rec in enumerate([neutral, peripheral]):
        correct = cond_rec["correct"]
        ecc = cond_rec["target_ecc"]

        # Bin the eccentricities:
        a = np.floor(ecc)
        eccs_used = np.unique(a)

        # Initialize counters:
        b = np.zeros(len(eccs_used))
        c = np.zeros(len(eccs_used))

        # Loop over the trials and add the correct to one counter and the number of
        # trials to the other:
        for i in range(len(correct)):
            idx = np.where(eccs_used == np.floor(ecc[i]))
            b[idx] += correct[i]
            c[idx] += 1.0

        p_correct = b / c
        print("%s: %s " % (cond_str[cond_idx], np.mean(p_correct) * 100))

        for i, p_corr in enumerate(p_correct):
            ax.plot(eccs_used[i], p_corr, "o", color=colors[cond_idx], markersize=c[i])

        x = []
        y = []

        for i, this_ecc in enumerate(eccs_used):
            x = np.hstack([x, c[i] * [this_ecc]])
            y = np.hstack([y, c[i] * [p_correct[i]]])

        guess = 1, 1, 1
        fit, mesg = leastsq(errfunc, guess)
        x = np.arange(0, np.max(x), 0.01)
        ax.plot(x, func(fit), "--", color=colors[cond_idx], label=cond_str[cond_idx])

    ax.legend()
    ax.set_xlim([-1, 13])
    ax.set_ylim([0, 1.1])
    ax.set_xlabel("Eccentricity (degrees)")
    ax.set_ylabel("Proportion correct responses")

    fig_name = "figures/" + file_name.split(".")[0].split("/")[-1] + ".png"
    fig.savefig(fig_name)
    os.system("open %s" % fig_name)
    bootstrap_n = 1000

    #Weibull params set in analysis_utils: The guessing rate is 0.25 for 4afc
    guess = 0.5
    flake = 0.01
    slope = 3.5

    file_names = gui.fileOpenDlg(tryFilePath='./data')

    for file_idx, file_name in enumerate(file_names):
        print(file_idx)
        if file_idx == 0:
            file_stem = file_name.split('/')[-1].split('.')[0]
        else:
            file_stem = file_stem + file_name[-8]
        p, l, data_rec = ana.get_data(str(file_name))
        # trials_per_condition must be an integer: it sizes the pooled arrays
        # and feeds range() below.
        trials_per_condition = int(float(p[' trials_per_block']) *
                                   (float(p[' num_blocks']) / 2.0))
        print(trials_per_condition)
        contrast = np.ones([len(file_names) * trials_per_condition, 1])
        correct = np.ones([len(file_names) * trials_per_condition, 1])
        data_rec = csv2rec(file_name)
        contrast_this_run = data_rec['annulus_target_contrast']
        correct_this_run = data_rec['correct']
        block_type = data_rec['block_type']
        print(p[' trials_per_dummy'])
        for n in range(trials_per_condition):
            if n >= p[' trials_per_dummy']:
                contrast[n + (trials_per_condition * file_idx)] *= contrast_this_run[n]
                correct[n + (trials_per_condition * file_idx)] *= correct_this_run[n]
        if not os.path.exists('data/analyzed_data'):
            os.mkdir('data/analyzed_data')