def psychometric_fit(xValues, nTrials, nHits, constraints=None, alpha=0.05):
    '''
    Estimate a psychometric curve from per-value performance counts.

    Function moved from jaratoolbox/extrastats.py.  Uses psignifit's
    BootstrapInference (yes/no task, nafc=1) without sampling.

    xValues: 1-D array of size M (stimulus values, e.g. frequencies)
    nTrials: 1-D array of size M (number of trials at each value)
    nHits:   1-D array of size M (number of hits at each value)
    constraints: psignifit prior strings; a default 4-tuple is used
        when None.
    alpha: unused here (the binofit confidence-interval computation
        was removed); kept for backward compatibility.

    Returns the psignifit parameter estimate (a 4-element vector).
    '''
    import pypsignifit as psi

    if constraints is None:
        constraints = ('flat', 'Uniform(0,0.3)', 'Uniform(0,0.2)',
                       'Uniform(0,0.2)')
    # psignifit expects columns: stimulus value, hits, trials.
    fitData = np.c_[xValues, nHits, nTrials]
    inference = psi.BootstrapInference(fitData,
                                       sample=False,
                                       priors=constraints,
                                       nafc=1)
    return inference.estimate
Example #2
0
def find_theta_and_thresh(xs, ys, zs, nsamples=500, thresh_val=0.75):
    """Bootstrap-fit a 2AFC psychometric function and locate its threshold.

    xs, ys, zs: equal-length sequences of stimulus intensity, response,
        and trial count, stacked row-wise for psignifit.
    nsamples: number of bootstrap samples for BootstrapInference.
    thresh_val: performance level at which the threshold is evaluated.

    Returns (mean of the bootstrap parameter estimates, threshold from
    the point estimate via Finv).
    """
    prior_mid = "Gauss(0,5)"
    prior_width = "Gauss(1,3)"
    prior_lapse = "Beta(1.5,12)"
    dataset = np.array(zip(xs, ys, zs))
    boot = psi.BootstrapInference(dataset,
                                  priors=[prior_mid, prior_width, prior_lapse],
                                  nafc=2,
                                  sample=nsamples)
    mean_theta = np.mean(boot.mcestimates, 0)
    return mean_theta, Finv(thresh_val, boot.estimate)
Example #3
0
def find_theta_and_thresh(xs, ys, zs, thresh_val, nboots=0):
    """Fit a 2AFC psychometric function and compute thresholds.

    xs, ys, zs: equal-length sequences of stimulus intensity, response,
        and trial count, stacked row-wise for psignifit.
    thresh_val: performance level at which thresholds are evaluated.
    nboots: bootstrap sample count; 0 means no sampling, in which case
        only the point estimate is available.

    Returns (mean_theta, mean_thresh, thetas, threshes): the mean
    parameter vector, its threshold, plus the per-sample parameter
    vectors and thresholds.
    """
    dataset = np.array(zip(xs, ys, zs))
    prior_mid = "Gauss(0,5)"
    prior_width = "Gauss(1,3)"
    prior_lapse = "Beta(1.5,12)"
    boot = psi.BootstrapInference(dataset,
                                  priors=[prior_mid, prior_width, prior_lapse],
                                  nafc=2,
                                  sample=nboots)
    # Without bootstrap samples, fall back to the single point estimate.
    if boot.mcestimates is not None:
        thetas = boot.mcestimates
    else:
        thetas = [boot.estimate]
    threshes = [Finv(thresh_val, theta) for theta in thetas]
    mean_theta = np.mean(thetas, 0)
    mean_thresh = Finv(thresh_val, mean_theta)
    return mean_theta, mean_thresh, thetas, threshes
Example #4
0
def integration_grid(data, run=1, dists=None):
    """Evaluate the (unnormalized) posterior of a psychometric-function
    fit on a regular 4-D parameter grid (m, w, lapse, guess).

    data: trial data array; column 0 holds the stimulus intensities.
    run:  1 -> first pass, grid bounds derived around the MAP estimate;
          2 -> refinement pass, grid nodes placed at quantiles of `dists`.
    dists: for run == 2, four per-parameter distribution specs
        (converted with dist2class) whose ppf defines the grid nodes.

    Returns (x, fx, priors): per-parameter coordinate axes, the
    marginalized posterior along each axis, and the prior tuple used.

    NOTE(review): Python 2 code (`map`, `xrange`); `grid[s]` near the end
    indexes a numpy array with a list, which modern numpy requires to be
    a tuple.
    """
    data = np.array(data)
    # Default priors and plausible ranges; midpoint and width defaults
    # come from the stimulus levels.  The guess prior reuses the lapse
    # default -- presumably intentional; confirm with pfp's API.
    mprior, mmin, mmax = pfp.default_mid(data[:, 0])
    wprior, wmin, wmax = pfp.default_width(data[:, 0])
    lprior, lmin, lmax = pfp.default_lapse()
    gprior, gmin, gmax = pfp.default_lapse()
    priors = (mprior, wprior, lprior, gprior)

    pdata, ppmf, pn = sfu.make_dataset_and_pmf(data, 1, "logistic", "mw0.1",
                                               priors)
    # MAP point estimate used to derive the first-pass grid bounds.
    mapest = pf.BootstrapInference(data, priors, nafc=1).estimate

    if run == 1:
        gridsize = 9
        # Per-parameter bounds around the MAP estimate.
        mmin, mmax = bounds(mapest, pdata, ppmf, "m")
        wmin, wmax = bounds(mapest, pdata, ppmf, "w")
        lmin, lmax = bounds(mapest, pdata, ppmf, "lm")
        gmin, gmax = bounds(mapest, pdata, ppmf, "gm")
        # Regular 9x9x9x9 grid, flattened to shape (4, 9**4).
        grid = np.reshape(
            np.mgrid[mmin:mmax:1j * gridsize, wmin:wmax:1j * gridsize,
                     lmin:lmax:1j * gridsize, gmin:gmax:1j * gridsize],
            (4, -1))
    elif run == 2:
        # Refinement: 7 nodes per parameter at equally spaced quantiles
        # (2.5% .. 97.5%) of the supplied marginal distributions.
        f_m = dist2class(dists[0])
        f_w = dist2class(dists[1])
        f_l = dist2class(dists[2])
        f_g = dist2class(dists[3])
        grid = np.mgrid[.025:.975:7j, .025:.975:7j, .025:.975:7j, .025:.975:7j]
        grid[0] = f_m.ppf(grid[0])
        grid[1] = f_w.ppf(grid[1])
        grid[2] = f_l.ppf(grid[2])
        grid[3] = f_g.ppf(grid[3])
        grid = np.reshape(grid, (4, -1))
        gridsize = 7

    # Evaluate neglpost at every grid node, then exponentiate.
    post = np.reshape(
        np.array(map(lambda prm: ppmf.neglpost(prm, pdata), grid.T)),
        [gridsize] * pn)  # negative log posterior
    post = np.exp(-post)  # posterior
    grid = np.reshape(grid, [pn] + [gridsize] * pn)

    # Grid spacing per axis (max first difference), used as integration
    # weights when marginalizing.
    d = [np.diff(grid[i], axis=i)[0].max() for i in xrange(pn)]

    # 1-D marginal posterior along each parameter axis.
    fx = [marginalize(post, d, i) for i in xrange(pn)]
    x = []
    for i in xrange(pn):
        # Extract the coordinate axis for parameter i: fix all other
        # axes at index 0 and slice along axis i.
        s = [i] + [0] * pn
        s[i + 1] = slice(0, grid.shape[i + 1])
        x.append(grid[s])
    return x, fx, priors
Example #5
0
 def addData(self, response):
     """Record one response; once the initial phase completes, fit a
     psychometric function and build the shuffled post-init schedule.

     response: the observer's response for the current trial, appended
         to self.y (the running response list).
     """
     self.y.append(response)
     self.i += 1
     # BUG FIX: the original tested the bare name `i` (a NameError at
     # runtime) instead of the trial counter `self.i`.
     if self.i == self.nxInit * self.nInit:
         B = psi.BootstrapInference(
             self.y,
             core='ab',
             sigmoid='gauss',
             priors=('unconstrained', 'unconstrained',
                     'Uniform(0.0399,0.0401)', 'Uniform(0.0399,0.0401)'),
             nafc=1)
         # BUG FIX: random.shuffle() shuffles in place and returns None,
         # so the original stored self.x = None.  Shuffle a local array
         # and assign it.  Also, `nxPost` was an unbound bare name; the
         # instance attribute was presumably intended -- TODO confirm.
         xPost = np.linspace(B.estimate - 4 * B.deviance,
                             B.estimate + 4 * B.deviance, self.nxPost)
         random.shuffle(xPost)
         self.x = xPost
Example #6
0
def psychometric_fit(xValues, nTrials, nHits, constraints=None, alpha=0.05):
    '''
    Estimate a psychometric curve given performance at each stimulus value.

    Uses psignifit's BootstrapInference (yes/no task, nafc=1), no sampling:
    http://psignifit.sourceforge.net/TUTORIAL_BOOTSTRAP.html

    xValues: 1-D array of size M (stimulus values)
    nHits:   1-D array of size M (hits per value)
    nTrials: 1-D array of size M (trials per value)
    constraints: psignifit prior strings; defaults supplied when None.
    alpha: unused (the binofit confidence-interval computation was
        removed); kept for interface compatibility.

    Returns the psignifit parameter estimate.
    '''
    import pypsignifit as psi

    defaultConstraints = ('flat', 'Uniform(0,0.3)',
                          'Uniform(0,0.2)', 'Uniform(0,0.2)')
    if constraints is None:
        constraints = defaultConstraints
    # Columns expected by psignifit: stimulus value, hits, trials.
    fitData = np.c_[xValues, nHits, nTrials]
    inference = psi.BootstrapInference(fitData,
                                       sample=False,
                                       priors=constraints,
                                       nafc=1)
    return inference.estimate
Example #7
0
    def plot(self, fileName):
        """Read psychometric data from `fileName`, fit each condition with
        psignifit (BootstrapInference), and draw data points plus fitted
        curves on self.graph (a pyqtgraph-style plot widget).

        fileName: path passed to readFile(), which yields per-condition
            x and y dictionaries plus the list of conditions.
        """
        self.graph.clear()
        self.graph.legend.items = []
        # Horizontal guide lines at the pinned guess/lapse asymptotes.
        self.graph.addLine(y=0.04)
        self.graph.addLine(y=0.96)
        (xDict, yDict, conditions) = readFile(fileName)
        # One color per condition, recycled modulo 6.
        colors = ['F00', '0F0', '00F', 'AA0', '0AA', 'A0A']
        for i in range(len(conditions)):
            condition = conditions[i]
            x = xDict[condition]
            y = yDict[condition]
            #print("c: {}\nx: {}\ny: {}\n".format(condition, x, y))
            # One trial per data point (Python 2: zip yields a list).
            data = zip(x, y, [1] * len(x))
            # Guess/lapse rates pinned near 0.04, matching the guide lines.
            constraints = ('unconstrained', 'unconstrained',
                           'Uniform(0.0399,0.0401)', 'Uniform(0.0399,0.0401)')
            B = psi.BootstrapInference(data,
                                       core='ab',
                                       sigmoid='gauss',
                                       priors=constraints,
                                       nafc=1)
            print("est: {}, dev: {}".format(B.estimate, B.deviance))
            # Raw data points, no connecting line.
            self.graph.plot(x,
                            y,
                            pen=None,
                            symbolPen={'color': colors[i % 6]},
                            symbolSize=6,
                            antialias=True)

            # Smooth fitted curve over the data range; f() presumably
            # evaluates the sigmoid with fitted (a, b) -- confirm against
            # its definition elsewhere in the file.
            xx = np.linspace(min(x), max(x), 100)
            yy = []
            for p in xx:
                yy.append(f(p, B.estimate[0], B.estimate[1]))
            self.graph.plot(xx,
                            yy,
                            pen={
                                'color': colors[i % 6],
                                'width': 2
                            },
                            name="{}: {:.3f} +/- {:.3f}".format(
                                condition, B.estimate[0], B.estimate[1]))
Example #8
0
    # Fragment of a posterior-refinement script (Python 2): build an
    # integration grid, fit the posterior, then refine it `nrefine` times.
    nrefine = 2
    print data

    x, fx, priors = integration_grid(data)
    print "x1 =", x
    print "f1 =", fx
    post = fit_posterior(fx, x)
    for i in xrange(nrefine):
        # Refinement passes (run=2) place grid nodes at quantiles of the
        # current posterior fit.
        x, fx, priors = integration_grid(data, 2, post)
        print post
        print "x%d =" % (i + 1, ), x
        print "f%d =" % (i + 1), fx
        post = fit_posterior(fx, x)
    f = [sfu.get_prior(p) for p in post]

    # MAP estimate plus pooled dataset/pmf, then importance resampling
    # from the refined posterior.
    mapest = pf.BootstrapInference(data, priors, core="mw0.1", nafc=1).estimate
    pdata, ppmf, pn = sfu.make_dataset_and_pmf(data, 1, "logistic", "mw0.1",
                                               priors)
    samples = sample_importance_resample(post,
                                         pdata,
                                         ppmf,
                                         nresample=600,
                                         nsamples=6000)

    # Axis ranges and histogram axes for the four parameters (m, w, lm, gm).
    rng = [(3, 7), (0, 6), (0, .5), (0, .5)]

    hist_ax = [
        pl.axes([.15 + .2 * i, .75 - .2 * i, .15, .15]) for i in xrange(4)
    ]
    labels = ["m", "w", "lm", "gm"]
        #plt.setp(pdots,ms=6,mec='k',mew=2,mfc='k')
        plt.hold(True)
        # -- Calculate and plot psychometric fit --
        constraints = None

        #constraints = ['Uniform({},{})'.format(logPossibleValues[0],logPossibleValues[-1]), 'unconstrained' ,'unconstrained', 'unconstrained']
        #curveParams = extrastats.psychometric_fit(possibleValues,nTrialsEachValue, nHitsEachValue)

        # -- Fit psy curve with psi.BoostrapInference object -- #

        # Stack log-scaled stimulus values with hit and trial counts,
        # one row per stimulus value, as psignifit expects.
        data = np.c_[logPossibleValues, nHitsEachValue, nTrialsEachValue]

        # linear core 'ab': (x-a)/b; logistic sigmoid: 1/(1+np.exp(-(xval-alpha)/beta))
        psyCurveInference = psi.BootstrapInference(data,
                                                   sample=False,
                                                   sigmoid='logistic',
                                                   core='ab',
                                                   priors=constraints,
                                                   nafc=1)

        curveParams = psyCurveInference.estimate
        deviance = psyCurveInference.deviance
        predicted = psyCurveInference.predicted
        # Unpack the four fitted psignifit parameters.
        (alpha, beta, lapse, guess) = list(curveParams)

        # Evaluate the fitted curve on a range padded 20% beyond the data.
        # NOTE(review): xRange uses xValues[1] as its lower end -- possibly
        # a typo for xValues[0]; confirm intent.
        xValues = logPossibleValues
        xRange = xValues[-1] - xValues[1]
        fitxval = np.linspace(xValues[0] - 0.2 * xRange,
                              xValues[-1] + 0.2 * xRange, 40)
        fityval = psyCurveInference.evaluate(x=fitxval)
        #fityval = extrastats.psychfun(fitxval,*curveParams)
        plt.plot(fitxval,
            # Select this stimulus type's trials from the session arrays.
            targetFrequencyThisBlock = targetFrequency[trialsEachType[:,stimType]]    
            validThisBlock = valid[trialsEachType[:,stimType]]
            choiceRightThisBlock = choiceRight[trialsEachType[:,stimType]]
            
            # Per-frequency fraction of rightward choices plus CIs/counts.
            (possibleValues,fractionHitsEachValue,ciHitsEachValue,nTrialsEachValue,nHitsEachValue)=\
                                                                                                    behavioranalysis.calculate_psychometric(choiceRightThisBlock,targetFrequencyThisBlock,validThisBlock)
            
            #plot psychometric curve
            (pline, pcaps, pbars, pdots) = extraplots.plot_psychometric(1e-3*possibleValues,fractionHitsEachValue,
                                                                ciHitsEachValue,xTickPeriod=1)

            # Color the plotted elements by stimulus type and remember the
            # line handle/label for the shared legend.
            plt.setp((pline, pcaps, pbars), color=FREQCOLORS[stimType])
            plt.setp(pdots, mfc=FREQCOLORS[stimType], mec=FREQCOLORS[stimType])
            allPline.append(pline)
            curveLegends.append(stimLabels[stimType])

            ####fitting psychometric curve using BootstrapInference
            data = zip(possibleValues,nHitsEachValue,nTrialsEachValue)
            constraints=('flat','Uniform(0,0.3)' ,'Uniform(0,0.2)', 'Uniform(0,0.2)')
            ###parameters that may need to be optimized: priors,sigmoid and core functions
            session = psi.BootstrapInference(data,sample=True, priors=constraints, nafc=1, sigmoid='logistic', core='ab')
            fittedSessions.append(session)
    # After the loop: overlay all fitted PMFs, then plot per-session
    # parameter distributions in the subplot below.
    plt.hold(1)
    psi.psigniplot.plotMultiplePMFs(*fittedSessions)
    ax2=plt.subplot(gs[thisAnimalPos+1,0])
    for session in fittedSessions:
        #session.sample()
        psi.psigniplot.ParameterPlot(session)
        plt.hold(1)
    plt.show()
Example #11
0
# Simulation loop (Python 2 script fragment): run repeated experiments on a
# synthetic observer and bootstrap-fit each dataset.
sys.stderr.write("\n")
for simulation in xrange(options.nsimulations):
    sys.stderr.write("\nSimulation %d is running" % (simulation, ))
    # Fresh synthetic observer per simulation run.
    O = create_new_observer()
    # print "\nO=",O
    if not options.fixed_sequence:
        # Randomize stimulus order unless a fixed sequence was requested.
        random.shuffle(x)
    data = O.DoAnExperiment(x, ntrials=options.blocksize)
    print "\ndata =", data
    print constraints

    # Tag this simulation's rows in the output file.
    write_id_gen(outfile, simulation)

    if nonparametric:
        # Nonparametric bootstrap fit; the sampling step is timed.
        Bnpr = pypsignifit.BootstrapInference(data,
                                              priors=constraints,
                                              parametric=False,
                                              **ana_kwargs)
        if options.fixed_pmf:
            # NOTE(review): overrides the fit with OO.params -- presumably
            # the true generating parameters, to isolate CI coverage from
            # estimation error; confirm OO against the surrounding script.
            Bnpr.estimate = OO.params
        tic = time.time()
        Bnpr.sample(options.nbootstrap)
        toc = time.time()
        # Tally whether the true observer lies inside the bootstrap CIs.
        count_npr += check_ci(O, Bnpr)
        write_nonparametric(outfile, Bnpr, toc - tic)
        print "Done npar"

    if parametric:
        # Parametric bootstrap fit (handling continues past this excerpt).
        Bpar = pypsignifit.BootstrapInference(data,
                                              priors=constraints,
                                              parametric=True,
                                              **ana_kwargs)