Example #1
# imports required by this snippet; p (priors), b (borders), poolData,
# getSigmoidHandle, psignifitCore and plotPsych are assumed to come from
# the psignifit package
import datetime as dt
import warnings

import numpy as np
def psignifit(data, options=None):
   
    #--------------------------------------------------------------------------
    #input parsing
    #--------------------------------------------------------------------------
    # data
    data = np.array(data)
    # if column 1 holds proportions correct, convert to counts
    if np.all(np.logical_and(data[:,1] <= 1, data[:,1] >= 0)) and np.any(np.logical_and(data[:,1] > 0, data[:,1] < 1)):
        data[:,1] = np.round(data[:,2] * data[:,1])  # e.g. [x, 0.75, 20] becomes [x, 15, 20]
        
    # options
        
    if options is None:
        options = {}

    if not('sigmoidName' in options.keys()):
        options['sigmoidName'] = 'norm'
    
    if not('expType' in options.keys()):
        options['expType'] = 'YesNo'

    if not('estimateType' in options.keys()):
        options['estimateType'] = 'MAP'

    if not('confP' in options.keys()):
        options['confP'] = [.95, .9, .68]
        
    if not('instantPlot' in options.keys()):
        options['instantPlot'] = 0
        
    if not('setBordersType' in options.keys()):
        options['setBordersType'] = 0
        
    if not('maxBorderValue' in options.keys()):
        options['maxBorderValue'] = .00001
        
    if not('moveBorders' in options.keys()):
        options['moveBorders'] = 1
        
    if not('dynamicGrid' in options.keys()):
        options['dynamicGrid'] = 0
        
    if not('widthalpha' in options.keys()):
        options['widthalpha'] = .05
        
    if not('threshPC' in options.keys()):
        options['threshPC'] = .5

    if not('CImethod' in options.keys()):
        options['CImethod'] = 'percentiles'

    if not('gridSetType' in options.keys()):
        options['gridSetType'] = 'cumDist'
        
    if not('fixedPars' in options.keys()):
        options['fixedPars'] = np.full((5,1), np.nan)
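    # Example (illustrative): to fix the lapse rate at 1% a caller would set
    #   options['fixedPars'][2] = 0.01
    # assuming the parameter order [threshold, width, lambda, gamma, eta];
    # NaN entries remain free and are estimated from the data.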
        
    if not('nblocks' in options.keys()):
        options['nblocks'] = 25
    
    if not('useGPU' in options.keys()):
        options['useGPU'] = 0
    
    if not('poolMaxGap' in options.keys()):
        options['poolMaxGap'] = np.inf
    
    if not('poolMaxLength' in options.keys()):
        options['poolMaxLength'] = np.inf
    
    if not('poolxTol' in options.keys()):
        options['poolxTol'] = 0
    
    if not('betaPrior' in options.keys()):
        options['betaPrior'] = 10
    
    if not('verbose' in options.keys()):
        options['verbose'] = 0
        
    if not('stimulusRange' in options.keys()):
        options['stimulusRange'] = 0
        
    if not('fastOptim' in options.keys()):
        options['fastOptim'] = False
    
    if options['expType'] in ['2AFC', '3AFC', '4AFC']:            
        options['expN'] = int(float(options['expType'][0]))
        options['expType'] = 'nAFC'

    if options['expType'] == 'nAFC' and not('expN' in options.keys()):
        raise ValueError('For nAFC experiments please also pass the number of alternatives (options.expN)')
    
    if options['expType'] == 'YesNo':
        if not('stepN' in options.keys()):
            options['stepN'] = [40,40,20,20,20]
        if not('mbStepN' in options.keys()):
            options['mbStepN'] = [25,30, 10,10,15]
    elif options['expType'] == 'nAFC' or options['expType'] == 'equalAsymptote':
        if not('stepN' in options.keys()):
            options['stepN'] = [40,40,20,1,20]
        if not('mbStepN' in options.keys()):
            options['mbStepN'] = [30,40,10,1,20]
    else:
        raise ValueError('You specified an illegal experiment type')
    
    assert max(data[:,0]) - min(data[:,0]) > 0, \
        'Your data does not have variance on the x-axis! This makes fitting impossible'
                 
                     
    '''
    log-space sigmoids
    We fit these functions on a log-transformed physical axis,
    because this makes the parameterization easier and the priors
    then match our expectations better.
    The flag is needed for setting the parameter bounds in setBorders.
    '''
    
    if options['sigmoidName'] in ['Weibull','logn','weibull']:
        options['logspace'] = 1
        assert min(data[:,0]) > 0, 'The sigmoid you specified is not defined for negative data points!'
    else:
        options['logspace'] = 0
        
    #if range was not given take from data
    if len(np.ravel(options['stimulusRange'])) <=1 :
        if options['logspace']:
            options['stimulusRange'] = np.array(np.log([min(data[:,0]),max(data[:,0])]))
        else :
            options['stimulusRange'] = np.array([min(data[:,0]),max(data[:,0])])

        stimRangeSet = False
    else:
        stimRangeSet = True
        if options['logspace']:
            options['stimulusRange'] = np.log(options['stimulusRange'])
    

    if not('widthmin' in options.keys()):
        if len(np.unique(data[:,0])) >1 and not(stimRangeSet):
            if options['logspace']:
                options['widthmin']  = min(np.diff(np.sort(np.unique(np.log(data[:,0])))))
            else:
                options['widthmin']  = min(np.diff(np.sort(np.unique(data[:,0]))))
        else:
            options['widthmin'] = 100*np.spacing(options['stimulusRange'][1])

    # add priors
    if options['threshPC'] != .5 and not('priors' in options.keys()):
        warnings.warn('psignifit:ThresholdPCchanged\n'\
            'You changed the percent correct corresponding to the threshold\n')
    
    if not('priors' in options.keys()):
        options['priors'] = p.getStandardPriors(data, options)
    else:
        
        priors = p.getStandardPriors(data, options)
        
        for ipar in range(5):
            if not callable(options['priors'][ipar]):
                options['priors'][ipar] = priors[ipar]
                
        p.checkPriors(data, options)
    if options['dynamicGrid'] and not('GridSetEval' in options.keys()):
        options['GridSetEval'] = 10000
    if options['dynamicGrid'] and not('UniformWeight' in options.keys()):
        options['UniformWeight'] = 1

    '''
    initialize
    '''        
    
    #warning if many blocks were measured
    if (len(np.unique(data[:,0])) >= 25) and (np.ravel(options['stimulusRange']).size == 1):
        warnings.warn('psignifit:probablyAdaptive\n'\
            'The data you supplied contained >= 25 stimulus levels.\n'\
            'Did you sample adaptively?\n'\
            'If so please specify a range which contains the whole psychometric function in options.stimulusRange.\n'\
            'This will allow psignifit to choose an appropriate prior.\n'\
            'For now we use the standard heuristic, assuming that the psychometric function is covered by the stimulus levels,\n'\
            'which is frequently invalid for adaptive procedures!')
    
    if all(data[:,2] <= 5) and (np.ravel(options['stimulusRange']).size == 1):
        warnings.warn('psignifit:probablyAdaptive\n'\
            'All provided data blocks contain <= 5 trials \n'\
            'Did you sample adaptively?\n'\
            'If so please specify a range which contains the whole psychometric function in options.stimulusRange.\n'\
            'This will allow psignifit to choose an appropriate prior.\n'\
            'For now we use the standard heuristic, assuming that the psychometric function is covered by the stimulus levels,\n'\
            'which is frequently invalid for adaptive procedures!')
    
    #pool data if necessary: more than options.nblocks blocks or only 1 trial per block
    if np.max(data[:,2]) == 1 or len(data) > options['nblocks']:
        warnings.warn('psignifit:pooling\n'\
            'We pooled your data, to avoid problems with n=1 blocks or to save time fitting because you have a lot of blocks\n'\
            'You can force acceptance of your blocks by increasing options.nblocks')
        data = poolData(data, options)
    
    
    # create function handle of sigmoid
    options['sigmoidHandle'] = getSigmoidHandle(options)
    
    # borders of integration
    if 'borders' in options.keys():
        borders = b.setBorders(data, options)
        options['borders'][np.isnan(options['borders'])] = borders[np.isnan(options['borders'])]
    else:
        options['borders'] = b.setBorders(data,options)
    
    border_idx = np.where(~np.isnan(options['fixedPars']))
    if border_idx[0].size > 0:
        options['borders'][border_idx[0], 0] = options['fixedPars'][border_idx]
        options['borders'][border_idx[0], 1] = options['fixedPars'][border_idx]
            
    # normalize priors to first choice of borders
    options['priors'] = p.normalizePriors(options)
    if options['moveBorders']:
        options['borders'] = b.moveBorders(data, options)
    
    ''' core '''
    result = psignifitCore(data,options)
        
    ''' after processing '''
    # check that the marginals go to nearly 0 at the borders of the grid
    if options['verbose'] > -5:
    
        if result['marginals'][0][0] * result['marginalsW'][0][0] > .001:
            warnings.warn('psignifit:borderWarning\n'\
                'The marginal for the threshold is not near 0 at the lower border.\n'\
                'This indicates that smaller Thresholds would be possible.')
        if result['marginals'][0][-1] * result['marginalsW'][0][-1] > .001:
            warnings.warn('psignifit:borderWarning\n'\
                'The marginal for the threshold is not near 0 at the upper border.\n'\
                'This indicates that your data is not sufficient to exclude much higher thresholds.\n'\
                'Refer to the paper or the manual for more info on this topic.')
        if result['marginals'][1][0] * result['marginalsW'][1][0] > .001:
            warnings.warn('psignifit:borderWarning\n'\
                'The marginal for the width is not near 0 at the lower border.\n'\
                'This indicates that your data is not sufficient to exclude much lower widths.\n'\
                'Refer to the paper or the manual for more info on this topic.')
        if result['marginals'][1][-1] * result['marginalsW'][1][-1] > .001:
            warnings.warn('psignifit:borderWarning\n'\
                'The marginal for the width is not near 0 at the upper border.\n'\
                'This indicates that your data is not sufficient to exclude much higher widths.\n'\
                'Refer to the paper or the manual for more info on this topic.')
    
    result['timestamp'] = dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    
    if options['instantPlot']:
        plotPsych(result)
        #plotBayes(result)  #TODO
    
       
    
    return result
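'''
A minimal usage sketch (stimulus levels and counts below are made up for
illustration; the data format is [stimulus level, number correct, number of trials],
as the parser above expects):
'''
data_example = np.array([[0.1,  4, 10],
                         [0.2,  6, 10],
                         [0.3,  9, 10],
                         [0.4, 10, 10]])
result = psignifit(data_example, {'sigmoidName': 'norm', 'expType': 'YesNo'})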
Example #2
# imports required by this snippet; psignifit and the plot module (exposing
# plt and the plotPsych/plotMarginal helpers) are assumed to come from the
# psignifit package
import numpy as np
import scipy.stats
def biasAna(data1, data2, options=None):
    """ function biasAna(data1, data2, options)
    runs a short analysis to see whether two 2AFC datasets have a bias and
    whether it can be explained with a "finger bias" -> a bias in guessing """

    # copy rather than clobber the caller's options dict
    options = dict(options) if options else dict()
    options['borders'] = np.empty([5,2])
    options['borders'][:] = np.nan
    options['expType'] = 'YesNo'

    options['priors'] = [None]*5
    options['priors'][3] = lambda x: scipy.stats.beta.pdf(x,2,2)    
    options['borders'][2,:] = np.array([0,.1])
    options['borders'][3,:] = np.array([.11,.89])
    options['fixedPars'] = np.ones([5,1])*np.nan
    options['fixedPars'][4] = 0
    options['stepN']   = np.array([40,40,40,40,1])
    options['mbStepN'] = np.array([30,30,20,20,1])

    resAll = psignifit(np.append(data1, data2, axis=0),options)
    res1 = psignifit(data1,options)
    res2 = psignifit(data2,options)

    plot.plt.figure()
    a1 = plot.plt.axes([0.15,4.35/6,0.75,1.5/6])

    plot.plotPsych(resAll,showImediate=False)
    # (no plt.hold needed: matplotlib holds new artists by default,
    #  and plt.hold was removed in matplotlib 3.0)
    
    plot.plotPsych(res1, lineColor= [1,0,0], dataColor = [1,0,0],showImediate=False)
    plot.plotPsych(res2,lineColor= [0,0,1], dataColor = [0,0,1],showImediate=False)
    plot.plt.ylim([0,1])

    a2 = plot.plt.axes([0.15,3.35/6,0.75,0.5/6])

    plot.plotMarginal(resAll,dim = 0,prior = False, CIpatch = False, lineColor = [0,0,0],showImediate=False)
    
    plot.plotMarginal(res1,dim = 0,lineColor = [1,0,0],showImediate=False)
    plot.plotMarginal(res2,dim = 0,lineColor=[0,0,1],showImediate=False)
    a2.relim()
    a2.autoscale_view()

    a3 = plot.plt.axes([0.15,2.35/6,0.75,0.5/6])
    plot.plotMarginal(resAll,dim = 1,prior = False, CIpatch=False, lineColor = [0,0,0],showImediate=False)

    plot.plotMarginal(res1,dim = 1,lineColor=[1,0,0],showImediate=False)
    plot.plotMarginal(res2,dim = 1,lineColor=[0,0,1],showImediate=False)
    a3.relim()
    a3.autoscale_view()

    a4 = plot.plt.axes([0.15,1.35/6,0.75,0.5/6])

    plot.plotMarginal(resAll,dim = 2, prior = False, CIpatch = False, lineColor = [0,0,0],showImediate=False)
    
    plot.plotMarginal(res1,dim = 2, lineColor=[1,0,0],showImediate=False)
    plot.plotMarginal(res2,dim=2, lineColor=[0,0,1],showImediate=False)
    a4.relim()
    a4.autoscale_view()
    
    a5 = plot.plt.axes([0.15,0.35/6,0.75,0.5/6])

    plot.plotMarginal(resAll,dim = 3, prior = False, CIpatch = False, lineColor = [0,0,0],showImediate=False)
    
    plot.plotMarginal(res1,dim = 3, lineColor=[1,0,0],showImediate=False)
    plot.plotMarginal(res2,dim = 3, lineColor=[0,0,1],showImediate=False)
    a5.set_xlim([0,1])
    a5.relim()
    a5.autoscale_view()
    
    plot.plt.draw()
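'''
A usage sketch for biasAna (the two n x 3 blocks below are hypothetical, e.g.
trials split by which side the correct answer was on):
'''
dataLeft  = np.array([[0.1, 45, 90], [0.2, 60, 90], [0.4, 80, 90]])
dataRight = np.array([[0.1, 50, 90], [0.2, 70, 90], [0.4, 85, 90]])
biasAna(dataLeft, dataRight)   # overlays the joint fit and the two separate fits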
Example #3
# imports required by this snippet; _p (priors), _b (borders), plot, poolData,
# getSigmoidHandle and psignifitCore are assumed to come from the psignifit package
import datetime as _dt
import warnings
from copy import deepcopy as _deepcopy

import numpy as np
def psignifit(data, optionsIn=None):
    """
    main function for fitting psychometric functions
    function result=psignifit(data,options)
    This function is the user interface for fitting psychometric functions to data.
        
    pass your data in the n x 3 matrix of the form:
    [x-value, number correct, number of trials]

    options should be a dictionary in which you set the options for your fit.
    You can find a full overview over the options in demo002
    
    The result of this function is a dictionary, which contains all information the 
    program produced for your fit. You can pass this as whole to all further 
    processing function provided with psignifit. Especially to the plot functions.
    You can find an explanation for all fields of the result in demo006
        
    To get an introduction to basic usage start with demo001
    """
    #--------------------------------------------------------------------------
    #input parsing
    #--------------------------------------------------------------------------
    # data
    data = np.array(data)
    # if column 1 holds proportions correct, convert to counts
    if np.all(np.logical_and(data[:,1] <= 1, data[:,1] >= 0)) and np.any(np.logical_and(data[:,1] > 0, data[:,1] < 1)):
        data[:,1] = np.round(data[:,2] * data[:,1])  # convert proportion correct to number correct
        
    # options
        
    if optionsIn is None:
        options = dict()
    else:
        options = _deepcopy(optionsIn)

    if not('sigmoidName' in options.keys()):
        options['sigmoidName'] = 'norm'
    
    if not('expType' in options.keys()):
        options['expType'] = 'YesNo'

    if not('estimateType' in options.keys()):
        options['estimateType'] = 'MAP'

    if not('confP' in options.keys()):
        options['confP'] = [.95, .9, .68]
        
    if not('instantPlot' in options.keys()):
        options['instantPlot'] = 0
        
    if not('setBordersType' in options.keys()):
        options['setBordersType'] = 0
        
    if not('maxBorderValue' in options.keys()):
        options['maxBorderValue'] = .00001
        
    if not('moveBorders' in options.keys()):
        options['moveBorders'] = 1
        
    if not('dynamicGrid' in options.keys()):
        options['dynamicGrid'] = 0
        
    if not('widthalpha' in options.keys()):
        options['widthalpha'] = .05
        
    if not('threshPC' in options.keys()):
        options['threshPC'] = .5

    if not('CImethod' in options.keys()):
        options['CImethod'] = 'percentiles'

    if not('gridSetType' in options.keys()):
        options['gridSetType'] = 'cumDist'
        
    if not('fixedPars' in options.keys()):
        options['fixedPars'] = np.full((5,1), np.nan)
        
    if not('nblocks' in options.keys()):
        options['nblocks'] = 25
    
    if not('useGPU' in options.keys()):
        options['useGPU'] = 0
    
    if not('poolMaxGap' in options.keys()):
        options['poolMaxGap'] = np.inf
    
    if not('poolMaxLength' in options.keys()):
        options['poolMaxLength'] = np.inf
    
    if not('poolxTol' in options.keys()):
        options['poolxTol'] = 0
    
    if not('betaPrior' in options.keys()):
        options['betaPrior'] = 10
    
    if not('verbose' in options.keys()):
        options['verbose'] = 0
        
    if not('stimulusRange' in options.keys()):
        options['stimulusRange'] = 0
        
    if not('fastOptim' in options.keys()):
        options['fastOptim'] = False
    
    if options['expType'] in ['2AFC', '3AFC', '4AFC']:            
        options['expN'] = int(float(options['expType'][0]))
        options['expType'] = 'nAFC'

    if options['expType'] == 'nAFC' and not('expN' in options.keys()):
        raise ValueError('For nAFC experiments please also pass the number of alternatives (options.expN)')
    
    if options['expType'] == 'YesNo':
        if not('stepN' in options.keys()):
            options['stepN'] = [40,40,20,20,20]
        if not('mbStepN' in options.keys()):
            options['mbStepN'] = [25,30, 10,10,15]
    elif options['expType'] == 'nAFC' or options['expType'] == 'equalAsymptote':
        if not('stepN' in options.keys()):
            options['stepN'] = [40,40,20,1,20]
        if not('mbStepN' in options.keys()):
            options['mbStepN'] = [30,40,10,1,20]
    else:
        raise ValueError('You specified an illegal experiment type')
    
    assert (max(data[:,0]) - min(data[:,0]) > 0), 'Your data does not have variance on the x-axis! This makes fitting impossible'
                 
                     
    '''
    log-space sigmoids
    We fit these functions on a log-transformed physical axis,
    because this makes the parameterization easier and the priors
    then match our expectations better.
    The flag is needed for setting the parameter bounds in setBorders.
    '''
    
    if options['sigmoidName'] in ['Weibull','logn','weibull']:
        options['logspace'] = 1
        assert min(data[:,0]) > 0, 'The sigmoid you specified is not defined for negative data points!'
    else:
        options['logspace'] = 0
        
    #if range was not given take from data
    if len(np.ravel(options['stimulusRange'])) <=1 :
        if options['logspace']:
            options['stimulusRange'] = np.array(np.log([min(data[:,0]),max(data[:,0])]))
        else :
            options['stimulusRange'] = np.array([min(data[:,0]),max(data[:,0])])

        stimRangeSet = False
    else:
        stimRangeSet = True
        if options['logspace']:
            options['stimulusRange'] = np.log(options['stimulusRange'])
    

    if not('widthmin' in options.keys()):
        if len(np.unique(data[:,0])) >1 and not(stimRangeSet):
            if options['logspace']:
                options['widthmin']  = min(np.diff(np.sort(np.unique(np.log(data[:,0])))))
            else:
                options['widthmin']  = min(np.diff(np.sort(np.unique(data[:,0]))))
        else:
            options['widthmin'] = 100*np.spacing(options['stimulusRange'][1])

    # add priors
    if options['threshPC'] != .5 and not('priors' in options.keys()):
        warnings.warn('psignifit:ThresholdPCchanged\n'\
            'You changed the percent correct corresponding to the threshold\n')
    
    if not('priors' in options.keys()):
        options['priors'] = _p.getStandardPriors(data, options)
    else:
        
        priors = _p.getStandardPriors(data, options)
        
        for ipar in range(5):
            if not callable(options['priors'][ipar]):
                options['priors'][ipar] = priors[ipar]
                
        _p.checkPriors(data, options)
    if options['dynamicGrid'] and not('GridSetEval' in options.keys()):
        options['GridSetEval'] = 10000
    if options['dynamicGrid'] and not('UniformWeight' in options.keys()):
        options['UniformWeight'] = 1

    '''
    initialize
    '''        
    
    #warning if many blocks were measured
    if (len(np.unique(data[:,0])) >= 25) and (np.ravel(options['stimulusRange']).size == 1):
        warnings.warn('psignifit:probablyAdaptive\n'\
            'The data you supplied contained >= 25 stimulus levels.\n'\
            'Did you sample adaptively?\n'\
            'If so please specify a range which contains the whole psychometric function in options.stimulusRange.\n'\
            'This will allow psignifit to choose an appropriate prior.\n'\
            'For now we use the standard heuristic, assuming that the psychometric function is covered by the stimulus levels,\n'\
            'which is frequently invalid for adaptive procedures!')
    
    if all(data[:,2] <= 5) and (np.ravel(options['stimulusRange']).size == 1):
        warnings.warn('psignifit:probablyAdaptive\n'\
            'All provided data blocks contain <= 5 trials \n'\
            'Did you sample adaptively?\n'\
            'If so please specify a range which contains the whole psychometric function in options.stimulusRange.\n'\
            'This will allow psignifit to choose an appropriate prior.\n'\
            'For now we use the standard heuristic, assuming that the psychometric function is covered by the stimulus levels,\n'\
            'which is frequently invalid for adaptive procedures!')
    
    #pool data if necessary: more than options.nblocks blocks or only 1 trial per block
    if np.max(data[:,2]) == 1 or len(data) > options['nblocks']:
        warnings.warn('psignifit:pooling\n'\
            'We pooled your data, to avoid problems with n=1 blocks or to save time fitting because you have a lot of blocks\n'\
            'You can force acceptance of your blocks by increasing options.nblocks')
        data = poolData(data, options)
    
    
    # create function handle of sigmoid
    options['sigmoidHandle'] = getSigmoidHandle(options)
    
    # borders of integration
    if 'borders' in options.keys():
        borders = _b.setBorders(data, options)
        options['borders'][np.isnan(options['borders'])] = borders[np.isnan(options['borders'])]
    else:
        options['borders'] = _b.setBorders(data,options)
    
    border_idx = np.where(~np.isnan(options['fixedPars']))
    if border_idx[0].size > 0:
        options['borders'][border_idx[0], 0] = options['fixedPars'][border_idx]
        options['borders'][border_idx[0], 1] = options['fixedPars'][border_idx]
            
    # normalize priors to first choice of borders
    options['priors'] = _p.normalizePriors(options)
    if options['moveBorders']:
        options['borders'] = _b.moveBorders(data, options)
    
    ''' core '''
    result = psignifitCore(data,options)
        
    ''' after processing '''
    # check that the marginals go to nearly 0 at the borders of the grid
    if options['verbose'] > -5:
    
        if result['marginals'][0][0] * result['marginalsW'][0][0] > .001:
            warnings.warn('psignifit:borderWarning\n'\
                'The marginal for the threshold is not near 0 at the lower border.\n'\
                'This indicates that smaller Thresholds would be possible.')
        if result['marginals'][0][-1] * result['marginalsW'][0][-1] > .001:
            warnings.warn('psignifit:borderWarning\n'\
                'The marginal for the threshold is not near 0 at the upper border.\n'\
                'This indicates that your data is not sufficient to exclude much higher thresholds.\n'\
                'Refer to the paper or the manual for more info on this topic.')
        if result['marginals'][1][0] * result['marginalsW'][1][0] > .001:
            warnings.warn('psignifit:borderWarning\n'\
                'The marginal for the width is not near 0 at the lower border.\n'\
                'This indicates that your data is not sufficient to exclude much lower widths.\n'\
                'Refer to the paper or the manual for more info on this topic.')
        if result['marginals'][1][-1] * result['marginalsW'][1][-1] > .001:
            warnings.warn('psignifit:borderWarning\n'\
                'The marginal for the width is not near 0 at the upper border.\n'\
                'This indicates that your data is not sufficient to exclude much higher widths.\n'\
                'Refer to the paper or the manual for more info on this topic.')
    
    result['timestamp'] = _dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    
    if options['instantPlot']:
        plot.plotPsych(result)
    
       
    
    return result
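'''
A hedged sketch of reading out the fit from the returned dictionary (the key
names 'Fit' and 'conf_Intervals' and the parameter order
[threshold, width, lambda, gamma, eta] are assumptions about the result layout):
'''
# fit = result['Fit']                 # 5-element point estimate (assumed key)
# threshold, width = fit[0], fit[1]
# ci = result['conf_Intervals']       # credible intervals per parameter (assumed key)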
Example #4
# imports assumed for this snippet; psignifit, plotPsych and plotMarginal are
# assumed to come from the psignifit package
import numpy as np
# (snippet truncated: earlier rows of the data matrix are missing)
data = np.array([
            [1.3981,   1.0000,   2.0000],
            [1.5379,   1.0000,   2.0000],
            [1.6917,   3.0000,   3.0000],
            [1.5225,   3.0000,   3.0000],
            [1.3703,   2.0000,   3.0000]])

# We fit this assuming the same lapse rate for yes and for no
options = dict()
options['expType'] = 'equalAsymptote'
# by default this gives us a cumulative normal fit, which is fine for now.

res = psignifit(data, options)

# We first have a look at the fitted function
#plt.figure()
plotPsych(res)

'''
 You should notice that the percent correct is larger than 50 and we did
 not measure a stimulus level clearly below threshold. Thus it might be
 that the threshold is below our data, as is actually the case in our
 example.
 This is a common problem with adaptive procedures, which do not explore
 the full possible stimulus range. Then our heuristic for the prior may
 easily fail.

 You can see how the prior influences the result by looking at the
 marginal plot for the threshold as well:
'''
#plt.figure()
plotMarginal(res,0)
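'''
If, as the marginal suggests, the sampled levels do not cover the whole
psychometric function, the fix recommended above is to state the stimulus
range explicitly (the bounds here are made up for illustration):
'''
opts2 = {'expType': 'equalAsymptote', 'stimulusRange': [0.5, 2.5]}  # assumed bounds
res2 = psignifit(data, opts2)
plotMarginal(res2, 0)   # threshold marginal under the wider prior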
Example #5
# (snippet truncated: 'data' and 'options' are defined earlier in the demo;
# psignifit and plotPsych are assumed to come from the psignifit package)
 Now we are ready to run the main function, which fits the function to the
 data. You obtain a dict, which contains all the information about the
 fitted function and can be passed to the many other functions in this
 toolbox to further process the results.
"""

res = psignifit(data, options)

"""
 --- VISUALIZE THE RESULTS ---
 
 For example you can use the result dict res to plot your psychometric
 function with the data:
"""

plotPsych(res)

"""
 --- REMARK FOR INSUFFICIENT MEMORY ISSUES ---
 
 Especially for YesNo experiments the result structs can become rather
 large. If you run into memory issues you can drop the Posterior from the
 result with the following command.
"""

result = dict(res)
del result['Posterior']
del result['weight']

"""
 Without these fields you will not be able to use the 2D Bayesian plots
Example #6
# imports assumed for this snippet (the trial counts rightTurnCorr etc. are
# defined earlier in the original script):
import numpy as np
import psignifit as ps
from psignifit import psigniplot   # plotting module path assumed
respTotal = (leftTurnTotal + rightTurnTotal) - (rightNoResp + leftNoResp)
total = (leftTurnTotal + rightTurnTotal)

for num, denom, title in zip([rightTurnCorr, rightTurnIncorrect, rightNoResp,
                              leftTurnCorr, leftTurnIncorrect, leftNoResp,
                              (leftTurnCorr + rightTurnCorr), (leftTurnCorr + rightTurnCorr)],
                             [rightTurnTotal, rightTurnTotal, rightTurnTotal,
                              leftTurnTotal, leftTurnTotal, leftTurnTotal, respTotal, total],
                             ['Turn R % Correct:', 'Turn R % Incorrect:', 'Turn R % No Resp:',
                              'L % Correct:', 'L % Incorrect:', 'L % No Resp:',
                              'Total Correct, given Response:', 'Total Correct:']):

    print(title + '   ' + str(round(num / denom, 2)))



data = np.array([[-1, rightTurnIncorrect, rightTurnTotal],
                 [0, rightNoResp, rightTurnTotal],
                 [1, rightTurnCorr, rightTurnTotal]])

options = dict()

res = ps.psignifit(data, options)

psigniplot.plotPsych(res)

'''
How can we also characterize the bias that is most likely for an animal model?
'''
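'''
One hedged way to probe that bias: reuse the biasAna helper from Example #2 on
the trials split by turn direction (the per-side blocks below are hypothetical;
real code would build them from the session data):
'''
dataLeftTurn  = np.array([[0.25, 40, 80], [0.5, 55, 80], [1.0, 70, 80]])
dataRightTurn = np.array([[0.25, 48, 80], [0.5, 64, 80], [1.0, 76, 80]])
biasAna(dataLeftTurn, dataRightTurn)   # compares guess-rate (gamma) marginals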