def psignifitCore(data, options):
    """
    This is the Core processing of psignifit, call the frontend psignifit!

    function result = psignifitCore(data, options)
    Data nx3 matrix with values [x, percCorrect, NTrials]

    sigmoid should be a handle to a function, which accepts
    X, parameters as inputs and gives back a value in [0,1]. ideally
    parameters(1) should correspond to the threshold and parameters(2) to
    the width (distance containing 95% of the function).

    Returns a result dict holding the grid ('X1D'), the normalized
    'Posterior' with its 'weight' and 'integral', per-parameter marginals,
    the point estimate 'Fit', and (unless fastOptim) 'conf_Intervals'.
    """
    d = len(options['borders'])  # number of parameters (one border row each)
    result = {'X1D': [], 'marginals': [], 'marginalsX': [], 'marginalsW': []}

    '''Choose grid dynamically from data'''
    if options['dynamicGrid']:
        # get seed from linear regression with logit transform
        Seed = getSeed(data, options)

        # further optimize the loglikelihood to obtain a good estimate of the MAP
        if options['expType'] == 'YesNo':
            calcSeed = lambda X: -_l.logLikelihood(data, options,
                                                   X[0], X[1], X[2], X[3], X[4])
            Seed = scipy.optimize.fmin(func=calcSeed, x0=Seed)
        elif options['expType'] == 'nAFC':
            # gamma is fixed at 1/expN for nAFC, so only 4 parameters are free
            calcSeed = lambda X: -_l.logLikelihood(data, options,
                                                   X[0], X[1], X[2],
                                                   1 / options['expN'], X[3])
            # BUGFIX: fmin needs a flat 4-element start vector
            # (threshold, width, lambda, eta); the old code passed the
            # ragged list [Seed[0:2], Seed[4]].
            Seed = scipy.optimize.fmin(func=calcSeed,
                                       x0=np.append(Seed[0:3], Seed[4]))
            # BUGFIX: re-insert the fixed gamma into a flat 5-parameter
            # vector (the old nested-list form was flagged by the original
            # "ToDo check whether row or column vector" comment).
            Seed = np.hstack((Seed[0:3], [1 / options['expN']], Seed[3:4]))
        result['X1D'] = gridSetting(data, options, Seed)
    else:  # for types which do not need a MAP estimate
        if (options['gridSetType'] == 'priorlike'
                or options['gridSetType'] == 'STD'
                or options['gridSetType'] == 'exp'
                or options['gridSetType'] == '4power'):
            result['X1D'] = gridSetting(data, options)
        else:  # Use a linear grid
            for idx in range(0, d):
                # If there is an actual Interval
                if options['borders'][idx, 0] < options['borders'][idx, 1]:
                    result['X1D'].append(np.linspace(options['borders'][idx, 0],
                                                     options['borders'][idx, 1],
                                                     num=options['stepN'][idx]))
                # if parameter was fixed
                else:
                    result['X1D'].append(np.array([options['borders'][idx, 0]]))

    '''Evaluate likelihood and form it into a posterior'''
    (result['Posterior'], result['logPmax']) = _l.likelihood(data, options,
                                                             result['X1D'])
    result['weight'] = getWeights(result['X1D'])
    # normalize so the posterior integrates to 1 over the grid
    integral = np.sum(np.array(result['Posterior'][:]) * np.array(result['weight'][:]))
    result['Posterior'] = result['Posterior'] / integral
    result['integral'] = integral

    '''Compute marginal distributions'''
    for idx in range(0, d):
        m, mX, mW = marginalize(result, np.array([idx]))
        result['marginals'].append(m)
        result['marginalsX'].append(mX)
        result['marginalsW'].append(mW)

    result['marginals'] = np.squeeze(result['marginals'])
    result['marginalsX'] = np.squeeze(result['marginalsX'])
    result['marginalsW'] = np.squeeze(result['marginalsW'])

    '''Find point estimate'''
    if options['estimateType'] in ['MAP', 'MLE']:
        # get MLE estimate: start the optimizer at the most likely grid point
        index = np.where(result['Posterior'] == np.max(result['Posterior'].ravel()))
        Fit = np.zeros([d, 1])
        for idx in range(0, d):
            Fit[idx] = result['X1D'][idx][index[idx]]

        if options['expType'] == 'YesNo':
            # all 5 parameters free; the second argument f is unused
            fun = lambda X, f: -_l.logLikelihood(data, options,
                                                 [X[0], X[1], X[2], X[3], X[4]])
            x0 = _deepcopy(Fit)
            a = None
        elif options['expType'] == 'nAFC':
            # gamma is fixed to f = 1/expN; optimize the remaining 4 parameters
            fun = lambda X, f: -_l.logLikelihood(data, options,
                                                 [X[0], X[1], X[2], f, X[3]])
            x0 = _deepcopy(Fit[0:3])  # Fit[3] (gamma) is excluded
            x0 = np.append(x0, _deepcopy(Fit[4]))
            a = np.array([1 / options['expN']])
        elif options['expType'] == 'equalAsymptote':
            # gamma is tied to lambda inside logLikelihood; f = NaN signals this
            fun = lambda X, f: -_l.logLikelihood(data, options,
                                                 [X[0], X[1], X[2], f, X[3]])
            x0 = _deepcopy(Fit[0:3])
            x0 = np.append(x0, _deepcopy(Fit[4]))
            a = np.array([np.nan])
        else:
            raise ValueError('unknown expType')

        if options['fastOptim']:
            Fit = scipy.optimize.fmin(fun, x0, args=(a,), xtol=0, ftol=0,
                                      maxiter=100, maxfun=100)
            warnings.warn('changed options for optimization')
        else:
            Fit = scipy.optimize.fmin(fun, x0, args=(a,), disp=False)

        # expand the optimized vector back to the full 5-parameter form
        if options['expType'] == 'YesNo':
            result['Fit'] = _deepcopy(Fit)
        elif options['expType'] == 'nAFC':
            fit = _deepcopy(Fit[0:3])
            fit = np.append(fit, np.array([1 / options['expN']]))
            fit = np.append(fit, _deepcopy(Fit[3]))
            result['Fit'] = fit
        elif options['expType'] == 'equalAsymptote':
            fit = _deepcopy(Fit[0:3])
            fit = np.append(fit, Fit[2])  # gamma == lambda for equal asymptotes
            fit = np.append(fit, Fit[3])
            result['Fit'] = fit
        else:
            raise ValueError('unknown expType')

        # overwrite with any user-fixed parameter values
        par_idx = np.where(np.isnan(options['fixedPars']) == False)
        for idx in par_idx[0]:
            result['Fit'][idx] = options['fixedPars'][idx]

    elif options['estimateType'] == 'mean':
        # get mean estimate: expectation of each parameter under its marginal
        Fit = np.zeros([d, 1])
        # BUGFIX: "range[0:d]" subscripted the range builtin (TypeError);
        # it must be a call.
        for idx in range(0, d):
            Fit[idx] = np.sum(result['marginals'][idx]
                              * result['marginalsW'][idx]
                              * result['marginalsX'][idx])
        result['Fit'] = _deepcopy(Fit)

    '''Include input into result'''
    result['options'] = options  # no copies here, because they are not changing
    result['data'] = data

    '''Compute confidence intervals'''
    # BUGFIX: "~options['fastOptim']" is bitwise NOT (~0 == -1, ~1 == -2,
    # both truthy), so the guard could never skip; use logical "not".
    if not options['fastOptim']:
        result['conf_Intervals'] = getConfRegion(result)

    return result
def psignifitCore(data, options):
    """
    This is the Core processing of psignifit, call the frontend psignifit!

    function result = psignifitCore(data, options)
    Data nx3 matrix with values [x, percCorrect, NTrials]

    sigmoid should be a handle to a function, which accepts
    X, parameters as inputs and gives back a value in [0,1]. ideally
    parameters(1) should correspond to the threshold and parameters(2) to
    the width (distance containing 95% of the function).

    Returns a result dict holding the grid ('X1D'), the normalized
    'Posterior' with its 'weight' and 'integral', per-parameter marginals,
    the point estimate 'Fit', and (unless fastOptim) 'conf_Intervals'.
    """
    d = len(options['borders'])  # number of parameters (one border row each)
    result = {'X1D': [], 'marginals': [], 'marginalsX': [], 'marginalsW': []}

    '''Choose grid dynamically from data'''
    if options['dynamicGrid']:
        # get seed from linear regression with logit transform
        Seed = getSeed(data, options)

        # further optimize the loglikelihood to obtain a good estimate of the MAP
        if options['expType'] == 'YesNo':
            calcSeed = lambda X: -l.logLikelihood(data, options,
                                                  X[0], X[1], X[2], X[3], X[4])
            Seed = scipy.optimize.fmin(func=calcSeed, x0=Seed)
        elif options['expType'] == 'nAFC':
            # gamma is fixed at 1/expN for nAFC, so only 4 parameters are free
            calcSeed = lambda X: -l.logLikelihood(data, options,
                                                  X[0], X[1], X[2],
                                                  1 / options['expN'], X[3])
            # BUGFIX: fmin needs a flat 4-element start vector
            # (threshold, width, lambda, eta); the old code passed the
            # ragged list [Seed[0:2], Seed[4]].
            Seed = scipy.optimize.fmin(func=calcSeed,
                                       x0=np.append(Seed[0:3], Seed[4]))
            # BUGFIX: re-insert the fixed gamma into a flat 5-parameter
            # vector (the old nested-list form was flagged by the original
            # "ToDo check whether row or column vector" comment).
            Seed = np.hstack((Seed[0:3], [1 / options['expN']], Seed[3:4]))
        result['X1D'] = gridSetting(data, options, Seed)
    else:  # for types which do not need a MAP estimate
        if (options['gridSetType'] == 'priorlike'
                or options['gridSetType'] == 'STD'
                or options['gridSetType'] == 'exp'
                or options['gridSetType'] == '4power'):
            result['X1D'] = gridSetting(data, options)
        else:  # Use a linear grid
            for idx in range(0, d):
                # If there is an actual Interval
                if options['borders'][idx, 0] < options['borders'][idx, 1]:
                    result['X1D'].append(np.linspace(options['borders'][idx, 0],
                                                     options['borders'][idx, 1],
                                                     num=options['stepN'][idx]))
                # if parameter was fixed
                else:
                    result['X1D'].append(np.array([options['borders'][idx, 0]]))

    '''Evaluate likelihood and form it into a posterior'''
    (result['Posterior'], result['logPmax']) = l.likelihood(data, options,
                                                            result['X1D'])
    result['weight'] = getWeights(result['X1D'])
    # normalize so the posterior integrates to 1 over the grid
    integral = np.sum(np.array(result['Posterior'][:]) * np.array(result['weight'][:]))
    result['Posterior'] = result['Posterior'] / integral
    result['integral'] = integral

    '''Compute marginal distributions'''
    for idx in range(0, d):
        m, mX, mW = marginalize(result, np.array([idx]))
        result['marginals'].append(m)
        result['marginalsX'].append(mX)
        result['marginalsW'].append(mW)

    result['marginals'] = np.squeeze(result['marginals'])
    result['marginalsX'] = np.squeeze(result['marginalsX'])
    result['marginalsW'] = np.squeeze(result['marginalsW'])

    '''Find point estimate'''
    if options['estimateType'] in ['MAP', 'MLE']:
        # get MLE estimate: start the optimizer at the most likely grid point
        index = np.where(result['Posterior'] == np.max(result['Posterior'].ravel()))
        Fit = np.zeros([d, 1])
        for idx in range(0, d):
            Fit[idx] = result['X1D'][idx][index[idx]]

        if options['expType'] == 'YesNo':
            # all 5 parameters free; the second argument f is unused
            fun = lambda X, f: -l.logLikelihood(data, options,
                                                [X[0], X[1], X[2], X[3], X[4]])
            x0 = deepcopy(Fit)
            a = None
        elif options['expType'] == 'nAFC':
            # gamma is fixed to f = 1/expN; optimize the remaining 4 parameters
            fun = lambda X, f: -l.logLikelihood(data, options,
                                                [X[0], X[1], X[2], f, X[3]])
            x0 = deepcopy(Fit[0:3])  # Fit[3] (gamma) is excluded
            x0 = np.append(x0, deepcopy(Fit[4]))
            a = np.array([1 / options['expN']])
        elif options['expType'] == 'equalAsymptote':
            # gamma is tied to lambda inside logLikelihood; f = NaN signals this
            fun = lambda X, f: -l.logLikelihood(data, options,
                                                [X[0], X[1], X[2], f, X[3]])
            x0 = deepcopy(Fit[0:3])
            x0 = np.append(x0, deepcopy(Fit[4]))
            a = np.array([np.nan])
        else:
            raise ValueError('unknown expType')

        if options['fastOptim']:
            Fit = scipy.optimize.fmin(fun, x0, args=(a,), xtol=0, ftol=0,
                                      maxiter=100, maxfun=100)
            warnings.warn('changed options for optimization')
        else:
            Fit = scipy.optimize.fmin(fun, x0, args=(a,), disp=True)

        # expand the optimized vector back to the full 5-parameter form
        if options['expType'] == 'YesNo':
            result['Fit'] = deepcopy(Fit)
        elif options['expType'] == 'nAFC':
            fit = deepcopy(Fit[0:3])
            fit = np.append(fit, np.array([1 / options['expN']]))
            fit = np.append(fit, deepcopy(Fit[3]))
            result['Fit'] = fit
        elif options['expType'] == 'equalAsymptote':
            fit = deepcopy(Fit[0:3])
            fit = np.append(fit, Fit[2])  # gamma == lambda for equal asymptotes
            fit = np.append(fit, Fit[3])
            result['Fit'] = fit
        else:
            raise ValueError('unknown expType')

        # overwrite with any user-fixed parameter values
        # BUGFIX: np.where returns a TUPLE of index arrays; iterate the
        # index array par_idx[0], not the tuple itself.
        par_idx = np.where(np.isnan(options['fixedPars']) == False)
        for idx in par_idx[0]:
            result['Fit'][idx] = options['fixedPars'][idx]

    elif options['estimateType'] == 'mean':
        # get mean estimate: expectation of each parameter under its marginal
        Fit = np.zeros([d, 1])
        # BUGFIX: "range[0:d]" subscripted the range builtin (TypeError);
        # it must be a call.
        for idx in range(0, d):
            Fit[idx] = np.sum(result['marginals'][idx]
                              * result['marginalsW'][idx]
                              * result['marginalsX'][idx])
        result['Fit'] = deepcopy(Fit)

    '''Include input into result'''
    result['options'] = options  # no copies here, because they are not changing
    result['data'] = data

    '''Compute confidence intervals'''
    # BUGFIX: "~options['fastOptim']" is bitwise NOT (~0 == -1, ~1 == -2,
    # both truthy), so the guard could never skip; use logical "not".
    if not options['fastOptim']:
        result['conf_Intervals'] = getConfRegion(result)

    return result
# --- Script-level fragment: option defaults, MATLAB .mat loading, and ad-hoc
# numpy broadcasting experiments. Relies on names defined elsewhere in the
# project/session: options, data, importer, getSigmoidHandle, logLikelihood.

# Default psignifit option values (presumably mirroring the MATLAB defaults
# — TODO confirm against the reference implementation).
options['dynamicGrid'] = 0            # do not adapt the grid to the data
options['widthalpha'] = 0.05
options['threshPC'] = 0.5             # percent-correct level defining the threshold
options['CImethod'] = 'percentiles'
options['gridSetType'] = 'cumDist'
options['nblocks'] = 25
options['verbose'] = 0
#options['stimulusRange'] = 0
options['sigmoidHandle'] = getSigmoidHandle(options)  # sigmoid callable built from options
options['fastOptim'] = 0
options['mbStepN'] = np.array([30, 40, 10, 1, 20])    # presumably grid steps per parameter — confirm
options['logspace'] = 0

# Load reference data/options saved from MATLAB (importer is provided
# elsewhere; presumably scipy.io — verify).
temp_data = importer.loadmat('variables.mat', struct_as_record=True, matlab_compatible=True)
temp_options = importer.loadmat('options.mat', struct_as_record=False, squeeze_me=True)

var = np.array([0.00466446, 0.00473373, 0, 0.5, 0])
# NOTE(review): bare expression — the negated log-likelihood is computed and
# discarded; this may originally have been an assignment (e.g.
# "llh = -logLikelihood(...)") before the file's line breaks were lost — confirm.
- logLikelihood(data, options, var)

# Broadcasting experiment: tile per-axis factors up to the full 4-D shape.
ar = np.random.random([15, 10, 5, 3]);
alpha = np.ones([15, 1, 1, 1])
alpha = np.tile(alpha, ar.shape[1:])                   # -> shape (15, 10, 5, 3)
beta = np.ones([1, 10, 1, 1]) * 0.5
beta[0, 2, 0, 0] = 0.3
beta = np.tile(beta, (ar.shape[0], 1) + ar.shape[2:])  # -> shape (15, 10, 5, 3)
ar += alpha