def bootstrap(data, start=None, nsamples=2000, nafc=2, sigmoid="logistic",
              core="ab", priors=None, cuts=None, parametric=True,
              gammaislambda=False):
    """Parametric bootstrap of a psychometric function.

    Parameters
    ----------
    data : list of lists or array
        The first column should be stimulus intensity, the second column
        should be the number of correct responses (in 2AFC) or the number
        of yes-responses (in Yes/No), and the third column should be the
        number of trials. See also the example section below.
    start : sequence of floats, one per model parameter
        Generating values for the bootstrap samples. If this is None, the
        generating value will be the MAP estimate. Length should be 4 for
        Yes/No and 3 for nAFC.
    nsamples : int
        Number of bootstrap samples to be drawn.
    nafc : int
        Number of alternatives for nAFC tasks. If nafc==1, a Yes/No task
        is assumed.
    sigmoid : string
        Name of the sigmoid to be fitted. Valid sigmoids include:

            logistic      (1+exp(-x))**-1   [Default]
            gauss         Phi(x)
            gumbel_l      1 - exp(-exp(x))
            gumbel_r      exp(-exp(-x))
            exponential   x>0: 1 - exp(-x); else: 0
            cauchy        atan(x)/pi + 0.5
            id            x; only useful in conjunction with the
                          NakaRushton core

        See `swignifit.utility.available_sigmoids()` for all available
        sigmoids.
    core : string
        "core"-type of the psychometric function. Valid choices include:

            ab            (x-a)/b   [Default]
            mw%g          midpoint and width, with "%g" a number larger
                          than 0 and less than 0.5. mw%g corresponds to a
                          parameterization in terms of midpoint and width
                          of the rising part of the sigmoid. This width is
                          defined as the length of the interval on which
                          the sigmoidal part rises from "%g" to 1-"%g".
            linear        a + b*x
            log           a + b*log(x)
            weibull       2*s*m*(log(x)-log(m))/log(2) + log(log(2));
                          gives a Weibull if combined with the gumbel_l
                          sigmoid and a reverse Weibull if combined with
                          the gumbel_r sigmoid.
            poly          (x/a)**b; gives a Weibull if combined with the
                          exponential sigmoid.
            NakaRushton   the Naka-Rushton nonlinearity; should only be
                          used with the id sigmoid.

        See `swignifit.utility.available_cores()` for all available
        cores.
    priors : sequence of strings, one per model parameter
        Constraints on the likelihood estimation, expressed as a list of
        prior names. Valid prior choices include:

            Uniform(%g,%g)     uniform distribution on an interval
            Gauss(%g,%g)       Gaussian distribution with mean and
                               standard deviation
            Beta(%g,%g)        Beta distribution
            Gamma(%g,%g)       Gamma distribution
            nGamma(%g,%g)      Gamma distribution on the negative axis
            invGamma(%g,%g)    inverse Gamma distribution
            ninvGamma(%g,%g)   inverse Gamma distribution on the negative
                               axis

        If an invalid prior or `None` is given, no constraints are
        imposed at all. See `swignifit.utility.available_priors()` for
        all available priors.
    cuts : a single number or a sequence of numbers
        Cuts indicating the performances that should be considered
        'threshold' performances. For example, in a 2AFC task with
        cuts==0.5, the 'threshold' is somewhere around 75% correct
        performance, depending on the lapse rate.
    parametric : boolean
        If `True`, perform a parametric bootstrap; otherwise perform a
        non-parametric bootstrap.
    gammaislambda : boolean
        Set the gamma == lambda prior.

    Returns
    -------
    (samples, estimates, deviance, threshold, th_bias, th_acc,
     slope, sl_bias, sl_acc, Rpd, Rkd, outliers, influential)

    samples : numpy array, shape (nsamples, nblocks)
        the bootstrap sampled data
    estimates : numpy array, shape (nsamples, nparams)
        estimated parameters associated with the sampled data sets
    deviance : numpy array, length nsamples
        deviances of the bootstrapped data sets
    threshold : numpy array, shape (nsamples, ncuts)
        threshold at each cut for each bootstrapped data set
    th_bias : numpy array, shape (ncuts,)
        bias term associated with the threshold
    th_acc : numpy array, shape (ncuts,)
        acceleration constant associated with the threshold
    slope : numpy array, shape (nsamples, ncuts)
        slope at each cut for each bootstrapped data set
    sl_bias : numpy array, shape (ncuts,)
        bias term associated with the slope
    sl_acc : numpy array, shape (ncuts,)
        acceleration constant associated with the slope
    Rpd : numpy array, length nsamples
        correlations between model prediction and deviance residuals
    Rkd : numpy array, length nsamples
        correlations between block index and deviance residuals
    outliers : numpy array of booleans, length nblocks
        points that are outliers
    influential : numpy array, length nblocks
        points that are influential observations

    Example
    -------
    >>> x = [float(2*k) for k in xrange(6)]
    >>> k = [34,32,40,48,50,48]
    >>> n = [50]*6
    >>> d = [[xx,kk,nn] for xx,kk,nn in zip(x,k,n)]
    >>> priors = ('flat','flat','Uniform(0,0.1)')
    >>> samples,est,D,thres,thbias,thacc,slope,slbias,slacc,Rpd,Rkd,out,influ = bootstrap(d,nsamples=2000,priors=priors)
    >>> np.mean(est[:,0])
    2.7547034408466811
    >>> np.mean(est[:,1])
    1.4057297989923003
    """
    dataset, pmf, nparams = sfu.make_dataset_and_pmf(
        data, nafc, sigmoid, core, priors, gammaislambda=gammaislambda)
    cuts = sfu.get_cuts(cuts)
    ncuts = len(cuts)
    if start is not None:
        start = sfu.get_start(start, nparams)

    bs_list = sfr.bootstrap(nsamples, dataset, pmf, cuts, start, True,
                            parametric)
    jk_list = sfr.jackknifedata(dataset, pmf)

    nblocks = dataset.getNblocks()

    # construct the massive tuple of return values
    samples = np.zeros((nsamples, nblocks), dtype=np.int32)
    estimates = np.zeros((nsamples, nparams))
    deviance = np.zeros(nsamples)
    thres = np.zeros((nsamples, ncuts))
    slope = np.zeros((nsamples, ncuts))
    Rpd = np.zeros(nsamples)
    Rkd = np.zeros(nsamples)
    for row_index in xrange(nsamples):
        samples[row_index] = bs_list.getData(row_index)
        estimates[row_index] = bs_list.getEst(row_index)
        deviance[row_index] = bs_list.getdeviance(row_index)
        thres[row_index] = [bs_list.getThres_byPos(row_index, j)
                            for j in xrange(ncuts)]
        slope[row_index] = [bs_list.getSlope_byPos(row_index, j)
                            for j in xrange(ncuts)]
        Rpd[row_index] = bs_list.getRpd(row_index)
        Rkd[row_index] = bs_list.getRkd(row_index)

    # bias and acceleration terms for the threshold and slope, one per cut
    thacc = np.zeros(ncuts)
    thbias = np.zeros(ncuts)
    slacc = np.zeros(ncuts)
    slbias = np.zeros(ncuts)
    for cut in xrange(ncuts):
        thacc[cut] = bs_list.getAcc_t(cut)
        thbias[cut] = bs_list.getBias_t(cut)
        slacc[cut] = bs_list.getAcc_s(cut)
        slbias[cut] = bs_list.getBias_s(cut)

    # 95% percentile limits of the parameter estimates; the jackknife
    # uses these to decide which blocks are influential
    ci_lower = sfr.vector_double(nparams)
    ci_upper = sfr.vector_double(nparams)
    for param in xrange(nparams):
        ci_lower[param] = bs_list.getPercentile(0.025, param)
        ci_upper[param] = bs_list.getPercentile(0.975, param)

    outliers = np.zeros(nblocks, dtype=bool)
    influential = np.zeros(nblocks)
    for block in xrange(nblocks):
        outliers[block] = jk_list.outlier(block)
        influential[block] = jk_list.influential(block, ci_lower, ci_upper)

    return (samples, estimates, deviance, thres, thbias, thacc,
            slope, slbias, slacc, Rpd, Rkd, outliers, influential)
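# Usage sketch (illustrative, not part of the library): reduce the
# `thres` array returned by bootstrap() to a rough percentile confidence
# interval for the threshold. A full BCa interval would also apply the
# bias/acceleration corrections (thbias, thacc); this sketch skips them.
def example_threshold_ci():
    # Data as in the docstring example: 6 blocks of a 2AFC task.
    x = [float(2 * k) for k in range(6)]
    k = [34, 32, 40, 48, 50, 48]
    n = [50] * 6
    d = [[xx, kk, nn] for xx, kk, nn in zip(x, k, n)]
    res = bootstrap(d, nsamples=2000,
                    priors=('flat', 'flat', 'Uniform(0,0.1)'))
    thres = res[3]  # shape (nsamples, ncuts)
    lo, hi = np.percentile(thres[:, 0], [2.5, 97.5])
    print("95%% CI for threshold at cut 0: [%.3f, %.3f]" % (lo, hi))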
def generate_test_bootstrap_list():
    data = TestData.generate_test_dataset()
    psi = TestPsychometric.generate_test_model()
    cuts = sfr.vector_double([1, 0.5])
    return sfr.bootstrap(999, data, psi, cuts)
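# Usage sketch (illustrative only, not part of the test suite): the
# object returned by sfr.bootstrap can be queried per sample with the
# same accessors that bootstrap() above relies on.
def example_inspect_bootstrap_list():
    bs_list = generate_test_bootstrap_list()
    print(bs_list.getEst(0))        # parameter estimates for sample 0
    print(bs_list.getdeviance(0))   # deviance of the fit to sample 0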