def mcmc(
    data,
    start=None,
    nsamples=10000,
    nafc=2,
    sigmoid="logistic",
    core="mw0.1",
    priors=None,
    stepwidths=None,
    sampler="MetropolisHastings",
    gammaislambda=False,
):
    """Markov Chain Monte Carlo sampling for a psychometric function.

    Parameters
    ----------
    data :
        List of lists or array; columns are stimulus intensity, number of
        correct (nAFC) / yes (Yes-No) responses, and number of trials.
    start : sequence of floats, length = number of model parameters
        Starting values for the chain; if None the MAP estimate is used.
    nsamples : int
        Number of posterior samples to draw.
    nafc : int
        Number of response alternatives (nafc==1 means a Yes/No task).
    sigmoid : string
        Name of the sigmoid to fit (see swignifit.utility.available_sigmoids()).
    core : string
        "core"-type of the psychometric function
        (see swignifit.utility.available_cores()).
    priors : sequence of strings, length = number of parameters
        Prior specifications (see swignifit.utility.available_priors()).
    stepwidths : sequence of floats, length = number of model parameters
        Proposal standard deviations, or a 2-d pilot sample.
    sampler : string
        MCMC sampler name (see swignifit.utility.available_samplers()).
    gammaislambda : boolean
        Impose the gamma == lambda prior.

    Returns
    -------
    (estimates, deviance, posterior_predictive_data,
     posterior_predictive_deviances, posterior_predictive_Rpd,
     posterior_predictive_Rkd, logposterior_ratios, accept_rate)

    Raises
    ------
    sfu.PsignifitException
        For an unknown sampler name, a stepwidth count that does not match
        the number of parameters, or a pilot sample given to a sampler that
        cannot use one.
    """
    dataset, pmf, nparams = sfu.make_dataset_and_pmf(
        data, nafc, sigmoid, core, priors, gammaislambda=gammaislambda
    )

    if start is not None:
        start = sfu.get_start(start, nparams)
    else:
        # No explicit starting point: use the MAP estimate.
        opt = sfr.PsiOptimizer(pmf, dataset)
        start = opt.optimize(pmf, dataset)

    proposal = sfr.GaussRandom()
    # Membership test on the dict directly (was `.keys()`).
    if sampler not in sfu.sampler_dict:
        raise sfu.PsignifitException("The sampler: " + sampler + " is not available.")
    sampler = sfu.sampler_dict[sampler](pmf, dataset, proposal)
    sampler.setTheta(start)

    # BUGFIX: was `stepwidths != None`; identity test avoids element-wise
    # comparison when callers pass an array-like.
    if stepwidths is not None:
        stepwidths = np.array(stepwidths)
        if len(stepwidths.shape) == 2:
            # A 2-d array is interpreted as a pilot sample.
            if isinstance(sampler, sfr.GenericMetropolis):
                sampler.findOptimalStepwidth(sfu.make_pilotsample(stepwidths))
            elif isinstance(sampler, sfr.MetropolisHastings):
                sampler.setStepSize(sfr.vector_double(stepwidths.std(0)))
            else:
                raise sfu.PsignifitException(
                    "You provided a pilot sample but the selected sampler does not support pilot samples"
                )
        elif len(stepwidths) != nparams:
            raise sfu.PsignifitException(
                "You specified '" + str(len(stepwidths))
                + "' stepwidth(s), but there are '" + str(nparams) + "' parameters."
            )
        else:
            if isinstance(sampler, sfr.DefaultMCMC):
                # DefaultMCMC takes full proposal distributions, not widths.
                for i, p in enumerate(stepwidths):
                    p = sfu.get_prior(p)
                    sampler.set_proposal(i, p)
            else:
                sampler.setStepSize(sfr.vector_double(stepwidths))

    post = sampler.sample(nsamples)
    nblocks = dataset.getNblocks()

    # Copy the samples out of the SWIG wrapper into numpy arrays.
    estimates = np.zeros((nsamples, nparams))
    deviance = np.zeros(nsamples)
    posterior_predictive_data = np.zeros((nsamples, nblocks))
    posterior_predictive_deviances = np.zeros(nsamples)
    posterior_predictive_Rpd = np.zeros(nsamples)
    posterior_predictive_Rkd = np.zeros(nsamples)
    logposterior_ratios = np.zeros((nsamples, nblocks))
    for i in xrange(nsamples):  # xrange: file targets Python 2
        for j in xrange(nparams):
            estimates[i, j] = post.getEst(i, j)
        deviance[i] = post.getdeviance(i)
        for j in xrange(nblocks):
            posterior_predictive_data[i, j] = post.getppData(i, j)
            logposterior_ratios[i, j] = post.getlogratio(i, j)
        posterior_predictive_deviances[i] = post.getppDeviance(i)
        posterior_predictive_Rpd[i] = post.getppRpd(i)
        posterior_predictive_Rkd[i] = post.getppRkd(i)
    accept_rate = post.get_accept_rate()

    return (
        estimates,
        deviance,
        posterior_predictive_data,
        posterior_predictive_deviances,
        posterior_predictive_Rpd,
        posterior_predictive_Rkd,
        logposterior_ratios,
        accept_rate,
    )
def mcmc(data, start=None, nsamples=10000, nafc=2, sigmoid='logistic',
         core='mw0.1', priors=None, stepwidths=None,
         sampler="MetropolisHastings", gammaislambda=False):
    """ Markov Chain Monte Carlo sampling for a psychometric function.

    Parameters
    ----------
    data : A list of lists or an array of data.
        The first column should be stimulus intensity, the second column
        should be number of correct responses (in 2AFC) or number of yes-
        responses (in Yes/No), the third column should be number of trials.
        See also: the examples section below.
    start : sequence of floats of length number of model parameters
        Starting values for the markov chain. If this is None, the MAP
        estimate will be used.
    nsamples : int
        Number of samples to be taken from the posterior (note that due to
        suboptimal sampling, this number may be much lower than the
        effective number of samples.
    nafc : int
        Number of responses alternatives for nAFC tasks. If nafc==1 a
        Yes/No task is assumed.
    sigmoid : string
        Name of the sigmoid to be fitted. Valid sigmoids include:
                logistic
                gauss
                gumbel_l
                gumbel_r
        See `swignifit.utility.available_sigmoids()` for all available
        sigmoids.
    core : string
        "core"-type of the psychometric function. Valid choices include:
                ab      (x-a)/b
                mw%g    midpoint and width
                linear  a+bx
                log     a+b log(x)
        See `swignifit.utility.available_cores()` for all available cores.
    priors : sequence of strings length number of parameters
        Prior distributions on the parameters of the psychometric function.
        These are expressed in the form of a list of prior names. Valid
        prior choices include:
                Uniform(%g,%g)
                Gauss(%g,%g)
                Beta(%g,%g)
                Gamma(%g,%g)
                nGamma(%g,%g)
        if an invalid prior or `None` is selected, no constraints are
        imposed on that parameter resulting in an improper prior
        distribution. See `swignifit.utility.available_priors()` for all
        available priors.
    stepwidths : sequence of floats of length number of model parameters
        Standard deviations of the proposal distribution. The best choice
        is sometimes a bit tricky here. However, as a rule of thumb we can
        state: if the stepwidths are too small, the samples might not cover
        the whole posterior, if the stepwidths are too large, most steps
        will leave the area of high posterior density and will therefore be
        rejected. Thus, in general stepwidths should be somewhere in the
        middle.
    sampler : string
        The type of MCMC sampler to use. See:
        `sw.utility.available_samplers()` for a list of available samplers.
    gammaislambda : boolean
        Set the gamma == lambda prior.

    Output
    ------
    (estimates, deviance, posterior_predictive_data,
    posterior_predictive_deviances, posterior_predictive_Rpd,
    posterior_predictive_Rkd, logposterior_ratios, accept_rate)

    estimates : numpy array, shape: (nsamples, nparameters)
        Parameters sampled from the posterior density of parameters given
        the data.
    deviances : numpy array, length: nsamples
        Associated deviances for each estimate
    posterior_predictive_data : numpy array, shape: (nsamples, nblocks)
        Data that are simulated by sampling from the joint posterior of
        data and parameters. They are important for model checking.
    posterior_predictive_deviances : numpy array, length: nsamples
        The deviances that are associated with the posterior predictive
        data. A particular way of model checking could be to compare the
        deviances and the posterior predictive deviances. For a good model
        these should be relatively similar.
    posterior_predictive_Rpd : numpy array, length: nsamples
        Correlations between psychometric function and deviance residuals
        associated with posterior predictive data
    posterior_predictive_Rkd : numpy array, length: nsamples
        Correlations between block index and deviance residuals associated
        with posterior predictive data.
    logposterior_ratios : numpy array, shape: (nsamples, nblocks)
        Ratios between the full posterior and the posterior for a single
        block for all samples. Used for calculating the KL-Divergence to
        determine influential observations in the Bayesian paradigm.
    accept_rate : float
        The number of proposed MCMC samples that were accepted.

    Example
    -------
    >>> x = [float(2*k) for k in xrange(6)]
    >>> k = [34,32,40,48,50,48]
    >>> n = [50]*6
    >>> d = [[xx,kk,nn] for xx,kk,nn in zip(x,k,n)]
    >>> priors = ('Gauss(0,1000)','Gauss(0,1000)','Beta(3,100)')
    >>> stepwidths = (1.,1.,0.01)
    >>> (estimates, deviance, posterior_predictive_data, \
            posterior_predictive_deviances, posterior_predictive_Rpd, \
            posterior_predictive_Rkd, logposterior_ratios, accept_rate) \
            = mcmc(d,nsamples=10000,priors=priors,stepwidths=stepwidths)
    >>> mean(estimates[:,0])
    2.4811791550665272
    >>> mean(estimates[:,1])
    7.4935217545849184
    """
    dataset, pmf, nparams = sfu.make_dataset_and_pmf(
        data, nafc, sigmoid, core, priors, gammaislambda=gammaislambda)

    if start is not None:
        start = sfu.get_start(start, nparams)
    else:
        # No explicit starting point: use the MAP estimate.
        opt = sfr.PsiOptimizer(pmf, dataset)
        start = opt.optimize(pmf, dataset)

    proposal = sfr.GaussRandom()
    # Membership test on the dict directly (was `.keys()`).
    if sampler not in sfu.sampler_dict:
        raise sfu.PsignifitException("The sampler: " + sampler + " is not available.")
    sampler = sfu.sampler_dict[sampler](pmf, dataset, proposal)
    sampler.setTheta(start)

    # BUGFIX: was `stepwidths != None`; identity test avoids element-wise
    # comparison when callers pass an array-like.
    if stepwidths is not None:
        stepwidths = np.array(stepwidths)
        if len(stepwidths.shape) == 2:
            # A 2-d array is interpreted as a pilot sample.
            if isinstance(sampler, sfr.GenericMetropolis):
                sampler.findOptimalStepwidth(sfu.make_pilotsample(stepwidths))
            elif isinstance(sampler, sfr.MetropolisHastings):
                sampler.setStepSize(sfr.vector_double(stepwidths.std(0)))
            else:
                raise sfu.PsignifitException(
                    "You provided a pilot sample but the selected sampler does not support pilot samples"
                )
        elif len(stepwidths) != nparams:
            raise sfu.PsignifitException(
                "You specified '" + str(len(stepwidths))
                + "' stepwidth(s), but there are '" + str(nparams) + "' parameters.")
        else:
            if isinstance(sampler, sfr.DefaultMCMC):
                # DefaultMCMC takes full proposal distributions, not widths.
                for i, p in enumerate(stepwidths):
                    p = sfu.get_prior(p)
                    sampler.set_proposal(i, p)
            else:
                sampler.setStepSize(sfr.vector_double(stepwidths))

    post = sampler.sample(nsamples)
    nblocks = dataset.getNblocks()

    # Copy the samples out of the SWIG wrapper into numpy arrays.
    estimates = np.zeros((nsamples, nparams))
    deviance = np.zeros(nsamples)
    posterior_predictive_data = np.zeros((nsamples, nblocks))
    posterior_predictive_deviances = np.zeros(nsamples)
    posterior_predictive_Rpd = np.zeros(nsamples)
    posterior_predictive_Rkd = np.zeros(nsamples)
    logposterior_ratios = np.zeros((nsamples, nblocks))
    for i in xrange(nsamples):  # xrange: file targets Python 2
        for j in xrange(nparams):
            estimates[i, j] = post.getEst(i, j)
        deviance[i] = post.getdeviance(i)
        for j in xrange(nblocks):
            posterior_predictive_data[i, j] = post.getppData(i, j)
            logposterior_ratios[i, j] = post.getlogratio(i, j)
        posterior_predictive_deviances[i] = post.getppDeviance(i)
        posterior_predictive_Rpd[i] = post.getppRpd(i)
        posterior_predictive_Rkd[i] = post.getppRkd(i)
    accept_rate = post.get_accept_rate()

    return (estimates, deviance, posterior_predictive_data,
            posterior_predictive_deviances, posterior_predictive_Rpd,
            posterior_predictive_Rkd, logposterior_ratios, accept_rate)
def mcmc(data, start=None, nsamples=10000, nafc=2, sigmoid='logistic',
         core='mw0.1', priors=None, stepwidths=None,
         sampler="MetropolisHastings", gammaislambda=False):
    """ Markov Chain Monte Carlo sampling for a psychometric function.

    Parameters
    ----------
    data : A list of lists or an array of data.
        The first column should be stimulus intensity, the second column
        should be number of correct responses (in 2AFC) or number of yes-
        responses (in Yes/No), the third column should be number of trials.
        See also: the examples section below.
    start : sequence of floats of length number of model parameters
        Starting values for the markov chain. If this is None, the MAP
        estimate will be used.
    nsamples : int
        Number of samples to be taken from the posterior (note that due to
        suboptimal sampling, this number may be much lower than the
        effective number of samples.
    nafc : int
        Number of responses alternatives for nAFC tasks. If nafc==1 a
        Yes/No task is assumed.
    sigmoid : string
        Name of the sigmoid to be fitted. Valid sigmoids include:
                logistic     (1+exp(-x))**-1 [Default]
                gauss        Phi(x)
                gumbel_l     1 - exp(-exp(x))
                gumbel_r     exp(-exp(-x))
                exponential  x>0: 1 - exp(-x); else: 0
                cauchy       atan(x)/pi + 0.5
                id           x; only useful in conjunction with NakaRushton
                             core
        See `swignifit.utility.available_sigmoids()` for all available
        sigmoids.
    core : string
        "core"-type of the psychometric function. Valid choices include:
                ab           (x-a)/b [Default]
                mw%g         midpoint and width, with "%g" a number larger
                             than 0 and less than 0.5. mw%g corresponds to
                             a parameterization in terms of midpoint and
                             width of the rising part of the sigmoid. This
                             width is defined as the length of the interval
                             on which the sigmoidal part reaches from "%g"
                             to 1-"%g".
                linear       a+b*x
                log          a+b*log(x)
                weibull      2*s*m*(log(x)-log(m))/log(2) + log(log(2))
                             This will give you a weibull if combined with
                             the gumbel_l sigmoid and a reverse weibull if
                             combined with the gumbel_r sigmoid.
                poly         (x/a)**b   Will give you a weibull if combined
                             with an exp sigmoid
                NakaRushton  The Naka-Rushton nonlinearity; should only be
                             used with an id core
        See `swignifit.utility.available_cores()` for all available cores.
    priors : sequence of strings length number of parameters
        Prior distributions on the parameters of the psychometric function.
        These are expressed in the form of a list of prior names. Valid
        prior choices include:
                Uniform(%g,%g)    Uniform distribution on an interval
                Gauss(%g,%g)      Gaussian distribution with mean and
                                  standard deviation
                Beta(%g,%g)       Beta distribution
                Gamma(%g,%g)      Gamma distribution
                nGamma(%g,%g)     Gamma distribution on the negative axis
                invGamma(%g,%g)   inverse Gamma distribution
                ninvGamma(%g,%g)  inverse Gamma distribution on the
                                  negative axis
        if an invalid prior or `None` is selected, no constraints are
        imposed on that parameter resulting in an improper prior
        distribution. See `swignifit.utility.available_priors()` for all
        available priors.
    stepwidths : sequence of floats of length number of model parameters
        Standard deviations of the proposal distribution. The best choice
        is sometimes a bit tricky here. However, as a rule of thumb we can
        state: if the stepwidths are too small, the samples might not cover
        the whole posterior, if the stepwidths are too large, most steps
        will leave the area of high posterior density and will therefore be
        rejected. Thus, in general stepwidths should be somewhere in the
        middle.
    sampler : string
        The type of MCMC sampler to use. See:
        `sw.utility.available_samplers()` for a list of available samplers.
    gammaislambda : boolean
        Set the gamma == lambda prior.

    Output
    ------
    (estimates, deviance, posterior_predictive_data,
    posterior_predictive_deviances, posterior_predictive_Rpd,
    posterior_predictive_Rkd, logposterior_ratios, accept_rate)

    estimates : numpy array, shape: (nsamples, nparameters)
        Parameters sampled from the posterior density of parameters given
        the data.
    deviances : numpy array, length: nsamples
        Associated deviances for each estimate
    posterior_predictive_data : numpy array, shape: (nsamples, nblocks)
        Data that are simulated by sampling from the joint posterior of
        data and parameters. They are important for model checking.
    posterior_predictive_deviances : numpy array, length: nsamples
        The deviances that are associated with the posterior predictive
        data. A particular way of model checking could be to compare the
        deviances and the posterior predictive deviances. For a good model
        these should be relatively similar.
    posterior_predictive_Rpd : numpy array, length: nsamples
        Correlations between psychometric function and deviance residuals
        associated with posterior predictive data
    posterior_predictive_Rkd : numpy array, length: nsamples
        Correlations between block index and deviance residuals associated
        with posterior predictive data.
    logposterior_ratios : numpy array, shape: (nsamples, nblocks)
        Ratios between the full posterior and the posterior for a single
        block for all samples. Used for calculating the KL-Divergence to
        determine influential observations in the Bayesian paradigm.
    accept_rate : float
        The number of proposed MCMC samples that were accepted.

    Example
    -------
    >>> x = [float(2*k) for k in xrange(6)]
    >>> k = [34,32,40,48,50,48]
    >>> n = [50]*6
    >>> d = [[xx,kk,nn] for xx,kk,nn in zip(x,k,n)]
    >>> priors = ('Gauss(0,1000)','Gauss(0,1000)','Beta(3,100)')
    >>> stepwidths = (1.,1.,0.01)
    >>> (estimates, deviance, posterior_predictive_data, \
            posterior_predictive_deviances, posterior_predictive_Rpd, \
            posterior_predictive_Rkd, logposterior_ratios, accept_rate) \
            = mcmc(d,nsamples=10000,priors=priors,stepwidths=stepwidths)
    >>> mean(estimates[:,0])
    2.4811791550665272
    >>> mean(estimates[:,1])
    7.4935217545849184
    """
    dataset, pmf, nparams = sfu.make_dataset_and_pmf(
        data, nafc, sigmoid, core, priors, gammaislambda=gammaislambda)

    if start is not None:
        start = sfu.get_start(start, nparams)
    else:
        # No explicit starting point: use the MAP estimate.
        opt = sfr.PsiOptimizer(pmf, dataset)
        start = opt.optimize(pmf, dataset)

    proposal = sfr.GaussRandom()
    # Membership test on the dict directly (was `.keys()`).
    if sampler not in sfu.sampler_dict:
        raise sfu.PsignifitException("The sampler: " + sampler + " is not available.")
    sampler = sfu.sampler_dict[sampler](pmf, dataset, proposal)
    sampler.setTheta(start)

    # BUGFIX: was `stepwidths != None`; identity test avoids element-wise
    # comparison when callers pass an array-like.
    if stepwidths is not None:
        stepwidths = np.array(stepwidths)
        if len(stepwidths.shape) == 2:
            # A 2-d array is interpreted as a pilot sample.
            if isinstance(sampler, sfr.GenericMetropolis):
                sampler.findOptimalStepwidth(sfu.make_pilotsample(stepwidths))
            elif isinstance(sampler, sfr.MetropolisHastings):
                sampler.setStepSize(sfr.vector_double(stepwidths.std(0)))
            else:
                raise sfu.PsignifitException(
                    "You provided a pilot sample but the selected sampler does not support pilot samples"
                )
        elif len(stepwidths) != nparams:
            raise sfu.PsignifitException(
                "You specified '" + str(len(stepwidths))
                + "' stepwidth(s), but there are '" + str(nparams) + "' parameters.")
        else:
            if isinstance(sampler, sfr.DefaultMCMC):
                # DefaultMCMC takes full proposal distributions, not widths.
                for i, p in enumerate(stepwidths):
                    p = sfu.get_prior(p)
                    sampler.set_proposal(i, p)
            else:
                sampler.setStepSize(sfr.vector_double(stepwidths))

    post = sampler.sample(nsamples)
    nblocks = dataset.getNblocks()

    # Copy the samples out of the SWIG wrapper into numpy arrays.
    estimates = np.zeros((nsamples, nparams))
    deviance = np.zeros(nsamples)
    posterior_predictive_data = np.zeros((nsamples, nblocks))
    posterior_predictive_deviances = np.zeros(nsamples)
    posterior_predictive_Rpd = np.zeros(nsamples)
    posterior_predictive_Rkd = np.zeros(nsamples)
    logposterior_ratios = np.zeros((nsamples, nblocks))
    for i in xrange(nsamples):  # xrange: file targets Python 2
        for j in xrange(nparams):
            estimates[i, j] = post.getEst(i, j)
        deviance[i] = post.getdeviance(i)
        for j in xrange(nblocks):
            posterior_predictive_data[i, j] = post.getppData(i, j)
            logposterior_ratios[i, j] = post.getlogratio(i, j)
        posterior_predictive_deviances[i] = post.getppDeviance(i)
        posterior_predictive_Rpd[i] = post.getppRpd(i)
        posterior_predictive_Rkd[i] = post.getppRkd(i)
    accept_rate = post.get_accept_rate()

    return (estimates, deviance, posterior_predictive_data,
            posterior_predictive_deviances, posterior_predictive_Rpd,
            posterior_predictive_Rkd, logposterior_ratios, accept_rate)