def run_DREAM(self,nsamples=100000):
    """Sample the model posterior with the pymc DREAM stepper until the
    Gelman-Rubin statistic reports convergence.

    Parameters
    ----------
    nsamples : int
        Number of samples drawn in the initial run.  Each continuation
        run after a failed convergence check draws a further 10% of
        this count, appended to the existing trace.

    Returns
    -------
    The pymc trace object holding all sampled chains.

    NOTE(review): this file is collapsed onto single physical lines; the
    statement nesting below is a reconstruction — confirm against the
    original layout.
    """
    model = pymc.Model()
    with model:
        # Normal prior centered on the starting parameter vector, one
        # dimension per parameter, unit standard deviation everywhere.
        params = pymc.Normal('params', mu=self.start_parameters,
                             sd=np.array([1.0 ]*len(self.start_parameters)),
                             shape=(len(self.start_parameters)))
        #params = pymc.Flat('params',shape=(len(self.start_parameters)))
        # DREAM_cost reads the cost function through this module-level
        # global rather than receiving it as an argument.
        global cost_function
        cost_function = self.cost_function
        error = pymc.Potential('error', DREAM_cost(params))
        # Seed chain count scales with the number of parameter rules.
        nseedchains = 10*len(self.model.parameters_rules())
        step = pymc.Dream(variables=[params],
                          nseedchains=nseedchains,
                          blocked=True,
                          start_random=False,
                          save_history=True,
                          parallel=True,
                          adapt_crossover=False,
                          verbose=False,)
        # Initial run: one job per chain, started from self.pso_results
        # (presumably particle-swarm results — confirm with caller).
        trace = pymc.sample(nsamples, step,
                            start=self.pso_results,
                            njobs=self.nchains,
                            use_mpi=False,
                            progressbar=False,)
        # Keep extending the trace in 10% increments until every entry
        # of the 'params' Gelman-Rubin statistic drops below 1.2.
        cont_flag = True
        while cont_flag:
            cont_flag = False
            conv_stats = gelman_rubin(trace)
            for i in conv_stats['params']:
                if i>1.2:
                    print "Parameters have not converged, will continue run."
                    print "Value so far is %s"%i
                    cont_flag = True
                    break
            # NOTE(review): as reconstructed, this extension also runs on
            # the final pass after convergence is detected — confirm one
            # extra 10% extension is intended rather than guarding it
            # with `if cont_flag:`.
            trace = pymc.sample(int(nsamples*.1), step,
                                #start=self.pso_results,
                                njobs=self.nchains,
                                use_mpi=False,
                                trace=trace,
                                progressbar=False,)
        # Report the final per-parameter statistic and a pass/fail flag.
        conv_stats = gelman_rubin(trace)
        for i in conv_stats['params']:
            print i,i<1.2
        #pymc.traceplot(trace,vars=[params,error])
        #plt.show()
    return trace
def TotalError(self):
    """Return the Gelman-Rubin convergence error for ``self.traces``.

    The error is the excess of the potential scale reduction factor
    (r_hat) over 1, floored at zero.  r_hat compares between-chain and
    within-chain variance; values near 1 indicate convergence.
    """
    assert self.traces
    try:
        r_hat = diagnostics.gelman_rubin(self.traces)
    except FloatingPointError:
        # Raised when the within-chain standard deviation is 0: the
        # chain perfectly correlates with itself, which (for any chain
        # longer than one state) means convergence failed outright.
        return utils.VERY_LARGE_NUMBER
    else:
        return max(0, r_hat - 1)
for row in zip(*uparams) ]
# NOTE(review): the line above is the tail of a list comprehension whose
# head (`params_flat = [numpy.concatenate(...)`) lies outside this view;
# the indentation below is reconstructed — confirm against the original.
# Per-parameter single-chain diagnostics: Raftery-Lewis burn-in and
# run-length estimates at two quantile/accuracy settings.
for name, x in zip(params_names, numpy.transpose(params_flat)):
    print ' parameter %s' % name
    nmin, kthin, nburn, nprec, kmind = raftery_lewis(x, q=0.1, r=0.01,
                                                     verbose=False)
    print ' Raftery-Lewis (q=0.1, r=0.01): skip %d, use %d (of %d)' % (
        nburn, nprec, len(x))
    nmin, kthin, nburn, nprec, kmind = raftery_lewis(x, q=0.025, r=0.005,
                                                     verbose=False)
    print ' Raftery-Lewis (q=0.025, r=0.005): skip %d, use %d (of %d)' % (
        nburn, nprec, len(x))
# Accumulate this dataset's flattened samples for the cross-chain check.
results.append(params_flat)
if len(filenames) > 1:
    # Cross-chain Gelman-Rubin requires at least two datasets.
    print 'Chain group convergence'
    results = numpy.transpose(results)
    print results.shape
    for name, xx in zip(params_names, results):
        chains = numpy.transpose(xx)
        print ' parameter %s:' % name, chains.shape
        print ' Gelman-Rubin:', gelman_rubin(chains)
        #print ' Effective sample size:', effective_n(chains)
else:
    print 'Chain group convergence not computed, pass more than one dataset'
# NOTE(review): `k`, `v`, and `param_length` come from an enclosing loop
# outside this view (presumably iterating a name -> samples mapping);
# the indentation below is reconstructed — confirm against the original.
params_names += [k] * param_length
uparams.append(v)
# Flatten each sample row's per-parameter arrays into one 1-D vector.
params_flat = [numpy.concatenate([pi.flatten() for pi in row])
               for row in zip(*uparams)]
# Per-parameter single-chain diagnostics: Raftery-Lewis burn-in and
# run-length estimates at two quantile/accuracy settings.
for name, x in zip(params_names, numpy.transpose(params_flat)):
    print ' parameter %s' % name
    nmin, kthin, nburn, nprec, kmind = raftery_lewis(x, q=0.1, r=0.01,
                                                     verbose=False)
    print ' Raftery-Lewis (q=0.1, r=0.01): skip %d, use %d (of %d)' % (nburn, nprec, len(x))
    nmin, kthin, nburn, nprec, kmind = raftery_lewis(x, q=0.025, r=0.005,
                                                     verbose=False)
    print ' Raftery-Lewis (q=0.025, r=0.005): skip %d, use %d (of %d)' % (nburn, nprec, len(x))
# Accumulate this dataset's flattened samples for the cross-chain check.
results.append(params_flat)
if len(filenames) > 1:
    # Cross-chain Gelman-Rubin requires at least two datasets.
    print 'Chain group convergence'
    results = numpy.transpose(results)
    print results.shape
    for name, xx in zip(params_names, results):
        chains = numpy.transpose(xx)
        print ' parameter %s:' % name, chains.shape
        print ' Gelman-Rubin:', gelman_rubin(chains)
        #print ' Effective sample size:', effective_n(chains)
else:
    print 'Chain group convergence not computed, pass more than one dataset'