# The ParticleSwarm base class is defined earlier in this (elided) file; the imports below are
# likewise assumed, and exact module paths may differ across LOTlib versions:
from numpy import median
from LOTlib.Miscellaneous import Infinity
from LOTlib import break_ctrlc


class ParticleSwarmPriorResample(ParticleSwarm):
    """ Like ParticleSwarm, but resamples from the prior """

    def refresh(self):
        """ Redraw from the prior any chain whose chainZ score is below the median. """
        m = median(self.chainZ)
        for i in range(self.nchains):
            if self.chainZ[i] < m:
                self.chains[i] = self.make_h0(**self.kwargs)
                self.chainZ[i] = -Infinity  # reset this

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

if __name__ == "__main__":

    from LOTlib.Examples.Number.Global import generate_data, make_h0

    data = generate_data(300)

    ps = ParticleSwarm(make_h0, data)
    for h in break_ctrlc(ps):
        print h.posterior_score, h

    # Report the single best hypothesis seen across all chains
    if len(ps.seen) > 0:
        print "#", sorted(ps.seen, key=lambda x: x.posterior_score, reverse=True)[0]
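# A minimal usage sketch, assuming the demo variables (make_h0, data) from the __main__ block
# above. ParticleSwarmPriorResample only overrides refresh(), so it is driven exactly like
# ParticleSwarm; chains whose chainZ falls below the median are restarted from fresh prior
# samples via make_h0:
#
#   psr = ParticleSwarmPriorResample(make_h0, data)
#   for h in break_ctrlc(psr):
#       print h.posterior_score, h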
parser.add_option("--out", dest="OUT", type="string", help="Output prefix", default="output/proposal") parser.add_option("--samples", dest="SAMPLES", type="int", default=100000, help="Number of samples to run") parser.add_option("--chains", dest="CHAINS", type="int", default=10, help="Number of chains to run in parallel") parser.add_option("--repetitions", dest="REPETITONS", type="int", default=100, help="Number of repetitions to run") parser.add_option("--model", dest="MODEL", type="str", default="Number100", help="Which model to run on (Number, Galileo, RationalRules, SimpleMagnetism)") parser.add_option("--print-every", dest="PRINTEVERY", type="int", default=1000, help="Evaluation prints every this many") options, _ = parser.parse_args() # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Define the test model # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if options.MODEL == "Number100": # Load the data from LOTlib.Examples.Number.Global import generate_data, grammar, make_h0 data = synchronize_variable( lambda : generate_data(100) ) elif options.MODEL == "Number300": # Load the data from LOTlib.Examples.Number.Global import generate_data, grammar, make_h0 data = synchronize_variable( lambda : generate_data(300) ) elif options.MODEL == "Number1000": # Load the data from LOTlib.Examples.Number.Global import generate_data, grammar, make_h0 data = synchronize_variable( lambda : generate_data(1000) ) elif options.MODEL == "Galileo": from LOTlib.Examples.SymbolicRegression.Galileo import data, grammar, make_h0 elif options.MODEL == "RationalRules": from LOTlib.Examples.RationalRules.Model.Utilities import grammar, data, make_h0
# Assumed imports (omitted from this excerpt; module paths may differ across LOTlib versions,
# and any LRU mapping with this interface would work in place of cachetools.LRUCache):
from cachetools import LRUCache
from LOTlib.Miscellaneous import Infinity
from LOTlib.Inference.MetropolisHastings import MHSampler


class MemoizedMHSampler(MHSampler):
    """ Same as MHSampler, but the values of compute_posterior are cached via LRUCache """

    def __init__(self, h0, data, memoize=Infinity, **kwargs):
        MHSampler.__init__(self, h0, data, **kwargs)

        # self.mem stores the return value of compute_posterior, keyed by hypothesis
        self.mem = LRUCache(maxsize=memoize)

    def compute_posterior(self, h, data, shortcut=-Infinity):
        if h in self.mem:
            ret = self.mem[h]
            h.posterior_score = ret  # set this because it may not be set
            return ret
        else:
            # The shortcut threshold is not forwarded, so the cached value is always a fully
            # computed score.
            ret = MHSampler.compute_posterior(self, h, data, shortcut=-Infinity)  # calls update to posterior counter
            self.mem[h] = ret
            return ret


if __name__ == "__main__":

    from LOTlib.Examples.Number.Global import generate_data, NumberExpression, grammar

    data = generate_data(100)
    h0 = NumberExpression(grammar)

    sampler = MemoizedMHSampler(h0, data, steps=1000)
    for h in sampler:
        pass
        # print q(get_knower_pattern(h)), h.posterior_score, h.prior, h.likelihood, q(h), sampler.acceptance_count, sampler.acceptance_ratio()
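# A small sketch of the caching behaviour, assuming the demo objects (h0, data) above and a
# hypothetical finite cache bound memoize=1000:
#
#   sampler = MemoizedMHSampler(h0, data, steps=1000, memoize=1000)
#   p1 = sampler.compute_posterior(h0, data)   # computed by MHSampler and stored in sampler.mem
#   p2 = sampler.compute_posterior(h0, data)   # served from the LRU cache; also re-sets h0.posterior_score
#   assert p1 == p2
#
# With a finite memoize bound, the cache keeps only the most recently used hypotheses, bounding
# memory while still avoiding recomputation for hypotheses the chain revisits often.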
""" def __init__(self, h0, data, memoize=Infinity, **kwargs): MHSampler.__init__(self, h0, data, **kwargs) # self.mem stores return of compute_posterior self.mem = LRUCache(maxsize=memoize) def compute_posterior(self, h, data, shortcut=-Infinity): if h in self.mem: ret = self.mem[h] h.posterior_score = ret # set this because it may not be set return ret else: ret = MHSampler.compute_posterior( self, h, data, shortcut=-Infinity) # calls update to posterior counter self.mem[h] = ret return ret if __name__ == "__main__": from LOTlib.Examples.Number.Global import generate_data, NumberExpression, grammar data = generate_data(100) h0 = NumberExpression(grammar) sampler = MemoizedMHSampler(h0, data, steps=1000) for h in sampler: pass #print q(get_knower_pattern(h)), h.posterior_score, h.prior, h.likelihood, q(h), sampler.acceptance_count, sampler.acceptance_ratio()