def runme(x,datamt): def make_data(size=datamt): return [FunctionData(input=[], output={'h e s': size, 'm e s': size, 'm e g': size, 'h e g': size, 'm e n': size, 'h e m': size, 'm e k': size, 'k e s': size, 'h e k': size, 'k e N': size, 'k e g': size, 'h e n': size, 'm e N': size, 'k e n': size, 'h e N': size, 'f e N': size, 'g e N': size, 'n e N': size, 'n e s': size, 'f e n': size, 'g e n': size, 'g e m': size, 'f e m': size, 'g e k': size, 'f e k': size, 'f e g': size, 'f e s': size, 'n e g': size, 'k e m': size, 'n e m': size, 'g e s': size, 'n e k': size})] print "Start: " + str(x) + " on this many: " + str(datamt) return standard_sample(make_hypothesis, make_data, show=False, N=options.top, save_top="topModel1.pkl", steps=options.steps)
def runme(x): print "Start: " + str(x) fuckup = TopN(options.top) try: return standard_sample(make_hypothesis, make_data, show=False, N=options.top, save_top= "top.pkl", steps=options.steps) except: return fuckup
def runme(x,datamt): def make_data(size=datamt): return [FunctionData(input=[], output={'h e s': size, 'm e s': size, 'm e g': size, 'h e g': size, 'm e n': size, 'h e m': size, 'm e k': size, 'k e s': size, 'h e k': size, 'k e N': size, 'k e g': size, 'h e n': size, 'm e N': size, 'k e n': size, 'h e N': size, 'f e N': size, 'g e N': size, 'n e N': size, 'n e s': size, 'f e n': size, 'g e n': size, 'g e m': size, 'f e m': size, 'g e k': size, 'f e k': size, 'f e g': size, 'f e s': size, 'n e g': size, 'k e m': size, 'n e m': size, 'g e s': size, 'n e k': size})] print "Start: " + str(x) + " on this many: " + str(datamt) return standard_sample(make_hypothesis, make_data, show=False, N=options.top, save_top="topEnglish1.pkl", steps=options.steps)
def runme(x,datamt): def make_data(size=datamt): return [FunctionData(input=[], output={'n i k': size, 'h i N': size, 'f a n': size, 'g i f': size, 'm a N': size, 'f a m': size, 'g i k': size, 'k a n': size, 'f a f': size, 'g i n': size, 'g i m': size, 'g i s': size, 's i f': size, 's i n': size, 'n i s': size, 's i m': size, 's i k': size, 'h a N': size, 'f i N': size, 'h i m': size, 'h i n': size, 'h a m': size, 'n i N': size, 'h i k': size, 'f a s': size, 'f i n': size, 'h i f': size, 'n i m': size, 'g i N': size, 'h a g': size, 's i N': size, 'n i n': size, 'f i m': size, 's i s': size, 'h i s': size, 'n a s': size, 'k a s': size, 'f i s': size, 'n i f': size, 'm i n': size, 's a s': size, 'f a g': size, 'k a g': size, 'k a f': size, 's a m': size, 'n a f': size, 'n a g': size, 'm i N': size, 's a g': size, 'f i k': size, 'k a m': size, 'n a n': size, 's a f': size, 'n a m': size, 'm a s': size, 'h a f': size, 'h a s': size, 'n a N': size, 'm i s': size, 's a n': size, 's a N': size, 'm i k': size, 'f a N': size, 'm i m': size, 'm a g': size, 'm a f': size, 'f i f': size, 'k a N': size, 'h a n': size, 'm a n': size, 'm a m': size, 'm i f': size})] print "Start: " + str(x) + " on this many: " + str(datamt) fuckup = TopN(options.top) try: return standard_sample(make_hypothesis, make_data, show=False, N=options.top, save_top="topkaggik.pkl", steps=options.steps) except: return fuckup
def runme(x,datamt): def make_data(size=datamt): return [FunctionData(input=[], output={'n i k': size, 'h i N': size, 'f a n': size, 'g i f': size, 'm a N': size, 'f a m': size, 'g i k': size, 'k a n': size, 'f a f': size, 'g i n': size, 'g i m': size, 'g i s': size, 's i f': size, 's i n': size, 'n i s': size, 's i m': size, 's i k': size, 'h a N': size, 'f i N': size, 'h i m': size, 'h i n': size, 'h a m': size, 'n i N': size, 'h i k': size, 'f a s': size, 'f i n': size, 'h i f': size, 'n i m': size, 'g i N': size, 'h a g': size, 's i N': size, 'n i n': size, 'f i m': size, 's i s': size, 'h i s': size, 'n a s': size, 'k a s': size, 'f i s': size, 'n i f': size, 'm i n': size, 's a s': size, 'f a g': size, 'k a g': size, 'k a f': size, 's a m': size, 'n a f': size, 'n a g': size, 'm i N': size, 's a g': size, 'f i k': size, 'k a m': size, 'n a n': size, 's a f': size, 'n a m': size, 'm a s': size, 'h a f': size, 'h a s': size, 'n a N': size, 'm i s': size, 's a n': size, 's a N': size, 'm i k': size, 'f a N': size, 'm i m': size, 'm a g': size, 'm a f': size, 'f i f': size, 'k a N': size, 'h a n': size, 'm a n': size, 'm a m': size, 'm i f': size})] print "Start: " + str(x) + " on this many: " + str(datamt) messup = TopN(options.top) try: return standard_sample(make_hypothesis, make_data, show=False, N=options.top, save_top="topkaggik.pkl", steps=options.steps) except: return messup
def runme(d,x,datamt,partitions): def make_data(d=d,size=datamt): output = {} for val in d: output.update({val:size}) return [FunctionData(input=[],output=output)] print "Start: " + str(x) + " on this many: " + str(datamt) if options.PARTITION: partitionMCMC(make_data(),partitions) else: return standard_sample(make_hypothesis, make_data, show=True, N=100, save_top="topModel1.pkl", steps=100000)
def run(make_hypothesis, make_data, data_size):
    """Sample hypotheses on `data_size` data points and return the top set.

    Returns an empty set immediately if a SIGINT has been caught.
    """
    if LOTlib.SIG_INTERRUPTED:
        return set()

    # Bind the data amount into a zero-argument factory for standard_sample.
    data_maker = lambda: make_data(data_size)

    return standard_sample(make_hypothesis, data_maker,
                           N=options.TOP_COUNT,
                           steps=options.STEPS,
                           show=False, save_top=None)
def test_proposer(the_class):
    """Smoke-test `the_class` as a proposal mixin by sampling on the Simple Magnetism model.

    That grammar is a good testbed precisely because it is complex (lambdas,
    etc.), even though a grammar with fewer distinct types would run faster.
    """
    from LOTlib.Examples.Magnetism.Simple import grammar, make_data
    from LOTlib.Hypotheses.LOTHypothesis import LOTHypothesis
    from LOTlib.Hypotheses.Likelihoods.BinaryLikelihood import BinaryLikelihood
    from LOTlib.Inference.Samplers.StandardSample import standard_sample

    class CRHypothesis(BinaryLikelihood, the_class, LOTHypothesis):
        """Recursive LOT hypothesis whose (pseudo)likelihood is a string edit distance."""

        def __init__(self, *args, **kwargs):
            LOTHypothesis.__init__(self, grammar, display='lambda x,y: %s', **kwargs)

    # Hypothesis factory handed to the sampler.
    standard_sample(lambda **kw: CRHypothesis(**kw), make_data, save_top=False)
def run(mk_hypothesis, lang, size):
    """Sample hypotheses for language `lang` on `size` data points; return the top set.

    Bails out with an empty set if a SIGINT has been caught.
    """
    if LOTlib.SIG_INTERRUPTED:
        return set()

    def hypothesis_factory():
        # Build a fresh hypothesis configured from the command-line options.
        return mk_hypothesis(options.LANG, N=options.N, rank=rank,
                             terminals=options.TERMINALS, bound=options.BOUND)

    def data_factory():
        # Draw `size` data points from the language.
        return lang.sample_data_as_FuncData(size)

    return standard_sample(hypothesis_factory, data_factory,
                           N=options.TOP_COUNT, steps=options.STEPS,
                           show=True, skip=200, save_top=None)
def run(mk_hypothesis, size, finite, options, get_data=None, terminals=None):
    """Sample hypotheses on `size` data points and return the top set.

    Data comes from `get_data` when supplied, otherwise from a language
    instance built for options.LANG. Returns an empty set on SIGINT.
    """
    if LOTlib.SIG_INTERRUPTED:
        return set()

    def hypothesis_factory():
        return mk_hypothesis(options.LANG, N=options.N, terminals=terminals)

    def data_factory():
        # Prefer the caller-supplied data source when one was given.
        if get_data is None:
            return instance(options.LANG, finite).sample_data_as_FuncData(size)
        return get_data(size, max_length=options.FINITE)

    return standard_sample(hypothesis_factory, data_factory,
                           N=options.TOP_COUNT, steps=options.STEPS,
                           show=True, save_top=None, skip=200)
def run(make_hypothesis, make_data, data_size):
    """Sample hypotheses on `data_size` data points; empty set if interrupted."""
    if not LOTlib.SIG_INTERRUPTED:
        return standard_sample(make_hypothesis,
                               lambda: make_data(data_size),
                               show=False,
                               save_top=None,
                               N=options.TOP_COUNT,
                               steps=options.STEPS)
    # A SIGINT was caught: contribute nothing.
    return set()
def run(concept_key, ndata, chainidx):
    """Sample hypotheses for one concept on its first `ndata` data points.

    Returns (concept_key, set of hypotheses); (None, empty set) if interrupted.
    `chainidx` identifies the chain but is not used in the body.
    """
    if LOTlib.SIG_INTERRUPTED:
        return None, set()

    # The data is simply a prefix of the pre-built data for this concept.
    data_fn = lambda: concept2data[concept_key][:ndata]

    top = standard_sample(make_hypothesis, data_fn,
                          N=options.TOP_COUNT, steps=options.STEPS,
                          show=False, save_top=None)
    return concept_key, set(top.get_all())
def test_proposer(the_class):
    """Exercise `the_class` as a proposal mixin on the Simple Magnetism example.

    We'd probably see better performance on a grammar with fewer distinct
    types, but this one is a good testbed *because* it's complex (lambdas, etc.).
    """
    from LOTlib.Examples.Magnetism.Simple import grammar, make_data
    from LOTlib.Hypotheses.LOTHypothesis import LOTHypothesis
    from LOTlib.Hypotheses.Likelihoods.BinaryLikelihood import BinaryLikelihood
    from LOTlib.Inference.Samplers.StandardSample import standard_sample

    class _ProposalTestHypothesis(BinaryLikelihood, the_class, LOTHypothesis):
        """Recursive LOT hypothesis; (pseudo)likelihood via string edit distance."""

        def __init__(self, *args, **kwargs):
            LOTHypothesis.__init__(self, grammar, display='lambda x,y: %s', **kwargs)

    def make_hypothesis(**kwargs):
        return _ProposalTestHypothesis(**kwargs)

    standard_sample(make_hypothesis, make_data, save_top=False)
# NOTE(review): this chunk begins mid-class — the method that owns the next
# `return` (presumably a regex-compiling helper of RegexHypothesis) starts
# above this view; `c` is a compiled pattern defined there. TODO confirm.
        return (lambda s: (c.match(s) is not None))

    def __str__(self):
        # Display the raw regex string value.
        return str(self.value)

    def __call__(self, *args):
        # Evaluate the hypothesis; a failed evaluation maps to None instead of
        # propagating EvaluationException to the caller.
        try:
            return LOTHypothesis.__call__(self, *args)
        except EvaluationException:
            return None


def make_hypothesis(**kwargs):
    """Define a new kind of LOTHypothesis, that gives regex strings.

    These have a special interpretation function that compiles differently
    than straight python eval.
    """
    return RegexHypothesis(**kwargs)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Main
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

if __name__ == "__main__":
    from LOTlib.Inference.Samplers.StandardSample import standard_sample
    from LOTlib import break_ctrlc
    from LOTlib.Miscellaneous import qq

    # Sample and print each hypothesis's score components (Python 2 print).
    for h in break_ctrlc(standard_sample(make_hypothesis, make_data, steps=10000)):
        print h.posterior_score, h.prior, h.likelihood, qq(h)
def runme(x): print "Start: " + str(x) return standard_sample(make_hypothesis, make_data, show=False, N=options.top, save_top="topModel1.pkl", steps=options.steps)
# NOTE(review): this chunk begins mid-call — the `parser.add_option(` that owns
# these keyword arguments starts above this view.
                  default="None",
                  help="A function of a hypothesis we can also print at the start of a line to see things we "
                       "want. E.g. --alsoprint='lambda h: h.get_knower_pattern()' ")

# Parse the command line into options (optparse-style).
(options, args) = parser.parse_args()

from LOTlib.Miscellaneous import display_option_summary
display_option_summary(options)

# ========================================================================================================
# Load the model specified on the command line
# ========================================================================================================

from LOTlib.Examples import load_example
make_hypothesis, make_data = load_example(options.MODEL)

# ========================================================================================================
# Run the example's standard sampler with these parameters
# ========================================================================================================

from LOTlib.Inference.Samplers.StandardSample import standard_sample

# This is just a wrapper that nicely prints information
standard_sample(make_hypothesis, make_data, alsoprint=options.ALSO_PRINT, steps=options.STEPS, skip=options.SKIP)
No inference here -- just random sampling from a grammar.
"""

# Example argument values fed to each generated function below.
example_input = [[], [[]], [[], []], [[[]]]]

## Generate some and print out unique ones
seen = set()
for i in break_ctrlc(xrange(10000)):
    x = grammar.generate("START")

    if x not in seen:
        seen.add(x)

        # make the function node version
        f = LOTHypothesis(grammar, value=x, args=["x"])

        # Print the generation probability, the tree, and its value on each example.
        print x.log_probability(), x
        for ei in example_input:
            print "\t", ei, " -> ", f(ei)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Main
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

if __name__ == "__main__":
    from LOTlib.Inference.Samplers.StandardSample import standard_sample
    standard_sample(make_hypothesis, make_data, save_top=False)
# NOTE(review): this chunk begins mid-expression — the forward/backward
# log-probability computation these terms continue starts above this view.
# `f` and `b` are presumably the forward and backward proposal log-probs;
# TODO confirm against the full proposal function.
               (nicelog(1.0 * can_insert_FunctionNode(ni)) - nicelog(newZ))
               - nicelog(len(replicating_rules))
               + (nicelog(before_same_children) - nicelog(nrk))
               + old_lp_below )

        # Return the proposed tree and the (forward - backward) correction.
        return [newt, f - b]


if __name__ == "__main__":
    # test code
    ## NOTE: IN REAL LIFE, MIX WITH REGENERATION PROPOSAL -- ELSE NOT ERGODIC

    from LOTlib.Examples.Magnetism.Simple import grammar, make_data
    from LOTlib.Hypotheses.LOTHypothesis import LOTHypothesis
    from LOTlib.Hypotheses.Likelihoods.BinaryLikelihood import BinaryLikelihood
    from LOTlib.Inference.Samplers.StandardSample import standard_sample

    class IDHypothesis(BinaryLikelihood, InsertDeleteProposal, LOTHypothesis):
        """
        A recursive LOT hypothesis that computes its (pseudo)likelihood using a string edit distance
        """
        def __init__(self, **kwargs):
            LOTHypothesis.__init__(self, grammar, display="lambda x,y: %s", **kwargs)

    def make_hypothesis(**kwargs):
        return IDHypothesis(**kwargs)

    standard_sample(make_hypothesis, make_data, save_top=False, show_skip=9)
# NOTE(review): this chunk begins mid-function — the data-building loop that
# owns this `append` (iterating objects `o` through target function `f`)
# starts above this view.
        data.append(FunctionData(input=[o], output=f(o), alpha=0.90))
    return data

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Hypothesis
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

from LOTlib.Hypotheses.LOTHypothesis import LOTHypothesis
from LOTlib.Hypotheses.Likelihoods.BinaryLikelihood import BinaryLikelihood

class MyHypothesis(BinaryLikelihood, LOTHypothesis):
    # A one-argument LOT hypothesis scored with a binary (noisy-match) likelihood.
    def __init__(self, grammar=grammar, **kwargs):
        LOTHypothesis.__init__(self, grammar=grammar, args=['x'], **kwargs)

def make_hypothesis(**kwargs):
    # Factory handed to standard_sample.
    return MyHypothesis(**kwargs)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Main
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

if __name__ == "__main__":
    from LOTlib.Inference.Samplers.StandardSample import standard_sample
    standard_sample(make_hypothesis, make_data, save_top=False)
# Command-line options for the demo runner. `parser` and `Infinity` are
# defined above this view; options follow optparse conventions.
parser.add_option("--model", dest="MODEL", type="string", default="Number",
                  help="Which model do we run? (e.g. 'Number', 'Magnetism.Simple', etc.")
parser.add_option("--steps", dest="STEPS", type="int", default=Infinity,
                  help="Draw this many samples")
parser.add_option("--skip", dest="SKIP", type="int", default=0,
                  help="Skip this many steps between samples")
parser.add_option("--alsoprint", dest="ALSO_PRINT", type="string", default="None",
                  help="A function of a hypothesis we can also print at the start of a line to see things we "
                       "want. E.g. --alsoprint='lambda h: h.get_knower_pattern()' ")
(options, args) = parser.parse_args()

from LOTlib.Miscellaneous import display_option_summary
display_option_summary(options)

# ========================================================================================================
# Load the model specified on the command line
# ========================================================================================================

from LOTlib.Examples import load_example
make_hypothesis, make_data = load_example(options.MODEL)

# ========================================================================================================
# Run the example's standard sampler with these parameters
# ========================================================================================================

from LOTlib.Inference.Samplers.StandardSample import standard_sample

# This is just a wrapper that nicely prints information
standard_sample(make_hypothesis, make_data, alsoprint=options.ALSO_PRINT, steps=options.STEPS, skip=options.SKIP)
class MyHypothesis(MultinomialLikelihoodLog, LOTHypothesis):
    """A zero-argument LOT hypothesis scored with a log-multinomial likelihood."""

    def __init__(self, grammar=None, **kwargs):
        LOTHypothesis.__init__(self, grammar, display='lambda : %s', **kwargs)
        self.outlier = -1000  # for MultinomialLikelihood

    def __call__(self, *args, **kwargs):
        # we have to mod this to insert the spaces since they aren't part of cons above
        raw = LOTHypothesis.__call__(self, *args, **kwargs)
        return {' '.join(key): count for key, count in raw.items()}


def make_hypothesis():
    # Factory over the module-level grammar, handed to standard_sample.
    return MyHypothesis(grammar)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Main
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

if __name__ == "__main__":
    from LOTlib.Inference.Samplers.StandardSample import standard_sample
    standard_sample(make_hypothesis, make_data, save_top=False)  # , alsoprint="lambda h: h()"
# NOTE(review): this chunk begins mid-list — the data list this FunctionData
# closes (repeated `n` times) starts above this view.
            FunctionData(input=[ "n2", "p2" ], output=True, alpha=alpha)] * n

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Hypothesis
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

from LOTlib.Hypotheses.LOTHypothesis import LOTHypothesis
from LOTlib.Hypotheses.Likelihoods.BinaryLikelihood import BinaryLikelihood

class MyHypothesis(BinaryLikelihood, LOTHypothesis):
    # Two-argument LOT hypothesis scored with a binary (noisy-match) likelihood.
    def __init__(self, **kwargs ):
        LOTHypothesis.__init__(self, grammar, args=['x', 'y'], **kwargs)

def make_hypothesis(**kwargs):
    # Factory handed to standard_sample.
    return MyHypothesis(**kwargs)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Main
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

if __name__ == "__main__":
    from LOTlib.Inference.Samplers.StandardSample import standard_sample
    standard_sample(make_hypothesis, make_data)
# NOTE(review): this chunk begins inside a class body (RegexHypothesis,
# presumably) — the class header is above this view.
    def __str__(self):
        # Display the raw regex string value.
        return str(self.value)

    def __call__(self, *args):
        # Evaluate the hypothesis; a failed evaluation maps to None instead of
        # propagating EvaluationException to the caller.
        try:
            return LOTHypothesis.__call__(self, *args)
        except EvaluationException:
            return None


def make_hypothesis(**kwargs):
    """Define a new kind of LOTHypothesis, that gives regex strings.

    These have a special interpretation function that compiles differently
    than straight python eval.
    """
    return RegexHypothesis(**kwargs)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Main
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

if __name__ == "__main__":
    from LOTlib.Inference.Samplers.StandardSample import standard_sample
    from LOTlib import break_ctrlc
    from LOTlib.Miscellaneous import qq

    # Sample and print each hypothesis's score components (Python 2 print).
    for h in break_ctrlc( standard_sample(make_hypothesis, make_data, steps=10000)):
        print h.posterior_score, h.prior, h.likelihood, qq(h)
# NOTE(review): this chunk begins mid-method — the likelihood conditional that
# owns this `return` starts above this view.
            return log(1.0 / 10.0)  # if undefined, just sample from a base distribution
        else:
            # Noisy-match likelihood: mix a uniform base with agreement on the output.
            return log((1.0 - datum.alpha) / 10.0 + datum.alpha * (response == datum.output))

    def sample_output(self, datum):
        # return a sample of my output given the input in datum
        if random() < datum.alpha:
            return self(*datum.input)
        else:
            return weighted_sample(WORDS)  # uniform sample

    def get_knower_pattern(self):
        # compute a string describing the behavior of this knower-level
        resp = [self(set(sample_sets_of_objects(n, all_objects))) for n in xrange(1, 10)]
        # NOTE(review): `x is not "undef"` is an identity test, not equality —
        # it only works via CPython string interning; flagged, left unchanged here.
        return "".join([str(word_to_number[x]) if (x is not None and x is not "undef") else "U" for x in resp])


def make_hypothesis(**kwargs):
    # NumberExpression over the module-level grammar.
    return NumberExpression(grammar, **kwargs)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Main
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

if __name__ == "__main__":
    from LOTlib.Inference.Samplers.StandardSample import standard_sample
    standard_sample(make_hypothesis, make_data, save_top=False, alsoprint="lambda h: h.get_knower_pattern()")
class MyHypothesis(MultinomialLikelihoodLog, LOTHypothesis):
    """Zero-argument LOT hypothesis under a log-multinomial likelihood."""

    def __init__(self, grammar=None, **kwargs):
        LOTHypothesis.__init__(self, grammar, display="lambda : %s", **kwargs)
        self.outlier = -1000  # for MultinomialLikelihood

    def __call__(self, *args, **kwargs):
        # we have to mod this to insert the spaces since they aren't part of cons above
        produced = LOTHypothesis.__call__(self, *args, **kwargs)
        return {" ".join(string): count for string, count in produced.items()}


def make_hypothesis():
    # Factory over the module-level grammar, handed to standard_sample.
    return MyHypothesis(grammar)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Main
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

if __name__ == "__main__":
    from LOTlib.Inference.Samplers.StandardSample import standard_sample
    standard_sample(make_hypothesis, make_data, save_top=False)  # , alsoprint="lambda h: h()"
# NOTE(review): this chunk begins mid-list — the data list this FunctionData
# closes (repeated `n` times) starts above this view.
            FunctionData(input=["n2", "p2"], output=True, alpha=alpha) ] * n

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Hypothesis
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

from LOTlib.Hypotheses.LOTHypothesis import LOTHypothesis
from LOTlib.Hypotheses.Likelihoods.BinaryLikelihood import BinaryLikelihood

class MyHypothesis(BinaryLikelihood, LOTHypothesis):
    # Two-argument LOT hypothesis scored with a binary (noisy-match) likelihood.
    def __init__(self, **kwargs):
        LOTHypothesis.__init__(self, grammar, args=['x', 'y'], **kwargs)

def make_hypothesis(**kwargs):
    # Factory handed to standard_sample.
    return MyHypothesis(**kwargs)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Main
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

if __name__ == "__main__":
    from LOTlib.Inference.Samplers.StandardSample import standard_sample
    standard_sample(make_hypothesis, make_data)
# NOTE(review): this chunk begins mid-method — the alpha-coin conditional that
# owns this `return` (sample_output, presumably) starts above this view.
            return weighted_sample(WORDS)  # uniform sample

    def get_knower_pattern(self):
        # compute a string describing the behavior of this knower-level
        resp = [ self(set(sample_sets_of_objects(n, all_objects))) for n in xrange(1, 10) ]
        # NOTE(review): `x is not 'undef'` is an identity test, not equality —
        # it only works via CPython string interning; flagged, left unchanged here.
        return ''.join([ str(word_to_number[x]) if (x is not None and x is not 'undef') else 'U' for x in resp ])


def make_hypothesis(**kwargs):
    # NumberExpression over the module-level grammar.
    return NumberExpression(grammar, **kwargs)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Main
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

if __name__ == "__main__":
    from LOTlib.Inference.Samplers.StandardSample import standard_sample
    standard_sample(make_hypothesis, make_data, save_top=False, alsoprint='lambda h: h.get_knower_pattern()')