def learning(self):
    """Run the learning smoke tests over the configured MLN and database.

    Loads the MLN/database from ``self.mln_path`` / ``self.db_path`` and
    runs one single-core learning pass per method.
    """
    mln = MLN(mlnfile=self.mln_path, grammar='StandardGrammar')
    db = Database(mln, dbfile=self.db_path)
    methods = ('BPLL', 'BPLL_CG', 'CLL')
    for method in methods:
        print('=== LEARNING TEST:', method, '===')
        trainer = learn(method=method, mln=mln, db=db, verbose=True,
                        multicore=False)
        trainer.run()
def __init__(self, prac, evidence=None, db=None, ignore_unknown_preds=False):
    """Initialize a PRAC database wrapper.

    :param prac: the PRAC instance whose MLN this database is bound to.
    :param evidence: optional evidence mapping; if falsy, evidence is taken
        from ``db`` (when given) or defaults to an empty dict.
    :param db: optional source Database whose evidence is copied when no
        explicit evidence is supplied.
    :param ignore_unknown_preds: forwarded to ``Database.__init__``.
    """
    self.prac = prac
    # Original code had a no-op `if evidence: pass` branch; collapse it.
    # Truthiness semantics are preserved deliberately: an explicitly
    # passed but *empty* evidence dict still falls back to db / {}.
    if not evidence:
        evidence = db.evidence if db else {}
    Database.__init__(self, prac.mln, evidence=evidence, dbfile=None,
                      ignore_unknown_preds=ignore_unknown_preds)
def test_learning_taxonomies():
    """Run discriminative learning tests on the taxonomies example.

    Fix: the progress message used a Python 2 print *statement*
    (``print '...', method``), which is a SyntaxError under Python 3 —
    the rest of this file uses the print() function.
    """
    p = '$PRACMLN_HOME/examples/taxonomies/taxonomies.pracmln'
    mln = MLN(mlnfile=('%s:senses_and_roles.mln' % p), grammar='PRACGrammar')
    mln.write()
    dbs = Database.load(mln, dbfiles='%s:training.db' % p)
    for method in ('DPLL', 'DBPLL_CG', 'DCLL'):
        for multicore in (True, False):
            print('=== LEARNING TEST:', method, '===')
            learn(method=method, mln=mln, db=dbs, verbose=True,
                  multicore=multicore, epreds='is_a',
                  discr_preds=EVIDENCE_PREDS).run()
def finalgraph(self, filename=None):
    """Build an SVG graph of the final inference result and render it.

    Collects all positively-true atoms for the PRAC role predicates (plus
    'has_sense', 'action_core', 'achieved_by') from every inference step's
    output databases into one database, then draws action cores, senses and
    role edges with graphviz.

    :param filename: optional output filename forwarded to ``render_gv``.
    :return: whatever ``render_gv(g, filename)`` returns.
    """
    finaldb = Database(self.prac.mln)
    # Merge true evidence atoms of interest from all step outputs.
    for step in self.inference_steps:
        for db in step.output_dbs:
            for atom, truth in list(db.evidence.items()):
                if truth == 0:
                    continue
                # NOTE(review): `args` is unpacked but never used.
                _, predname, args = self.prac.mln.logic.parseLiteral(atom)
                if predname in self.prac.roles.union(
                        ['has_sense', 'action_core', 'achieved_by']):
                    finaldb << atom
    # finaldb.write(sys.stdout, color=True)
    g = Digraph(format='svg', engine='dot')
    g.attr('node', shape='box', style='filled')
    # One 'is_a' edge per (action core, word sense) pair, plus role edges.
    for res in finaldb.query('action_core(?w, ?a) ^ has_sense(?w, ?s)'):
        actioncore = res['?a']
        sense = res['?s']
        # NOTE(review): assigned but never read afterwards.
        predname = 'action_core'
        g.node(actioncore, fillcolor='#bee280')
        g.node(sense)
        g.edge(actioncore, sense, label='is_a')
        roles = self.prac.actioncores[actioncore].roles
        for role in roles:
            # NOTE(review): `db` here is the leaked loop variable from the
            # evidence-merging loop above (i.e. the *last* output db), not
            # `finaldb` — looks suspicious; confirm whether finaldb was
            # intended.
            for res in db.query('{}(?w, {}) ^ has_sense(?w, ?s)'.format(
                    role, actioncore)):
                sense = res['?s']
                g.node(sense)
                g.edge(actioncore, sense, label=role)
    # Add 'achieved_by' chains between action cores, with the roles of the
    # achieving core.
    for res in finaldb.query('achieved_by(?a1, ?a2)'):
        a1 = res['?a1']
        a2 = res['?a2']
        g.node(a1, fillcolor='#bee280')
        g.node(a2, fillcolor='#bee280')
        g.edge(a1, a2, label='achieved_by')
        actioncore = a2
        # NOTE(review): uses `self.prac.actionroles` here but
        # `self.prac.actioncores` in the loop above — confirm this
        # asymmetry is intentional.
        roles = self.prac.actionroles[actioncore].roles
        for role in roles:
            # Same leaked-`db` concern as above.
            for res in db.query('{}(?w, {}) ^ has_sense(?w, ?s)'.format(
                    role, actioncore)):
                sense = res['?s']
                g.node(sense)
                g.edge(actioncore, sense, label=role)
    return render_gv(g, filename)
def test_GSMLN():
    """Learn a GSMLN from the smokers example.

    Fix: removed ~40 lines of commented-out scratch/debug code that buried
    the three live statements; the executed behavior is unchanged.
    """
    mln = MLN(mlnfile='smokers.mln', grammar='GSMLNGrammar')
    dbs = Database.load(mln, dbfiles='smokers.db')
    # this uses the method from base.py
    learned_mln = mln.learn(databases=dbs, method=GSMLN_L, verbose=True)
def test_inference_smokers():
    """Run a GibbsSampler inference smoke test on the smokers example.

    Fix: removed the commented-out alternative MLN/db loading lines that
    obscured the live code; executed behavior is unchanged.
    """
    p = os.path.join(locs.examples, 'smokers', 'smokers')
    print(p)
    mln = MLN(mlnfile=('%s.mln' % p), grammar='StandardGrammar')
    db = Database(mln, dbfile='%s.db' % p)
    for method in ('GibbsSampler', ):
        print('=== INFERENCE TEST:', method, '===')
        query(queries='Cancer,Smokes,Friends', method=method, mln=mln,
              db=db, verbose=True, multicore=False).run()
def test_learning_smokers(arg='.', disp=False):
    """Exercise the learning methods on the smokers example.

    :param arg: directory containing 'smoking.mln' and 'smoking-train.db'.
    :param disp: when True, runs (and writes) an inference query instead of
        learning. NOTE(review): running `query` inside a *learning* test
        looks intentional for display purposes — confirm.
    """
    mln = MLN(mlnfile=os.path.join(arg, 'smoking.mln'),
              grammar='StandardGrammar')
    db = Database(mln, dbfile=os.path.join(arg, 'smoking-train.db'))
    for method in ('BPLL', 'BPLL_CG', 'CLL'):
        for multicore in (True, False):
            print('=== LEARNING TEST:', method, '===')
            if disp:
                result = query(queries='Cancer,Smokes,Friends',
                               method=method, mln=mln, db=db,
                               verbose=disp, multicore=multicore).run()
                result.write()
            else:
                learn(method=method, mln=mln, db=db, verbose=disp,
                      multicore=multicore).run()
def inference(self, inference_query):
    """Run the given query against the configured MLN and database.

    :param inference_query: query string passed through to ``query``.
    :return: the result of the first inference method, or [] when the
        result is falsy.
    """
    mln = MLN(mlnfile=self.mln_path, grammar='StandardGrammar')
    db = Database(mln, dbfile=self.db_path)
    methods = [
        'EnumerationAsk'
        # 'MC-SAT',
        # 'WCSPInference',
        # 'GibbsSampler'
    ]
    for method in methods:
        print('=== INFERENCE TEST:', method, '===')
        result = query(queries=inference_query, method=method, mln=mln,
                       db=db, verbose=True, multicore=False).run()
        print(result)
        # NOTE: returns inside the loop, so only the first listed method
        # ever runs (currently only one is active anyway).
        return result if result else []
def test_inference_smokers(arg='.', disp=False):
    """Exercise every inference method on the smokers example.

    :param arg: directory containing the trained MLN and the test database.
    :param disp: when True, also writes each result and enables verbose
        output.
    """
    mln = MLN(mlnfile=os.path.join(arg, 'wts.pybpll.smoking-train-smoking.mln'),
              grammar='StandardGrammar')
    db = Database(mln, dbfile=os.path.join(arg, 'smoking-test-smaller.db'))
    for method in ('EnumerationAsk', 'MC-SAT', 'WCSPInference',
                   'GibbsSampler'):
        for multicore in (False, True):
            print('=== INFERENCE TEST:', method, '===')
            # Run once; write the result only in display mode.
            result = query(queries='Cancer,Smokes,Friends', method=method,
                           mln=mln, db=db, verbose=disp,
                           multicore=multicore).run()
            if disp:
                result.write()