def ground_problog_program(program):
    """Ground a ProbLog program and return the result as Prolog text.

    Delegates grounding to the problog library's LogicFormula machinery.
    """
    formula = LogicFormula.create_from(
        program,
        avoid_name_clash=True,
        keep_order=True,
        label_all=True,
    )
    return formula.to_prolog()
def evaluate_custom_weights(self, eval_name=None):
    """Check that evaluate() honours per-atom weights under an NSP semiring."""

    class TestSemiringProbabilityNSP(SemiringProbability):
        def is_nsp(self):
            # Forces the non-standard (NSP) weight-handling code path.
            return True

    program = """ 0.25::a. query(a). """
    parsed = PrologString(program)
    ground = LogicFormula.create_from(parsed, label_all=True, avoid_name_clash=True)
    semiring = TestSemiringProbabilityNSP()
    evaluatable_cls = get_evaluatable(name=eval_name, semiring=semiring)
    compiled = evaluatable_cls.create_from(ground)
    atom_a = Term('a')

    # Without custom weights the program's own annotation is used.
    self.assertEqual(0.25, compiled.evaluate(semiring=semiring)[atom_a])

    # Custom weights supplied at evaluation time take precedence.
    custom_weights = {atom_a: 0.1}
    self.assertEqual(
        0.1,
        compiled.evaluate(semiring=semiring, weights=custom_weights)[atom_a])
def main_mpe_semiring(args):
    """Entry point for MPE inference via the semiring approach.

    Parses the ProbLog model from ``args.inputfile``, grounds it, runs
    ``mpe_semiring`` and passes the (success, payload) pair to a plain-text
    or JSON result handler depending on ``args.web``.  Errors are reported
    through the same handler rather than raised.
    """
    inputfile = args.inputfile
    init_logger(args.verbose)
    if args.web:
        result_handler = print_result_json
    else:
        result_handler = print_result
    if args.output is not None:
        outf = open(args.output, 'w')
    else:
        outf = sys.stdout
    try:
        with Timer("Total"):
            try:
                pl = PrologFile(inputfile)
                # BUG FIX: previously passed the undefined name `model`
                # (NameError); the parsed program is `pl`.
                lf = LogicFormula.create_from(pl, label_all=True)
                prob, facts = mpe_semiring(lf, args.verbose)
                result_handler((True, (prob, facts)), outf)
            except Exception as err:
                trace = traceback.format_exc()
                err.trace = trace
                result_handler((False, err), outf)
    finally:
        # BUG FIX: close the output file if we opened it (never stdout).
        if outf is not sys.stdout:
            outf.close()
def find_all_prob():
    """Compute infection probabilities for all users via ProbLog.

    Loads the predicate file, queries ``infect(_)`` for every individual,
    and returns a list of ``(User, probability)`` tuples for every key
    whose id resolves to a user.  Returns an empty list when there are no
    red nodes.
    """
    # IDIOM FIX: read the whole file at once instead of concatenating
    # line by line (quadratic string building).
    with open("prolog/problog_predicates.pl", "r") as f:
        ps = f.read()

    # Probability computation via problog.
    ps += "query(infect(_))."
    p = PrologString(ps)
    # NOTE(review): `dbp` is unused; engine.prepare() may be vestigial —
    # verify it has no required side effects before removing.
    dbp = engine.prepare(p)
    lf = LogicFormula.create_from(p)   # ground the program
    dag = LogicDAG.create_from(lf)     # break cycles in the ground program
    cnf = CNF.create_from(dag)         # convert to CNF
    ddnnf = DDNNF.create_from(cnf)     # compile CNF to d-DNNF
    r = ddnnf.evaluate()

    # problog returns a dict structured like
    #   {infect(2): 0.67, infect(3): 0.8, ...}
    # so each user id has to be parsed out of the key string.
    items = []
    if len(RedNode.query.all()) > 0:
        for key, value in r.items():
            start = "infect("
            end = ")"
            result = str(key)[len(start):-len(end)]
            try:
                u = User.query.get(int(result))
                items.append((u, value))
            except ValueError:
                # Key was not of the expected infect(<int>) form; skip it.
                continue
    return items
def call_theorem_prover(theorem_prover, instance_id, question_id, theory,
                        assertion, gold_label, print_log=True):
    """Function that takes a single theory/assertion example and runs it
    through the theorem prover to obtain a label.

    Returns the obtained label, elapsed time (ms) to solve it, and the name
    of the exception raised by the engine, if any (None on success).
    """
    obtained_result = False
    # BUG FIX: was `millisecs_elapsed = 0`, which left `elapsed_millisecs`
    # undefined (NameError) on the non-problog fall-through return below.
    elapsed_millisecs = 0
    if print_log:
        print("=======ORIGINAL THEORY=========")
    theory_as_txt = theory.program(theorem_prover)
    if print_log:
        print(theory_as_txt)
    # Preprocess mutates the theory, so regenerate the program text.
    theory.preprocess(theorem_prover)
    theory_as_txt = theory.program(theorem_prover)
    if theorem_prover == "problog":
        assertion_lf = assertion.logical_form(theorem_prover, False)
        assertion_lf = f"query({assertion_lf})."
        program = f"{theory_as_txt}\n{assertion_lf}"
        if print_log:
            print("=======PROGRAM FROM PREPROCESSED THEORY=========")
            print(program)
            print("=======EXPECTED LABEL=========")
            print(f" {gold_label}")
        start_millisecs = current_milli_time()
        try:
            lf = LogicFormula.create_from(program)  # ground the program
            dag = LogicDAG.create_from(lf)  # break cycles in the ground program
            sdd = SDD.create_from(dag)
            result = sdd.evaluate()
            end_millisecs = current_milli_time()
            elapsed_millisecs = end_millisecs - start_millisecs
            result_tuples = [(k, v) for k, v in result.items()]
            obtained_result = result_tuples[0][1] != float(0)
            return obtained_result, elapsed_millisecs, None
        except (NegativeCycle, NonGroundProbabilisticClause, UnknownClause) as e:
            end_millisecs = current_milli_time()
            elapsed_millisecs = end_millisecs - start_millisecs
            if print_log:
                print(
                    f"!!!Encountered Exception at instance id {instance_id}, question id {question_id}: {e}"
                )
            # BUG FIX: lstrip/rstrip treat their argument as a *character
            # set* and could strip leading/trailing characters of the class
            # name itself; slice off the exact "<class '...'>" wrapper.
            exception_name = str(type(e))[len("<class '"):-len("'>")]
            return None, elapsed_millisecs, exception_name
    return obtained_result, elapsed_millisecs, None
def ad_atom_duplicate(self, eval_name=None):
    """Guard against spurious _extra atoms during the transformation.

    If add_atom(..., cr_extra=True) is used instead of cr_extra=False,
    additional _extra atoms are created and the atom count exceeds 3.
    """
    program = """ 0.2::a ; 0.8::b. query(a). query(b). """
    parsed = PrologString(program)
    ground = LogicFormula.create_from(parsed, label_all=True, avoid_name_clash=True)
    semiring = SemiringProbability()
    compiled = get_evaluatable(name=eval_name, semiring=semiring).create_from(ground)
    # The two-headed annotated disjunction must ground to exactly 3 atoms.
    self.assertEqual(3, compiled.atomcount)
def run_theory_in_problog(theory, assertion):
    """Run the given theory and assertion through ProbLog engine to obtain a
    True/False label.  If an exception is encountered, return None so that
    this example will not be part of output."""
    theorem_prover = "problog"
    try:
        program = theory.program(theorem_prover, assertion)
        lf = LogicFormula.create_from(program)  # ground the program
        dag = LogicDAG.create_from(lf)  # break cycles in the ground program
        sdd = SDD.create_from(dag)
        result = sdd.evaluate()
        result_tuples = [(k, v) for k, v in result.items()]
        if not result_tuples:
            return False
        # Non-zero probability on the first query means the label is True.
        return result_tuples[0][1] != 0.0
    except (NegativeCycle, NonGroundProbabilisticClause, UnknownClause):
        # Engine could not handle this example; the caller drops it.
        # (FIX: dropped the unused `as e` binding and the unreachable
        # trailing `return None` after try/except — both paths return.)
        return None
def find_user_prob(uid):
    """Compute the infection probability for a single user via ProbLog.

    Queries ``infect(uid)``, and as a side effect persists on the User row
    the date of the oldest red node the user was in contact with (taken
    from the ``date/1`` facts asserted inside problog's internal database).
    Returns the raw problog evaluation result dict.
    """
    ps = ""
    with open("prolog/problog_predicates.pl", "r") as f:
        for line in f:
            ps += line
    # Cleanup of the dynamic date/1 facts inside problog.
    p = PrologString(ps)
    dbp = engine.prepare(p)
    query = Term("clean")
    # Running the clean/0 query triggers the cleanup; result unused.
    res = engine.query(dbp, query)
    # Probability computation via problog.
    ps += "query(infect(" + str(uid) + "))."
    p = PrologString(ps)
    dbp = engine.prepare(p)
    lf = LogicFormula.create_from(p)   # ground the program
    dag = LogicDAG.create_from(lf)     # break cycles in the ground program
    cnf = CNF.create_from(dag)         # convert to CNF
    ddnnf = DDNNF.create_from(cnf)     # compile CNF to ddnnf
    r = ddnnf.evaluate()
    # Persist in SQLite the date of the oldest red node this user
    # was in contact with.
    term = Term("date", None)
    # problog's internal database, where facts added with assertz() live.
    database = problog_export.database
    node_key = database.find(term)
    if node_key is not None:
        node = database.get_node(node_key)
        # All date/1 facts inserted with assertz/1.
        dates = node.children.find(term.args)
        vals = []
        if dates:
            for date in dates:
                n = database.get_node(date)
                vals.append(int(n.args[0]))
            # Oldest date (in milliseconds) across all asserted facts.
            min_val = min(vals)
            u = User.query.get(uid)
            u.oldest_risk_date = min_val
            db.session.commit()
    return r
def main(): p = PrologString(""" increaseOsteoblasts :- calcium. 0.5::\+increaseOsteoblasts :- calcium, bispho. reduceOsteoclasts :- bispho. 1.0::\+reduceOsteoclasts :- calcium , bispho. osteoprosis :- initialOsteoprosis. 0.85::\+osteoprosis :- reduceOsteoclasts. % Bisphosphonates 0.15::\+osteoprosis :- increaseOsteoblasts. % Calcium % Prior probabilities 0.5::calcium. 0.5::bispho. 0.5::initialOsteoprosis. % Query probability of effect evidence(initialOsteoprosis, true). evidence(calcium, true). evidence(bispho, false). query(osteoprosis). """) #1.3: Create the CNF of the problog lf = LogicFormula.create_from(p,avoid_name_clash=True, keep_order=True, label_all=True) # ground the program print("Ground program") print(LogicFormula.to_prolog(lf)) dag = LogicDAG.create_from(lf,avoid_name_clash=True, keep_order=True, label_all=True) # break cycles in the ground program cnf = CNF.create_from(dag) # convert to CNF print(CNF.to_dimacs(cnf)) ddnnf = DDNNF.create_from(cnf) # compile CNF to ddnnf test = DDNNF.get_weights(ddnnf) print(test) print(ddnnf.evaluate()) #3.1: Create 4 interpretations print("--Create 4 interpretations--") interpretations = create_interpretations(p_without_evidence, 4) for i in interpretations: print(i) #3.2: Create 100, 1000, 10000 interpretations and estimate p_n print("--Estimate parameters--") estimate_parameters(100) estimate_parameters(1000) estimate_parameters(10000)
# p2 = PrologString(""" person(a). person(b). person(c). 0.2::stress(X) :- person(X). 0.1::friends(X,Y) :- person(X), person(Y). 0.3::smokes(X) :- stress(X). 0.4::smokes(X) :- friends(X,Y), smokes(Y). evidence(friends(a,b), true). evidence(friends(a,c), true). query(smokes(a)). """) lf2 = LogicFormula.create_from(p2, avoid_name_clash=True, keep_order=True, label_all=True) # print(LogicFormula.to_prolog(lf2)) dag2 = LogicDAG.create_from(lf2, avoid_name_clash=False, keep_order=True, label_all=True) # # print(dag2) # # print(LogicFormula.to_prolog(dag2)) cnf2 = CNF.create_from(dag2) # # print(cnf2.to_dimacs(weighted=True, invert_weights=True)) ddnnf2 = DDNNF.create_from(cnf2) #print(ddnnf2.evaluate()) # # import PyBool_public_interface as Bool
# NOTE(review): `model` is a file-like object opened elsewhere, and
# `lfs`, `dags`, `cnfs` are presumably lists defined earlier — verify.
m = model.read()
times = []
door_num = range(3, 10)
# Time end-to-end evaluation of the templated model for each door count.
for i in door_num:
    start = timeit.default_timer()
    model = m.format(door_num=i)
    p = PrologString(model)
    formula = get_evaluatable().create_from(p)
    print(formula.evaluate())
    stop = timeit.default_timer()
    times.append(stop - start)
# Collect intermediate representations (ground formula, DAG, CNF)
# for each door count.
for i in door_num:
    model = m.format(door_num=i)
    p = PrologString(model)
    lf = LogicFormula.create_from(p)
    lfs.append(lf)
    dag = LogicDAG.create_from(lf)
    dags.append(dag)
    cnf = CNF.create_from(dag)
    cnfs.append(cnf)
# NOTE(review): this loop is an exact duplicate of the previous one and
# appends every representation a second time — confirm this is intended.
for i in door_num:
    model = m.format(door_num=i)
    p = PrologString(model)
    lf = LogicFormula.create_from(p)
    lfs.append(lf)
    dag = LogicDAG.create_from(lf)
    dags.append(dag)
    cnf = CNF.create_from(dag)
    cnfs.append(cnf)
from problog.program import PrologString
from problog.formula import LogicFormula, LogicDAG
from problog.logic import Term
from problog.ddnnf_formula import DDNNF
from problog.cnf_formula import CNF

# Two coins; winning requires at least one head, with evidence that
# coin c1 did not come up heads.
p = PrologString("""
coin(c1).
coin(c2).
0.4::heads(C); 0.6::tails(C) :- coin(C).
win :- heads(C).
evidence(heads(c1), false).
query(win).
query(coin(X)).
""")

# Standard problog pipeline: ground -> break cycles -> CNF -> d-DNNF.
ground_program = LogicFormula.create_from(p)
acyclic_program = LogicDAG.create_from(ground_program)
clause_form = CNF.create_from(acyclic_program)
compiled = DDNNF.create_from(clause_form)
results = compiled.evaluate()
print(results)
def evaluate_custom_weights(self, eval_name=None):
    """Exercise evaluate() with custom weights supplied in several forms:
    plain floats, (pos, neg) tuples, node-index keys, and a weight/query
    on node 0 (the True node)."""

    class TestSemiringProbabilityNSP(SemiringProbability):
        # NSP semiring whose pos/neg values may come from (pos, neg)
        # tuples instead of a single probability.
        def is_nsp(self):
            return True

        def pos_value(self, a, key=None):
            # Tuple weights carry the positive value in slot 0.
            if isinstance(a, tuple):
                return float(a[0])
            else:
                return float(a)

        def neg_value(self, a, key=None):
            # Tuple weights carry the negative value in slot 1;
            # scalar weights imply the complement.
            if isinstance(a, tuple):
                return float(a[1])
            else:
                return 1 - float(a)

    program = """ 0.25::a. query(a). """
    pl = PrologString(program)
    lf = LogicFormula.create_from(pl, label_all=True, avoid_name_clash=True)
    semiring = TestSemiringProbabilityNSP()
    kc_class = get_evaluatable(name=eval_name, semiring=semiring)
    kc = kc_class.create_from(lf)
    a = Term('a')
    # without custom weights
    results = kc.evaluate(semiring=semiring)
    self.assertEqual(0.25, results[a])
    # with custom weights
    weights = {a: 0.1}
    results = kc.evaluate(semiring=semiring, weights=weights)
    self.assertEqual(0.1, results[a])
    # with custom (pos, neg) tuple weights: 0.1 / (0.1 + 0.1) = 0.5
    weights = {a: (0.1, 0.1)}
    results = kc.evaluate(semiring=semiring, weights=weights)
    self.assertEqual(0.5, results[a])
    # with custom weights keyed by node index rather than by term
    weights = {kc.get_node_by_name(a): 0.2}
    results = kc.evaluate(semiring=semiring, weights=weights)
    self.assertEqual(0.2, results[a])
    # Testing with weight on node 0 (True); should not affect the
    # normalized result for a.
    weights = {0: 0.3, a: (0.1, 0.1)}
    results = kc.evaluate(semiring=semiring, weights=weights)
    self.assertEqual(0.5, results[a])

    # Testing query on node 0 (True) with normalization disabled.
    # NOTE(review): SemiringProbabilityNSPCopy is defined elsewhere in
    # this test module.
    class TestSemiringProbabilityIgnoreNormalize(SemiringProbabilityNSPCopy):
        def normalize(self, a, z):
            return a

    # Unnormalized: 0.3 * (0.1 + 0.1) = 0.06
    weights = {0: (0.3, 0.7), a: (0.1, 0.1)}
    results = kc.evaluate(
        index=0,
        semiring=TestSemiringProbabilityIgnoreNormalize(),
        weights=weights)
    self.assertEqual(0.06, results)
def main():
    """Load model.txt, compile it, and print the evaluation results."""
    program = PrologFile('model.txt')
    formula = LogicFormula.create_from(program)
    model_evaluatable = get_evaluatable().create_from(formula)
    # BUG FIX: the Python 2 `print x` statement is a SyntaxError on
    # Python 3; the call form works on both interpreters.
    print(model_evaluatable.evaluate())
    return new_weights, iters


# Read in the program with tunable parameters
# ground_progam = file_to_string("test_learn.pl")
# Assign to each tunable parameter a random initialization value
# ground_progam_tunable = init_tunable(ground_progam)
# Write back the prolog file with random initialized tunable probabilities
# f = open('test_learn_tunable.pl', 'w')
# f.write(ground_progam_tunable)

# Compile the (pre-initialized) tunable program to an SDD.
pf = PrologFile("test_learn_tunable.pl")
formula = LogicFormula.create_from(pf)
sdd = SDD.create_from(formula)

# Queries whose probabilities will be estimated during learning.
queries = [
    Term('stress', Term('a')),
    Term('stress', Term('b')),
    Term('stress', Term('c')),
    Term('smokes', Term('a')),
    Term('smokes', Term('b')),
    Term('smokes', Term('c'))
]

# File which holds all the evidence examples
examples = file_to_string('data.pl')
# Retrieve the amount of interpretations that has been specified
# 0.4::b. # 0.3::a :- b. # 0.5::b :- a. # query(a). # evidence(b). # """) p = PrologString(""" 0.4::heads(1). 0.7::heads(2). 0.5::heads(3). win :- heads(1). win :- heads(2), heads(3). query(win). """) lf = LogicFormula.create_from(p, avoid_name_clash=True, keep_order=True, label_all=True) # ground the program # print(lf) # print(LogicFormula.to_prolog(lf)) dag = LogicDAG.create_from( lf, avoid_name_clash=True, keep_order=True, label_all=True) # break cycles in the ground program # print(dag) # print(LogicFormula.to_prolog(dag)) cnf = CNF.create_from(dag) # convert to CNF # for clause in cnf._clauses: # print(clause) ddnnf = DDNNF.create_from(cnf) # compile CNF to ddnnf # Outcome for the a/b thing with query(a) is 0,2+(0,8*0,3*0,4) # but if evidence(b) : (0,2*0,4+0,2*0,5*0,6+0,4*0,3*0,8) / (0,4+0,6*0,5*0,2) [Formula for conditional probability P(a|b) = ...] # For coin thing : 1-0,6*0,3*0,5-0,6*0,3*0,5-0,6*0,7*0,5 = 0,61