def _run_tests():
    from problog.program import PrologString
    from problog.engine import DefaultEngine
    p = PrologString("""
    coin(c1). coin(c2).
    0.4::heads(C); 0.6::tails(C) :- coin(C).
    win :- heads(C).
    """)

    engine = DefaultEngine()
    db = engine.prepare(p)

    # tasks = [
    #     ([Term("win")], []),
    #     ([Term("win")], [(Term("heads", Term("c1")), True)]),
    #     ([Term("win")], [(Term("heads", Term("c1")), False)]),
    # ]
    # for q,e in tasks:
    #     qs.prepare_query(q, e)
    # print(qs.evaluate_queries())

    qs = QuerySession(engine, db)
    inline_queries = [" win | heads(c1).", "win | \+heads(c1).", "win."]
    for iq in inline_queries:
        q, e = qs.transform_inline_query(PrologString(iq)[0])
        qs.prepare_query(q, e)

    result = qs.evaluate_queries()
    print(result)
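For reference, the same conditional probability (win given heads(c1)) can be computed with stock ProbLog by putting query/1 and evidence/2 directives directly in the model, without the QuerySession helper used above; a minimal sketch whose model text mirrors the snippet:

from problog import get_evaluatable
from problog.program import PrologString

model = PrologString("""
coin(c1). coin(c2).
0.4::heads(C); 0.6::tails(C) :- coin(C).
win :- heads(C).
evidence(heads(c1), true).
query(win).
""")
# evaluate() returns a dict mapping the query term to P(win | heads(c1))
print(get_evaluatable().create_from(model).evaluate())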
Example #2
def load_data(filename, engine=None):
    if engine is None:
        engine = DefaultEngine()
        engine.prepare(PrologString(':- unknown(fail).'))

    data = read_data(filename)

    background_pl = list(PrologString('\n'.join(data.get('BACKGROUND', []))))

    language = CModeLanguage.load(data)

    background_pl += language.background

    examples = data.get('', [])
    examples_db = [
        engine.prepare(background_pl + list(PrologString(example_pl)))
        for example_pl in examples
    ]
    instances = Interpretations(
        [Instance(example_db) for example_db in examples_db], background_pl)

    neg_examples = data.get('!', [])
    #print("the negative examples are {}".format("".join(neg_examples)))
    neg_examples_db = [
        engine.prepare(background_pl + list(PrologString(neg_example_pl)))
        for neg_example_pl in neg_examples
    ]
    neg_instances = Interpretations(
        [Instance(neg_example_db) for neg_example_db in neg_examples_db],
        background_pl)

    return language, instances, neg_instances, engine
Example #3
def run_eval_neg(filename, **other):
    from .data import read_data, concat, Interpretations, Instance
    from problog.program import PrologString
    # from problog.logic import AnnotatedDisjunction, Clause
    print("starting eval")
    data = read_data(filename)

    rules = concat(data['RULES'])

    engine = DefaultEngine()
    engine.prepare(PrologString(':- unknown(fail).'))

    background_pl = concat(data.get('BACKGROUND', []))

    examples = data.get('!', [])
    examples_db = [
        engine.prepare(PrologString(background_pl + example_pl))
        for example_pl in examples
    ]
    instances = Interpretations(
        [Instance(example_db) for example_db in examples_db],
        PrologString(background_pl))

    for rule in PrologString(rules):
        clause = Clause.from_logic(rule)
        print('Evaluation of rule:', clause)
        if not clause.validate(instances, engine):
            print('\tRule is invalid')
            #for ex, success in enumerate(clause.successes):
            #    if not success:
            #        print('\t\tExample %s:' % (ex + 1), success)

        else:
            print('\tRule is valid.')
Example #4
    def __init__(self, model_string, networks, caching=False, saving=False):
        self.networks = dict()
        for network in networks:
            self.networks[network.name] = network
            network.model = self
        self.model_string = self.parse(model_string)
        self.engine = (problog.engine.DefaultEngine(),
                       problog.engine.DefaultEngine())
        train_model = self.engine[0].prepare(
            PrologString(self.model_string[0] + '\n' + self.model_string[1]))
        test_model = self.engine[1].prepare(
            PrologString(self.model_string[0] + '\n' + self.model_string[2]))
        self.problog_model = train_model, test_model

        for network in self.networks.values():
            network.register_external(*self.problog_model)
        self.sdd_manager = None
        self.parameters, self.ADs = extract_parameters(train_model)
        self.caching = caching
        self.saving = saving
        self.n = 0
        self.obj_store = list()
        if caching:
            self.sdd_cache = dict()
        if saving:
            import os
            if not os.path.exists('sdd/'):
                os.makedirs('sdd/')
Example #5
def run_ground(model):
    """Ground the program given by model and return an SVG of the resulting formula."""
    model = model[0]
    knowledge = LogicFormula

    #from problog.engine import EngineLogger, SimpleEngineLogger
    #EngineLogger.setClass(SimpleEngineLogger)

    try:
        formula = knowledge.createFrom(PrologString(model))

        handle, filename = tempfile.mkstemp('.dot')
        with open(filename, 'w') as f:
            f.write(formula.toDot())
        print(formula)
        result = subprocess.check_output(['dot', '-Tsvg',
                                          filename]).decode('utf-8')
        content_type = 'application/json'
        #EngineLogger.setClass(None)
        return 200, content_type, json.dumps({
            'svg': result,
            'txt': str(formula)
        })
    except Exception as err:
        #EngineLogger.setClass(None)
        return process_error(err)
Example #6
    def __init__(self, model_str, probabilistic_data=False, relational_data=False):
        # parse the Prolog string
        pl_model_sr = PrologString(model_str)
        # compile the Prolog model
        self.problog_knowledge_sr = get_evaluatable().create_from(pl_model_sr)
        self.probabilistic_data = probabilistic_data
        self.relational_data = relational_data
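A minimal usage sketch for the object compiled above: get_evaluatable().create_from(...) returns a knowledge compilation whose evaluate() yields a {query term: probability} dict; the model text below is purely illustrative.

from problog import get_evaluatable
from problog.program import PrologString

model_str = """
0.5::edge(a, b).
0.7::edge(b, c).
path(X, Y) :- edge(X, Y).
path(X, Y) :- edge(X, Z), path(Z, Y).
query(path(a, c)).
"""
knowledge = get_evaluatable().create_from(PrologString(model_str))
print(knowledge.evaluate())  # {path(a,c): 0.35}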
Example #7
def run_test_with_query_instance():

    from problog.program import PrologString
    from problog.engine import DefaultEngine
    from problog.logic import Term, Var
    p = PrologString("""
    coin(c1). coin(c2).
    0.4::heads(C); 0.6::tails(C) :- coin(C).
    win :- heads(C).
    """)
    from .formula_wrapper import FormulaWrapper

    s_qs, s_evs = ([Term("win")], [(Term("heads",
                                         Term("c1")), False)])  # For now

    engine = DefaultEngine()
    probsr = SemiringProbability()
    fw = FormulaWrapper(engine.prepare(p))
    qobj = AMCQuery(s_qs, s_evs, fw, target_class=DDNNF, semiring=probsr)

    qobj.ground(engine)
    result, ground_evidence = qobj.evaluate(engine)
    print("evidence: ", ground_evidence)
    for r in result:
        print(r)
    print("---")
Example #8
def main():
    p = PrologString("""
    mother_child(trude, sally).
    
    father_child(tom, sally).
    father_child(tom, erica).
    father_child(mike, tom).
    
    sibling(X, Y) :- parent_child(Z, X), parent_child(Z, Y).
    
    parent_child(X, Y) :- father_child(X, Y).
    parent_child(X, Y) :- mother_child(X, Y).
    """)

    sibling = Term('sibling')
    query_term = sibling(None, None)
    engine = DefaultEngine()

    # prepare the model for querying
    model_db = engine.prepare(p)  # This compiles the Prolog model into an internal format.
    # This step is optional, but it might be worthwhile if you
    #  want to query the same model multiple times.

    times_query = test_query_method1(engine, model_db, query_term)
    times_query_extended = test_query_method2(engine, model_db, query_term)

    print("average duration query:", statistics.mean(times_query), "seconds")
    print("average duration query:", statistics.mean(times_query_extended), "seconds")
Example #9
def run_problog(rules, target_rule, target_subject):
    model_string = problog_model(rules)
    model_string += "query({}(\'{}\',_)).".format(target_rule, target_subject)
    print(model_string)
    result = get_evaluatable().create_from(
        PrologString(model_string)).evaluate()
    return result
Example #10
def find_all_prob():
    ps = ""
    with open("prolog/problog_predicates.pl", "r") as f:
        for line in f:
            ps += line

    # Compute the probabilities via ProbLog
    ps += "query(infect(_))."
    p = PrologString(ps)
    dbp = engine.prepare(p)
    lf = LogicFormula.create_from(p)  # ground the program
    dag = LogicDAG.create_from(lf)  # break cycles in the ground program
    cnf = CNF.create_from(dag)  # convert to CNF
    ddnnf = DDNNF.create_from(cnf)  # compile CNF to ddnnf
    r = ddnnf.evaluate()

    # Since ProbLog returns a dictionary structured like this:
    # {query(infect(2)): 0.67, query(infect(3)): 0.8, ...}
    # each id has to be extracted from the key as follows
    items = []
    if len(RedNode.query.all()) > 0:
        for key, value in r.items():
            start = "infect("
            end = ")"
            result = str(key)[len(start):-len(end)]
            try:
                u = User.query.get(int(result))
                items.append((u, value))
            except ValueError:
                continue
    return items
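The manual LogicFormula -> LogicDAG -> CNF -> DDNNF chain used above is essentially what ProbLog's convenience wrapper performs internally (the compilation target may differ depending on which backend is installed); an equivalent sketch using get_evaluatable, with an illustrative model string:

from problog import get_evaluatable
from problog.program import PrologString

p = PrologString("""
0.3::infect(1).
0.6::infect(2).
query(infect(_)).
""")
# grounding, cycle breaking and compilation all happen inside create_from()
r = get_evaluatable().create_from(p).evaluate()  # {infect(1): 0.3, infect(2): 0.6}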
Example #11
    def __init__(self,
                 pos_examples,
                 neg_examples,
                 extra_terms=[],
                 target_name='target'):
        # Define the language of terms
        self.target = Term(target_name)
        self.equal = Term('equal')
        self.pos_examples = pos_examples
        self.neg_examples = neg_examples
        self.examples = pos_examples + neg_examples
        self.extra_terms = extra_terms
        # TODO: check extra terms arity, if greater than target arity, create more variables
        n_target_variables = len(self.examples[0])
        target_variables_names = [
            'X' + str(i) for i in range(1, n_target_variables + 1)
        ]
        self.X = list(map(Var, target_variables_names))
        constants = set()
        for example in self.examples:
            constants.update(example)
        self.c = list(map(Term, [str(constant) for constant in constants]))
        # Initialize the logic program
        self.pl = SimpleProgram()
        self.pl += self.equal(self.X[0], self.X[0])
        self.pl += self.target(*tuple(self.X))
        for extra_term in self.extra_terms:
            self.pl += PrologString(extra_term)
        self.predicates = [self.equal]  # + list(extra_terms.keys())
        self.engine = DefaultEngine()
        self.db = self.engine.prepare(self.pl)
        self.original_rule = list(self.pl)[1]
        self.new_body_literals = []
        print(list(self.pl))
Example #12
    def evaluate_custom_weights(self, eval_name=None):
        class TestSemiringProbabilityNSP(SemiringProbability):
            def is_nsp(self):
                return True

        program = """
                    0.25::a.
                    query(a).
                """
        pl = PrologString(program)
        lf = LogicFormula.create_from(pl,
                                      label_all=True,
                                      avoid_name_clash=True)
        semiring = TestSemiringProbabilityNSP()
        kc_class = get_evaluatable(name=eval_name, semiring=semiring)
        kc = kc_class.create_from(lf)
        a = Term('a')

        # without custom weights
        results = kc.evaluate(semiring=semiring)
        self.assertEqual(0.25, results[a])

        # with custom weights
        weights = {a: 0.1}
        results = kc.evaluate(semiring=semiring, weights=weights)
        self.assertEqual(0.1, results[a])
Example #13
def run_tests_with_static_methods():
    from problog.program import PrologString
    from problog.engine import DefaultEngine
    from problog.logic import Term, Var
    p = PrologString("""
    coin(c1). coin(c2).
    0.4::heads(C); 0.6::tails(C) :- coin(C).
    win :- heads(C).
    """)

    qs, evs = ([Term("win")], [(Term("heads", Term("c1")), False)])  # For now

    engine = DefaultEngine()
    db = engine.prepare(p)
    labels = (LogicFormula.LABEL_QUERY, LogicFormula.LABEL_EVIDENCE_POS,
              LogicFormula.LABEL_EVIDENCE_NEG)
    lf = LogicFormula()
    lf = AMCQuery.ground_query_evidence(engine, db, qs, evs, lf, labels)

    circuit = AMCQuery.compile_to_circuit(lf, "ddnnf")
    prob_sr = SemiringProbability()
    results, ground_evidence = AMCQuery.evaluate_circuit(
        circuit, labels, prob_sr)
    print("evidence: ", ground_evidence)
    for r in results:
        print(r)
    print("---")
Example #14
    def test_anonymous_variable(self):
        """Anonymous variables are distinct"""

        program = """
            p(_,X,_) :- X = 3.

            q(1,2,3).
            q(1,2,4).
            q(2,3,5).
            r(Y) :- q(_,Y,_).

        """

        engine = DefaultEngine()
        db = engine.prepare(PrologString(program))
        self.assertEqual(
            list(
                map(
                    list,
                    engine.query(
                        db, Term("p", Constant(1), Constant(3), Constant(2))),
                )),
            [[Constant(1), Constant(3), Constant(2)]],
        )

        self.assertEqual(list(map(list, engine.query(db, Term("r", None)))),
                         [[2], [3]])
Example #15
    def test_cycle_goodcode(self):
        N = 20
        program = self.program_v1[:]

        for i in range(0, N):
            seed = str(random.random())[2:]
            random.seed(seed)
            random.shuffle(program)
            txt = "\n".join(program)
            f = DefaultEngine(label_all=True).ground_all(PrologString(txt))
            paths = list(list_paths(f))

            edges = set()
            for p in paths:
                for i in range(0, len(p) - 1):
                    edges.add((int(p[i]), int(p[i + 1])))
            edges = list(sorted(edges))

            # if (edges != self.edges) :
            #     with open('cycle_error.pl', 'w') as f :
            #         print(txt, file=f)
            #     with open('cycle_error.dot', 'w') as f :
            #         print('digraph CycleError {', file=f)
            #         for edge in edges :
            #             print('%s -> %s;' % edge, file=f)
            #         print('}', file=f)

            self.assertCollectionEqual(self.edges,
                                       edges,
                                       msg="Test failed for random seed %s" %
                                       seed)
Example #16
def run_dtproblog(node_size):
    model_text = generate_program(node_size)
    print(model_text)
    program = PrologString(model_text)
    decisions, score, statistics = dtproblog(program)
    print("++++++++ Program for node size = "+ str(node_size)+"++++++++")
    for name, value in decisions.items():
        print ('%s: %s' % (name, value))
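generate_program is not included in the snippet; for context, a minimal sketch of the kind of model dtproblog expects, with ?:: decision facts and utility/2 declarations (the facts and utilities below are illustrative):

from problog.program import PrologString
from problog.tasks.dtproblog import dtproblog

model = PrologString("""
?::umbrella.
0.3::rain.
dry :- rain, umbrella.
dry :- \\+rain.
utility(dry, 10).
utility(umbrella, -2).
""")
decisions, score, stats = dtproblog(model)
print(decisions, score)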
Example #17
def runproblogsampling(s, n=5, output='html'):
    model = PrologString(s)
    samples = plsample.sample(model, n=n, tuples=True)
    result = ''
    for sample in samples:
        result += ','.join(
            str(Term(query[0], *query[1:-1])) for query in sample) + '<br/>'
    if output == 'html':
        return '<pre>{}</pre>'.format(result)
    return result
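plsample is not imported in this snippet; presumably it is an alias for ProbLog's sampling task, something along the lines of the following (an assumption, not shown in the original code):

from problog.tasks import sample as plsample  # provides plsample.sample(model, n=...)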
Example #18
def main(file):
    pl = PrologString(file)
    # pl = PrologFile(file)
    operator = SumOperator()
    result = solve(pl, WmiSemiring(operator.neutral_element), operator)
    for k, v in result.items():
        (formula, integral) = v
        # print('%s(%s): %s\n%s' % (operator, k, integral, formula))
        # print(formula.key)
    return integral
Example #19
    def test_implementation_predicate(self):
        model = (self.module_import + self.load_csv +
                 """magic_models:X :-{}(magic_tables,
                               [column('T1', 2), column('T1', 5)],
                               [column('T1', 3)],
                               X).
        query(magic_models:{}(_)).
        """.format(self.input_predicate, self.output_predicate))
        result = get_evaluatable().create_from(PrologString(model)).evaluate()
        self.assertEqual(len(result), 1)
Example #20
def learn_model(fold_i):
    fold_n = fold_i + 1
    print(f"Learning fold {fold_n} @ {datetime.now()}")

    model = get_untrained_model(fold_n)

    score, weights, atoms, iteration, lfi_problem = lfi.run_lfi(
        PrologString(model), examples=[])
    learned_model = lfi_problem.get_model()
    with open(pjoin("models", f"model{fold_n}.pl"), "w") as f:
        f.write(learned_model + "\n")
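get_untrained_model and the data paths are project-specific; for context, a minimal sketch of an lfi.run_lfi call with tunable t(_) parameters and a non-empty example list (the model and observations below are illustrative):

from problog.logic import Term
from problog.program import PrologString
from problog.learning import lfi

model = PrologString("""
t(_)::burglary.
0.2::earthquake.
alarm :- burglary.
alarm :- earthquake.
""")
# each training example is a list of (atom, observed truth value) pairs
examples = [
    [(Term("alarm"), True)],
    [(Term("alarm"), False)],
    [(Term("alarm"), True)],
]
score, weights, atoms, iterations, lfi_problem = lfi.run_lfi(model, examples)
print(lfi_problem.get_model())  # the input model with learned probabilities filled in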
Example #21
    def test_compare(self):
        """Comparison operator"""

        program = """
            morning(Hour) :- Hour >= 6, Hour =< 10.
        """

        engine = DefaultEngine()
        db = engine.prepare(PrologString(program))

        self.assertEqual(list(map(list, engine.query(db, Term('morning', Constant(8))))), [[8]])
Example #22
def runproblogsampling(s, n=5, output="html"):
    model = PrologString(s)
    samples = plsample.sample(model, n=n, tuples=True)
    result = ""
    for sample in samples:
        result += (
            ",".join(str(Term(query[0], *query[1:-1])) for query in sample) + "<br/>"
        )
    if output == "html":
        return "<pre>{}</pre>".format(result)
    return result
Example #23
    def get_timestamps(self):
        model = PrologString(self.model + '\n\nquery(allTimeStamps(TPs)).')

        knowledge = get_evaluatable().create_from(model)

        timestamps = [
            term_to_list(term.args[0]) for term in knowledge.evaluate().keys()
            if term.functor == 'allTimeStamps'
        ]

        return sorted([item for sublist in timestamps for item in sublist])
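term_to_list is project-specific; ProbLog's own problog.logic.term2list would likely do the same job for regular Prolog list terms. A small sketch with an illustrative model:

from problog import get_evaluatable
from problog.program import PrologString
from problog.logic import term2list

model = PrologString("""
allTimeStamps([1, 3, 7]).
query(allTimeStamps(_)).
""")
for term in get_evaluatable().create_from(model).evaluate():
    print(term2list(term.args[0]))  # the elements of the Prolog list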
Example #24
def find_user_prob(uid):
    ps = ""
    with open("prolog/problog_predicates.pl", "r") as f:
        for line in f:
            ps += line

    # Clean up the dynamic date/1 nodes inside ProbLog
    p = PrologString(ps)
    dbp = engine.prepare(p)
    query = Term("clean")
    res = engine.query(dbp, query)

    # Compute the probability via ProbLog
    ps += "query(infect(" + str(uid) + "))."
    p = PrologString(ps)
    dbp = engine.prepare(p)
    lf = LogicFormula.create_from(p)  # ground the program
    dag = LogicDAG.create_from(lf)  # break cycles in the ground program
    cnf = CNF.create_from(dag)  # convert to CNF
    ddnnf = DDNNF.create_from(cnf)  # compile CNF to ddnnf
    r = ddnnf.evaluate()

    # Store in the SQLite database the date of the oldest red node the user was in contact with
    term = Term("date", None)
    database = problog_export.database  # ProbLog's internal database where facts asserted with assertz() are stored
    node_key = database.find(term)
    if node_key is not None:
        node = database.get_node(node_key)
        dates = node.children.find(
            term.args)  # all date/1 facts inserted with assertz/1
        vals = []
        if dates:
            for date in dates:
                n = database.get_node(date)
                vals.append(int(n.args[0]))
        min_val = min(vals)  # find the minimum date (in milliseconds)
        u = User.query.get(uid)
        u.oldest_risk_date = min_val
        db.session.commit()

    return r
Example #25
def read_examples(*filenames):

    for filename in filenames:
        engine = DefaultEngine()

        with open(filename) as f:
            example = ''
            for line in f:
                if line.strip().startswith('---'):
                    pl = PrologString(example)
                    atoms = extract_evidence(pl)
                    if len(atoms) > 0:
                        yield atoms
                    example = ''
                else:
                    example += line
            if example:
                pl = PrologString(example)
                atoms = extract_evidence(pl)
                if len(atoms) > 0:
                    yield atoms
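extract_evidence is defined elsewhere in that project; a minimal sketch of one way to pull evidence facts out of a parsed example, assuming the examples contain only ground evidence/1 and evidence/2 facts (the helper is illustrative, not the original implementation):

from problog.logic import Term

def extract_evidence(pl):
    """Collect (atom, truth value) pairs from evidence/1 and evidence/2 facts."""
    atoms = []
    for statement in pl:
        if isinstance(statement, Term) and statement.functor == "evidence":
            if statement.arity == 1:
                atoms.append((statement.args[0], True))
            elif statement.arity == 2:
                atoms.append((statement.args[0], str(statement.args[1]) != "false"))
    return atoms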
Example #26
def evaluate_model(fold_i):
    print(f"Testing fold {fold_i} @ {datetime.now()}")
    fold_n = fold_i + 1

    test = get_text(f"data/5folds-processed/fold{fold_n}", "test_neg.pl")
    learned_model = get_text("models", f"model{fold_n}.pl")

    pl_model = PrologString(learned_model + "\n" + test)

    knowledge = get_evaluatable().create_from(pl_model)

    pprint(knowledge.evaluate())
Example #27
    def test_predicates_objects(self):
        model = (self.module_import + self.load_csv +
                 """magic_models:X :-{}(magic_tables,
                               [column('T1', 2), column('T1', 5)],
                               [column('T1', 3)],
                               X).
                same_objects :- magic_models:predictor(T), magic_models:{}(T).
        query(same_objects).
        """.format(self.input_predicate, self.output_predicate))
        result = get_evaluatable().create_from(PrologString(model)).evaluate()
        self.assertEqual(len(result), 1)
        for term, proba in result.items():
            self.assertEqual(proba, 1)
Example #28
    def learn(self):
        '''Builds the ProbLog objects used to evaluate the inference.'''
        try:
            knowledge_str = ''
            for predicate in self.conoscenza_prob:
                knowledge_str += predicate + '\n'

            knowledge_str = PrologString(knowledge_str)
            self.problog = DefaultEngine()
            self.knowledge_database = self.problog.prepare(knowledge_str)

        except Exception as e:
            print(e)
Example #29
def _run_tests():
    def _format_evidence(et):
        return "%s%s"%("" if et[1] else "~", et[0])

    def _print_result(queries, evidence, query_probs):
        print("----")
        if evidence:
            print("% Evidence: ", ", ".join(_format_evidence(ev) for ev in evidence))
            print(" . . . ")
        print("\n".join(["%s: %f"%(str(k), query_probs[k]) for k in query_probs]))
        print("----")

    pbl = ProblogWrapper()
    p1 = (1, """
    0.2::foo(2); 0.3::foo(3); 0.5::foo(5):- bar(Z).
    """)

    p2 = (None, """
    bar(a).
    0.7::bar(b).

    evidence(\+bar(b)).
    query(foo(X)).
    """)

    for cid, p in [p1, p2]:
        q, e = pbl.process_cell(cid, PrologString(p))
        res = pbl.query(q,e)
        if q:
            _print_result(q, e, res)

    p1 = (1, """ 0.1::foo(a); 0.25::foo(b); 0.65::foo(c):- bar(Z).""")

    for (cid, p) in [p1, p2]:
        q, e = pbl.process_cell(cid, PrologString(p))
        res = pbl.query(q,e)
        if q:
            _print_result(q, e, res)
Example #30
    def test_source_predicate_objects(self):
        expected_columns = ["column('T1',5)", "column('T1',3)"]
        model = (self.module_import + self.load_csv +
                 """magic_models:X :-{}(magic_tables,
                               [{}],
                               [column('T1', 2)],
                               X).
                same_objects(C) :- magic_models:source(T, C), magic_models:{}(T).
        query(same_objects(C)).
        """.format(self.input_predicate, ",".join(expected_columns),
                   self.output_predicate))
        result = get_evaluatable().create_from(PrologString(model)).evaluate()
        self.assertEqual(len(result), len(expected_columns))
        for term, proba in result.items():
            self.assertEqual(proba, 1)