Example #1
0
def run_problog(rules, target_rule, target_subject):
    """Build a ProbLog model from *rules*, add a query for
    target_rule(target_subject, _), compile it, and return the evaluation."""
    model = problog_model(rules)
    model += "query({}(\'{}\',_)).".format(target_rule, target_subject)
    print(model)
    compiled = get_evaluatable().create_from(PrologString(model))
    return compiled.evaluate()
Example #2
0
    def evaluate(self,
                 rule,
                 functor=None,
                 arguments=None,
                 ground_program=None):
        """Evaluate the given rule.

        :param rule: rule to evaluate
        :type rule: Rule
        :param functor: override rule functor
        :type functor: str
        :param arguments: query arguments (None if non-ground)
        :type arguments: list[tuple[Term]] | None
        :param ground_program: use pre-existing ground program (perform ground if None)
        :type ground_program: LogicFormula | None
        :return: dictionary of results
        :rtype: dict[Term, float]
        """
        # Ground on demand only when no pre-built ground program was supplied.
        gp = (self.ground(rule, functor, arguments)
              if ground_program is None else ground_program)

        with Timer("Compiling", logger="probfoil"):
            compiled = get_evaluatable().create_from(gp)

        with Timer("Evaluating", logger="probfoil"):
            outcome = compiled.evaluate()

        return outcome
Example #3
0
 def __init__(self, model_str, probabilistic_data=False, relational_data=False):
     """Parse and compile the given Prolog model string, remembering the
     data-mode flags for later use."""
     parsed_model = PrologString(model_str)
     self.problog_knowledge_sr = get_evaluatable().create_from(parsed_model)
     self.probabilistic_data = probabilistic_data
     self.relational_data = relational_data
Example #4
0
    def evaluate_custom_weights(self, eval_name=None):
        """Check that evaluate() honours an explicit `weights` override
        when a non-standard-probability (NSP) semiring is used."""

        class TestSemiringProbabilityNSP(SemiringProbability):
            def is_nsp(self):
                return True

        program = """
                    0.25::a.
                    query(a).
                """
        formula = LogicFormula.create_from(PrologString(program),
                                           label_all=True,
                                           avoid_name_clash=True)
        semiring = TestSemiringProbabilityNSP()
        compiled = get_evaluatable(name=eval_name,
                                   semiring=semiring).create_from(formula)
        term_a = Term('a')

        # default: weight comes from the program itself
        self.assertEqual(0.25, compiled.evaluate(semiring=semiring)[term_a])

        # explicit weight overrides the program weight
        overridden = compiled.evaluate(semiring=semiring, weights={term_a: 0.1})
        self.assertEqual(0.1, overridden[term_a])
Example #5
0
def mpe_semiring(lf, verbose=0, solver=None):
    """Compute the MPE state of *lf* by turning the evidence conjunction
    into a single query and evaluating it under the MPE semiring.

    Returns a (probability, facts) pair; trivially (1.0, []) when there is
    no evidence.
    """
    semiring = SemiringMPEState()
    kc_class = get_evaluatable(semiring=semiring)

    if not lf.evidence():
        return 1.0, []

    # Query = evidence + constraints
    evidence_nodes = [node for _, node in lf.evidence()]
    qn = lf.add_and(evidence_nodes)
    lf.clear_evidence()

    # any user queries are ignored in this mode
    if lf.queries():
        print('%% WARNING: ignoring queries in file', file=sys.stderr)
    lf.clear_queries()

    query_name = Term('query')
    lf.add_query(query_name, qn)

    kc = kc_class.create_from(lf)
    results = kc.evaluate(semiring=semiring)
    prob, facts = results[query_name]
    return prob, facts
Example #6
0
    def evaluate(self, rule, functor=None, arguments=None, ground_program=None):
        """Ground *rule* (unless a ground program is supplied), compile it
        into an evaluatable (e.g. a d-DNNF), and return the evaluation."""
        if ground_program is None:
            ground_program = self.ground(rule, functor, arguments)

        # compile all groundings, then read off a probability per grounding
        compiled = get_evaluatable().create_from(ground_program)
        return compiled.evaluate()
Example #7
0
    def evaluate(self, evaluatable_name=None, custom_semiring=None):
        """Run the test file and compare against the expected `correct`
        outcome (either a dict of query probabilities, or the name of an
        expected exception class)."""
        computed = None
        try:
            parser = DefaultPrologParser(ExtendedPrologFactory())
            kc = get_evaluatable(name=evaluatable_name).create_from(
                PrologFile(filename, parser=parser))

            if custom_semiring is not None:
                semiring = custom_semiring  # forces the custom semiring code.
            elif logspace:
                semiring = SemiringLogProbability()
            else:
                semiring = SemiringProbability()

            raw = kc.evaluate(semiring=semiring)
            computed = {str(k): v for k, v in raw.items()}
        except Exception as err:
            e = err
            computed = None

        if computed is None:
            # the expected outcome is the exception class name
            self.assertEqual(correct, type(e).__name__)
        else:
            self.assertIsInstance(correct, dict)
            self.assertSequenceEqual(correct, computed)

            for query in correct:
                self.assertAlmostEqual(correct[query],
                                       computed[query],
                                       msg=query)
Example #8
0
 def test_implementation_predicate(self):
     """The output predicate query must yield exactly one result."""
     model = (self.module_import + self.load_csv +
              """magic_models:X :-{}(magic_tables,
                            [column('T1', 2), column('T1', 5)],
                            [column('T1', 3)],
                            X).
     query(magic_models:{}(_)).
     """.format(self.input_predicate, self.output_predicate))
     evaluated = get_evaluatable().create_from(PrologString(model)).evaluate()
     self.assertEqual(len(evaluated), 1)
Example #9
0
    def evaluate(self, appendstr=""):
        """Evaluate the program.

        appendstr -- additional string to add to the program only for evaluation

        """
        # the appended text is used for this evaluation only; the stored
        # program is left untouched
        full_source = self.__program + "\n" + appendstr + "\n"
        model = problog.program.PrologString(full_source)
        return problog.get_evaluatable().create_from(model).evaluate()
Example #10
0
    def get_timestamps(self):
        """Query allTimeStamps/1 on the model and return every timestamp,
        flattened into a single sorted list."""
        model = PrologString(self.model + '\n\nquery(allTimeStamps(TPs)).')
        knowledge = get_evaluatable().create_from(model)

        timestamp_lists = []
        for term in knowledge.evaluate().keys():
            if term.functor == 'allTimeStamps':
                timestamp_lists.append(term_to_list(term.args[0]))

        flattened = [ts for sublist in timestamp_lists for ts in sublist]
        return sorted(flattened)
Example #11
0
def evaluate_model(fold_i):
    """Evaluate the learned model of one cross-validation fold against its
    negative test set and pretty-print the query results."""
    print(f"Testing fold {fold_i} @ {datetime.now()}")
    fold_n = fold_i + 1

    # folds are 1-based on disk
    test_set = get_text(f"data/5folds-processed/fold{fold_n}", "test_neg.pl")
    model_text = get_text("models", f"model{fold_n}.pl")

    knowledge = get_evaluatable().create_from(
        PrologString(model_text + "\n" + test_set))
    pprint(knowledge.evaluate())
Example #12
0
    def get_examples_satisfying_query(self, clause_db_examples: Iterable[ClauseDBExampleWrapper], query) -> Tuple[Set[ExampleWrapper], Set[ExampleWrapper]]:
        """Partition the examples into those satisfying `query` and the rest.

        For each example, its ClauseDB is extended with the example's
        classification term (if any), a `query(self.to_query)` directive, and
        the rule `self.to_query :- query`; the result is compiled and
        evaluated. An example satisfies the query when the evaluated
        probability of `self.to_query` exceeds 0.5.

        As a side effect, per-phase timing statistics (get_evaluatable,
        structure creation, evaluation) are accumulated on `self`.

        :param clause_db_examples: examples wrapped around ClauseDB programs
        :param query: body defining self.to_query
        :return: (satisfying, not_satisfying) sets of example wrappers
        """
        examples_satisfying_query = set()
        examples_not_satisfying_query = set()

        for clause_db_ex in clause_db_examples:  # type: ClauseDBExampleWrapper
            db_to_query = clause_db_ex.extend()  # type: ClauseDB
            if clause_db_ex.classification_term is not None:
                db_to_query += clause_db_ex.classification_term

            # db_to_query = example_db.extend()
            db_to_query += Term('query')(self.to_query)
            db_to_query += (self.to_query << query)

            # three timed phases: evaluatable lookup, compilation, evaluation
            start_time = time.time()
            evaluatable = problog.get_evaluatable()
            mid_time1 = time.time()
            something = evaluatable.create_from(db_to_query, engine=self.engine)
            mid_time2 = time.time()
            query_result = something.evaluate()
            end_time = time.time()

            self.nb_partitions_calculated += 1

            get_evaluatable_duration = mid_time1 - start_time
            self.sum_get_evaluatable += get_evaluatable_duration

            # running sum / max / min / near-zero count for compilation
            structure_creation_duration = mid_time2 - mid_time1
            self.sum_structure_creation_duration += structure_creation_duration
            if structure_creation_duration > self.max_structure_creation_duration:
                self.max_structure_creation_duration = structure_creation_duration
            if structure_creation_duration < self.min_structure_creation_duration:
                self.min_structure_creation_duration = structure_creation_duration
            if structure_creation_duration < 0.000001:
                self.nb_structure_creation_zero += 1

            # same bookkeeping for the evaluation phase
            evalutation_duration = end_time - mid_time2
            self.sum_evaluation_duration += evalutation_duration
            if evalutation_duration > self.max_evaluation_duration:
                self.max_evaluation_duration = evalutation_duration
            if evalutation_duration < self.min_evaluation_duration:
                self.min_evaluation_duration = evalutation_duration
            if evalutation_duration < 0.000001:
                self.nb_evaluation_zero += 1

            # query_result = problog.get_evaluatable().create_from(db_to_query, engine=self.engine).evaluate()

            if query_result[self.to_query] > 0.5:
                examples_satisfying_query.add(clause_db_ex)
            else:
                examples_not_satisfying_query.add(clause_db_ex)

        return examples_satisfying_query, examples_not_satisfying_query
Example #13
0
 def test_predicates_objects(self):
     """Objects produced by the predictor must also satisfy the output
     predicate (probability 1)."""
     model = (self.module_import + self.load_csv +
              """magic_models:X :-{}(magic_tables,
                            [column('T1', 2), column('T1', 5)],
                            [column('T1', 3)],
                            X).
             same_objects :- magic_models:predictor(T), magic_models:{}(T).
     query(same_objects).
     """.format(self.input_predicate, self.output_predicate))
     evaluated = get_evaluatable().create_from(PrologString(model)).evaluate()
     self.assertEqual(len(evaluated), 1)
     for _, probability in evaluated.items():
         self.assertEqual(probability, 1)
Example #14
0
def mpe_semiring(lf, verbose=0, solver=None, minpe=False):
    """Compute the most (or, with `minpe`, least) probable explanation of `lf`.

    The evidence nodes are conjoined into a single query node. Any user
    queries are kept visible in the resulting state by conjoining a
    (q OR not q) tautology per query atom, and the returned facts are then
    restricted back to the query literals.

    :param lf: ground logic formula carrying evidence (and optional queries)
    :param verbose: accepted but not read here
    :param solver: accepted but not read here
    :param minpe: select SemiringMinPEState instead of SemiringMPEState
    :return: (probability, facts) pair; (1.0, []) when there is no evidence
    """
    if minpe:
        semiring = SemiringMinPEState()
    else:
        semiring = SemiringMPEState()
    kc_class = get_evaluatable(semiring=semiring)

    if lf.evidence():
        # Query = evidence + constraints
        qn = lf.add_and([y for x, y in lf.evidence()])
        lf.clear_evidence()

        if lf.queries():
            non_atom = []
            atom = []
            qs = []
            for qnm, qi in lf.queries():
                if lf.is_probabilistic(qi):
                    if type(lf.get_node(qi)).__name__ != "atom":
                        # compound (non-atom) queries cannot appear in the output
                        non_atom.append(qnm)
                # NOTE(review): every query node is collected here, even
                # non-probabilistic ones — confirm this is intended.
                atom.append(qi)
                qs += [qnm, -qnm]
            qs = set(qs)
            if non_atom:
                print(
                    "WARNING: compound queries are not supported in the output: %s"
                    % ", ".join(map(str, non_atom)),
                    file=sys.stderr,
                )

            # conjoin a tautology (qi OR not qi) per query so each query atom
            # gets a definite truth value in the MPE state
            qn = lf.add_and(
                [lf.add_or((qi, lf.negate(qi)), compact=False) for qi in atom] + [qn]
            )

        else:
            qs = None
        lf.clear_queries()

        query_name = Term("query")
        lf.add_query(query_name, qn, keep_name=True)

        kc = kc_class.create_from(lf)

        results = kc.evaluate(semiring=semiring)
        prob, facts = results[query_name]
        if qs is not None:
            # keep only literals that correspond to user queries
            facts &= qs
    else:
        prob, facts = 1.0, []

    return prob, facts
Example #15
0
    def compile(self, terms=()):
        """
        Create compiled knowledge database from ground program.
        Return mapping of `terms` to nodes in the compiled knowledge database.

        :param terms: list of predicates
        :type terms: list of problog.logic.Term
        :rtype: dict of (problog.logic.Term, int)
        """
        # NOTE: default changed from the mutable `[]` (a single list shared
        # across all calls) to an immutable tuple; caller-visible behaviour
        # is unchanged since `terms` is only iterated.
        self._knowledge = get_evaluatable(None).create_from(self._gp)
        return {term: self._knowledge.get_node_by_name(term) for term in terms}
Example #16
0
    def query(self, queries, kb=None, prob_override=None):
        """Evaluate *queries* against this database (or an extension of *kb*).

        :param queries: iterable of query terms, each added as a query/1 fact
        :param kb: optional knowledge base to extend instead of copying self
        :param prob_override: factor to scale every result probability by;
            defaults to self._prob
        :return: dict mapping result terms to their scaled probabilities
        """
        if kb is None:
            copyDb = self.copy()
        else:
            copyDb = kb.extend()
        # BUG FIX: was `prob_override == None`; identity comparison is the
        # correct (and PEP 8 mandated) test for None.
        if prob_override is None:
            prob_override = self._prob
        for query in queries:
            copyDb += Term('query', query)

        ret = get_evaluatable().create_from(copyDb).evaluate()
        # scale each probability in place by the override factor
        for k, v in ret.items():
            ret[k] = v * prob_override
        return ret
Example #17
0
    def compile(self, terms=()):
        """
        Create compiled knowledge database from ground program.
        Return mapping of `terms` to nodes in the compiled knowledge database.

        :param terms: list of predicates
        :type terms: list of problog.logic.Term
        :rtype: dict of (problog.logic.Term, int)
        """
        # NOTE: default changed from the mutable `[]` (a single list shared
        # across all calls) to an immutable tuple; caller-visible behaviour
        # is unchanged since `terms` is only iterated.
        self._knowledge = get_evaluatable(None).create_from(self._gp)
        return {term: self._knowledge.get_node_by_name(term) for term in terms}
Example #18
0
def test_query_method2(engine, model_db, query_term):
    """Time 100 compile+evaluate cycles for *query_term* on an extended copy
    of *model_db*; return the list of per-cycle durations in seconds."""
    durations = []

    extended_db = model_db.extend()
    extended_db += Term('query')(query_term)
    evaluatable = problog.get_evaluatable()

    for _ in range(100):
        start = timeit.default_timer()
        evaluatable.create_from(extended_db, engine=engine).evaluate()
        stop = timeit.default_timer()
        # collect between iterations so GC pauses don't pollute the timings
        gc.collect()
        durations.append(stop - start)
    return durations
Example #19
0
 def test_source_predicate_objects(self):
     """Every object produced by the source predicate must also satisfy the
     output predicate (probability 1, one result per expected column)."""
     expected_columns = ["column('T1',5)", "column('T1',3)"]
     model = (self.module_import + self.load_csv +
              """magic_models:X :-{}(magic_tables,
                            [{}],
                            [column('T1', 2)],
                            X).
             same_objects(C) :- magic_models:source(T, C), magic_models:{}(T).
     query(same_objects(C)).
     """.format(self.input_predicate, ",".join(expected_columns),
                self.output_predicate))
     evaluated = get_evaluatable().create_from(PrologString(model)).evaluate()
     self.assertEqual(len(evaluated), len(expected_columns))
     for _, probability in evaluated.items():
         self.assertEqual(probability, 1)
Example #20
0
def get_labels_single_example_models(example: SimpleProgram,
                                     rules: SimpleProgram,
                                     possible_labels: Iterable[str],
                                     background_knowledge=None,
                                     debug_printing=False) -> List[str]:
    """Classify a single example by querying each candidate label separately.

    A label is assigned when its query evaluates to a probability above 0.5.

    :param example: the example's statements
    :param rules: classification rules
    :param possible_labels: candidate labels to test
    :param background_knowledge: optional program prepared before the rules
    :param debug_printing: when True, print the queried database
    :return: the list of labels assigned to the example
    """
    eng = DefaultEngine()
    eng.unknown = 1

    # Seed the database: background knowledge first (if any), then the
    # example statements, then — only in the background case — the rules.
    if background_knowledge is not None:
        db = eng.prepare(background_knowledge)
        for statement in example:
            db += statement
        for rule in rules:
            db += rule
    else:
        db = eng.prepare(rules)
        for statement in example:
            db += statement

    if debug_printing:
        print('\nQueried database:')
        for statement in db:
            print('\t' + str(statement))

    labels_found = []
    for label in possible_labels:
        # each label gets its own extension so queries do not accumulate
        extended_db = db.extend()
        extended_db += Term('query')(label)
        start_time = time.time()
        result = problog.get_evaluatable().create_from(extended_db,
                                                       engine=eng).evaluate()
        end_time = time.time()
        print("call time:", end_time - start_time)

        if result[label] > 0.5:
            labels_found.append(label)

    return labels_found
Example #21
0
 def ad_atom_duplicate(self, eval_name=None):
     """
     This test must pickup the case where during the transformation, additional _extra atoms are created because
     add_atom(..., cr_extra=True) is used instead of cr_extra=False.
     """
     program = """
                 0.2::a ; 0.8::b.
                 query(a).
                 query(b).
             """
     formula = LogicFormula.create_from(PrologString(program),
                                        label_all=True,
                                        avoid_name_clash=True)
     semiring = SemiringProbability()
     compiled = get_evaluatable(name=eval_name,
                                semiring=semiring).create_from(formula)  # type: LogicFormula
     # exactly three atoms expected — no duplicated _extra atoms
     self.assertEqual(3, compiled.atomcount)
Example #22
0
def main(argv, result_handler=None):
    """Entry point: parse arguments, run LFI learning, and report the result.

    :param argv: command-line arguments (excluding the program name)
    :param result_handler: callable receiving a (success, payload) tuple;
        defaults to JSON output in web mode, plain text otherwise
    """
    parser = argparser()
    args = parser.parse_args(argv)

    if result_handler is None:
        if args.web:
            result_handler = print_result_json
        else:
            result_handler = print_result

    knowledge = get_evaluatable(args.koption)

    if args.output is None:
        outf = sys.stdout
    else:
        outf = open(args.output, 'w')

    create_logger('problog_lfi', args.verbose)
    create_logger('problog', args.verbose - 1)

    program = PrologFile(args.model)
    examples = list(read_examples(*args.examples))
    if len(examples) == 0:
        # BUG FIX: Logger.warn is a deprecated alias of Logger.warning
        logging.getLogger('problog_lfi').warning('no examples specified')
    else:
        logging.getLogger('problog_lfi').info('Number of examples: %s' %
                                              len(examples))
    # run_lfi takes the remaining options as keyword arguments
    options = vars(args)
    del options['examples']

    try:
        results = run_lfi(program, examples, knowledge=knowledge, **options)

        # resolve node locations back to source line numbers for reporting
        for n in results[2]:
            n.loc = program.lineno(n.location)
        retcode = result_handler((True, results), output=outf)
    except Exception as err:
        trace = traceback.format_exc()
        err.trace = trace
        retcode = result_handler((False, err), output=outf)

    if args.output is not None:
        outf.close()

    if retcode:
        sys.exit(retcode)
Example #23
0
def FSM_core(pattern_detect_model,
             state_info,
             org_uniq_event,
             current_state,
             current_input,
             diagnose=0):
    """Run one probabilistic FSM transition step through a ProbLog model.

    Placeholders of the form `p_s_<state>_` / `p_e_<event>_` in the model
    template are replaced textually with the current state and input
    probabilities, the model is evaluated, and the next-state distribution
    is returned.

    :param pattern_detect_model: ProbLog model template with placeholders
    :param state_info: ordered iterable of state identifiers
    :param org_uniq_event: ordered iterable of event identifiers
    :param current_state: per-state probabilities, aligned with state_info
    :param current_input: per-event probabilities; the LAST entry is the
        "don't care" probability
    :param diagnose: when truthy, print diagnostic information
    :return: numpy array of next-state probabilities, ordered like state_info
    """
    # definition of FSM model
    problog_model = pattern_detect_model

    # naive way of assigning probabilities: textual placeholder substitution
    for idx, state_i in enumerate(state_info):
        problog_model = problog_model.replace(('p_s_' + str(state_i) + '_'),
                                              str(current_state[idx]))
    for idx, event_i in enumerate(org_uniq_event):
        problog_model = problog_model.replace(('p_e_' + str(event_i) + '_'),
                                              str(current_input[idx]))
    # change prob for dont_care (last entry of the input vector)
    problog_model = problog_model.replace(('p_e_dont_care_'),
                                          str(current_input[-1]))

    result = get_evaluatable().create_from(
        PrologString(problog_model)).evaluate()

    # naive way of getting ProbLog inference result: stringify the term keys
    py_result = {}
    for i in result:
        py_result[str(i)] = result[i]

    next_state = np.array(
        [py_result['n_state(' + str(state_i) + ')'] for state_i in state_info])

    if diagnose:
        print('============= Diagnose Mode =================')
        # BUG FIX: `uniq_event` was undefined here (NameError whenever
        # diagnose was enabled); the event list is the org_uniq_event param.
        print('Current state: \t', current_state, state_info, '\nInput: \t',
              current_input, org_uniq_event)
        # print the result:
        print('Next state: \t', next_state)
        print('\n')
    return next_state
Example #24
0
    def test_source_predicates(self):
        """Each expected source column must appear exactly once in the
        query results."""
        expected_columns = ["column('T1',5)", "column('T1',2)"]
        model = (self.module_import + self.load_csv +
                 """magic_models:X :-{}(magic_tables,
                                       [{}],
                                       [column('T1', 3)],
                                       X).

                query(magic_models:source(_, _)).
                """.format(self.input_predicate, ",".join(expected_columns)))
        evaluated = get_evaluatable().create_from(PrologString(model)).evaluate()
        self.assertEqual(len(evaluated), len(expected_columns))

        # each result consumes one expected column, so duplicates would fail
        for term, _ in evaluated.items():
            self.assertEqual(len(term.args), 2)
            column_str = term2str(term.args[1].args[1])
            self.assertIn(column_str, expected_columns)
            expected_columns.remove(column_str)
Example #25
0
    def calc_probabilities_of_utilities(self, offer: Offer) -> Dict[str, float]:
        """
        Uses ProbLog and the known knowledge_base to calculate the probability
        of each of the known utilities to occur so we can use it to calculate the
        expected utility of an offer.

        :param offer: The offer you want to calculate the utility of
        :type offer: Offer
        :return: A dictionary with known utilities as keys and the probability \
            they will be fufilled as values.
        :rtype: Dict[str, float]
        """
        model = self.compile_problog_model(offer)
        # compile with the SDD backend, then stringify the atom keys
        evaluation = get_evaluatable("sdd").create_from(
            PrologString(model)).evaluate()
        return {str(atom): util for atom, util in evaluation.items()}
Example #26
0
    def evaluate(self, instance: Example, test: TILDEQuery) -> bool:
        """Return True when *instance* satisfies *test* with probability > 0.5."""
        query_conj = test.to_conjunction()

        extended_db = self.db.extend()
        for statement in instance.data:
            extended_db += statement

        # TODO: remove ugly hack
        if hasattr(instance, 'classification_term'):
            extended_db += instance.classification_term

        extended_db += (self.to_query << query_conj)

        evaluation = problog.get_evaluatable().create_from(
            extended_db, engine=self.engine).evaluate()

        return evaluation[self.to_query] > 0.5
Example #27
0
    def run_problog_model(self, rules, service_name, current_node, experiment_path):
        """
        executes the model into the prolog engine.

        :param rules:
        :param service_name:
        :param current_node:
        :param experiment_path:
        :return: a list order by agent preference with the highest probabilities
        """
        # read the static rule profile from disk (closed automatically)
        with open(self.rule_profile, "r") as f:
            all_rules = f.read()

        queries = "\nquery(nop(%s)).\n"%service_name
        queries += "query(migrate(%s, X, %s)).\n"%(service_name,current_node)
        queries += "query(replicate(%s, X)).\n"%service_name
        queries += "query(suicide(%s)).\n"%service_name
        queries += "query(fusion(X, Y)).\n"
        queries += "query(priority(X)).\n"


        modeltext = """
        :- use_module(library(lists)).
           route(xxxxxx, path(xxxx, xxx, []), 10, 10).
        %s
        
        """%(all_rules+"\n"+str(rules)+queries)

        try:
            model = PrologString(modeltext)
            result = get_evaluatable().create_from(model).evaluate()

            best_actions = self.__sort_results_rules(result)
            if self.render_action:
                self.render(service_name, current_node, modeltext, experiment_path)

            return best_actions
        except Exception as err:
            # BUG FIX: the bare `except:` also caught KeyboardInterrupt /
            # SystemExit and discarded the original traceback; catch
            # Exception and chain the cause instead.
            raise Exception(" A problem running problog ") from err
Example #28
0
    def _run_sl_operators_on_semiring(self, givensemiring, program=None):
        """Evaluate *program* (default: the stored SL-ProbLog program) under
        the given semiring and return the parsed, ordered results.

        :param givensemiring: semiring used for both compilation and evaluation
        :param program: optional ProbLog program text; falls back to
            self._slproblog_program
        :return: ordered dict of parsed results (moment-matched for Beta
            semirings)
        """
        engine = DefaultEngine()
        # BUG FIX: was `program == None`; identity comparison is the correct
        # (and PEP 8 mandated) test for None.
        if program is None:
            program = self._slproblog_program

        db = engine.prepare(PrologString(program))
        semiring = givensemiring
        knowledge = get_evaluatable(None, semiring=semiring)
        formula = knowledge.create_from(db, engine=engine, database=db)
        res = formula.evaluate(semiring=semiring)

        ret = {}
        for k, v in res.items():
            # Beta-distributed opinions are moment-matched before returning
            if isinstance(semiring, BetaSemiring):
                ret[k] = moment_matching(semiring.parse(v))
            else:
                ret[k] = semiring.parse(v)

        return self._order_dicts(ret)
Example #29
0
def get_labels_single_example_probabilistic_models(
        example: SimpleProgram,
        rules: SimpleProgram,
        possible_labels: Iterable[str],
        background_knowledge=None,
        debug_printing=False) -> List[str]:
    """
    Classifies a single example and returns a list of its labels
    :param example:
    :param rules:
    :param possible_labels:
    :return:
    """
    # NOTE(review): despite the `-> List[str]` annotation, this function
    # returns the raw dict produced by evaluate() (query term -> probability)
    # — confirm which contract callers actually rely on.
    eng = DefaultEngine()
    eng.unknown = 1

    # Seed the database: background knowledge first (if any), then the
    # example statements, then — only in the background case — the rules.
    if background_knowledge is not None:
        db = eng.prepare(background_knowledge)
        for statement in example:
            db += statement
        for rule in rules:
            db += rule
    else:
        db = eng.prepare(rules)
        for statement in example:
            db += statement

    if debug_printing:
        print('\nQueried database:')
        for statement in db:
            print('\t' + str(statement))
            # print('\n')

    # all labels are queried in a single evaluation (unlike the
    # one-query-per-label variant)
    query_terms = [Term('query')(label) for label in possible_labels]

    db_to_query = db.extend()
    for query_term in query_terms:
        db_to_query += query_term

    query_results = problog.get_evaluatable().create_from(
        db_to_query, engine=eng).evaluate()

    return query_results
Example #30
0
    def get_examples_satisfying_query(self, examples: Iterable[SimpleProgramExampleWrapper], query) -> Tuple[Set[ExampleWrapper], Set[ExampleWrapper]]:
        """Partition *examples* into those satisfying *query* (probability of
        self.to_query above 0.5) and those that do not."""
        satisfying = set()
        not_satisfying = set()

        for example in examples:  # type: SimpleProgramExampleWrapper
            extended_db = self.db.extend()  # type: ClauseDB
            if example.classification_term is not None:
                extended_db += example.classification_term
            for statement in example:
                extended_db += statement
            extended_db += Term('query')(self.to_query)
            extended_db += (self.to_query << query)

            evaluation = problog.get_evaluatable().create_from(
                extended_db, engine=self.engine).evaluate()

            if evaluation[self.to_query] > 0.5:
                satisfying.add(example)
            else:
                not_satisfying.add(example)

        return satisfying, not_satisfying
Example #31
0
    def get_values_for(self,
                       existing_timestamps,
                       query_timestamps,
                       expected_events,
                       input_events=()):
        """Evaluate holdsAt queries for every (timestamp, event) pair,
        feeding positive results back into the model as holdsAt_ facts for
        subsequent queries. Returns the combined evaluation dict."""
        # model = basic EC definition + user rules, plus the input events
        base_model = self.model + '\n' + '\n'.join(
            map(lambda x: x.to_problog(), input_events))
        base_model += '\nallTimeStamps([{}]).'.format(', '.join(
            map(str, existing_timestamps)))

        accumulated_knowledge = ''
        results = {}

        for timestamp in query_timestamps:
            for event in expected_events:
                query = 'query(holdsAt({event} = true, {timestamp})).\n'.format(
                    event=event, timestamp=timestamp)

                model = PrologString(base_model + '\n' + accumulated_knowledge +
                                     '\n' + query)
                knowledge = get_evaluatable(name='ddnnf').create_from(
                    model, semiring=SemiringSymbolic())
                evaluation = knowledge.evaluate()
                results.update(evaluation)

                # positive outcomes become holdsAt_ facts for later queries
                for k, v in evaluation.items():
                    if v > 0.0:
                        accumulated_knowledge += '{0}::{1}.\n'.format(
                            v, k).replace('holdsAt', 'holdsAt_')

        return results