Example #1
def run_test_with_query_instance():
    from problog.program import PrologString
    from problog.engine import DefaultEngine
    from problog.logic import Term
    from problog.evaluator import SemiringProbability
    from problog.ddnnf_formula import DDNNF
    # FormulaWrapper and AMCQuery are project-local classes; AMCQuery is
    # assumed to be imported from the surrounding package.
    from .formula_wrapper import FormulaWrapper

    p = PrologString("""
    coin(c1). coin(c2).
    0.4::heads(C); 0.6::tails(C) :- coin(C).
    win :- heads(C).
    """)

    s_qs = [Term("win")]
    s_evs = [(Term("heads", Term("c1")), False)]  # For now

    engine = DefaultEngine()
    probsr = SemiringProbability()
    fw = FormulaWrapper(engine.prepare(p))
    qobj = AMCQuery(s_qs, s_evs, fw, target_class=DDNNF, semiring=probsr)

    qobj.ground(engine)
    result, ground_evidence = qobj.evaluate(engine)
    print("evidence: ", ground_evidence)
    for r in result:
        print(r)
    print("---")
Example #2
    def ground(self, rule, functor=None, arguments=None):
        """Generate ground program for the given rule.

        :param rule: rule to ground, or None to ground the bare target functor
        :type rule: Rule | None
        :param functor: override rule functor
        :type functor: str
        :param arguments: query arguments (None if non-ground)
        :type arguments: list[tuple[Term]] | None
        :return: ground program
        :rtype: LogicFormula
        """
        if rule is None:
            db = self._database
            target = Term(functor)
        else:
            db = self._database.extend()
            target = None
            for clause in rule.to_clauses(functor):
                target = clause.head
                db += clause

        if arguments is not None:
            queries = [target.with_args(*args) for args in arguments]
        else:
            queries = [target]

        with Timer("Grounding", logger="probfoil"):
            return self._database.engine.ground_all(db, queries=queries)
Example #3
def main():
    mod = GDLIIIEngine('./examples/montyhall.gdliii', File_Format.INFIX)
    res = {}
    for i in range(0,1000):
        print(f"Game {i}")
        #Random initial door choice to begin with
        moves1 = mod.get_legal_moves()[PLAYER_NAME]
        mod.set_actions({PLAYER_NAME: choice(moves1)})
        mod.update_step()
        #Second step, can only choose noop
        mod.set_actions({PLAYER_NAME: mod.get_legal_moves()[PLAYER_NAME][0]})
        mod.update_step()
        #Important step to show game playing capability. Check whether we believe we are currently in a winning state
        #If yes, then choose noop, if no choose switch
        (p100,goal100) = mod.query(PLAYER_NAME, Term('goal', Constant(mod.player_to_id(PLAYER_NAME)), Constant('100')))[0]
        (p0,goal0) = mod.query(PLAYER_NAME, Term('goal', Constant(mod.player_to_id(PLAYER_NAME)), Constant('0')))[0]
        if (p100 > p0):
            #Perform Noop if we are more likely to be in the winning state already
            mod.set_actions({PLAYER_NAME:mod.get_legal_moves()[PLAYER_NAME][0]})
        else:
            #Perform switch if we are likely to be in the losing state
            mod.set_actions({PLAYER_NAME:mod.get_legal_moves()[PLAYER_NAME][1]})
        mod.update_step()
        # Add a counter to either goal(1,0) if we lost, or goal(1,100) if we won
        outcome = mod.query(PLAYER_NAME, Term('goal', Constant(mod.player_to_id(PLAYER_NAME)), Var('_')))[0]
        res[outcome] = res.get(outcome, 0) + 1
        # Go back to the beginning of the game
        for _ in range(3):
            mod.undo_step()
    print(res)
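The switch-unless-winning strategy above should win about two thirds of the time. A standalone sanity check of that number, independent of the GDL-III engine (a minimal sketch, not part of the example):

import random

def simulate_switch_strategy(trials=10000):
    # Monty Hall: always switching wins exactly when the first pick was
    # wrong, which happens with probability 2/3.
    wins = 0
    for _ in range(trials):
        car = random.randrange(3)
        pick = random.randrange(3)
        wins += (pick != car)
    return wins / trials

print(simulate_switch_strategy())  # ~0.667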
Example #4
    def query(self, player, query, step=0):
        if step == 0:
            return self._cur_node.raw_query(player, Term('thinks', player, query))
        else:
            world_set = {self._cur_node}
            for _ in range(step):
                # Create set of all possible move sequences from perspective of player
                action_set = set()
                for world in world_set:
                    action_set |= set(world.get_legal_moves()[player].keys())
                legal_moves_seqs = [{k: (None if k != player else a) for k in self._playerList}
                                    for a in action_set]
                # Generate possible (but not always valid) successor worlds
                new_set = set()
                for world in world_set:
                    for actions in legal_moves_seqs:
                        new_set.add(world.generate_speculative_worlds(player, actions))
                world_set = new_set

            query_dict = {}
            size = len(world_set)
            for w in world_set:
                for item, val in w.raw_query(player, Term('thinks', player, query)).items():
                    if item in query_dict:
                        query_dict[item] += val / size
                    else:
                        query_dict[item] = val / size
            return query_dict
Example #5
def run_tests_with_static_methods():
    from problog.program import PrologString
    from problog.engine import DefaultEngine
    from problog.logic import Term
    from problog.formula import LogicFormula
    from problog.evaluator import SemiringProbability
    # AMCQuery is a project-local class, assumed to be imported from the
    # surrounding package.
    p = PrologString("""
    coin(c1). coin(c2).
    0.4::heads(C); 0.6::tails(C) :- coin(C).
    win :- heads(C).
    """)

    qs = [Term("win")]
    evs = [(Term("heads", Term("c1")), False)]  # For now

    engine = DefaultEngine()
    db = engine.prepare(p)
    labels = (LogicFormula.LABEL_QUERY, LogicFormula.LABEL_EVIDENCE_POS,
              LogicFormula.LABEL_EVIDENCE_NEG)
    lf = LogicFormula()
    lf = AMCQuery.ground_query_evidence(engine, db, qs, evs, lf, labels)

    circuit = AMCQuery.compile_to_circuit(lf, "ddnnf")
    prob_sr = SemiringProbability()
    results, ground_evidence = AMCQuery.evaluate_circuit(
        circuit, labels, prob_sr)
    print("evidence: ", ground_evidence)
    for r in results:
        print(r)
    print("---")
Example #6
def translate(db, atom_id):
    if isinstance(atom_id, tuple):
        # A tuple identifies one choice of an annotated disjunction:
        # (node id, arguments, choice index).
        atom_id, args, choice = atom_id
        return Term("ad_%s_%s" % (atom_id, choice), *args)
    else:
        # Otherwise atom_id is a plain index into the clause database.
        node = db.get_node(atom_id)
        return Term(node.functor, *node.args)
Example #7
def init_db(engine, model, propagate_evidence=False):
    db = engine.prepare(model)

    if propagate_evidence:
        evidence = engine.query(db, Term("evidence", None, None))
        evidence += engine.query(db, Term("evidence", None))

        ev_target = LogicFormula()
        engine.ground_evidence(db, ev_target, evidence)
        ev_target.lookup_evidence = {}
        ev_nodes = [
            node for name, node in ev_target.evidence()
            if node != 0 and node is not None
        ]
        ev_target.propagate(ev_nodes, ev_target.lookup_evidence)

        evidence_facts = []
        for index, value in ev_target.lookup_evidence.items():
            node = ev_target.get_node(index)
            if ev_target.is_true(value):
                evidence_facts.append((node[0], 1.0) + node[2:])
            elif ev_target.is_false(value):
                evidence_facts.append((node[0], 0.0) + node[2:])
    else:
        evidence_facts = []
        ev_target = None

    return db, evidence_facts, ev_target
Example #8
    def eval(self, model, evidence=pd.Series()):
        """Return the query results for all configurations of all head
        variables in the model, given the evidence.

        Keyword arguments:
        evidence -- pandas Series mapping variable names to observed values
                    (0, 1, or a probability strictly in between)
        """
        # change model weights
        custom_weights = {}
        custom_weights_items = {}
        xs = self.problog_knowledge_sr.get_weights().values()
        for x in xs:
            if getattr(x, "functor", None):
                custom_weights_items[x.functor] = x
        for head in model:
            rules = model[head]['rules']
            for i, rule in enumerate(rules):
                x = custom_weights_items[rule['parameter_name']]
                custom_weights[x] = rule['parameter']

        # change evidence (and weights in case evidence is probabilistic)
        evidence_dict = {}
        for var, value in evidence.items():
            term = Term(var)
            if value == 1:
                evidence_dict[term] = True
            if value == 0:
                evidence_dict[term] = False
            # MANAGING THE PROBABILISTIC CASE
            # initialize the custom weights of all probability dummy variables
            x_0 = custom_weights_items[model[var]['prob_dumb']['weight_0']]
            x_1 = custom_weights_items[model[var]['prob_dumb']['weight_1']]
            custom_weights[x_1] = 0.5
            custom_weights[x_0] = 0.5
            if 0 < value < 1:
                # if the observation is probabilistic, insert evidence for the dummy var
                prob_term = Term(model[var]['prob_dumb']['var'])
                evidence_dict[prob_term] = True
                # and weights for the probabilistic dummy rules
                custom_weights[x_1] = value
                custom_weights[x_0] = 1 - value
                
        # print('\nevidence_dict:::\n', evidence_dict)
        # print('\n\ncustom_weights:::\n', custom_weights)
        
        # make inference
        try:
            res = self.problog_knowledge_sr.evaluate(
                evidence=evidence_dict,
                keep_evidence=False,
                semiring=CustomSemiring(custom_weights))
            output = {str(key): value for key, value in res.items()}
        except InconsistentEvidenceError:
            raise InconsistentEvidenceError("""This error may have occurred
                because some observation in the dataset is impossible given
                the model structure.""")
        return output
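The weight_0 / weight_1 "dumb" variables above implement soft (virtual) evidence: an observation strictly between 0 and 1 is encoded as hard evidence on an auxiliary atom whose weights carry the observation strength. A minimal standalone ProbLog sketch of that encoding (an illustration of the trick, not this class's actual model):

from problog import get_evaluatable
from problog.program import PrologString

# Soft-observing a with strength 0.8: the a-world is weighted by 0.8 and
# the not-a world by 0.2, so P(a | obs) = 0.4 / (0.4 + 0.1) = 0.8.
model = PrologString("""
0.5::a.
0.8::a_obs_t. 0.2::a_obs_f.
a_obs :- a, a_obs_t.
a_obs :- \\+a, a_obs_f.
evidence(a_obs, true).
query(a).
""")
print(get_evaluatable().create_from(model).evaluate())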
Example #9
def cell_to_atoms(table_name, header, cell_row, column_type,
                  column_unique_values, cell_value, **kwargs):
    table_name = unquote(str(table_name)).lower()
    header = unquote(str(header)).lower()
    cell_row = unquote(str(cell_row)).lower()
    column_type = unquote(str(column_type)).lower()
    cell_value = unquote(str(cell_value)).lower()
    column_unique_values1 = term2list(column_unique_values)
    # print(column_unique_values1)

    row_id = table_name + "_r" + cell_row

    # result = [Term(header, Constant(row_id), Constant(cell_value))]
    result = []

    result.append((Term("row", Constant(row_id)), 1.0))

    if column_type == "string":
        for unique_value in column_unique_values1:
            if unquote(unique_value).lower() == cell_value:
                result.append((Term(header + "_" + cell_value,
                                    Constant(row_id)), 1.0))
            else:
                # with probability 0
                result.append((
                    Term(
                        header + "_" + unquote(unique_value).lower(),
                        Constant(row_id),
                    ),
                    0.0,
                ))
    return result
Example #10
    def test_anonymous_variable(self):
        """Anonymous variables are distinct"""

        program = """
            p(_,X,_) :- X = 3.

            q(1,2,3).
            q(1,2,4).
            q(2,3,5).
            r(Y) :- q(_,Y,_).

        """

        engine = DefaultEngine()
        db = engine.prepare(PrologString(program))
        self.assertEqual(
            list(
                map(
                    list,
                    engine.query(
                        db, Term("p", Constant(1), Constant(3), Constant(2))),
                )),
            [[Constant(1), Constant(3), Constant(2)]],
        )

        self.assertEqual(list(map(list, engine.query(db, Term("r", None)))),
                         [[2], [3]])
Example #11
    def load(self, data):
        """Load the settings from a data file.

        Initializes language, target and examples.

        :param data: data file
        :type data: DataFile
        """
        self.language.load(data)  # for types and modes

        if self._target is None:
            try:
                target = data.query('learn', 1)[0]
                target_functor, target_arity = target[0].args
            except IndexError:
                raise KnownError('No target specification found!')
        else:
            target_functor = self._target.functor
            target_arity = self._target.args[0]
        target_arguments = [
            Var(chr(65 + i)) for i in range(0, int(target_arity))
        ]
        self._target = Term(str(target_functor), *target_arguments)

        # Find examples:
        #  if example_mode is closed, we will only use examples that are defined in the data
        #      this includes facts labeled with probability 0.0 (i.e. negative example)
        #  otherwise, the examples will consist of all combinations of values appearing in the data
        #      (taking into account type information)
        example_mode = data.query(Term('example_mode'), 1)
        if example_mode and str(example_mode[0][0]) == 'auto':
            types = self.language.get_argument_types(self._target.functor,
                                                     self._target.arity)
            values = [self.language.get_type_values(t) for t in types]
            self._examples = list(product(*values))
        elif example_mode and str(example_mode[0][0]) == 'balance':
            # Balancing based on count only
            pos_examples = [
                r for r in data.query(self._target.functor, self._target.arity)
            ]
            pos_count = len(pos_examples)  # get no. of positive examples
            types = self.language.get_argument_types(self._target.functor,
                                                     self._target.arity)
            values = [self.language.get_type_values(t) for t in types]
            from random import shuffle
            neg_examples = list(product(*values))
            shuffle(neg_examples)
            logger = logging.getLogger(self._logger)
            logger.debug('Generated negative examples:')
            for ex in neg_examples[:pos_count]:
                logger.debug(Term(self._target(*ex).with_probability(0.0)))

            self._examples = pos_examples + neg_examples[:pos_count]
        else:
            self._examples = [
                r for r in data.query(self._target.functor, self._target.arity)
            ]

        with Timer('Computing scores', logger=self._logger):
            self._scores_correct = self._compute_scores_correct()
Example #12
    def test_add_rule(self):
        engine = self.engines['sysadmin']

        running = Term('running')
        c1 = Constant('c1')
        c2 = Constant('c2')
        c3 = Constant('c3')
        r1 = Fluent.create_fluent(running(c1), 1)
        r2 = Fluent.create_fluent(running(c2), 1)
        r3 = Fluent.create_fluent(running(c3), 1)

        head = Term('__s0__')
        body = ~r1 & ~r2 & ~r3
        rule = head << body
        node = engine.add_rule(head, [~r1, ~r2, ~r3])
        self.assertEqual(engine.get_rule(node), rule)

        head = Term('__s3__')
        body = r1 & r2 & ~r3
        node = engine.add_rule(head, [r1, r2, ~r3])
        rule = engine.get_rule(node)
        self.assertEqual(rule, head << body)

        head = Term('__s7__')
        body = r1 & r2 & r3
        rule = head << body
        node = engine.add_rule(head, [r1, r2, r3])
        self.assertEqual(engine.get_rule(node), rule)
Example #13
    def update_evidence(self, evidence=pd.Series()):
        model = self.model
        # change evidence (and weights in case evidence is probabilistic)
        evidence_dict = {}
        for var, value in evidence.items():
            term = Term(var)
            if value == 1:
                evidence_dict[term] = True
            if value == 0:
                evidence_dict[term] = False
            # MANAGING THE PROBABILISTIC CASE
            if self.probabilistic_data:
                # initialize the custom weights of all probability dummy variables
                x_0 = self.custom_weights_items[model[var]['prob_dumb']['weight_0']]
                x_1 = self.custom_weights_items[model[var]['prob_dumb']['weight_1']]
                self.custom_weights[x_1] = 0.5
                self.custom_weights[x_0] = 0.5
                if 0 < value < 1:
                    # if the observation is probabilistic, insert evidence for the dummy var
                    prob_term = Term(model[var]['prob_dumb']['var'])
                    evidence_dict[prob_term] = True
                    # and weights for the probabilistic dummy rules
                    self.custom_weights[x_1] = value
                    self.custom_weights[x_0] = 1 - value
        return evidence_dict
Example #14
def main():
    mod = GDLIIIEngine('./examples/guess.gdliii', File_Format.PREFIX)
    res = {}
    for i in range(1, 11):
        res[i] = 0
    res['lost'] = 0
    #Choose a move
    for i in range(0, 1000):
        guesses = 0
        mov = mod.get_legal_moves()
        mod.set_actions({'player': mov['player'][0]})
        mod.update_step()
        while not mod.is_terminal():
            mov = mod.get_legal_moves()
            pmoves = sorted(mov[PLAYER_NAME],
                            key=lambda a: int(str(a.args[0])))
            # Bisect: guess the median of the numbers still believed possible
            possible_nums = sorted(
                [a[1] for a in mod.query('player', Term('secret', Var('_')))],
                key=lambda a: int(str(a.args[0])))
            num = int(str(possible_nums[round(
                len(possible_nums) / 2)].args[0])) - 1
            mod.set_actions({'player': pmoves[num]})
            mod.update_step()
            guesses += 1
        result = mod.query('player', Term('secret', Var('_')))
        if len(result) == 1:
            res[guesses] += 1
        else:
            res['lost'] += 1
        mod.undo_step(guesses + 1)
        print(f'Game {i} Complete')
    print(res)
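The strategy above is a binary search over the numbers the player still believes possible, so it should need at most ceil(log2(10)) = 4 guesses for a secret in 1..10. A standalone sanity check of that bound (a sketch, assuming the game reports whether each guess was high or low):

def count_guesses(secret, lo=1, hi=10):
    # Always probe the middle of the remaining range, as the strategy above does.
    guesses = 0
    while True:
        mid = (lo + hi) // 2
        guesses += 1
        if mid == secret:
            return guesses
        if mid < secret:
            lo = mid + 1
        else:
            hi = mid - 1

print({s: count_guesses(s) for s in range(1, 11)})  # no value exceeds 4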
Example #15
    def getSuccessorWorlds(self, takenMoves):
        copyKb = self.copy()
        for i in takenMoves.keys():
            copyKb += Term('does', i, takenMoves[i].args[1])
        # Assumption: random player never has any thinks predicates
        nextState = map(lambda a: Term('ptrue', a.args[0]),
                        self.query([Term('next', Var('_'))], kb=copyKb).keys())
        return set([RandomWorld(self._engine, self._baseModel, self._step + 1,
                                1, set(nextState), self._player, [])])
Example #16
    def get_pred_clause(self, subj, pred, obj=None, prob=None):
        if type(pred) is not Term:
            if type(pred) is Constant:
                # functor is an attribute, not a method
                pred = Term(str(pred.functor))
            else:
                raise TypeError('pred must be a Term or a Constant')
        if obj is None:
            return pred(subj)
        return pred(subj, obj, p=prob)
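What the final line builds, shown standalone: calling a ProbLog Term constructs a new term with the given arguments, and the p= keyword attaches a probability annotation.

from problog.logic import Term, Constant

likes = Term('likes')
fact = likes(Constant('ann'), Constant('bob'), p=0.8)
print(fact)  # 0.8::likes(ann,bob)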
Example #17
def load_mushroom():
    from problog.logic import Term, Or
    schema = "e/p b/c/x/f/k/s f/g/y/s n/b/c/g/r/p/u/e/w/y t/f a/l/c/y/f/m/n/p/s a/d/f/n c/w/d b/n k/n/b/h/g/r/o/p/u/e/w/y e/t b/c/u/e/z/r f/y/k/s f/y/k/s n/b/c/g/o/p/e/w/y n/b/c/g/o/p/e/w/y p/u n/o/w/y n/o/t c/e/f/l/n/p/s/z k/n/b/h/r/o/u/w/y a/c/n/s/v/y g/l/m/p/u/w/d"
    schema_list = []
    var_dict = {}
    var_counter = 0
    for i, options in enumerate(schema.split(" ")[1:]):
        option_list = options.split("/")
        schema_list.append(len(option_list))

        var_list = []
        for j in range(len(option_list)):
            var_list.append(str(var_counter + j))
        var_counter += len(option_list)

        var_dict[i + 1] = var_list

    class_dict = {}
    var_counter = 0
    for i, n_options in enumerate(schema_list):
        for j in range(n_options):
            class_dict[str(var_counter + j)] = i + 1
        var_counter += n_options

    print(schema_list)
    print(var_dict)
    print(class_dict)

    # f = open("./Data/mushroom.D90.N8124.C2.num", "rb")
    positive_input_clauses = []
    negative_input_clauses = []
    with open("./Data/mushroom_cp4im.txt", "r") as f:
        for line in f:
            row = line.strip().split(" ")

            # One clause per transaction: items present in the row occur
            # negated, absent items occur positively.
            clause = []
            for i in range(117):
                if str(i) in row:
                    clause.append(-Term("v" + str(i)))
                else:
                    clause.append(Term("v" + str(i)))

            # The last field is the class label.
            if row[-1] == "0":
                negative_input_clauses.append(Or.from_list(clause))
            elif row[-1] == "1":
                positive_input_clauses.append(Or.from_list(clause))
    return (positive_input_clauses, negative_input_clauses)
Example #18
    def get_prediction_goal(self) -> Term:
        prediction_goal_term = Term(self.functor)

        # One placeholder argument per mode; the '#' variables are then
        # replaced by fresh variables via ReplaceNew.
        nb_of_args = len(self.modes)
        arguments = [Var('#')] * nb_of_args
        prediction_goal_term = prediction_goal_term(*arguments)

        prediction_goal_term = prediction_goal_term.apply(TypeModeLanguage.ReplaceNew(0))
        return prediction_goal_term
Example #19
    def output_terms(self):
        predictor_term = Term("predictor", self.problog_obj)
        target_terms = [
            Term("target", self.problog_obj, t) for t in self.target_columns
        ]
        source_terms = [
            Term("source", self.problog_obj, s) for s in self.source_columns
        ]
        return [predictor_term] + source_terms + target_terms
Example #20
def transformation(scope, term_list, transformer, source_columns, function, **kwargs):
    """
    Transform values using a transformer that was fitted on data. It uses source_columns of scope to transform the data
    :param scope: A scope, containing table_cell predicates describing a table content.
    :param transformer: A scikit-learn transformer, stored as a Problog Object (accessible through transformer(<transformer>) of the ordinal_encoder function for example).
    :param source_columns: A list of columns, where column is: column(<table_name>, <col_number>). <table_name> is a table name present in table_cell. These columns will be used as input columns for the transformer.
    :param function: The function to use for the transformation (either transform or inverse transform)
    :param kwargs:
    :return: Transformations from transformer using source_columns of scope, as well as transformation metadata.
    transformation(<scope>, <transformer>, <source_columns>) is created. <scope> is the scope parameter, as a Problog object, <transformer> is the transformer parameter, as a Problog object and <source_columns> are the source_columns parameter as a Problog object.
        This whole transformation/3 is used as a key for the transformation object. In the future, it might be better to use a unique identifier or something else!
    cell_transform(<row_id>, <col_id>, <value>, <transformation_term>) are created for each transformation. <row_id> and <col_id> are (1,1) indexed, NOT indexed from the table_cell row and column ids.
        The <col_id> corresponds to the index of the target column of transformer. <value> is the transformed value. <transformation_term> is whole transformation(<scope>, <transformer>, <source_columns>) defined above.
    transformer(<transformation_term>, <transformer>) is created. <transformation_term> is whole transformation(<scope>, <transformer>, <source_columns>) defined above, <transformer> is the transformer parameter, as a Problog object
    source(<transformation_term>, <source_column>) are created for each source_column. <transformation_term> is whole transformation(<scope>, <transformer>, <source_columns>) defined above, <source_column> is column(<table_name>, <col_number>)
    """
    transformation_term_3 = Term(
        "transformation", Object(scope), transformer, Object(source_columns)
    )

    transformation_term_1 = Term("transformation", transformation_term_3)

    # TODO: Handle probabilistic terms in transformers!
    relevant_table = [t for t in term_list if t[1].args[0] == source_columns[0].args[0]]

    most_probable_cells = get_most_probable_world(relevant_table)

    matrix = cells_to_matrix(most_probable_cells)

    src_cols = [s.args[1].value for s in source_columns]

    y_transform = function(matrix[:, src_cols])

    if len(y_transform.shape) == 1:
        y_transform = np.atleast_2d(y_transform).T

    n_rows, n_cols = y_transform.shape

    cell_transform_cells = []
    for r, c in product(range(n_rows), range(n_cols)):
        cell_transform_cells.append(
            init_cell_transform(r + 1, c + 1, y_transform[r, c], transformation_term_3)
        )

    transformer_term = [Term("transformer", transformation_term_3, transformer)]
    source_terms = [Term("source", transformation_term_3, s) for s in source_columns]

    return (
        [transformation_term_1, transformation_term_3]
        + cell_transform_cells
        + transformer_term
        + source_terms
    )
Example #21
def extract_evidence(pl):
    engine = DefaultEngine()
    # evidence/2 atoms carry an explicit truth value ...
    atoms = engine.query(pl, Term('evidence', None, None))
    # ... while evidence/1 and observe/1 atoms encode it in their sign
    atoms1 = engine.query(pl, Term('evidence', None))
    atoms2 = engine.query(pl, Term('observe', None))
    for atom in atoms1 + atoms2:
        atom = atom[0]
        if atom.is_negated():
            atoms.append((-atom, Term('false')))
        else:
            atoms.append((atom, Term('true')))
    return [(at, str2bool(vl)) for at, vl in atoms]
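str2bool is defined elsewhere in the same project; a plausible minimal version, reconstructed from how it is used above (an assumption, not the original helper):

def str2bool(term):
    # Map the ProbLog truth constants used above to Python booleans.
    if str(term) == 'true':
        return True
    if str(term) == 'false':
        return False
    return None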
Example #22
def apply_substitution_to_term(term: Term, substitution: Dict[str,
                                                              Term]) -> Term:
    complete_substitution = {}

    # NOTE: all variables in the term need to be defined in the substitution
    for var in term.variables():
        complete_substitution[var.name] = Var(var.name)

    complete_substitution.update(substitution)

    term_substitution = term.apply(complete_substitution)
    return term_substitution
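A short usage sketch: variables named in the substitution are replaced, and all others map to themselves through the identity mappings built above.

from problog.logic import Term, Var

t = Term('p', Var('X'), Var('Y'))
print(apply_substitution_to_term(t, {'X': Term('a')}))  # p(a,Y)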
Example #23
    def test_state_space(self):
        running = Term('running')
        fluents = [running.with_args(Constant('c%d' % i), Constant(0))
                   for i in range(1, 4)]
        states = StateSpace(fluents)
        for i, state in enumerate(states):
            self.assertEqual(len(state), 3)
            n = 0
            for j, (fluent, value) in enumerate(state.items()):
                self.assertEqual(fluent.functor, 'running')
                self.assertEqual(fluent.args[0], 'c%d' % (j + 1))
                self.assertEqual(fluent.args[-1], 0)
                n += value * (2**j)
            self.assertEqual(n, i)
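The test pins down the iteration order: state i assigns each fluent the bits of i, with the first fluent as the least significant bit. A minimal StateSpace consistent with it (a sketch, not the class under test):

class StateSpace:
    def __init__(self, fluents):
        self._fluents = list(fluents)

    def __iter__(self):
        # Enumerate all 2^n truth assignments in binary-counting order.
        for i in range(2 ** len(self._fluents)):
            yield {f: (i >> j) & 1 for j, f in enumerate(self._fluents)}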
Example #24
    def test_fluent(self):
        terms = [
            Term('t0'),
            Term('t1', args=(Constant('c1'), )),
            Term('t2', args=(Constant('c1'), Constant('c2')))
        ]
        for term in terms:
            for timestep in range(2):
                fluent = Fluent.create_fluent(term, timestep)
                self.assertEqual(fluent.functor, term.functor)
                self.assertEqual(fluent.arity, term.arity + 1)
                self.assertEqual(fluent.args[:-1], term.args)
                self.assertEqual(fluent.args[-1], Constant(timestep))
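From the assertions, create_fluent appends the timestep as one extra final argument. A minimal implementation consistent with the test (a sketch assuming problog.logic terms, not the class under test):

from problog.logic import Term, Constant

def create_fluent(term, timestep):
    # Same functor, same arguments, plus the timestep as the last argument.
    return Term(term.functor, *(term.args + (Constant(timestep),)))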
Example #25
def apply_clause(scope, clause, database=None, *args, **kwargs):
    if isinstance(clause, Clause):
        database.add_clause(
            Clause(
                Term(
                    "':'",
                    scope,
                    clause.head.with_probability(None),
                    p=clause.head.probability,
                ),
                Term("evaluate", scope, clause.body),
            ))
    return ()
Example #26
    def getSuccessorWorlds(self, takenMoves):
        potentialMovesByOthers = {}
        tempKb = self.copy()
        for i in takenMoves.keys():
            tempKb += Term('does', i, takenMoves[i].args[1])
            if i == self._player:
                potentialMovesByOthers[i] = {
                    Term('thinks_move', self._player,
                        Term('does', i, takenMoves[i].args[1])): 1
                }
            else:
                potentialMovesByOthers[i] = self.query(\
                    [Term('thinks_move', self._player, \
                        Term('does', i, Var('_')))], kb=tempKb)
        potentialMoveSequences = \
            self._processMoveSequences(\
                itertools.product(*[[(key, val) for (key, val) in potentialMovesByOthers[key].items()] \
                    for key in potentialMovesByOthers.keys()]))
        tkquery = self.query([Term('sees', self._player, Var('_'))], kb=tempKb).keys()
        tokens = list(map(lambda a: str(a.args[1]),tkquery)) 
        tokens.sort()
        tkkey = "".join(tokens)
        worlds = {}
        for moves, seqProb in potentialMoveSequences:
            copyKb = tempKb.extend()
            for m in moves:
                copyKb += m
            potentialTokens = set(map(lambda a: a.args[1], self.query([Term('thinks', self._player, \
                Term('sees', self._player, Var('_')))],kb=copyKb).keys()))
            move_id = list(map(lambda a: str(a.args[1]),moves))
            move_id.sort()
            move_id = "".join(move_id)
            ptklist = list(map(lambda a: str(a.args[1]),potentialTokens))
            ptklist.sort()
            ptkkey = "".join(ptklist)
            nextState = map(lambda a: Term('ptrue', a.args[0]) if a.args[0].functor != "thinks" else a.args[0],\
                self.query([Term('next', Var('_'))], kb=copyKb).keys())
            potentialWorld = World(self._engine, self._baseModel, self._step + 1,\
                seqProb*self._prob, set(nextState), self._player, ptkkey, move_id)
            if ptkkey not in worlds:
                worlds[ptkkey] = set()
            worlds[ptkkey].add(potentialWorld)

        return (worlds, tkkey)
Example #27
def decision_tree_to_simple_program(node: TreeNode,
                                    simple_program: SimpleProgram,
                                    predicate_generator,
                                    previous_conjunction=Term('true'),
                                    debug_printing=False):
    if node.has_both_children():
        # assign a new predicate to this node
        p = next(predicate_generator)

        # the following if-else is only necessary to remove an unnecessary 'true' term in the head
        if previous_conjunction.functor == 'true':
            conj_left = node.query.get_literal()
            conj_right = ~p
        else:
            conj_left = And(previous_conjunction, node.query.get_literal())
            conj_right = And(previous_conjunction, ~p)
        clause = (p << conj_left)
        simple_program += clause

        # recurse on left subtree
        decision_tree_to_simple_program(node.left_subtree, simple_program,
                                        predicate_generator, conj_left)
        # recurse on right subtree
        decision_tree_to_simple_program(node.right_subtree, simple_program,
                                        predicate_generator, conj_right)
    else:
        if node.can_classify():
            clause = (node.strategy.classification << previous_conjunction)
            simple_program += clause
        else:
            raise InvalidTreeNodeError()
Example #28
def mpe_semiring(lf, verbose=0, solver=None):
    semiring = SemiringMPEState()
    kc_class = get_evaluatable(semiring=semiring)

    if lf.evidence():
        # Query = evidence + constraints
        qn = lf.add_and([y for x, y in lf.evidence()])
        lf.clear_evidence()

        if lf.queries():
            print('%% WARNING: ignoring queries in file', file=sys.stderr)
        lf.clear_queries()

        query_name = Term('query')
        lf.add_query(query_name, qn)
        kc = kc_class.create_from(lf)

        # with open('/tmp/x.dot', 'w') as f:
        #     print(kc.to_dot(), file=f)

        results = kc.evaluate(semiring=semiring)
        prob, facts = results[query_name]
    else:
        prob, facts = 1.0, []

    return prob, facts
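A hypothetical usage sketch, assuming SemiringMPEState and the rest of this module's imports are available: ground a small program with evidence into a LogicFormula, then ask for its most probable explanation.

from problog.program import PrologString
from problog.formula import LogicFormula

model = PrologString("""
0.6::rain. 0.3::sprinkler.
wet :- rain.
wet :- sprinkler.
evidence(wet, true).
""")
lf = LogicFormula.create_from(model)
prob, facts = mpe_semiring(lf)
print(prob, facts)  # probability of the MPE state and the facts it selects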
Example #29
    def fit(self):
        """
        If a predictor object is matched on the database, does nothing.
        Else, learn the predictor model on scope. It uses source_columns to predict source_columns (MERCS can predict any column from its sources) and stores the model in Problog database.
        """
        # If the object was not retrieved from db, we train the model
        if not self.object_from_db:
            table_cell_term_list = [
                t[1] for t in self.engine.query(
                    self.database, Term("':'", self.scope, None), subcall=True)
                if t[1].functor == "table_cell"
            ]

            relevant_table = [
                t for t in table_cell_term_list
                if t.args[0] == self.target_columns[0].args[0]
            ]

            # Filter data
            matrix = cells_to_matrix(relevant_table)
            src_cols = [s.args[1].value for s in self.source_columns]
            matrix = matrix[:, src_cols]

            # Train a MERCS model

            data = pd.DataFrame(
                matrix)  # MERCS still needs this (elia: I'm so sorry)
            self.model.fit(data)

            # We add the new predictor in the database to be able to retrieve it in future calls
            self.database.add_fact(self.to_term())
Example #30
    def initial_hypothesis(self):
        initial = FOILRule(self.target, correct=self._scores_correct)
        initial = initial & Term('fail')
        initial.scores = [0.0] * len(self._scores_correct)
        initial.score = self._compute_rule_score(initial)
        initial.avoid_literals = set()
        return initial
Example #31
    def to_term(self):
        """
        Term representation of the current Cluster object
        :return:
        """
        return Term("cluster_object", self.scope, Object(self.source_columns),
                    self.problog_obj)