Example #1
    def ifit(self, instance, do_mapping=False):
        """
        Just maintain a set of counts at the root and use these for prediction.

        The do_mapping parameter determines whether or not to do structure
        mapping. This is disabled by default to get a really naive model.

        **This process modifies the tree's knowledge.** For a non-modifying
        version see: :meth:`DummyTree.categorize`.

        :param instance: an instance to be categorized into the tree.
        :type instance: :ref:`Instance<instance-rep>`
        :param do_mapping: a flag for whether or not to do structure mapping.
        :type do_mapping: bool
        :return: the root node of the tree containing everything ever added to
            it.
        :rtype: Cobweb3Node
        """
        if do_mapping:
            pipeline = Pipeline(SubComponentProcessor(), Flattener(),
                                StructureMapper(self.root, gensym=self.gensym))
        else:
            pipeline = Pipeline(SubComponentProcessor(), Flattener())
        temp_instance = pipeline.transform(instance)
        self.root.increment_counts(temp_instance)
        return self.root
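
A minimal usage sketch for the method above, assuming the surrounding class is the DummyTree from concept_formation.dummy; the instance attributes are made up for illustration:

from concept_formation.dummy import DummyTree

tree = DummyTree()
instance = {'color': 'red', 'size': 5, 'part': {'color': 'blue'}}
root = tree.ifit(instance)                    # naive: just count at the root
root = tree.ifit(instance, do_mapping=True)   # also run the StructureMapper step
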
Example #2
    def skill_info(self, X):
        X = [X] if not isinstance(X, list) else X
        feature_names = self.named_steps["dict vect"].get_feature_names()
        classifier = self.steps[-1][-1]

        ft = Flattener()
        tup = Tuplizer()
        lvf = ListValueFlattener()

        X = lvf.transform(X)
        X = [tup.undo_transform(ft.transform(x)) for x in X]

        # X = self.named_steps["dict vect"].transform(x)
        # X = self._transform(X)

        # ft = Flattener()
        # tup = Tuplizer()
        # print("BAE1",X)
        # print("PEEP",feature_names)
        # print("BAE",[tup.transform(x) for x in X])
        # print(type(self))
        # X = [tup.undo_transform(ft.transform(x)) for x in X]
        Xt = X
        for name, transform in self.steps[:-1]:
            if transform is not None:
                # print("HEY",transform.get_feature_names())
                Xt = transform.transform(Xt)
                # print("BAE_"+name,Xt)

        return classifier.skill_info(Xt, feature_names)
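
The lvf/ft/tup chain above is the flatten-then-stringify step used throughout these examples. A stand-alone sketch of that round trip, assuming Flattener and Tuplizer behave as in concept_formation.preprocessor (ListValueFlattener looks project-specific, so it is left out here):

from concept_formation.preprocessor import Flattener, Tuplizer

ft = Flattener()
tup = Tuplizer()

nested = {'a': 1, 'comp': {'b': 2}}
flat = ft.transform(nested)             # nested attributes become tuple keys, e.g. ('b', 'comp')
stringy = tup.undo_transform(flat)      # tuple keys rendered back to '(b comp)' string form
print(stringy)
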
Example #3
    def predict(self, X):
        ft = Flattener()
        tup = Tuplizer()
        X = [tup.undo_transform(ft.transform(x)) for x in X]
        # print("BEEP", X)
        # print("PRED:",X)
        # print("VAL:", super(CustomPipeline, self).predict(X))
        return super(CustomPipeline, self).predict(X)
Example #4
    def fit(self, X, y):

        # print("X",X)
        # NOTE: Only using boolean values
        X = [{k: v for k, v in d.items() if isinstance(v, bool)} for d in X]
        print("FITX", X)
        ft = Flattener()
        tup = Tuplizer()

        self.X = [tup.undo_transform(ft.transform(x)) for x in X]
        self.y = [int(x) if not isinstance(x, tuple) else x for x in y]

        super(CustomPipeline, self).fit(self.X, self.y)

    def ifit(self, x, y):
        if not hasattr(self, 'X'):
            self.X = []
        if not hasattr(self, 'y'):
            self.y = []

        ft = Flattener()
        tup = Tuplizer()

        # pprint(x)
        self.X.append(tup.undo_transform(ft.transform(x)))
        self.y.append(int(y))
        # print(self.y)
        return self.fit(self.X, self.y)
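
A sketch of how this fit/ifit pair might be exercised. CustomPipeline extends sklearn's Pipeline, so the steps below (a DictVectorizer feeding a decision tree) are hypothetical stand-ins for whatever the project actually wires in; the 'dict vect' step name only mirrors the lookup in Example #2:

from sklearn.feature_extraction import DictVectorizer
from sklearn.tree import DecisionTreeClassifier

pipe = CustomPipeline([('dict vect', DictVectorizer(sparse=False)),
                       ('clf', DecisionTreeClassifier())])
pipe.ifit({'editable': True, 'empty': False}, True)    # appends the example and refits
pipe.ifit({'editable': False, 'empty': True}, False)
print(pipe.predict([{'editable': True, 'empty': False}]))
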
Example #6
    def request(self, state):
        tup = Tuplizer()
        flt = Flattener()
        state = flt.transform(tup.transform(state))

        knowledge_base = FoPlanner([(self.ground(a), state[a].replace(
            '?', 'QM') if isinstance(state[a], str) else state[a])
                                    for a in state], self.feature_set)
        knowledge_base.fc_infer(depth=1, epsilon=self.epsilon)
        ostate = {
            self.unground(a): v.replace("QM", "?") if isinstance(v, str) else v
            for a, v in knowledge_base.facts
        }

        knowledge_base = FoPlanner([(self.ground(a), state[a].replace(
            '?', 'QM') if isinstance(state[a], str) else state[a])
                                    for a in state], self.function_set)
        knowledge_base.fc_infer(depth=self.search_depth, epsilon=self.epsilon)

        state = {
            self.unground(a): v.replace("QM", "?") if isinstance(v, str) else v
            for a, v in knowledge_base.facts
        }

        actions = [{
            'skill_label': 'NO_LABEL',
            'foci_of_attention': [],
            'selection': vm['?selection'],
            'action': vm['?action'],
            'inputs': {e[0]: e[1]
                       for e in vm['?inputs']}
        } for vm in knowledge_base.fc_query([(('sai', '?selection', '?action',
                                               '?inputs'), True)],
                                            max_depth=0,
                                            epsilon=0)]

        actions = [(self.Q.evaluate(ostate,
                                    self.get_action_key(a)), random(), a)
                   for a in actions]

        actions.sort(reverse=True)
        # print(actions)

        self.last_state = ostate
        self.last_action = self.get_action_key(actions[0][2])
        self.reward = 0

        return actions[0][2]
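
One small idiom above is worth calling out: the (q_value, random(), action) triples sort primarily by Q value, and the random() entry both breaks ties and keeps the sort from ever reaching the action dicts themselves (which are not orderable). A tiny stand-alone sketch of the same trick:

from random import random

actions = ['left', 'right', 'wait']
q = {'left': 0.5, 'right': 0.5, 'wait': 0.1}

ranked = sorted(((q[a], random(), a) for a in actions), reverse=True)
best = ranked[0][2]   # 'left' or 'right', chosen at random between the tied pair
print(ranked)
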
Example #7
    def ifit(self, x, y):
        if not hasattr(self, 'X'):
            self.X = []
        if not hasattr(self, 'y'):
            self.y = []

        ft = Flattener()
        tup = Tuplizer()

        # pprint(x)
        self.X.append(tup.undo_transform(ft.transform(x)))
        self.y.append(int(y) if not isinstance(y, tuple) else y)

        # print("IFIT:",self.X)
        # print(self.y)
        return super(CustomPipeline, self).fit(self.X, self.y)
Example #8
    def fit(self, X, y):

        # print("X",X)
        # NOTE: Only using boolean values
        # X = [{k: v for k, v in d.items() if isinstance(v, bool)} for d in X]
        # print("FITX", X)
        # print("GIN JEF", X[-1])
        ft = Flattener()
        tup = Tuplizer()
        lvf = ListValueFlattener()

        X = lvf.transform(X)
        # print("GIN FEF", X[-1])

        self.X = [tup.undo_transform(ft.transform(x)) for x in X] 
        self.y = [int(x) if not isinstance(x, tuple) else x for x in y] 
        # print("GIN IN")
        # print(self.X[-1])
        # print("BLOOP:",len(self.y))
        super(CustomPipeline, self).fit(self.X, self.y)
Example #9
    def _trestle_categorize(self, instance):
        """
        Structure-maps the instance, categorizes the matched instance, and
        returns the resulting concept.

        :param instance: an instance to be categorized into the tree.
        :type instance: {a1:v1, a2:v2, ...}
        :return: A concept describing the instance
        :rtype: concept
        """
        preprocessing = Pipeline(NameStandardizer(self.gensym), Flattener(),
                                 SubComponentProcessor(),
                                 StructureMapper(self.root))
        temp_instance = preprocessing.transform(instance)
        self._sanity_check_instance(temp_instance)
        return self._cobweb_categorize(temp_instance)
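
Since _trestle_categorize is what TrestleTree.categorize calls under the hood, here is a minimal sketch of the public, non-destructive path (assuming TrestleTree from the concept_formation package; the toy instances are made up):

from concept_formation.trestle import TrestleTree

tree = TrestleTree()
tree.ifit({'shape': 'square', 'part': {'shape': 'circle'}})
tree.ifit({'shape': 'triangle'})
concept = tree.categorize({'shape': 'square'})   # structure-maps, then categorizes without modifying the tree
print(concept.predict('shape'))
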
Example #10
    def trestle(self, instance):
        """
        The core trestle algorithm used in fitting and categorization.

        This function is similar to :meth:`Cobweb.cobweb
        <concept_formation.cobweb.CobwebTree.cobweb>`. The key difference
        between trestle and cobweb is that trestle performs structure mapping
        (see: :meth:`structure_map
        <concept_formation.structure_mapper.StructureMapper.transform>`) before
        proceeding through the normal cobweb algorithm.

        :param instance: an instance to be categorized into the tree.
        :type instance: :ref:`Instance<instance-rep>`
        :return: A concept describing the instance
        :rtype: CobwebNode
        """
        preprocessing = Pipeline(NameStandardizer(self.gensym), Flattener(),
                                 SubComponentProcessor(),
                                 StructureMapper(self.root))
        temp_instance = preprocessing.transform(instance)
        self._sanity_check_instance(temp_instance)
        return self.cobweb(temp_instance)
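
And the fitting path: ifit routes each instance through trestle above and returns the concept the instance was placed in (again assuming the concept_formation TrestleTree):

from concept_formation.trestle import TrestleTree

tree = TrestleTree()
for inst in [{'a': 'x'}, {'a': 'y'}, {'a': 'x', 'sub': {'b': 'z'}}]:
    node = tree.ifit(inst)    # NameStandardizer -> Flattener -> SubComponentProcessor -> StructureMapper -> cobweb
print(node)                   # concept where the last instance landed
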
Example #11
    def infer_missing(self,
                      instance,
                      choice_fn="most likely",
                      allow_none=True):
        """
        Given a tree and an instance, returns a new instance with attribute
        values picked using the specified choice function (either "most likely"
        or "sampled").

        .. todo:: write some kind of test for this.

        :param instance: an instance to be completed.
        :type instance: :ref:`Instance<instance-rep>`
        :param choice_fn: a string specifying the choice function to use,
            either "most likely" or "sampled".
        :type choice_fn: a string
        :param allow_none: whether attributes not in the instance can be
            inferred to be missing. If False, then all attributes will be
            inferred with some value.
        :type allow_none: Boolean
        :return: A completed instance
        :rtype: instance
        """
        preprocessing = Pipeline(NameStandardizer(self.gensym), Flattener(),
                                 SubComponentProcessor(),
                                 StructureMapper(self.root))

        temp_instance = preprocessing.transform(instance)
        concept = self._cobweb_categorize(temp_instance)

        for attr in concept.attrs('all'):
            if attr in temp_instance:
                continue
            val = concept.predict(attr, choice_fn, allow_none)
            if val is not None:
                temp_instance[attr] = val

        temp_instance = preprocessing.undo_transform(temp_instance)
        return temp_instance
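
A short usage sketch for infer_missing, under the same TrestleTree assumption (fit takes a list of instances; the attribute names are illustrative only):

from concept_formation.trestle import TrestleTree

tree = TrestleTree()
tree.fit([{'color': 'red', 'size': 'big'},
          {'color': 'red', 'size': 'big'},
          {'color': 'blue', 'size': 'small'}])

completed = tree.infer_missing({'color': 'red'}, choice_fn="most likely",
                               allow_none=False)
print(completed)   # expected to come back with a 'size' value filled in
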
    def request(self, state):
        """
        Doc String
        TODO - several Linter problems with this one
        """
        tup = Tuplizer()
        flt = Flattener()
        state = flt.transform(tup.transform(state))

        knowledge_base = FoPlanner([(ground(a), state[a].replace('?', 'QM') if
                                     isinstance(state[a], str) else state[a])
                                    for a in state], self.feature_set)
        knowledge_base.fc_infer(depth=1, epsilon=self.epsilon)
        state = {
            unground(a): v.replace("QM", "?") if isinstance(v, str) else v
            for a, v in knowledge_base.facts
        }

        skillset = []

        # pprint(self.skills)

        for skill_label in self.skills:
            for exp in self.skills[skill_label]:
                pos = self.skills[skill_label][exp]['where'].num_pos()
                neg = self.skills[skill_label][exp]['where'].num_neg()

                skillset.append(
                    (pos / (pos + neg), pos + neg, random(), skill_label, exp,
                     self.skills[skill_label][exp]))
        skillset.sort(reverse=True)

        # used for grounding out plans, don't need to build up each time.
        # knowledge_base = FoPlanner([(ground(a), state[a].replace('?', 'QM')
        #                              if isinstance(state[a], str)
        #                              else state[a])
        #                             for a in state],
        #                            self.function_set)
        # knowledge_base.fc_infer(depth=self.search_depth, epsilon=self.epsilon)

        # TODO - would it be too expensive to make skillset contain some kind of Skill object?
        # because this for loop is ridiculous
        for _, _, _, skill_label, (exp, input_args), skill in skillset:

            # print("STATE")
            # pprint(state)
            # print("--------")
            # print(exp)
            # print("contentEditable: ",state.get( ('contentEditable',"?ele-" + exp[1]) ,True) )
            # if(state.get( ('contentEditable',"?ele-" + exp[1]) ,True) == False):
            #     continue
            # print("")
            # print("REQUEST_EXPLAINS", exp)

            # Continue until we either do something with the rule. For example,
            # generate an SAI or determine that the rule doesn't match. If we
            # get some kind of failure, such as being unable to execute an
            # action sequence, then we want to learn from that and try again.
            failed = True

            # print("SKILL: ",exp, input_args)

            while failed:

                failed = False

                # print("STATE")
                # pprint(state)
                # print("--------")

                for match in skill['where'].get_matches(state,
                                                        epsilon=self.epsilon):
                    # print("REE1")
                    if len(match) != len(set(match)):
                        continue

                    # print("MATCH FOUND", skill_label, exp, match)
                    vmapping = {
                        '?foa' + str(i): ele
                        for i, ele in enumerate(match)
                    }
                    mapping = {
                        'foa' + str(i): ele
                        for i, ele in enumerate(match)
                    }

                    # print("VMAP")
                    # pprint(vmapping)
                    # print("MAP")
                    # pprint(mapping)

                    r_exp = list(rename_flat({exp: True}, vmapping))[0]
                    r_state = rename_flat(state,
                                          {mapping[a]: a
                                           for a in mapping})

                    # print("KB", knowledge_base)
                    rg_exp = eval_expression(r_exp, knowledge_base,
                                             self.function_set, self.epsilon)
                    # for ele in r_exp:
                    #     if isinstance(ele, tuple):
                    #         # print("THIS HAPPENED", ele, ground(ele), execute_functions(ground(ele)))

                    #                 # execute_functions(subt(u))
                    #                 # rg_exp.append()

                    #             # print("BLEHH:", operator.effects)

                    #         for var_match in knowledge_base.fc_query([(ground(ele), '?v')],
                    #                                                  max_depth=0,
                    #                                                   epsilon=self.epsilon):
                    #             # print("VARM:",var_match, ground(ele))
                    #             if var_match['?v'] != '':
                    #                 rg_exp.append(var_match['?v'])
                    #                 # print("HERE_A",rg_exp[-1])
                    #             break

                    #         operator_output = apply_operators(ele,self.function_set,knowledge_base,self.epsilon)
                    #         # print(operator_output)
                    #         if(operator_output != None and operator_output != ""):
                    #             rg_exp.append(operator_output)
                    #             # print("HERE_B",operator_output)

                    #         # if(operator_output != None):
                    #         #     rg_exp.append(operator_output)

                    #     else:
                    #         rg_exp.append(ele)

                    # print("REE2")

                    # print("rg_exp:", rg_exp)
                    # print("r_exp:", r_exp)

                    if len(rg_exp) != len(r_exp):
                        continue

                    # print("EXP:", r_exp)
                    # print("RSTATE ---------------")
                    # pprint(r_state)
                    # print("---------------")

                    # print("REE3")
                    prediction = skill['when'].predict([r_state])[0]

                    # print("when", skill['when'])

                    # print("PREDICTION:", type(prediction), prediction)

                    if prediction <= 0:
                        continue
                    # print("REE4")
                    response = {}
                    response['skill_label'] = skill_label
                    response['selection'] = rg_exp[1]
                    response['action'] = rg_exp[2]
                    response['inputs'] = {
                        a: rg_exp[3 + i]
                        for i, a in enumerate(input_args)
                    }
                    # response['inputs'] = list(rg_exp[3:])
                    response['foci_of_attention'] = []
                    # pprint(response)
                    return response

        return {}
    def train(self, state, label, foas, selection, action, inputs, correct):
        print('label', label)
        print('selection', selection)
        print('action', action)
        print('input', inputs)
        print('correct', correct)

        # label = 'math'

        # create example dict
        example = {}
        example['state'] = state
        example['label'] = label
        example['selection'] = selection
        example['action'] = action
        example['inputs'] = inputs
        example['correct'] = correct

        tup = Tuplizer()
        flt = Flattener()
        example['flat_state'] = flt.transform(tup.transform(state))
        # print('SAI:', selection, action, inputs)

        # print('State:')
        # pprint(example['state'])
        # print('Flat State:')
        # pprint(example['flat_state'])

        # new = {}
        # for attr in example['flat_state']:
        #     if (isinstance(attr, tuple) and attr[0] == 'value'):
        #         new[('editable', attr[1])] =
        #           example['flat_state'][attr] == ''

        #         for attr2 in example['flat_state']:
        #             if (isinstance(attr2, tuple) and attr2[0] == 'value'):
        #                 if (attr2 == attr or attr < attr2 or
        #                     (example['flat_state'][attr] == "" or
        #                      example['flat_state'][attr2] == "")):
        #                     continue
        #                 if ((example['flat_state'][attr] ==
        #                      example['flat_state'][attr2])):
        #                     new[('eq', attr, attr2)] = True

        # example['flat_state'].update(new)

        kb = FoPlanner([(self.ground(a),
                         example['flat_state'][a].replace('?', 'QM') if
                         isinstance(example['flat_state'][a], str) else
                         example['flat_state'][a])
                        for a in example['flat_state']],
                       featuresets[self.action_set])
        kb.fc_infer(depth=1, epsilon=epsilon)
        example['flat_state'] = {self.unground(a): v.replace("QM", "?") if
                                 isinstance(v, str) else v for a, v in
                                 kb.facts}

        # pprint(example['flat_state'])

        if label not in self.skills:
            self.skills[label] = {}

        explainations = []
        secondary_explainations = []

        # the base explanation (the constants)
        input_args = tuple(sorted([arg for arg in inputs]))
        sai = ('sai', selection, action, *[inputs[a] for a in input_args])

        # Need to do stuff with features here too.

        # used for grounding out plans, don't need to build up each time.
        # print(functionsets[self.action_set])
        kb = FoPlanner([(self.ground(a),
                         example['flat_state'][a].replace('?', 'QM') if
                         isinstance(example['flat_state'][a], str) else
                         example['flat_state'][a])
                        for a in example['flat_state']],
                       functionsets[self.action_set])
        kb.fc_infer(depth=search_depth, epsilon=epsilon)
        # FACTS AFTER USING FUNCTIONS.
        # pprint(kb.facts)

        for exp, iargs in self.skills[label]:
            # kb = FoPlanner([(self.ground(a),
            #                  example['flat_state'][a].replace('?', 'QM') if
            #                  isinstance(example['flat_state'][a], str) else
            #                  example['flat_state'][a])
            #                 for a in example['flat_state']],
            #                functionsets[self.action_set])
            for m in self.explains_sai(kb, exp, sai):
                # print("COVERED", exp, m)

                # Need to check if it would have been actually generated
                # under where and when.

                r_exp = self.unground(list(rename_flat({exp: True}, m))[0])

                args = self.get_vars(exp)

                if len(args) != len(m):
                    # print("EXP not same length")
                    continue

                grounded = True
                for ele in m:
                    if not isinstance(m[ele], str):
                        grounded = False
                        break
                if not grounded:
                    # print("Pattern not fully grounded")
                    continue

                # foa_vmapping = {field: '?foa%s' % j
                #                 for j, field in enumerate(args)}
                # foa_mapping = {field: 'foa%s' % j for j, field in
                #                enumerate(args)}

                t = tuple([m["?foa%s" % i].replace("QM", "?") for i in
                           range(len(m))])

                if len(t) != len(set(t)):
                    # print("TWO VARS BOUND TO SAME")
                    continue

                secondary_explainations.append(r_exp)

                # print("This is my T:", t)

                skill_where = self.skills[label][(exp, iargs)]['where']
                if not skill_where.check_match(t, example['flat_state']):
                    continue

                # print("####### SUCCESSFUL WHERE MATCH########")

                # x = rename_flat(example['flat_state'], foa_mapping)
                # c = self.skills[label][(exp, iargs)]['when'].categorize(x)
                # if not c.predict('correct'):
                #     continue

                # print("ADDING", r_exp)
                explainations.append(r_exp)

        if len(explainations) == 0 and len(secondary_explainations) > 0:
            explainations.append(choice(secondary_explainations))
            # explainations.append(secondary_explanation)

        elif len(explainations) == 0:
            # kb = FoPlanner([(self.ground(a),
            #                  example['flat_state'][a].replace('?', 'QM') if
            #                  isinstance(example['flat_state'][a], str) else
            #                  example['flat_state'][a])
            #                 for a in example['flat_state']],
            #                functionsets[self.action_set])

            selection_exp = selection
            for sel_match in kb.fc_query([('?selection', selection)],
                                         max_depth=0,
                                         epsilon=epsilon):
                selection_exp = sel_match['?selection']
                break

            input_exps = []

            for a in input_args:
                iv = inputs[a]
                # kb = FoPlanner([(self.ground(a),
                #         example['flat_state'][a].replace('?', 'QM') if
                #         isinstance(example['flat_state'][a], str) else
                #         example['flat_state'][a])
                #        for a in example['flat_state']],
                # functionsets[self.action_set])
                input_exp = iv
                # print('trying to explain', [((a, '?input'), iv)])

                # TODO not sure what the best approach is for choosing among
                # the possible explanations. Perhaps we should choose more than
                # one. Maybe the shortest (less deep).

                # f = False
                possible = []
                for iv_m in kb.fc_query([((a, '?input'), iv)],
                                        max_depth=0,
                                        epsilon=epsilon):

                    # input_exp = (a, iv_m['?input'])
                    possible.append((a, iv_m['?input']))
                    # print("FOUND!", input_exp)
                    # f = True
                    # break

                possible = [(self.compute_exp_depth(p), random(), p) for p in
                            possible]
                possible.sort()
                # print("FOUND!")
                # pprint(possible)

                if len(possible) > 0:
                    _, _, input_exp = possible[0]
                    # input_exp = choice(possible)

                # if not f:
                #     print()
                #     print("FAILED TO EXPLAIN INPUT PRINTING GOAL AND FACTS")
                #     print("GOAL:", ((a, '?input'), iv))

                #     for f in kb.facts:
                #         if f[0][0] == 'value':
                #             print(f)
                #     from time import sleep
                #     sleep(30)

                #     # pprint(kb.facts)

                input_exps.append(input_exp)

            explainations.append(self.unground(('sai', selection_exp, action,
                                                *input_exps)))

        for exp in explainations:
            args = self.get_vars(exp)
            foa_vmapping = {field: '?foa%s' % j
                            for j, field in enumerate(args)}
            foa_mapping = {field: 'foa%s' % j for j, field in enumerate(args)}
            x = rename_flat({exp: True}, foa_vmapping)
            r_exp = (list(x)[0], input_args)
            # r_exp = self.replace_vars(exp)
            # print("REPLACED")
            # print(exp)
            # print(r_exp)

            if r_exp not in self.skills[label]:
                # mg_h = self.extract_mg_h(r_exp[0])

                if self.action_set == "tutor knowledge":
                    constraints = self.generate_tutor_constraints(r_exp[0])
                else:
                    constraints = self.extract_mg_h(r_exp[0])

                # print("ACTIONSET")
                # print(self.action_set)

                # print("SAI")
                # print(r_exp[0])

                print("CONSTRAINTS")
                print(constraints)

                w_args = tuple(['?foa%s' % j for j, _ in enumerate(args)])

                self.skills[label][r_exp] = {}
                where_inst = self.where(args=w_args, constraints=constraints)
                self.skills[label][r_exp]['where'] = where_inst
                # initial_h=mg_h)
                self.skills[label][r_exp]['when'] = when_learners[self.when]()

            # print('where learning for ', exp)
            self.skills[label][r_exp]['where'].ifit(args,
                                                    example['flat_state'],
                                                    example['correct'])
            # print('done where learning')

            # TODO
            # Need to add computed features.
            # need to rename example with foa's that are not variables
            x = rename_flat(example['flat_state'], foa_mapping)
            # x['correct'] = example['correct']

            # print('ifitting')
            # pprint(x)
            # self.skills[label][r_exp]['when'].ifit(x)
            self.skills[label][r_exp]['when'].ifit(x, example['correct'])
    def request(self, state):
        # print(state)
        # print("REQUEST RECEIVED")
        tup = Tuplizer()
        flt = Flattener()

        state = flt.transform(tup.transform(state))

        # new = {}
        # for attr in state:
        #     if (isinstance(attr, tuple) and attr[0] == 'value'):
        #         new[('editable', attr[1])] = state[attr] == ''
        #         for attr2 in state:
        #             if (isinstance(attr2, tuple) and attr2[0] == 'value'):
        #                 if (attr2 == attr or attr < attr2 or
        #                     (state[attr] == "" or state[attr2] == "")):
        #                     continue
        #                 if (state[attr] == state[attr2]):
        #                     new[('eq', attr, attr2)] = True
        # state.update(new)

        kb = FoPlanner([(self.ground(a),
                         state[a].replace('?', 'QM') if
                         isinstance(state[a], str) else
                         state[a])
                        for a in state], featuresets[self.action_set])
        kb.fc_infer(depth=1, epsilon=epsilon)
        state = {self.unground(a): v.replace("QM", "?") if isinstance(v, str)
                 else v for a, v in kb.facts}

        # pprint(state)

        # compute features

        # for attr, value in self.compute_features(state):
        #     state[attr] = value

        skillset = []
        for label in self.skills:
            for exp in self.skills[label]:
                pos = self.skills[label][exp]['where'].num_pos()
                neg = self.skills[label][exp]['where'].num_neg()

                skillset.append((pos / (pos + neg), pos + neg,
                                 random(), label, exp,
                                 self.skills[label][exp]))
        skillset.sort(reverse=True)

        # print('####SKILLSET####')
        pprint(skillset)
        # print('####SKILLSET####')

        # used for grounding out plans, don't need to build up each time.
        kb = FoPlanner([(self.ground(a),
                         state[a].replace('?', 'QM') if
                         isinstance(state[a], str) else
                         state[a])
                        for a in state], functionsets[self.action_set])
        kb.fc_infer(depth=search_depth, epsilon=epsilon)

        # print(kb)

        for _, _, _, label, (exp, input_args), skill in skillset:

            # print()
            # print("TRYING:", label, exp)

            # print("Conditions:")
            # pprint(skill['where'].operator.conditions)

            # Continue until we either do something with the rule. For example,
            # generate an SAI or determine that the rule doesn't match. If we
            # get some kind of failure, such as being unable to execute an
            # action sequence, then we want to learn from that and try again.
            failed = True

            while failed:

                failed = False
                for m in skill['where'].get_matches(state, epsilon=epsilon):
                    if len(m) != len(set(m)):
                        # print("GENERATED MATCH WITH TWO VARS BOUND TO ",
                        #       "SAME THING")
                        continue

                    # print("MATCH FOUND", label, exp, m)
                    vmapping = {'?foa' + str(i): ele
                                for i, ele in enumerate(m)}
                    mapping = {'foa' + str(i): ele
                               for i, ele in enumerate(m)}

                    r_exp = list(rename_flat({exp: True}, vmapping))[0]
                    r_state = rename_flat(state,
                                          {mapping[a]: a for a in mapping})

                    # pprint(r_state)

                    # pprint(r_state)

                    rg_exp = []
                    for ele in r_exp:
                        if isinstance(ele, tuple):
                            # kb = FoPlanner([(self.ground(a),
                            #                  state[a].replace('?', 'QM') if
                            #                  isinstance(state[a], str) else
                            #                  state[a])
                            #                 for a in state],
                            #                functionsets[self.action_set])
                            for vm in kb.fc_query([(self.ground(ele), '?v')],
                                                  max_depth=0,
                                                  epsilon=epsilon):
                                # if vm['?v'] == '':
                                #     raise Exception("Should not be an"
                                #                     " empty str")
                                if vm['?v'] != '':
                                    rg_exp.append(vm['?v'])
                                break
                        else:
                            rg_exp.append(ele)

                    if len(rg_exp) != len(r_exp):
                        # print("FAILED TO FIRE RULE")
                        # print(rg_exp, 'from', r_exp)
                        continue

                        # # add neg to where
                        # skill['where'].ifit(m, state, 0)

                        # # add neg to when
                        # foa_mapping = {field: 'foa%s' % j for j, field in
                        #                enumerate(m)}
                        # neg_x = rename_flat(state, foa_mapping)
                        # skill['when'].ifit(neg_x)

                        # failed = True
                        # break

                    # print("predicting")
                    # pprint(r_state)

                    # c = skill['when'].categorize(r_state)
                    p = skill['when'].predict([r_state])[0]

                    # print("###CATEGORIZED CONCEPT###")
                    # print(c)
                    # pprint(c.av_counts)
                    # print(c.predict('correct'))

                    if p == 0:
                        # print("predicting FAIL")
                        continue
                    # print("predicting FIRE")

                    # if not c.predict('correct'):
                    #     print("predicting FAIL")
                    #     continue
                    # print("predicting FIRE")

                    # print("###TREE###")
                    # print(skill['when'])

                    # pprint(r_exp)
                    # pprint(rg_exp)

                    # assert self.explains_sai(kb, r_exp, rg_exp)

                    response = {}
                    response['label'] = label
                    response['selection'] = rg_exp[1]
                    response['action'] = rg_exp[2]
                    response['inputs'] = {a: rg_exp[3+i] for i, a in
                                          enumerate(input_args)}
                    # response['inputs'] = list(rg_exp[3:])
                    response['foas'] = []
                    # pprint(response)
                    return response

        return {}
    def predict(self, X):
        ft = Flattener()
        tup = Tuplizer()
        X = [tup.undo_transform(ft.transform(x)) for x in X]
        return super(CustomPipeline, self).predict(X)
    def request(self, state):
        ff = Flattener()
        tup = Tuplizer()

        state = tup.transform(state)
        state = ff.transform(state)

        pprint(state)

        # foa_state = {attr: state[attr] for attr in state
        #              if (isinstance(attr, tuple) and attr[0] != "value") or
        #              not isinstance(attr, tuple)}

        # This is basically conflict resolution here.
        skills = []
        for label in self.skills:
            for seq in self.skills[label]:
                corrects = self.skills[label][seq]['correct']
                accuracy = sum(corrects) / len(corrects)
                s = ((label, seq), accuracy)
                # s = (random(), len(corrects), label, seq)
                skills.append(s)
        # skills.sort(reverse=True)

        # for _,_,label,seq in skills:
        #     s = self.skills[label][seq]
        #     print(str(seq))

        while len(skills) > 0:
            # probability matching (with accuracy) conflict resolution
            # (label, seq), accuracy = weighted_choice(skills)

            # random conflict resolution
            (label, seq), accuracy = choice(skills)

            skills.remove(((label, seq), accuracy))

            s = self.skills[label][seq]

            for m in s['where_classifier'].get_matches(state):
                print("MATCH", label, m)
                if isinstance(m, tuple):
                    mapping = {
                        "?foa%i" % i: str(ele)
                        for i, ele in enumerate(m)
                    }
                else:
                    mapping = {'?foa0': m}
                # print('trying', m)

                if state[('value', mapping['?foa0'])] != "":
                    # print('no selection')
                    continue

                limited_state = {}
                for foa in mapping:
                    limited_state[('name', foa)] = state[('name',
                                                          mapping[foa])]
                    limited_state[('value', foa)] = state[('value',
                                                           mapping[foa])]

                try:
                    print('SEQ:', seq)
                    grounded_plan = tuple([
                        self.planner.execute_plan(ele, limited_state)
                        for ele in seq
                    ])
                except Exception as e:
                    # print('plan could not execute')
                    # pprint(limited_state)
                    # print("EXECPTION WITH", e)
                    continue

                vX = {}
                for foa in mapping:
                    vX[('value', foa)] = state[('value', mapping[foa])]

                if self.what:
                    what_features = {}
                    for attr in vX:
                        if isinstance(vX[attr], str) and vX[attr] != "":
                            seq = [
                                c for c in vX[attr].lower().replace(
                                    '"', "").replace("\\", "")
                            ]
                            # print(seq)
                            # print(self.what.parse(seq))
                            new_what_f = self.what.get_features(attr, seq)
                            for attr in new_what_f:
                                what_features[attr] = new_what_f[attr]
                # what_training += [x[attr] for attr in x if isinstance(x[attr],
                #                                                       str) and
                #                  x[attr] != ""]

                vX.update(
                    compute_features(vX, self.action_set.get_feature_dict()))

                if self.what:
                    vX.update(what_features)
                # for attr, value in self.compute_features(vX, features):
                #     vX[attr] = value
                # for foa in mapping:
                #     vX[('name', foa)] = state[('name', mapping[foa])]

                vX = tup.undo_transform(vX)

                print("WHEN PREDICTION STATE")
                pprint(vX)
                when_pred = s['when_classifier'].predict([vX])[0]
                # print(label, seq, s['when_classifier'])
                # pprint(when_pred)

                if when_pred == 0:
                    continue

                # pprint(limited_state)
                print("FOUND SKILL MATCH!")
                # pprint(limited_state)
                # pprint(seq)
                pprint(grounded_plan)

                response = {}
                response['label'] = label
                response['selection'] = grounded_plan[2]
                response['action'] = grounded_plan[1]
                # response['inputs'] = list(grounded_plan[3:])

                # TODO replace value here with input_args, which need to be
                # tracked.
                if grounded_plan[2] == 'done':
                    response['inputs'] = {}
                else:
                    response['inputs'] = {
                        a: grounded_plan[3 + i]
                        for i, a in enumerate(['value'])
                    }
                response['foas'] = []
                # response['foas'].append("|" +
                #                         limited_state[("name", "?foa0")] +
                #                         "|" + grounded_plan[3])
                for i in range(1, len(mapping)):
                    response['foas'].append(limited_state[("name",
                                                           "?foa%i" % i)])
                    # response['foas'].append("|" +
                    #                         limited_state[("name", "?foa%i" % i)]
                    #                         +
                    #                         "|" +
                    #                         limited_state[('value', "?foa%i" % i)])

                pprint(response)
                return response

            # import time
            # time.sleep(5)

        return {}
def pre_process(instance):
    """
    Runs all of the pre-processing functions

    >>> _reset_gensym()
    >>> import pprint
    >>> instance = {"noma":"a","num3":3,"compa":{"nomb":"b","num4":4,"sub":{"nomc":"c","num5":5}},"compb":{"nomd":"d","nome":"e"},"(related compa.num4 compb.nome)":True,"list1":["a","b",{"i":1,"j":12.3,"k":"test"}]}
    >>> pprint.pprint(instance)
    {'(related compa.num4 compb.nome)': True,
     'compa': {'nomb': 'b', 'num4': 4, 'sub': {'nomc': 'c', 'num5': 5}},
     'compb': {'nomd': 'd', 'nome': 'e'},
     'list1': ['a', 'b', {'i': 1, 'j': 12.3, 'k': 'test'}],
     'noma': 'a',
     'num3': 3}

    >>> instance = pre_process(instance)
    >>> pprint.pprint(instance)
    {'noma': 'a',
     'num3': 3,
     ('has-component', 'compa', 'sub'): True,
     ('has-element', 'list1', '?o4'): True,
     ('has-element', 'list1', '?o5'): True,
     ('has-element', 'list1', '?o6'): True,
     ('i', '?o6'): 1,
     ('j', '?o6'): 12.3,
     ('k', '?o6'): 'test',
     ('nomb', 'compa'): 'b',
     ('nomc', 'sub'): 'c',
     ('nomd', 'compb'): 'd',
     ('nome', 'compb'): 'e',
     ('num4', 'compa'): 4,
     ('num5', 'sub'): 5,
     ('ordered-list', 'list1', '?o4', '?o5'): True,
     ('ordered-list', 'list1', '?o5', '?o6'): True,
     ('related', 'compa.num4', 'compb.nome'): True,
     ('val', '?o4'): 'a',
     ('val', '?o5'): 'b'}

    >>> instance = pre_process(instance)
    >>> pprint.pprint(instance)
    {'noma': 'a',
     'num3': 3,
     ('has-component', 'compa', 'sub'): True,
     ('has-element', 'list1', '?o4'): True,
     ('has-element', 'list1', '?o5'): True,
     ('has-element', 'list1', '?o6'): True,
     ('i', '?o6'): 1,
     ('j', '?o6'): 12.3,
     ('k', '?o6'): 'test',
     ('nomb', 'compa'): 'b',
     ('nomc', 'sub'): 'c',
     ('nomd', 'compb'): 'd',
     ('nome', 'compb'): 'e',
     ('num4', 'compa'): 4,
     ('num5', 'sub'): 5,
     ('ordered-list', 'list1', '?o4', '?o5'): True,
     ('ordered-list', 'list1', '?o5', '?o6'): True,
     ('related', 'compa.num4', 'compb.nome'): True,
     ('val', '?o4'): 'a',
     ('val', '?o5'): 'b'}
    
    """
    tuplizer = Tuplizer()
    instance = tuplizer.transform(instance)

    list_processor = ListProcessor()
    instance = list_processor.transform(instance)

    standardizer = NameStandardizer()
    instance = standardizer.transform(instance)
    
    sub_component_processor = SubComponentProcessor()
    instance = sub_component_processor.transform(instance)

    flattener = Flattener()
    instance = flattener.transform(instance)

    return instance
Example #18
    def request(self, state):
        tup = Tuplizer()
        flt = Flattener()
        state = flt.transform(tup.transform(state))

        new = {}
        for attr in state:
            if (isinstance(attr, tuple) and attr[0] == 'value'):
                new[('editable', attr[1])] = state[attr] == ''
                for attr2 in state:
                    if (isinstance(attr2, tuple) and attr2[0] == 'value'):
                        if (attr2 == attr or attr < attr2
                                or (state[attr] == "" or state[attr2] == "")):
                            continue
                        if (state[attr] == state[attr2]):
                            new[('eq', attr, attr2)] = True
        state.update(new)
        # pprint(state)

        # for episode in range(self.max_episodes):
        while True:

            print("#########")
            print("NEW TRACE")
            print("#########")
            kb = FoPlanner([(self.ground(a), state[a].replace('?', 'QM')
                             if isinstance(state[a], str) else state[a])
                            for a in state], functionsets[self.action_set])

            curr_state = {x[0]: x[1] for x in kb.facts}
            next_actions = [a for a in kb.fc_get_actions(epsilon=epsilon)]
            trace_actions = []
            depth = 0

            while depth < search_depth:
                actions = [(self.Q.evaluate(curr_state, get_action_key(a)), a)
                           for a in next_actions]

                print("NEXT ACTION WEIGHTS")
                print(
                    sorted([(w, a[0].name[0]) for w, a in actions],
                           reverse=True))

                # operator, mapping, effects = weighted_choice(actions)
                operator, mapping, effects = max_choice(actions)
                # operator, mapping, effects = choice(action_space)

                self.last_state = curr_state
                self.last_action = get_action_key((operator, mapping, effects))
                trace_actions.append(subst(mapping, operator.name))

                for f in effects:
                    kb.add_fact(f)

                # if not terminal, then decrease reward
                # self.reward = -1
                self.reward = 0
                curr_state = {x[0]: x[1] for x in kb.facts}
                depth += 1

                # check if we're in a terminal state
                # if so, query oracle
                for f in effects:
                    f = self.unground(f)
                    if f[0] == 'sai':
                        response = {}
                        response['label'] = str(trace_actions)
                        response['selection'] = f[1]
                        response['action'] = f[2]
                        response['inputs'] = {'value': f[3]}
                        # {a: rg_exp[3+i] for i, a in
                        #                       enumerate(input_args)}
                        # response['inputs'] = list(rg_exp[3:])
                        response['foas'] = []
                        # pprint(response)
                        print("EXECUTING ACTION", self.last_action)
                        print("Requesting oracle feedback")

                        return response

                # punish for failed search
                if depth >= search_depth:
                    # self.reward -= 3 * search_depth
                    curr_state = None
                    next_actions = []
                else:
                    # because we're not terminal we can compute next_actions
                    next_actions = [
                        a for a in kb.fc_get_actions(epsilon=epsilon,
                                                     must_match=effects)
                    ]

                self.Q.update(self.last_state, self.last_action, self.reward,
                              curr_state, next_actions)
    def train(self, state, label, foas, selection, action, inputs, correct):

        # create example dict
        example = {}
        example['state'] = state
        example['label'] = label
        example['selection'] = selection
        example['action'] = action
        example['inputs'] = inputs
        example['correct'] = correct
        example['foa_args'] = tuple(['?obj-' + foa for foa in foas])
        example['foa_names'] = {("name", "?foa%i" % i): foa
                                for i, foa in enumerate(foas)}

        example['foa_values'] = {("value", "?foa%i" % i):
                                 state['?obj-' + foa]['value']
                                 for i, foa in enumerate(foas)}
        example['foa_values'][("value", "?foa0")] = ""
        example['limited_state'] = {("value", "?foa%i" % i):
                                    state['?obj-' + foa]['value']
                                    for i, foa in enumerate(foas)}
        example['limited_state'][("value", "?foa0")] = ""
        for attr in example['foa_names']:
            example['limited_state'][attr] = example['foa_names'][attr]

        tup = Tuplizer()
        flt = Flattener()
        # pprint(state)
        example['flat_state'] = flt.transform(tup.transform(state))

        # pprint(example['flat_state'])
        # import time
        # time.sleep(1000)

        # pprint(example)

        if label not in self.skills:
            self.skills[label] = {}

        # add example to examples
        if label not in self.examples:
            self.examples[label] = []
        self.examples[label].append(example)

        if label not in self.how_instances:
            self.how_instances[label] = self.how(planner=self.planner)
        # how = self.how(functions=functions, how_params=self.how_params)
        how_result = self.how_instances[label].ifit(example)

        # print(len(self.examples[label]))
        # for exp in how_result:
        #     correctness = [e['correct'] for e in how_result[exp]]
        #     print(label, len(correctness), sum(correctness) /
        #           len(correctness) , exp)
        # print()

        # act_plan = ActionPlanner(actions=functions,
        #                          act_params=self.how_params)
        # explanations = []

        # for exp in self.skills[label]:
        #     #print("CHECKING EXPLANATION", exp)
        #     try:
        #         grounded_plan = tuple([act_plan.execute_plan(ele,
        #                                 example['limited_state'])
        #                                for ele in exp])
        #         if act_plan.is_sais_equal(grounded_plan, sai):
        #             #print("found existing explanation")
        #             explanations.append(exp)
        #     except Exception as e:
        #         #print("EXECPTION WITH", e)
        #         continue

        # if len(explanations) == 0:
        #     explanations = act_plan.explain_sai(example['limited_state'],
        #                                         sai)

        # #print("EXPLANATIONS")
        # #pprint(explanations)

        # first delete old skill description
        del self.skills[label]
        self.skills[label] = {}

        # build new skill descriptions
        for exp in how_result:
            print('EXP', exp, correct)
            self.skills[label][exp] = {}
            self.skills[label][exp]['args'] = []
            self.skills[label][exp]['foa_states'] = []
            self.skills[label][exp]['examples'] = []
            self.skills[label][exp]['correct'] = []
            where = self.where()
            when = when_learners[self.when](self.when_params)
            # when = Pipeline([('dict_vect', DictVectorizer(sparse=False)),
            #                  ('clf', self.when())])
            self.skills[label][exp]['where_classifier'] = where
            self.skills[label][exp]['when_classifier'] = when

            for e in how_result[exp]:
                self.skills[label][exp]['args'].append(e['foa_args'])
                self.skills[label][exp]['examples'].append(e)
                self.skills[label][exp]['correct'].append(int(e['correct']))

            T = self.skills[label][exp]['args']
            y = self.skills[label][exp]['correct']

            # foa_state = {attr: example['flat_state'][attr]
            #              for attr in example['flat_state']
            #              #if (isinstance(attr, tuple) and attr[0] != "value") or
            #              #not isinstance(attr, tuple)
            #             }
            # print("FOA STATE")
            # pprint(T[0])
            # pprint(foa_state)

            # structural_X = [e['flat_state'] for e in
            #                 self.skills[label][exp]['examples']]

            #  Should rewrite this so that I use the right values.
            # structural_X = [foa_state for t in T]
            # structural_X = self.skills[label][exp]['foa_states']

            structural_X = []
            for i, e in enumerate(self.skills[label][exp]['examples']):
                x = {attr: e['flat_state'][attr] for attr in e['flat_state']}
                # x_vals = {a: x[a] for a in x if isinstance(a, tuple) and a[0] ==
                #         "value" and a[1] in self.skills[label][exp]['args'][i]}

                # print("COMPUTED FEATURES")
                # pprint([a for a in
                #         compute_features(x_vals,self.action_set.get_feature_dict())])
                # x.update(compute_features(x_vals,self.action_set.get_feature_dict()))
                # pprint(x)
                structural_X.append(x)

            value_X = []
            for e in self.skills[label][exp]['examples']:
                x = {attr: e['foa_values'][attr] for attr in e['foa_values']}

                if self.what:
                    what_features = {}
                    for attr in x:
                        if isinstance(x[attr], str) and x[attr] != "":
                            seq = [
                                c for c in x[attr].lower().replace(
                                    '"', "").replace("\\", "")
                            ]
                            # print(seq)
                            # print(self.what.parse(seq))
                            new_what_f = self.what.get_features(attr, seq)
                            for attr in new_what_f:
                                what_features[attr] = new_what_f[attr]

                x.update(
                    compute_features(x, self.action_set.get_feature_dict()))

                if self.what:
                    x.update(what_features)

                # for attr, value in self.compute_features(x, features):
                #     x[attr] = value
                # for attr in e['foa_names']:
                #     x[attr] = e['foa_names'][attr]
                x = tup.undo_transform(x)

                pprint(x)
                value_X.append(x)

                #if example['label'] == "convert-different-num2":
                #    print("CORRECTNESS:", e['correct'])
                #    pprint(x)

            self.skills[label][exp]['where_classifier'].fit(T, structural_X, y)
            self.skills[label][exp]['when_classifier'].fit(value_X, y)
    def train(self, state, selection, action, inputs, reward, skill_label,
              foci_of_attention):
        """
        Doc String
        """

        # print('\n'*5)
        # print('state', skill_label)
        # print('skill_label', skill_label)
        # print('selection', selection)
        # print('action', action)
        # print('inputs', inputs)
        # print('reward', reward)
        # print('state')
        # pprint(state)

        # label = 'math'

        # create example dict
        example = {}
        example['state'] = state
        example['skill_label'] = skill_label
        example['selection'] = selection
        example['action'] = action
        example['inputs'] = inputs
        example['reward'] = float(reward)

        tup = Tuplizer()
        flt = Flattener()
        example['flat_state'] = flt.transform(tup.transform(state))

        knowledge_base = FoPlanner(
            [(ground(a),
              example['flat_state'][a].replace('?', 'QM') if isinstance(
                  example['flat_state'][a], str) else example['flat_state'][a])
             for a in example['flat_state']], self.feature_set)

        knowledge_base.fc_infer(depth=1, epsilon=self.epsilon)

        example['flat_state'] = {
            unground(a): v.replace("QM", "?") if isinstance(v, str) else v
            for a, v in knowledge_base.facts
        }

        if skill_label not in self.skills:
            self.skills[skill_label] = {}

        explanations = []
        secondary_explanations = []

        # the base explanation (the constants)
        input_args = tuple(sorted([arg for arg in inputs]))
        sai = ('sai', selection, action, *[inputs[a] for a in input_args])

        # Need to do stuff with features here too.

        # used for grounding out plans, don't need to build up each time.
        # print(function_sets[self.action_set])

        # knowledge_base = FoPlanner([(ground(a),
        #                              example['flat_state'][a].replace('?', 'QM')
        #                              if isinstance(example['flat_state'][a], str)
        #                              else example['flat_state'][a])
        #                             for a in example['flat_state']],
        #                            self.function_set)
        # knowledge_base.fc_infer(depth=self.search_depth, epsilon=self.epsilon)

        # FACTS AFTER USING FUNCTIONS.
        # pprint(knowledge_base.facts)

        #DANNY: The gist of this is to find applicable skills (explanations,
        #       because the inputs/outputs are literals)
        for skill, learner_dict in self.skills[skill_label].items():
            exp, iargs = skill
            for match in self.explain_sai(skill, learner_dict, sai,
                                          knowledge_base,
                                          example['flat_state']):

                # print("MATCH", match)
                # print("COVERED", exp, m)

                #DANNY NOTES:
                #sai = ('sai', 'JCommTable5.R0C0', 'UpdateTextArea', '16')
                #exp  =  ('sai', ('id', '?foa0'), 'UpdateTextArea', ('value', ('Multiply', ('value', '?foa1'), ('value', '?foa2'))))
                #r_exp  =  the explanation renamed with the match literals
                #ARGS = A list of input arguments to the explanations
                #IARGS = A tuple of outputs? Or maybe a set of property names (e.g. 'value').
                #Match = A dictionary mapping from ?foas to element strings (the element names have QM instead of ?)
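                #Worked illustration (hypothetical values, following the notes
                #above): with match = {'?foa0': 'QMele-A', '?foa1': 'QMele-B',
                #'?foa2': 'QMele-C'}, rename_flat({exp: True}, match) rewrites
                #the ?foa variables in exp to those element names, and
                #unground(...) turns their 'QM' prefixes back into '?', giving
                #r_exp: the explanation grounded in concrete interface elements.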

                r_exp = unground(list(rename_flat({exp: True}, match))[0])
                args = get_vars(exp)

                # print("sai:", sai)
                # print("exp:", exp)
                # print("r_exp:", r_exp)
                # print("ARGS:", args)
                # print("IARGS:", iargs, type(iargs))
                # print("match", match)

                # Need to check if it would have been actually generated
                # under where and when.
                # Danny: Makes sure that there is a match for every argument
                if len(args) != len(match):
                    continue

                # print("HERE1")

                #DANNY: Checks that the match resolves to string elements
                grounded = True
                for ele in match:
                    if not isinstance(match[ele], str):
                        grounded = False
                        break
                if not grounded:
                    #DANNY: Doesn't really happen... not sure in what circumstances it would
                    # print("BAD MATCH: ", match)
                    continue

                # print("HERE2")

                # use a distinct name so the Tuplizer above isn't shadowed
                match_tup = tuple([
                    match["?foa%s" % i].replace("QM", "?")
                    for i in range(len(match))
                ])

                # print("match_tup:", match_tup)

                #match_tup = a tuple of the matched elements
                #Danny: Makes sure that the explanation hasn't used a foa twice
                if len(match_tup) != len(set(match_tup)):
                    continue

                # print("HERE3")

                secondary_explanations.append(r_exp)

                #Danny: Check that the where learner approves of the match
                #       It seems like the where learner should have just generated this if it was going to check it anyway
                #       This is only at the end it seems to allow for secondary explanations which the where learner is not yet aware of
                # print("A", self.skills[skill_label])
                # print("B", self.skills[skill_label][(exp, iargs)])
                skill_where = self.skills[skill_label][(exp, iargs)]['where']
                if not skill_where.check_match(
                        match_tup, example['flat_state']):
                    continue

                # print("ADDING", r_exp)
                explanations.append(r_exp)

        if len(explanations) == 0 and len(secondary_explanations) > 0:
            explanations.append(choice(secondary_explanations))

        elif len(explanations) == 0:

            explanations = self.explanations_from_how_search(
                example['flat_state'], ('sai', selection, action, inputs),
                input_args)

        #Danny: Do the training for all the applicable explanations
        # print("EXPLAINS", explanations)
        for exp in explanations:
            args = get_vars(exp)
            foa_vmapping = {
                field: '?foa%s' % j
                for j, field in enumerate(args)
            }
            foa_mapping = {field: 'foa%s' % j for j, field in enumerate(args)}
            r_exp = (list(rename_flat({exp: True},
                                      foa_vmapping))[0], input_args)
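            # Illustration (hypothetical element names): if args were
            # ('?ele-A', '?ele-B'), then
            #   foa_vmapping == {'?ele-A': '?foa0', '?ele-B': '?foa1'}
            #   foa_mapping  == {'?ele-A': 'foa0',  '?ele-B': 'foa1'}
            # so r_exp is the explanation re-variabilized with ?foa names,
            # paired with the sorted input argument names (e.g. ('value',)).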

            if r_exp not in self.skills[skill_label]:

                # TODO - Hack for specific action set
                # if self.action_set == "tutor knowledge":
                #     constraints = self.generate_tutor_constraints(r_exp[0])
                # else:
                #     constraints = self.extract_mg_h(r_exp[0])
                # constraints = extract_mg_h(r_exp[0])
                constraints = generate_html_tutor_constraints(r_exp[0])

                # print("CONSTRAINTS")
                # print(constraints)

                w_args = tuple(['?foa%s' % j for j, _ in enumerate(args)])

                self.skills[skill_label][r_exp] = {}
                where_inst = self.where(args=w_args, constraints=constraints)
                self.skills[skill_label][r_exp]['where'] = where_inst
                self.skills[skill_label][r_exp]['when'] = self.when()
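
                # Sketch of the resulting entry (shape only; contents are
                # illustrative): self.skills is keyed by skill label, then by
                # r_exp == (?foa-variabilized explanation, input arg names),
                # and each value pairs the two learners, e.g.
                #   self.skills[skill_label][r_exp]
                #       == {'where': <where learner>, 'when': <when learner>}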

            self.skills[skill_label][r_exp]['where'].ifit(
                args, example['flat_state'], example['reward'])
            # print('done where learning')

            # TODO
            # Need to add computed features.
            # need to rename example with foa's that are not variables
            r_flat = rename_flat(example['flat_state'], foa_mapping)

            self.skills[skill_label][r_exp]['when'].ifit(
                r_flat, example['reward'])
Example #21
0
def flatten_state(state):
    tup = Tuplizer()
    flt = Flattener()
    state = flt.transform(tup.transform(state))
    return state
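
# Usage sketch (not from the original source): the nested state below and the
# exact flattened key shape are illustrative assumptions about what Tuplizer
# and Flattener produce.
example_state = {'?ele-A': {'id': 'A', 'value': '7'}}
flat = flatten_state(example_state)
# flat is expected to look roughly like
#   {('id', '?ele-A'): 'A', ('value', '?ele-A'): '7'}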
Example #22
0
    #    setup += "from __main__ import random_instance\n"
    #    setup += "from __main__ import test\n"
    #    setup += "c = random_concept(1, %i)\n" % i
    #    setup += "i = random_instance(%i)\n" % i
    #
    #    for j in range(10):
    #        print("%i\t%0.3f" % (i, timeit.timeit("test(c,i)", setup=setup,
    #                                         number=10)))

    num_c_inst = 1
    num_objs = 20

    concept = random_concept(num_instances=num_c_inst, num_objects=num_objs)
    instance = random_instance(num_objects=num_objs)

    pl = Pipeline(Tuplizer(), SubComponentProcessor(), Flattener())

    #i = sm.transform(pl.transform(subconcept.av_counts))
    #print("STRUCTURE MAPPED INSTANCE")
    #print(i)

    pipeline = Pipeline(Tuplizer(), NameStandardizer(concept.tree.gensym),
                        SubComponentProcessor(), Flattener())
    #ns = NameStandardizer(concept.tree.gensym)

    #pprint(subconcept.av_counts)

    #instance = ns.transform(subconcept.av_counts)
    instance = pipeline.transform(random_instance(num_objects=num_objs))

    inames = frozenset(get_component_names(instance))