def preprocess(self, corpus_dict):
        """Build per-feature training inputs and labels from a labeled corpus.

        Returns ([Xall], [Yall]): each dict maps a feature name to parallel
        lists of input vectors and boolean "was this feature used" labels.
        """
        inputs = {feature: [] for feature in self.features}
        labels = {feature: [] for feature in self.features}

        for context in corpus_dict:
            context.init_spatial_model(self.sp_model)
            for obj, utt in corpus_dict[context]:
                working_set = context
                features_used = {token[0] for token in utt}

                for feature in self.features:
                    dscore, label, conf, res_objs = self.produce_input(
                        feature, obj, context)
                    sample = [dscore, conf] if conf else [dscore]
                    used = feature in features_used
                    inputs[feature].append(sample)
                    labels[feature].append(used)

                    if used:
                        # NOTE(review): working_set is rebuilt here but never
                        # read afterwards — possibly produce_input was meant
                        # to receive it instead of `context`; confirm intent.
                        working_set = AdaptiveContext(res_objs)
                        working_set.init_spatial_model(self.sp_model)
        return [inputs], [labels]
def bootstrap_env_info():
    """Yield one AdaptiveContext per environment described in the env file.

    After all environments are exhausted, yields None forever so callers can
    keep pulling without hitting StopIteration.
    """
    # if rospy.get_param('hrc_discrim_learning/use_perception'):
    if False:
        # TODO: implement perception-driven environment construction
        pass

    else:
        env_path = rospy.get_param('/hrc_discrim_learning/env_file')

        with open(env_path, 'r') as fp:
            all_contexts = json.load(fp)

        for context_name, all_objs in all_contexts.items():
            objects = []
            for obj_id, features in all_objs.items():
                # the JSON key is the object's id; attach it as a feature
                features['id'] = int(obj_id)
                objects.append(Object(features))

            yield AdaptiveContext(objects, context_name)

        while True:
            yield None
    def predict(self, obj, context):
        """Generate a discriminating referring expression for *obj*.

        First runs a classifier-driven pass (via _incremental_predict) that
        keeps adding feature labels while the learned model votes yes, then
        narrows the context to type-matching objects and appends any
        salience-ranked feature value that still discriminates. The object's
        type is always appended last.

        Returns the expression as a space-separated string ending in the type.
        """
        features = self.features

        # type is added as a given (avoid shadowing the builtin `type`)
        obj_type = obj.get_feature_class_value("type")
        output = ''
        context.init_spatial_model(self.sp_model)

        # Classifier-driven pass: stop when no remaining feature is voted in.
        while True:
            feature, label, new_context = self._incremental_predict(
                obj, context, features)
            if not feature:
                break
            output += (label + ' ')
            context = new_context
            features = list(set(features) - set([feature]))

        # Narrow to objects sharing the target's type.
        type_context_objs, _ = context.shared_features("type", obj_type)
        context = AdaptiveContext(type_context_objs)
        context.init_spatial_model(self.sp_model)

        # Salience pass: append any feature value that still shrinks the set.
        for f in self.salience_ranking:
            if context.env_size <= 1:
                break

            fval = context.get_obj_context_value(obj, f)
            new_context_objs, count = context.shared_features(f, fval)

            if count < context.env_size:
                output += (fval + ' ')
                context = AdaptiveContext(new_context_objs)
                context.init_spatial_model(self.sp_model)

        output += obj_type
        return output
    def _incremental_predict(self, obj, context, features):
        """Return (feature, label, narrowed_context) for the first feature
        whose classifier votes to include it, or (None, None, None) if none do.
        """
        for feature in features:
            dscore, label, conf, objs_remaining = self.produce_input(
                feature, obj, context)

            # Confidence is an optional second input dimension.
            sample = [dscore, conf] if conf else [dscore]
            vote = self.clf_models[feature].predict([sample])[0]

            if vote:
                narrowed = AdaptiveContext(objs_remaining)
                narrowed.init_spatial_model(self.sp_model)
                return feature, label, narrowed

        return None, None, None
    def predict(self, object, context):
        """Produce a referring expression by pool intersection (non-narrowing).

        Restricts the context to type-matching objects, then intersects the
        candidate pool with each feature's matching set in fixed feature
        order, emitting a feature value whenever it shrinks the pool. The
        object's type is always appended last.
        """
        # initialize context with relative models
        context.init_spatial_model(self.sp_model)
        type_matching_objs, count = context.shared_features(
            'type', object.get_feature_class_value('type'))
        context = AdaptiveContext(type_matching_objs)
        context.init_spatial_model(self.sp_model)

        remaining_pool = set(context.env)  # candidate objects
        parts = []

        for feature in self.features:
            value = context.get_obj_context_value(object, feature)
            matching, _ = context.shared_features(feature, value)

            remaining_pool = remaining_pool.intersection(set(matching))
            pool_size = len(remaining_pool)

            # Only emit the value if it actually eliminated candidates.
            if pool_size < count:
                parts.append(value + ' ')
            count = pool_size

            if pool_size == 1:
                break

        # always include category
        parts.append(context.get_obj_context_value(object, 'type'))
        return ''.join(parts)
    def test_predictions(self):
        """Smoke-check predict() over the 'differ by 1' environment."""
        env_path = "/ros/catkin_ws/src/hrc_discrim_learning/train/full_envs.json"
        with open(env_path, 'r') as fp:
            all_envs = json.load(fp)

        # Build objects in the dict's iteration order, as before.
        objects = [Object(spec) for spec in all_envs["differ by 1"].values()]
        context = AdaptiveContext(objects)

        for obj in objects:
            print(self.m.predict(obj, context))
    def predict(self, object, context):
        """Greedy referring-expression generation.

        Restricts the context to type-matching objects, then repeatedly
        emits the feature value that eliminates the most remaining
        candidates until one object remains or features run out. The
        object's type is always appended last.
        """
        # initialize context with relative models
        context.init_spatial_model(self.sp_model)
        type_matching_objs, count = context.shared_features(
            'type', object.get_feature_class_value('type'))
        context = AdaptiveContext(type_matching_objs)
        context.init_spatial_model(self.sp_model)

        count = context.env_size
        output = ''
        features = set(self.features)

        # TODO: implement narrowing scope option

        while features and count > 1:
            # Track the best (feature, value) pair; best_score starts at
            # -inf so the first candidate always wins.
            best_fval = None
            best_score = -math.inf

            for f in features:
                val = context.get_obj_context_value(object, f)
                _, new_count = context.shared_features(f, val)

                # score = number of candidates this feature value eliminates
                score = count - new_count
                if score > best_score:
                    best_fval = (f, val)
                    best_score = score

            count -= best_score
            output += (best_fval[1] + ' ')
            features -= set([best_fval[0]])

        # always include category
        output += object.get_feature_class_value('type')

        return output
# Example #8 ("예제 #8" — scrape artifact separating snippets; stray "0" was a vote count)
    def test_prediction(self):
        """End-to-end check: predict() should reproduce each object's stored
        'description' feature for every object in the fixture file.
        """
        # Each JSON entry looks like:
        #   {"location": [x, y, z], "orientation": [qx, qy, qz, qw],
        #    "description": "right"}
        obj_file = "/ros/catkin_ws/src/hrc_discrim_learning/train/objects.json"
        with open(obj_file, 'r') as fp:
            obj_dict = json.load(fp)

        # Avoid shadowing the builtin `id`; the keys are unused here.
        all_objs = [Object(spec) for spec in obj_dict.values()]
        context = AdaptiveContext(all_objs)

        for obj in all_objs:
            self.assertEqual(obj.get_feature_class_value('description'),
                self.model.predict(obj, context))
    def predict(self, object, context):
        """Search candidate feature sets for a minimal discriminating expression.

        Tries feature subsets (combinations, or ordered permutations when
        self.narrowing_scope is set) and returns the expression from the
        first set that reduces the candidates to at most one object; the
        object's type is always appended last.
        """
        # initialize context with relative models
        context.init_spatial_model(self.sp_model)
        obj_type = object.get_feature_class_value('type')
        type_matching_objs, count = context.shared_features('type', obj_type)
        context = AdaptiveContext(type_matching_objs)
        context.init_spatial_model(self.sp_model)

        all_combos = (combo_powerset(self.features) if not self.narrowing_scope else permute_powerset(self.features))

        # Initialize so the method is safe even if all_combos is empty.
        output = ''
        for fset in all_combos:
            mini_context = context
            count = context.env_size
            # BUGFIX: reset the candidate pool for each feature set.
            # Previously obj_pool was created once outside this loop, so
            # later sets were scored against an already-narrowed pool.
            obj_pool = set(context.env)
            output = ''
            for f in fset:
                val = mini_context.get_obj_context_value(object, f)
                res, count = mini_context.shared_features(f, val)

                if self.narrowing_scope:
                    # narrowing: evaluate the next feature in the reduced context
                    mini_context = AdaptiveContext(res)
                    mini_context.init_spatial_model(self.sp_model)

                else:
                    # non-narrowing: intersect against the running pool
                    obj_pool = obj_pool.intersection(set(res))
                    count = len(obj_pool)

                output += (val + ' ')

            if count <= 1:
                break

        output += obj_type
        return output