Example #1
    def np_features(self, f_words, object_grounding):
        """
        Compute features for a noun phrase, given a grounding.
        """
        assert not isnan(object_grounding.points_xy[0][0])


        result_dict = {}
        polygon_dict = compute_fdict(sf.flu_polygon_names(vectors(f_words)),
                                     sf.flu_polygon(vectors(f_words), object_grounding.points_xy, True))

        figure_i = self.add_landmark(object_grounding)
        result_dict = merge(result_dict, polygon_dict)


        if hasattr(object_grounding, "tags"):
            visible_objects = self._get_landmark_context(figure_i)
            lo_dict = language_object(f_words,
                                      visible_objects,
                                      object_grounding.tags)

            result_dict = merge(result_dict, lo_dict)


        return result_dict
Example #2
    def compute_features(self, state, ggg, factors):
        """
        Computes features for the unmodified annotation.
        """

        factor_to_fnames = {}
        factor_to_fvalues = {}

        if factors is None:
            # Default to every factor in the grounding graph.
            factors = [
                ggg.graph.get_factor_with_id(fid)
                for fid in ggg.graph.get_factor_id_list()
            ]

        for factor in factors:
            esdc = ggg.factor_to_esdc(factor)
            r_words = [w.text.lower() for w in esdc.r] + ["null"]
            words = [w.lower() for w in esdc.text.split()] + ["null"]

            fdict = self.factor_features(factor, state, ggg)
            base_features = merge(fdict, add_prefix(esdc.type + "_", fdict))

            fdict = merge(
                base_features,
                add_prefix("r_", add_word_features(base_features, r_words)))
            fdict = merge(fdict, add_word_features(base_features, words))

            factor_to_fnames[factor.id] = fdict.keys()
            factor_to_fvalues[factor.id] = fdict.values()

        return factor_to_fvalues, factor_to_fnames
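
These examples all lean on a handful of dictionary helpers (merge, add_prefix, add_word_features, compute_fdict) whose implementations are not shown. The following is a minimal sketch of how they presumably behave, reconstructed from the call sites; the real implementations in the source codebase may differ:

    def merge(d1, d2):
        """Return a new dict combining two feature dicts (assumed behaviour)."""
        result = dict(d1)
        result.update(d2)
        return result

    def add_prefix(prefix, fdict):
        """Prefix every feature name, e.g. {"dist": 2} -> {"start_dist": 2}."""
        return dict((prefix + name, value) for name, value in fdict.items())

    def add_word_features(fdict, words):
        """Cross base features with words so that each (word, feature) pair
        becomes its own feature, e.g. "w_near_dist" (naming is a guess)."""
        result = {}
        for word in words:
            for name, value in fdict.items():
                result["w_%s_%s" % (word, name)] = value
        return result

    def compute_fdict(names, values):
        """Pair a parallel list of feature names with their values."""
        return dict(zip(names, values))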
Example #3
    def object_object_start_end(self, agent, f_grounding, r_words, l_grounding, prefix):
        result_dict = {}
        f_start = f_grounding.prismAtT(0)
        f_end = f_grounding.prismAtT(-1)
        l_start = l_grounding.prismAtT(0)
        l_end = l_grounding.prismAtT(-1)

        fdict = self.object_landmark_features(agent, f_start, r_words, l_start)
        fdict = add_prefix("start_%s" % (prefix), fdict)
        result_dict = merge(result_dict, fdict)


        fdict = self.object_landmark_features(agent, f_end, r_words, l_end)
        fdict = add_prefix("end_%s" % (prefix), fdict)
        result_dict = merge(result_dict, fdict)

        fdict = self.object_object_averages(agent, f_grounding, r_words, l_grounding)
        fdict = add_prefix("avg_%s" % (prefix), fdict)
        result_dict = merge(result_dict, fdict)
        for r_word in r_words:
            if l_grounding.path.max_dist_from_start() > 0.1:
                result_dict["%s_w_%s_l_moving" % (prefix, r_word)] = 1
            else:
                result_dict["%s_w_%s_l_still" % (prefix, r_word)] = 1

        return result_dict
Example #4
    def object_landmark_features(self, agent, f_prism, r_words, l_prism):
        result_dict = {}

        assert not isnan(l_prism.points_xy[0][0])
        assert not isnan(f_prism.points_xy[0][0])

        prism_dict = compute_fdict(sf.sfe_f_prism_l_prism_names(),
                                   sf.sfe_f_prism_l_prism(f_prism, l_prism, normalize=True))
        result_dict = merge(result_dict, prism_dict)

        ax, ay, agent_theta = agent.path.points_xytheta


        for name, theta in [("avs_theta_start", agent_theta[0]), ("avs_theta_end", agent_theta[-1]),
                            ("avs_theta_avg", na.mean(agent_theta))]:
            avs_dict = compute_fdict(sf.spatial_features_names_avs_polygon_polygon(),
                                     sf.spatial_features_avs_polygon_polygon(f_prism.points_xy,
                                                                             l_prism.points_xy,
                                                                             theta))

            result_dict = merge(result_dict, add_prefix(name, avs_dict))

        # Discretize the figure's position relative to the landmark, as seen
        # from the agent's initial orientation, into a quadrant and an octant.
        theta = agent_theta[0]
        if not array_equal(f_prism.centroid2d(), l_prism.centroid2d()):
            angle_btwn_points = sf.math2d_angle(na.array(f_prism.centroid2d()) -
                                                na.array(l_prism.centroid2d()))
            angle = theta - angle_btwn_points - math.pi / 4
            quadrant = sf.math2d_angle_to_quadrant(angle)
            octant = sf.math2d_angle_to_octant(angle + math.pi / 8)
        else:
            # Coincident centroids: the relative direction is undefined.
            quadrant = -1
            octant = -1

        result_dict["f_in_l_quadrant"] = quadrant
        # One-hot encode the octant, reserving -1 for the undefined case.
        for i in range(-1, 8):
            result_dict["f_in_l_octant_%d" % i] = 0
        result_dict["f_in_l_octant_%d" % octant] = 1


        result_dict = dict((f, v) for f, v in result_dict.iteritems() if (("avsg" not in f) and
                                                                          ("avsHeightExp" not in f) and
                                                                          ("avsResult" not in f)))


        result_dict = add_word_features(result_dict, r_words)


        return result_dict
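
The quadrant/octant block at the end of object_landmark_features discretizes the direction from the landmark centroid to the figure centroid, relative to the agent's starting heading. sf.math2d_angle_to_quadrant and sf.math2d_angle_to_octant belong to the original codebase; purely as an illustration of this kind of binning (not their actual implementation), and noting that the pi/4 and pi/8 offsets in the method each amount to half a bin width:

    import math

    def angle_to_bin(angle, n_bins):
        """Illustrative only: map an angle in radians to one of n_bins
        equal sectors of the circle."""
        width = 2 * math.pi / n_bins
        wrapped = angle % (2 * math.pi)   # normalize to [0, 2*pi)
        return int(wrapped // width)

    print(angle_to_bin(math.radians(100), 4))   # quadrant-style binning -> 1
    print(angle_to_bin(math.radians(100), 8))   # octant-style binning -> 2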
Example #5
    def path_factor_features(self, factor, ggg):
        parent_node = factor.nodes_for_link("top")[0]
        assert "EVENT" in parent_node.type or "PATH" in parent_node.type

        assert factor.has_children("r")
        r_words = convert_words(
            ggg.evidence_for_node(factor.nodes_for_link("r")[0]))

        result_dict = {}

        f_groundings = ggg.evidence_for_node(parent_node)
        if len(f_groundings) == 0:
            return {}

        f_grounding = f_groundings[0]

        if isinstance(f_grounding, PhysicalObject):
            f_path = f_grounding.path
        else:
            f_path = f_grounding

        assert isinstance(f_path, Path), f_path
        for li, (l_node, l_groundings) in enumerate(
            (node, ggg.evidence_for_node(node))
                for node in factor.nodes_for_link("l")):
            for lj, l_grounding in enumerate(l_groundings):
                if "PATH" in l_node.type and isinstance(
                        l_grounding, PhysicalObject):
                    l_grounding = l_grounding.path

                fdict = self.path_landmark_features(f_path, r_words,
                                                    l_grounding)
                fdict = add_prefix("l_%d_%d_" % (li, lj), fdict)
                result_dict = merge(result_dict, fdict)
        if factor.has_children("l2"):
            for l2i, (l2_node, l2_groundings) in enumerate(
                (node, ggg.evidence_for_node(node))
                    for node in factor.nodes_for_link("l2")):
                for l2j, l2_grounding in enumerate(l2_groundings):
                    if "PATH" in l2_node.type and isinstance(
                            l2_grounding, PhysicalObject):
                        l2_grounding = l2_grounding.path

                    fdict = self.path_landmark_features(
                        f_path, r_words, l2_grounding)
                    fdict = add_prefix("l2_%d_%d_" % (l2i, l2j), fdict)
                    result_dict = merge(result_dict, fdict)

        return result_dict
Example #6
    def relation_object_factor_features(self, factor, ggg):
        parent_node = factor.nodes_for_link("top")[0]
        assert "OBJECT" in parent_node.type or "PLACE" in parent_node.type
        result_dict = {}
        assert factor.has_children("r")
        r_words = convert_words(
            ggg.evidence_for_node(factor.nodes_for_link("r")[0]))

        for fi, f_grounding in enumerate(ggg.evidence_for_node(parent_node)):

            assert (isinstance(f_grounding, PhysicalObject)
                    or isinstance(f_grounding, Place)), f_grounding

            if factor.has_children("l"):
                l_groundings = chain(*[
                    ggg.evidence_for_node(node)
                    for node in factor.nodes_for_link("l")
                ])
            else:
                # if the landmark is empty, add in the fork (the agent)
                l_groundings = [ggg.context.agent]

            for li, l_grounding in enumerate(l_groundings):
                if not isinstance(l_grounding, Path):
                    fdict = self.object_landmark_features(
                        ggg.context.agent, f_grounding.prism, r_words,
                        l_grounding.prism)
                    fdict = add_prefix("f_%d_l_%d_" % (fi, li), fdict)
                    result_dict = merge(result_dict, fdict)

            if factor.has_children("l2"):
                l2_groundings = chain(*[
                    ggg.evidence_for_node(node)
                    for node in factor.nodes_for_link("l2")
                ])

                for l2i, l2_grounding in enumerate(l2_groundings):
                    if not isinstance(l2_grounding, Path):
                        fdict = self.object_landmark_features(
                            ggg.context.agent, f_grounding.prism, r_words,
                            l2_grounding.prism)

                        fdict = add_prefix("f_%d_l2_%d_" % (fi, l2i), fdict)
                        result_dict = merge(result_dict, fdict)

        return result_dict
Example #7
    def object_object_start_end(self, agent, f_grounding, r_words, l_grounding,
                                prefix):
        result_dict = {}
        f_start = f_grounding.prismAtT(0)
        f_end = f_grounding.prismAtT(-1)
        l_start = l_grounding.prismAtT(0)
        l_end = l_grounding.prismAtT(-1)

        fdict = self.object_landmark_features(agent, f_start, r_words, l_start)
        fdict = add_prefix("start_%s" % (prefix), fdict)
        result_dict = merge(result_dict, fdict)

        fdict = self.object_landmark_features(agent, f_end, r_words, l_end)
        fdict = add_prefix("end_%s" % (prefix), fdict)
        result_dict = merge(result_dict, fdict)

        return result_dict

    def esdc_features(self, annotation, esdc):
        all_groundings = []
        for key in esdc.fieldNames:
            for child in esdc.children(key):
                if isinstance(child, ExtendedSdc):
                    groundings = annotation.getGroundings(child)
                    all_groundings.extend(groundings)
        words = [w.lower() for w in esdc.text.split()] + ["null"]

        all_groundings = [("g%i" % i, g) for i, g in enumerate(all_groundings)]

        two_arg_feature_methods = [
            self.sm.object_object_start_end,
            self.sm.path_landmark_features,
            self.sm.object_landmark_features,
        ]

        result_dict = {}
        for x1, x2 in combinations(all_groundings, r=2):
            for (n1, g1), (n2, g2) in [(x1, x2), (x2, x1)]:
                for method in two_arg_feature_methods:
                    args = dict(agent=annotation.agent,
                                f_grounding=g1,
                                l_grounding=g2,
                                r_words=words,
                                prefix="%s_%s" % (n1, n2))
                    try:
                        fdict = method(**args)
                        result_dict = merge(result_dict, fdict)
                    except EsdcFeatureTypeError:
                        pass

        one_arg_feature_methods = []
        for g in all_groundings:
            for method in one_arg_feature_methods:
                fdict = method(g)
                result_dict = merge(result_dict, fdict)

        return result_dict
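
In esdc_features, iterating combinations(all_groundings, r=2) and then evaluating each pair in both orders visits every ordered pair of distinct groundings. A minimal sketch of the same iteration written with itertools.permutations (the grounding names and values here are placeholders):

    from itertools import permutations

    groundings = [("g0", "cup"), ("g1", "table"), ("g2", "robot")]
    for (n1, g1), (n2, g2) in permutations(groundings, 2):
        # Visits the same set of ordered pairs as the combinations-plus-swap
        # loop above, e.g. both ("g0", "g1") and ("g1", "g0").
        prefix = "%s_%s" % (n1, n2)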
Example #9
    def np_features(self, f_words, object_grounding):
        """
        Compute features for a noun phrase, given a grounding. 
        """
        assert not isnan(object_grounding.points_xy[0][0])

        result_dict = {}

        if hasattr(object_grounding, "tags"):
            lo_dict = language_object(f_words, [], object_grounding.tags)
            lo_dict = dict(
                (key, value) for key, value in lo_dict.iteritems()
                if "flickr" not in key and "wordnet" not in key and "whole"
                not in key and "overlap" not in key and "cword" not in key)
            result_dict = merge(result_dict, lo_dict)

        return result_dict
Example #10
    def leaf_object_factor_features(self, factor, ggg):
        assert factor.has_children("f")
        assert not factor.has_children("r")
        assert not factor.has_children("l")
        assert not factor.has_children("l2")

        f_words = convert_words(ggg.evidence_for_node(factor.nodes_for_link("f")[0]))

        result_dict = {}

        for fi, f_grounding in enumerate(ggg.evidence_for_node(factor.nodes_for_link("top")[0])):

            assert (isinstance(f_grounding, PhysicalObject) or
                    isinstance(f_grounding, Place)), (f_grounding, str(ggg.factor_to_esdc(factor)))
            fdict = add_prefix("f_%d_" % fi, self.np_features(f_words, f_grounding))
            result_dict = merge(result_dict, fdict)

        return result_dict
Example #11
    def event_factor_features(self, factor, ggg):
        parent_node = factor.nodes_for_link("top")[0]
        assert "EVENT" in parent_node.type

        r_words = convert_words(ggg.evidence_for_node(factor.nodes_for_link("r")[0]))
        f_grounding = ggg.evidence_for_node(parent_node)[0]

        assert isinstance(f_grounding, PhysicalObject), (f_grounding, f_grounding.__class__)
        result_dict = {}

        for li, (l_node, l_groundings) in enumerate((node, ggg.evidence_for_node(node))
                                                    for node in factor.nodes_for_link("l")):
            for lj, l_grounding in enumerate(l_groundings):
                if "PATH" in l_node.type and isinstance(l_grounding, PhysicalObject):
                    l_grounding = l_grounding.path

                if hasattr(l_grounding, "prism"):
                    fdict = self.object_object_start_end(ggg.context.agent,
                                                         f_grounding, r_words, l_grounding,
                                                         "l1_%d_%d_" % (li, lj))
                    result_dict = merge(result_dict, fdict)
        if factor.has_children("l2"):
            for l2i, (l2_node, l2_groundings) in enumerate((node, ggg.evidence_for_node(node))
                                                           for node in factor.nodes_for_link("l2")):
                for l2j, l2_grounding in enumerate(l2_groundings):
                    if "PATH" in l2_node.type and isinstance(l2_grounding, PhysicalObject):
                        l2_grounding = l2_grounding.path

                    if hasattr(l2_grounding, "prism"):
                        fdict = self.object_object_start_end(ggg.context.agent,
                                                             f_grounding, r_words, l2_grounding,
                                                             "l2_%d_%d_" % (l2i, l2j))
                        result_dict = merge(result_dict, fdict)

        for li, (l_node, l_groundings) in enumerate((node, ggg.evidence_for_node(node))
                                                    for node in factor.nodes_for_link("l")):

            for lj, l_grounding in enumerate(l_groundings):
                if "PATH" in l_node.type and isinstance(l_grounding, PhysicalObject):
                    l_grounding = l_grounding.path

                if not factor.has_children("l2"):
                    continue

                for l2i, (l2_node, l2_groundings) in enumerate((node, ggg.evidence_for_node(node))
                                                               for node in factor.nodes_for_link("l2")):
                    for l2j, l2_grounding in enumerate(l2_groundings):
                        if "PATH" in l2_node.type and isinstance(l2_grounding, PhysicalObject):
                            l2_grounding = l2_grounding.path

                        if (hasattr(l2_grounding, "prism") and
                            hasattr(l_grounding, "prism")):
                            fdict = self.object_object_start_end(ggg.context.agent,
                                                                 l_grounding, r_words, l2_grounding,
                                                                 "l_l2_%d_%d_%d_%d_" % (li, lj, l2i, l2j))
                            result_dict = merge(result_dict, fdict)

        result_dict = merge(result_dict, self.path_factor_features(factor, ggg))

        return result_dict
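
The final nested loops in event_factor_features pair every l grounding with every l2 grounding before computing start/end features for the pair. Simplified to a single index per side and with placeholder groundings, the same cross-product can be sketched with itertools.product:

    from itertools import product

    l_groundings = ["lamp", "door"]
    l2_groundings = ["hall"]
    for (li, l_g), (l2i, l2_g) in product(enumerate(l_groundings),
                                          enumerate(l2_groundings)):
        # One prefix per (l, l2) pair, analogous to the "l_l2_..." prefixes above.
        prefix = "l_l2_%d_%d_" % (li, l2i)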