def compute_features(self, state, ggg, factors):
    """
    Computes features for the unmodified annotation.
    """
    factor_to_fnames = {}
    factor_to_fvalues = {}
    if factors is None:
        factors = [ggg.graph.get_factor_with_id(fid)
                   for fid in ggg.graph.get_factor_id_list()]

    for factor in factors:
        esdc = ggg.factor_to_esdc(factor)
        r_words = [w.text.lower() for w in esdc.r] + ["null"]
        words = [w.lower() for w in esdc.text.split()] + ["null"]

        fdict = self.factor_features(factor, state, ggg)
        base_features = merge(fdict, add_prefix(esdc.type + "_", fdict))
        fdict = merge(base_features,
                      add_prefix("r_", add_word_features(base_features, r_words)))
        fdict = merge(fdict, add_word_features(base_features, words))

        factor_to_fnames[factor.id] = fdict.keys()
        factor_to_fvalues[factor.id] = fdict.values()

    return factor_to_fvalues, factor_to_fnames
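# The helpers merge, add_prefix, and add_word_features are used throughout
# this section but are not defined in it.  The versions below are only a
# minimal sketch of their assumed behavior (dict union, key prefixing, and
# crossing features with relation words); the real implementations and the
# exact key-naming convention may differ.
def merge(d1, d2):
    """Return a new dict containing the entries of d1 updated with d2."""
    result = dict(d1)
    result.update(d2)
    return result

def add_prefix(prefix, fdict):
    """Prefix every feature name in fdict with the given string."""
    return dict((prefix + name, value) for name, value in fdict.iteritems())

def add_word_features(fdict, words):
    """Cross each feature with each word.  The 'w_<word>_<feature>' key
    format is an assumption based on the feature names used elsewhere."""
    return dict(("w_%s_%s" % (word, name), value)
                for word in words
                for name, value in fdict.iteritems())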
def object_object_start_end(self, agent, f_grounding, r_words,
                            l_grounding, prefix):
    result_dict = {}
    f_start = f_grounding.prismAtT(0)
    f_end = f_grounding.prismAtT(-1)
    l_start = l_grounding.prismAtT(0)
    l_end = l_grounding.prismAtT(-1)

    fdict = self.object_landmark_features(agent, f_start, r_words, l_start)
    fdict = add_prefix("start_%s" % prefix, fdict)
    result_dict = merge(result_dict, fdict)

    fdict = self.object_landmark_features(agent, f_end, r_words, l_end)
    fdict = add_prefix("end_%s" % prefix, fdict)
    result_dict = merge(result_dict, fdict)

    fdict = self.object_object_averages(agent, f_grounding, r_words,
                                        l_grounding)
    fdict = add_prefix("avg_%s" % prefix, fdict)
    result_dict = merge(result_dict, fdict)

    for r_word in r_words:
        if l_grounding.path.max_dist_from_start() > 0.1:
            result_dict["%s_w_%s_l_moving" % (prefix, r_word)] = 1
        else:
            result_dict["%s_w_%s_l_still" % (prefix, r_word)] = 1

    return result_dict
def path_factor_features(self, factor, ggg):
    parent_node = factor.nodes_for_link("top")[0]
    assert "EVENT" in parent_node.type or "PATH" in parent_node.type
    assert factor.has_children("r")
    r_words = convert_words(
        ggg.evidence_for_node(factor.nodes_for_link("r")[0]))

    result_dict = {}
    f_groundings = ggg.evidence_for_node(parent_node)
    if len(f_groundings) == 0:
        return {}
    f_grounding = f_groundings[0]
    if isinstance(f_grounding, PhysicalObject):
        f_path = f_grounding.path
    else:
        f_path = f_grounding
    assert isinstance(f_path, Path), f_path

    for li, (l_node, l_groundings) in enumerate(
            (node, ggg.evidence_for_node(node))
            for node in factor.nodes_for_link("l")):
        for lj, l_grounding in enumerate(l_groundings):
            if "PATH" in l_node.type and isinstance(l_grounding, PhysicalObject):
                l_grounding = l_grounding.path
            fdict = self.path_landmark_features(f_path, r_words, l_grounding)
            fdict = add_prefix("l_%d_%d_" % (li, lj), fdict)
            result_dict = merge(result_dict, fdict)

    if factor.has_children("l2"):
        for l2i, (l2_node, l2_groundings) in enumerate(
                (node, ggg.evidence_for_node(node))
                for node in factor.nodes_for_link("l2")):
            for l2j, l2_grounding in enumerate(l2_groundings):
                if "PATH" in l2_node.type and isinstance(l2_grounding, PhysicalObject):
                    l2_grounding = l2_grounding.path
                fdict = self.path_landmark_features(f_path, r_words, l2_grounding)
                fdict = add_prefix("l2_%d_%d_" % (l2i, l2j), fdict)
                result_dict = merge(result_dict, fdict)

    return result_dict
def relation_object_factor_features(self, factor, ggg):
    parent_node = factor.nodes_for_link("top")[0]
    assert "OBJECT" in parent_node.type or "PLACE" in parent_node.type
    assert factor.has_children("r")
    r_words = convert_words(
        ggg.evidence_for_node(factor.nodes_for_link("r")[0]))

    result_dict = {}
    for fi, f_grounding in enumerate(ggg.evidence_for_node(parent_node)):
        assert isinstance(f_grounding, (PhysicalObject, Place)), f_grounding

        if factor.has_children("l"):
            l_groundings = chain(*[ggg.evidence_for_node(node)
                                   for node in factor.nodes_for_link("l")])
        else:
            # If the landmark is empty, fall back to the agent as the landmark.
            l_groundings = [ggg.context.agent]

        for li, l_grounding in enumerate(l_groundings):
            if not isinstance(l_grounding, Path):
                fdict = self.object_landmark_features(
                    ggg.context.agent, f_grounding.prism, r_words,
                    l_grounding.prism)
                fdict = add_prefix("f_%d_l_%d_" % (fi, li), fdict)
                result_dict = merge(result_dict, fdict)

        if factor.has_children("l2"):
            l2_groundings = chain(*[ggg.evidence_for_node(node)
                                    for node in factor.nodes_for_link("l2")])
            for l2i, l2_grounding in enumerate(l2_groundings):
                if not isinstance(l2_grounding, Path):
                    fdict = self.object_landmark_features(
                        ggg.context.agent, f_grounding.prism, r_words,
                        l2_grounding.prism)
                    fdict = add_prefix("f_%d_l2_%d_" % (fi, l2i), fdict)
                    result_dict = merge(result_dict, fdict)

    return result_dict
def object_object_start_end(self, agent, f_grounding, r_words,
                            l_grounding, prefix):
    result_dict = {}
    f_start = f_grounding.prismAtT(0)
    f_end = f_grounding.prismAtT(-1)
    l_start = l_grounding.prismAtT(0)
    l_end = l_grounding.prismAtT(-1)

    fdict = self.object_landmark_features(agent, f_start, r_words, l_start)
    fdict = add_prefix("start_%s" % prefix, fdict)
    result_dict = merge(result_dict, fdict)

    fdict = self.object_landmark_features(agent, f_end, r_words, l_end)
    fdict = add_prefix("end_%s" % prefix, fdict)
    result_dict = merge(result_dict, fdict)

    return result_dict
def object_landmark_features(self, agent, f_prism, r_words, l_prism):
    result_dict = {}
    assert not isnan(l_prism.points_xy[0][0])
    assert not isnan(f_prism.points_xy[0][0])

    # Figure/landmark prism features.
    prism_dict = compute_fdict(
        sf.sfe_f_prism_l_prism_names(),
        sf.sfe_f_prism_l_prism(f_prism, l_prism, normalize=True))
    result_dict = merge(result_dict, prism_dict)

    # AVS features evaluated at the agent's start, end, and average heading.
    ax, ay, agent_theta = agent.path.points_xytheta
    for name, theta in [("avs_theta_start", agent_theta[0]),
                        ("avs_theta_end", agent_theta[-1]),
                        ("avs_theta_avg", na.mean(agent_theta))]:
        avs_dict = compute_fdict(
            sf.spatial_features_names_avs_polygon_polygon(),
            sf.spatial_features_avs_polygon_polygon(f_prism.points_xy,
                                                    l_prism.points_xy,
                                                    theta))
        result_dict = merge(result_dict, add_prefix(name, avs_dict))

    # Quadrant/octant of the figure relative to the landmark, in the agent's
    # starting frame of reference.
    theta = agent_theta[0]
    if not array_equal(f_prism.centroid2d(), l_prism.centroid2d()):
        angle_btwn_points = sf.math2d_angle(
            na.array(f_prism.centroid2d()) - na.array(l_prism.centroid2d()))
        angle = theta - angle_btwn_points - math.pi / 4
        quadrant = sf.math2d_angle_to_quadrant(angle)
        octant = sf.math2d_angle_to_octant(angle + math.pi / 8)
    else:
        quadrant = -1
        octant = -1
    result_dict["f_in_l_quadrant"] = quadrant
    for i in range(-1, 8):
        result_dict["f_in_l_octant_%d" % i] = 0
    result_dict["f_in_l_octant_%d" % octant] = 1

    result_dict = dict((f, v) for f, v in result_dict.iteritems()
                       if "avsg" not in f and "avsHeightExp" not in f
                       and "avsResult" not in f)
    result_dict = add_word_features(result_dict, r_words)
    return result_dict
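# compute_fdict is not defined in this section.  From its call sites above it
# appears to pair a list of feature names with a parallel list of feature
# values; the sketch below is a minimal implementation under that assumption.
def compute_fdict(names, values):
    """Zip parallel name and value lists into a feature dict."""
    return dict(zip(names, values))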
def leaf_object_factor_features(self, factor, ggg):
    assert factor.has_children("f")
    assert not factor.has_children("r")
    assert not factor.has_children("l")
    assert not factor.has_children("l2")
    f_words = convert_words(
        ggg.evidence_for_node(factor.nodes_for_link("f")[0]))

    result_dict = {}
    for fi, f_grounding in enumerate(
            ggg.evidence_for_node(factor.nodes_for_link("top")[0])):
        assert isinstance(f_grounding, (PhysicalObject, Place)), \
            (f_grounding, str(ggg.factor_to_esdc(factor)))
        fdict = add_prefix("f_%d_" % fi,
                           self.np_features(f_words, f_grounding))
        result_dict = merge(result_dict, fdict)
    return result_dict
def compute_features(self, ggg, factors=None, state_sequence=None):
    """
    Computes features for the unmodified annotation.  If factors isn't
    specified, does all factors.
    """
    factor_to_fnames = {}
    factor_to_fvalues = {}
    if factors is None:
        factors = [ggg.factor_from_id(fid) for fid in ggg.factor_ids]

    for factor in factors:
        fdict = self.factor_features(factor, ggg)
        fdict = add_prefix(
            factor.nodes_for_link("top")[0].type.split("_")[1] + "_", fdict)
        factor_to_fnames[factor.id] = fdict.keys()
        factor_to_fvalues[factor.id] = fdict.values()

    return factor_to_fvalues, factor_to_fnames
def compute_features(self, ggg, factors=None, state_sequence=None):
    """
    Computes features for the unmodified annotation.  If factors isn't
    specified, does all factors.
    """
    factor_to_fnames = {}
    factor_to_fvalues = {}
    if factors is None:
        factors = [ggg.factor_from_id(fid) for fid in ggg.factor_ids]

    for factor in factors:
        fdict = self.factor_features(factor, ggg)
        fdict = add_prefix(
            factor.nodes_for_link("top")[0].type.split("_")[1] + "_", fdict)
        fdict["prior"] = 1.0
        factor_to_fnames[factor.id] = fdict.keys()
        factor_to_fvalues[factor.id] = fdict.values()

    return factor_to_fvalues, factor_to_fnames
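# A minimal usage sketch for compute_features.  How the feature extractor and
# the grounding graph (ggg) are constructed is not shown in this section and
# is left as an assumption; only the return convention (parallel dicts of
# feature values and feature names keyed by factor id) comes from the code
# above.
def features_by_factor(extractor, ggg):
    """Rebuild a name -> value dict per factor from the two parallel dicts."""
    factor_to_fvalues, factor_to_fnames = extractor.compute_features(ggg)
    return dict(
        (factor_id, dict(zip(factor_to_fnames[factor_id],
                             factor_to_fvalues[factor_id])))
        for factor_id in factor_to_fvalues)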
def make_rl_features_dict(self):
    true_features_dict = intern_dict(add_prefix("true_", self.features))
    false_features_dict = intern_dict(add_prefix("false_", self.features))
    return true_features_dict, false_features_dict
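# intern_dict is also not defined in this section.  A plausible sketch (an
# assumption, not the project's actual helper): intern feature-name strings
# with Python 2's built-in intern() so that names repeated across many
# factors share storage.
def intern_dict(fdict):
    """Return a copy of fdict with interned string keys."""
    return dict((intern(str(name)), value)
                for name, value in fdict.iteritems())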